code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M)
---|---|---|
#!/usr/bin/env python
"""Check Identity class"""
from matplotlib import pyplot as plt
import numpy as N
from load import ROOT as R
from matplotlib.ticker import MaxNLocator
from gna import constructors as C
from gna.bindings import DataType
from gna.unittest import *
from gna import context
#
# Create the matrix
#
def test_io(opts):
print('Test inputs/outputs (Identity)')
mat = N.arange(12, dtype='d').reshape(3, 4)
print( 'Input matrix (numpy)' )
print( mat )
print()
#
# Create transformations
#
points = C.Points(mat)
identity = R.Identity()
identity.identity.switchFunction('identity_gpuargs_h')
points.points.points >> identity.identity.source
identity.print()
res = identity.identity.target.data()
dt = identity.identity.target.datatype()
assert N.allclose(mat, res), "C++ and Python results don't match"
#
# Dump
#
print( 'Eigen dump (C++)' )
identity.dump()
print()
print( 'Result (C++ Data to numpy)' )
print( res )
print()
print( 'Datatype:', str(dt) )
def gpuargs_make(nsname, mat1, mat2):
from gna.env import env
ns = env.globalns(nsname)
ns.reqparameter('par1', central=1.0, fixed=True, label='Dummy parameter 1')
ns.reqparameter('par2', central=1.5, fixed=True, label='Dummy parameter 2')
ns.reqparameter('par3', central=1.01e5, fixed=True, label='Dummy parameter 3')
ns.printparameters(labels=True)
points1, points2 = C.Points(mat1), C.Points(mat2)
with ns:
dummy = C.Dummy(4, "dummy", ['par1', 'par2', 'par3'])
return dummy, points1, points2, ns
@floatcopy(globals(), addname=True)
def test_vars_01_local(opts, function_name):
print('Test inputs/outputs/variables (Dummy)')
mat1 = N.arange(12, dtype='d').reshape(3, 4)
mat2 = N.arange(15, dtype='d').reshape(5, 3)
dummy, points1, points2, ns = gpuargs_make(function_name, mat1, mat2)
dummy.dummy.switchFunction('dummy_gpuargs_h_local')
dummy.add_input(points1, 'input1')
dummy.add_input(points2, 'input2')
dummy.add_output('out1')
dummy.add_output('out2')
dummy.print()
res1 = dummy.dummy.out1.data()
res2 = dummy.dummy.out2.data()
dt1 = dummy.dummy.out1.datatype()
dt2 = dummy.dummy.out2.datatype()
assert N.allclose(res1, 0.0), "C++ and Python results don't match"
assert N.allclose(res2, 1.0), "C++ and Python results don't match"
print( 'Result (C++ Data to numpy)' )
print( res1 )
print( res2 )
print()
print( 'Datatype:', str(dt1) )
print( 'Datatype:', str(dt2) )
print('Change 3d variable')
ns['par3'].set(-1.0)
res1 = dummy.dummy.out1.data()
@floatcopy(globals(), addname=True)
def test_vars_02(opts, function_name):
print('Test inputs/outputs/variables (Dummy)')
mat1 = N.arange(12, dtype='d').reshape(3, 4)
mat2 = N.arange(15, dtype='d').reshape(5, 3)
with context.manager(100) as manager:
dummy, points1, points2, ns = gpuargs_make(function_name, mat1, mat2)
manager.setVariables(C.stdvector([par.getVariable() for (name, par) in ns.walknames()]))
dummy.dummy.switchFunction('dummy_gpuargs_h')
dummy.add_input(points1, 'input1')
dummy.add_input(points2, 'input2')
dummy.add_output('out1')
dummy.add_output('out2')
dummy.print()
res1 = dummy.dummy.out1.data()
res2 = dummy.dummy.out2.data()
dt1 = dummy.dummy.out1.datatype()
dt2 = dummy.dummy.out2.datatype()
assert N.allclose(res1, 0.0), "C++ and Python results don't match"
assert N.allclose(res2, 1.0), "C++ and Python results don't match"
print( 'Result (C++ Data to numpy)' )
print( res1 )
print( res2 )
print()
print( 'Datatype:', str(dt1) )
print( 'Datatype:', str(dt2) )
print('Change 3d variable')
ns['par3'].set(-1.0)
res1 = dummy.dummy.out1.data()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
# parser.add_argument('-g', '--gpuargs', action='store_true')
run_unittests(globals(), parser.parse_args())
|
[
"argparse.ArgumentParser",
"numpy.allclose",
"gna.constructors.Points",
"gna.env.env.globalns",
"load.ROOT.Identity",
"numpy.arange",
"gna.constructors.Dummy",
"gna.context.manager"
] |
[((551, 564), 'gna.constructors.Points', 'C.Points', (['mat'], {}), '(mat)\n', (559, 564), True, 'from gna import constructors as C\n'), ((580, 592), 'load.ROOT.Identity', 'R.Identity', ([], {}), '()\n', (590, 592), True, 'from load import ROOT as R\n'), ((829, 849), 'numpy.allclose', 'N.allclose', (['mat', 'res'], {}), '(mat, res)\n', (839, 849), True, 'import numpy as N\n'), ((1161, 1181), 'gna.env.env.globalns', 'env.globalns', (['nsname'], {}), '(nsname)\n', (1173, 1181), False, 'from gna.env import env\n'), ((2316, 2337), 'numpy.allclose', 'N.allclose', (['res1', '(0.0)'], {}), '(res1, 0.0)\n', (2326, 2337), True, 'import numpy as N\n'), ((2389, 2410), 'numpy.allclose', 'N.allclose', (['res2', '(1.0)'], {}), '(res2, 1.0)\n', (2399, 2410), True, 'import numpy as N\n'), ((3516, 3537), 'numpy.allclose', 'N.allclose', (['res1', '(0.0)'], {}), '(res1, 0.0)\n', (3526, 3537), True, 'import numpy as N\n'), ((3589, 3610), 'numpy.allclose', 'N.allclose', (['res2', '(1.0)'], {}), '(res2, 1.0)\n', (3599, 3610), True, 'import numpy as N\n'), ((3987, 4003), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (4001, 4003), False, 'from argparse import ArgumentParser\n'), ((1491, 1505), 'gna.constructors.Points', 'C.Points', (['mat1'], {}), '(mat1)\n', (1499, 1505), True, 'from gna import constructors as C\n'), ((1507, 1521), 'gna.constructors.Points', 'C.Points', (['mat2'], {}), '(mat2)\n', (1515, 1521), True, 'from gna import constructors as C\n'), ((1551, 1596), 'gna.constructors.Dummy', 'C.Dummy', (['(4)', '"""dummy"""', "['par1', 'par2', 'par3']"], {}), "(4, 'dummy', ['par1', 'par2', 'par3'])\n", (1558, 1596), True, 'from gna import constructors as C\n'), ((2941, 2961), 'gna.context.manager', 'context.manager', (['(100)'], {}), '(100)\n', (2956, 2961), False, 'from gna import context\n'), ((392, 415), 'numpy.arange', 'N.arange', (['(12)'], {'dtype': '"""d"""'}), "(12, dtype='d')\n", (400, 415), True, 'import numpy as N\n'), ((1781, 1804), 'numpy.arange', 'N.arange', (['(12)'], {'dtype': '"""d"""'}), "(12, dtype='d')\n", (1789, 1804), True, 'import numpy as N\n'), ((1830, 1853), 'numpy.arange', 'N.arange', (['(15)'], {'dtype': '"""d"""'}), "(15, dtype='d')\n", (1838, 1853), True, 'import numpy as N\n'), ((2844, 2867), 'numpy.arange', 'N.arange', (['(12)'], {'dtype': '"""d"""'}), "(12, dtype='d')\n", (2852, 2867), True, 'import numpy as N\n'), ((2893, 2916), 'numpy.arange', 'N.arange', (['(15)'], {'dtype': '"""d"""'}), "(15, dtype='d')\n", (2901, 2916), True, 'import numpy as N\n')]
|
"""
Combines predictions based on votes by a set of answer files.
"""
import re
from os import listdir
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from sklearn.metrics import accuracy_score
from .constants import LABEL
from .preprocessing import get_train_dev_test
def vote(y_true, y_pred, conf):
""" confidence vote """
conf_argmax = np.argmax(conf, axis=0)
conf_vote = y_pred.T[np.arange(len(y_pred.T)), conf_argmax]
acc_conf = accuracy_score(y_true=y_true, y_pred=conf_vote)
""" majority vote """
pred = np.mean(y_pred, axis=0)
# in case of a tie use the predictions from the confidence vote
tie = np.isclose(pred, 0.5)
pred[tie] = conf_vote[tie]
pred = (pred >= 0.5)
acc_major = accuracy_score(y_true=y_true, y_pred=pred)
return acc_conf, acc_major
def plot_axis(df, ax, legend_pos='orig1'):
df.plot(x=np.arange(1, len(df) + 1), ax=ax, use_index=False, xlim=[-25, len(df) + 25], ylim=[0.5, 0.775],
style=['-', '-', '-', '-'], lw=1.5,
yticks=[.5, .525, .55, .575, .6, .625, .65, .675, .7, .725, .75, .775])
ax.lines[1].set_linewidth(0.9) # 1.15
ax.lines[3].set_linewidth(0.9) # 1.15
col1 = ax.lines[0].get_color()
col2 = ax.lines[2].get_color()
ax.lines[1].set_color(tuple(1.3*c for c in col1)) # 1.1*
ax.lines[3].set_color(tuple(1.3*c for c in col2)) # 1.1*
ax.grid(b=True, which='major', linestyle='-', linewidth=0.85)
ax.grid(b=True, which='minor', linestyle=':', linewidth=0.75)
if legend_pos == 'orig1':
ax.legend(loc='center', bbox_to_anchor=(0.5, 0.365))
elif legend_pos == 'orig2':
ax.legend().remove()
elif legend_pos == 'alt1':
ax.legend(loc='center', bbox_to_anchor=(0.5, 0.14))
elif legend_pos == 'alt2':
ax.legend(loc='lower left', bbox_to_anchor=(0.02, 0))
else:
ax.legend()
ax.set_xlabel('number of models', weight='bold')
ax.set_ylabel('accuracy', weight='bold')
# majorLocator_x = MultipleLocator(500)
majorLocator_y = MultipleLocator(.05)
majorFormatter_y = FormatStrFormatter('%.2f')
minorLocator_y = MultipleLocator(.025)
# ax.xaxis.set_major_locator(majorLocator_x)
ax.yaxis.set_major_locator(majorLocator_y)
ax.yaxis.set_major_formatter(majorFormatter_y)
ax.yaxis.set_minor_locator(minorLocator_y)
def plot_figure(dfs: list, name, show=True, save=False, legend_pos: list=None, align='h'):
length = len(dfs)
if legend_pos is None:
legend_pos = [''] * length
sns.set(color_codes=True, font_scale=1)
sns.set_style("whitegrid", {'legend.frameon': True})
sns.set_palette("deep")
if align == 'h':
fig, ax = plt.subplots(ncols=length, figsize=(5*length, 5), sharey=True)
else:
fig, ax = plt.subplots(nrows=length, figsize=(5, 5*length))
if length > 1:
for i, df in enumerate(dfs):
plot_axis(df, ax[i], legend_pos=legend_pos[i])
ax[0].set_title('original dataset')
ax[1].set_title('alternative (randomized) data split')
else:
plot_axis(dfs[0], ax, legend_pos=legend_pos[0])
fig.tight_layout()
if show:
plt.show()
if save:
fig.savefig(name + '.pdf', bbox_inches='tight')
plt.close('all')
def build_df(files, y_true):
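    # One row per model: its accuracy, binary (>= 0.5) predictions, raw probabilities and
    # |p - 0.5| confidences; the series name is a fixed-width slice of the probability file name.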
probs_ser_lst = [pd.Series(np.load(f).flatten(), name=f[-48:-4].replace(' ', '0')) for f in files]
probs_df = pd.DataFrame(probs_ser_lst)
preds_df = probs_df.applymap(lambda x: x >= 0.5)
confs_df = probs_df.apply(lambda x: np.abs(x - 0.5))
accs_ser = preds_df.apply(lambda row: accuracy_score(y_true=y_true, y_pred=row), axis=1)
df = pd.concat([accs_ser, preds_df, probs_df, confs_df], axis=1,
keys=['acc', 'pred', 'prob', 'conf'])
return df
def main():
names = {
# 'tensorL05con2redo2': '/media/andreas/Linux_Data/hpc-semeval/tensorL05con2redo2/out/',
'alt_split_odd': '/media/andreas/Linux_Data/hpc-semeval/alt_split_odd_both/',
}
_, df_dev_data, df_tst_data = get_train_dev_test(options=dict(alt_split=True))
dev_true = df_dev_data[LABEL].values.flatten()
tst_true = df_tst_data[LABEL].values.flatten()
for k, d in names.items():
directory = listdir(d)
dev_files = [d + f for f in directory if re.match(r'^probabilities-' + 'dev', f)]
tst_files = [d + f for f in directory if re.match(r'^probabilities-' + 'tst', f)]
df_dev = build_df(dev_files, dev_true)
df_tst = build_df(tst_files, tst_true)
df = pd.concat([df_dev, df_tst], axis=1, keys=['dev', 'tst'])
df = df.sort_values(('dev', 'acc', 0), ascending=False)
dev_acc_filter = 0.
if dev_acc_filter:
row_filter = df['dev', 'acc', 0] >= dev_acc_filter
df = df[row_filter.values]
print('filtered for dev accuracies >=', dev_acc_filter)
dev_mean = np.mean(df['dev', 'acc', 0].values)
tst_mean = np.mean(df['tst', 'acc', 0].values)
dev_preds_np = df['dev', 'pred'].values
dev_confs_np = df['dev', 'conf'].values
tst_preds_np = df['tst', 'pred'].values
tst_confs_np = df['tst', 'conf'].values
# print more stats
if False:
pd.set_option('display.float_format', lambda x: '%.6f' % x)
print('dev:\n', pd.Series(dev_mean).describe())
print('test:\n', pd.Series(tst_mean).describe())
dev_conf_scores = list()
tst_conf_scores = list()
dev_major_scores = list()
tst_major_scores = list()
for i in range(1, len(df)+1):
acc_conf_dev, acc_major_dev = vote(dev_true, y_pred=dev_preds_np[:i], conf=dev_confs_np[:i])
acc_conf_tst, acc_major_tst = vote(tst_true, y_pred=tst_preds_np[:i], conf=tst_confs_np[:i])
dev_conf_scores.append(acc_conf_dev)
tst_conf_scores.append(acc_conf_tst)
dev_major_scores.append(acc_major_dev)
tst_major_scores.append(acc_major_tst)
mtrx = {
# 'dev: confidence vote': dev_conf_scores,
# 'test: confidence vote': tst_conf_scores,
'test: mean accuracy': tst_mean,
'dev: sorted accuracy': df['dev', 'acc', 0],
'dev: majority vote': dev_major_scores,
'test: majority vote': tst_major_scores,
# 'dev: mean accuracy': dev_mean,
}
df = pd.DataFrame(mtrx)
plot_figure([df], k + 'all_')
# df.to_csv('../out/alt-split_2560.csv', sep='\t')
if __name__ == '__main__':
# TODO: clean up code or add argument flags
# main()
# df1 = pd.read_csv('../out/orig-split.csv', sep='\t')
df2 = pd.read_csv('../out/alt-split_2560.csv', sep='\t')
# plot_figure([df1], '../out/orig-split_2', save=True, legend_pos=['orig1'])
plot_figure([df2], '../out/alt-split_2560', save=True, legend_pos=['alt1'])
# plot_figure([df1, df2], '../out/ensemble_h_2', save=True, legend_pos=['orig2', 'alt2'], align='h')
# plot_figure([df1, df2], '../out/ensemble_v_2', save=True, legend_pos=['orig2', 'alt2'], align='v')
|
[
"numpy.load",
"numpy.abs",
"numpy.argmax",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"numpy.isclose",
"numpy.mean",
"pandas.set_option",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.subplots",
"seaborn.set",
"pandas.concat",
"seaborn.set_style",
"matplotlib.pyplot.show",
"re.match",
"pandas.Series",
"seaborn.set_palette",
"os.listdir"
] |
[((463, 486), 'numpy.argmax', 'np.argmax', (['conf'], {'axis': '(0)'}), '(conf, axis=0)\n', (472, 486), True, 'import numpy as np\n'), ((566, 613), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_true', 'y_pred': 'conf_vote'}), '(y_true=y_true, y_pred=conf_vote)\n', (580, 613), False, 'from sklearn.metrics import accuracy_score\n'), ((652, 675), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (659, 675), True, 'import numpy as np\n'), ((754, 775), 'numpy.isclose', 'np.isclose', (['pred', '(0.5)'], {}), '(pred, 0.5)\n', (764, 775), True, 'import numpy as np\n'), ((848, 890), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_true', 'y_pred': 'pred'}), '(y_true=y_true, y_pred=pred)\n', (862, 890), False, 'from sklearn.metrics import accuracy_score\n'), ((2155, 2176), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.05)'], {}), '(0.05)\n', (2170, 2176), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((2199, 2225), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (2217, 2225), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((2247, 2269), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.025)'], {}), '(0.025)\n', (2262, 2269), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((2646, 2685), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)', 'font_scale': '(1)'}), '(color_codes=True, font_scale=1)\n', (2653, 2685), True, 'import seaborn as sns\n'), ((2690, 2742), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'legend.frameon': True}"], {}), "('whitegrid', {'legend.frameon': True})\n", (2703, 2742), True, 'import seaborn as sns\n'), ((2747, 2770), 'seaborn.set_palette', 'sns.set_palette', (['"""deep"""'], {}), "('deep')\n", (2762, 2770), True, 'import seaborn as sns\n'), ((3370, 3386), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3379, 3386), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3563), 'pandas.DataFrame', 'pd.DataFrame', (['probs_ser_lst'], {}), '(probs_ser_lst)\n', (3548, 3563), True, 'import pandas as pd\n'), ((3776, 3877), 'pandas.concat', 'pd.concat', (['[accs_ser, preds_df, probs_df, confs_df]'], {'axis': '(1)', 'keys': "['acc', 'pred', 'prob', 'conf']"}), "([accs_ser, preds_df, probs_df, confs_df], axis=1, keys=['acc',\n 'pred', 'prob', 'conf'])\n", (3785, 3877), True, 'import pandas as pd\n'), ((6818, 6868), 'pandas.read_csv', 'pd.read_csv', (['"""../out/alt-split_2560.csv"""'], {'sep': '"""\t"""'}), "('../out/alt-split_2560.csv', sep='\\t')\n", (6829, 6868), True, 'import pandas as pd\n'), ((2811, 2875), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'length', 'figsize': '(5 * length, 5)', 'sharey': '(True)'}), '(ncols=length, figsize=(5 * length, 5), sharey=True)\n', (2823, 2875), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2953), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'length', 'figsize': '(5, 5 * length)'}), '(nrows=length, figsize=(5, 5 * length))\n', (2914, 2953), True, 'import matplotlib.pyplot as plt\n'), ((3286, 3296), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3294, 3296), True, 'import matplotlib.pyplot as plt\n'), ((4363, 4373), 'os.listdir', 'listdir', (['d'], {}), '(d)\n', (4370, 4373), False, 'from os import listdir\n'), ((4663, 4719), 'pandas.concat', 'pd.concat', (['[df_dev, df_tst]'], {'axis': '(1)', 'keys': 
"['dev', 'tst']"}), "([df_dev, df_tst], axis=1, keys=['dev', 'tst'])\n", (4672, 4719), True, 'import pandas as pd\n'), ((5030, 5065), 'numpy.mean', 'np.mean', (["df['dev', 'acc', 0].values"], {}), "(df['dev', 'acc', 0].values)\n", (5037, 5065), True, 'import numpy as np\n'), ((5085, 5120), 'numpy.mean', 'np.mean', (["df['tst', 'acc', 0].values"], {}), "(df['tst', 'acc', 0].values)\n", (5092, 5120), True, 'import numpy as np\n'), ((6543, 6561), 'pandas.DataFrame', 'pd.DataFrame', (['mtrx'], {}), '(mtrx)\n', (6555, 6561), True, 'import pandas as pd\n'), ((3657, 3672), 'numpy.abs', 'np.abs', (['(x - 0.5)'], {}), '(x - 0.5)\n', (3663, 3672), True, 'import numpy as np\n'), ((3716, 3757), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'y_true', 'y_pred': 'row'}), '(y_true=y_true, y_pred=row)\n', (3730, 3757), False, 'from sklearn.metrics import accuracy_score\n'), ((5372, 5431), 'pandas.set_option', 'pd.set_option', (['"""display.float_format"""', "(lambda x: '%.6f' % x)"], {}), "('display.float_format', lambda x: '%.6f' % x)\n", (5385, 5431), True, 'import pandas as pd\n'), ((4423, 4461), 're.match', 're.match', (["('^probabilities-' + 'dev')", 'f'], {}), "('^probabilities-' + 'dev', f)\n", (4431, 4461), False, 'import re\n'), ((4513, 4551), 're.match', 're.match', (["('^probabilities-' + 'tst')", 'f'], {}), "('^probabilities-' + 'tst', f)\n", (4521, 4551), False, 'import re\n'), ((3449, 3459), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (3456, 3459), True, 'import numpy as np\n'), ((5460, 5479), 'pandas.Series', 'pd.Series', (['dev_mean'], {}), '(dev_mean)\n', (5469, 5479), True, 'import pandas as pd\n'), ((5521, 5540), 'pandas.Series', 'pd.Series', (['tst_mean'], {}), '(tst_mean)\n', (5530, 5540), True, 'import pandas as pd\n')]
|
# Databricks notebook source
import numpy as np
import pandas as pd
from scipy import stats
# COMMAND ----------
# Simulate original ice cream dataset
df = pd.DataFrame()
df['temperature'] = np.random.uniform(60, 80, 1000)
df['number_of_cones_sold'] = np.random.uniform(0, 20, 1000)
flavors = ["Vanilla"] * 300 + ['Chocolate'] * 200 + ['Cookie Dough'] * 300 + ['Coffee'] * 200
np.random.shuffle(flavors)
df['most_popular_ice_cream_flavor'] = flavors
df['number_bowls_sold'] = np.random.uniform(0, 20, 1000)
sorbet = ["Raspberry "] * 250 + ['Lemon'] * 250 + ['Lime'] * 250 + ['Orange'] * 250
np.random.shuffle(sorbet)
df['most_popular_sorbet_flavor'] = sorbet
df['total_store_sales'] = np.random.normal(100, 10, 1000)
df['total_sales_predicted'] = np.random.normal(100, 10, 1000)
# Simulate new ice cream dataset
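# (same schema as the original df, but with Fahrenheit converted to Celsius, a shifted flavor mix,
# larger bowl sales, missing sorbet values, higher actual and lower predicted totals, to mimic data drift)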
df2 = pd.DataFrame()
df2['temperature'] = (df['temperature'] - 32) * (5/9) # F -> C
df2['number_of_cones_sold'] = np.random.uniform(0, 20, 1000) #stay same
flavors = ["Vanilla"] * 100 + ['Chocolate'] * 300 + ['Cookie Dough'] * 400 + ['Coffee'] * 200
np.random.shuffle(flavors)
df2['most_popular_ice_cream_flavor'] = flavors
df2['number_bowls_sold'] = np.random.uniform(10, 30, 1000)
sorbet = ["Raspberry "] * 200 + ['Lemon'] * 200 + ['Lime'] * 200 + ['Orange'] * 200 + [None] * 200
np.random.shuffle(sorbet)
df2['most_popular_sorbet_flavor'] = sorbet
df2['total_store_sales'] = np.random.normal(150, 10, 1000) # increased
df2['total_sales_predicted'] = np.random.normal(80, 10, 1000) # decreased
|
[
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.random.normal",
"numpy.random.shuffle"
] |
[((160, 174), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (172, 174), True, 'import pandas as pd\n'), ((195, 226), 'numpy.random.uniform', 'np.random.uniform', (['(60)', '(80)', '(1000)'], {}), '(60, 80, 1000)\n', (212, 226), True, 'import numpy as np\n'), ((256, 286), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(1000)'], {}), '(0, 20, 1000)\n', (273, 286), True, 'import numpy as np\n'), ((382, 408), 'numpy.random.shuffle', 'np.random.shuffle', (['flavors'], {}), '(flavors)\n', (399, 408), True, 'import numpy as np\n'), ((481, 511), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(1000)'], {}), '(0, 20, 1000)\n', (498, 511), True, 'import numpy as np\n'), ((597, 622), 'numpy.random.shuffle', 'np.random.shuffle', (['sorbet'], {}), '(sorbet)\n', (614, 622), True, 'import numpy as np\n'), ((691, 722), 'numpy.random.normal', 'np.random.normal', (['(100)', '(10)', '(1000)'], {}), '(100, 10, 1000)\n', (707, 722), True, 'import numpy as np\n'), ((753, 784), 'numpy.random.normal', 'np.random.normal', (['(100)', '(10)', '(1000)'], {}), '(100, 10, 1000)\n', (769, 784), True, 'import numpy as np\n'), ((825, 839), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (837, 839), True, 'import pandas as pd\n'), ((933, 963), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(20)', '(1000)'], {}), '(0, 20, 1000)\n', (950, 963), True, 'import numpy as np\n'), ((1069, 1095), 'numpy.random.shuffle', 'np.random.shuffle', (['flavors'], {}), '(flavors)\n', (1086, 1095), True, 'import numpy as np\n'), ((1170, 1201), 'numpy.random.uniform', 'np.random.uniform', (['(10)', '(30)', '(1000)'], {}), '(10, 30, 1000)\n', (1187, 1201), True, 'import numpy as np\n'), ((1301, 1326), 'numpy.random.shuffle', 'np.random.shuffle', (['sorbet'], {}), '(sorbet)\n', (1318, 1326), True, 'import numpy as np\n'), ((1397, 1428), 'numpy.random.normal', 'np.random.normal', (['(150)', '(10)', '(1000)'], {}), '(150, 10, 1000)\n', (1413, 1428), True, 'import numpy as np\n'), ((1472, 1502), 'numpy.random.normal', 'np.random.normal', (['(80)', '(10)', '(1000)'], {}), '(80, 10, 1000)\n', (1488, 1502), True, 'import numpy as np\n')]
|
from __future__ import division
__author__ = '<NAME>'
import numpy as np
from scipy.stats import norm
import string
import bottleneck as bn
import math
# paa tranformation, window = incoming data, string_length = length of outcoming data
class sax():
def process(self, window, output_length, sax_vocab):
sax = to_sax(to_paa(normalize(window),output_length),sax_vocab)
#return vocabToCoordinates(len(window),output_length,sax[0],4)
return vocabToCoordinates(output_length,output_length,sax[0],sax_vocab)
def getConfigurationParams(self):
return {"output_length":"100","sax_vocab":"4"}
def normalize(data):
data2 = np.array(data)
data2 = data2 - (np.mean(data))
data2 = data2 /data2.std()
return data2
def to_paa(data,string_length):
data = np.array_split(data, string_length)
return [np.mean(section) for section in data]
def gen_breakpoints(symbol_count):
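    # standard-normal quantiles that split N(0, 1) into symbol_count equiprobable regions;
    # np.Inf is appended as the final, open-ended breakpoint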
breakpoints = norm.ppf(np.linspace(1. / symbol_count, 1 - 1. / symbol_count, symbol_count - 1))
breakpoints = np.concatenate((breakpoints, np.array([np.Inf])))
return breakpoints
def to_sax(data,symbol_count):
breakpoints = gen_breakpoints(symbol_count)
locations = [np.where(breakpoints > section_mean)[0][0] for section_mean in data]
return [''.join([string.ascii_letters[ind] for ind in locations])]
def vocabToCoordinates(time_window, phrase_length, phrases, symbol_count):
breakpoints = gen_breakpoints(symbol_count)
newCutlines = breakpoints.tolist()
max_value = breakpoints[symbol_count - 2] + ((breakpoints[symbol_count - 2] - breakpoints[symbol_count - 3]) * 2)
# HERE IS SOMETHING WRONG // ONLY IN VISUALISATION
min_value = breakpoints[0] - ((breakpoints[1] - breakpoints[0]) * 2)
infi = newCutlines.pop()
newCutlines.append(max_value)
newCutlines.append(infi)
newCutlines.insert(0, min_value)
#newCutlines.insert(0,-np.Inf)
co1 = time_window / float(phrase_length)
g = 0
retList = []
for s in phrases:
if s == "#":
for i in range(int(co1)):
retList.append(np.NaN)
g+=1
else:
for i in range(int(co1)):
retList.append(newCutlines[ord(s) - 97])
g+=1
#print co1,time_window,phrase_length,g,len(phrases)
return retList
def convertSaxBackToContinious(string_length, symbol_count, data):
points, phrases = norm(data,string_length, symbol_count)
retList = vocabToCoordinates(data, string_length, phrases, points, symbol_count)
#print phrases[0]
return retList
def saxDistance(w1, w2,original_length,symbol_count):
if len(w1) != len(w2):
raise Exception("not equal string length")
string_length=len(w1)
dist = 0
for (l, k) in zip(w1, w2):
dist += saxDistanceLetter(l, k,symbol_count)
result = np.sqrt(dist) * np.sqrt(np.divide(original_length, string_length))
return result
def saxDistanceLetter(w1, w2, symbol_count):
n1 = ord(w1) - 97
n2 = ord(w2) - 97
lookupTable= createLookup(symbol_count,gen_breakpoints(symbol_count))
if n1 > symbol_count:
raise Exception(" letter not in Dictionary " + w1)
if n2 > symbol_count:
raise Exception(" letter not in Dictionary " + w2)
return lookupTable[n1][n2]
def createLookup(symbol_count, breakpoints):
return make_matrix(symbol_count, symbol_count, breakpoints)
def make_list(row, size, breakpoints):
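    # one row of the symbol-to-symbol distance lookup table: equal or adjacent symbols get
    # distance 0, otherwise the gap between their breakpoints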
mylist = []
for i in range(size):
i = i + 1
if abs(row - i) <= 1:
mylist.append(0)
else:
v = breakpoints[(max(row, i) - 2)] - breakpoints[min(row, i) - 1]
mylist.append(v)
return mylist
def make_matrix(rows, cols, breakpoints):
matrix = []
for i in range(rows):
i = i + 1
matrix.append(make_list(i, cols, breakpoints))
return matrix
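# Example usage (illustrative sketch): discretize a noisy sine wave into a
# 100-symbol SAX word over a 4-letter alphabet and map it back to
# breakpoint-level coordinates; the input length and alphabet size are arbitrary.
if __name__ == "__main__":
    window = np.sin(np.linspace(0, 4 * np.pi, 400)) + 0.1 * np.random.randn(400)
    coords = sax().process(window, output_length=100, sax_vocab=4)
    print(len(coords), coords[:5])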
|
[
"numpy.divide",
"scipy.stats.norm",
"numpy.mean",
"numpy.array",
"numpy.where",
"numpy.linspace",
"numpy.array_split",
"numpy.sqrt"
] |
[((661, 675), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (669, 675), True, 'import numpy as np\n'), ((804, 839), 'numpy.array_split', 'np.array_split', (['data', 'string_length'], {}), '(data, string_length)\n', (818, 839), True, 'import numpy as np\n'), ((2446, 2485), 'scipy.stats.norm', 'norm', (['data', 'string_length', 'symbol_count'], {}), '(data, string_length, symbol_count)\n', (2450, 2485), False, 'from scipy.stats import norm\n'), ((697, 710), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (704, 710), True, 'import numpy as np\n'), ((853, 869), 'numpy.mean', 'np.mean', (['section'], {}), '(section)\n', (860, 869), True, 'import numpy as np\n'), ((955, 1028), 'numpy.linspace', 'np.linspace', (['(1.0 / symbol_count)', '(1 - 1.0 / symbol_count)', '(symbol_count - 1)'], {}), '(1.0 / symbol_count, 1 - 1.0 / symbol_count, symbol_count - 1)\n', (966, 1028), True, 'import numpy as np\n'), ((2880, 2893), 'numpy.sqrt', 'np.sqrt', (['dist'], {}), '(dist)\n', (2887, 2893), True, 'import numpy as np\n'), ((1075, 1093), 'numpy.array', 'np.array', (['[np.Inf]'], {}), '([np.Inf])\n', (1083, 1093), True, 'import numpy as np\n'), ((2904, 2945), 'numpy.divide', 'np.divide', (['original_length', 'string_length'], {}), '(original_length, string_length)\n', (2913, 2945), True, 'import numpy as np\n'), ((1218, 1254), 'numpy.where', 'np.where', (['(breakpoints > section_mean)'], {}), '(breakpoints > section_mean)\n', (1226, 1254), True, 'import numpy as np\n')]
|
import numpy as np
from astropy.table import Table
import glob
models = ['MIST_v1.2_feh_m4.00_afe_p0.0_vvcrit0.0_EEPS',
'MIST_v1.2_feh_m4.00_afe_p0.0_vvcrit0.4_EEPS',
'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.0_EEPS',
'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.4_EEPS',
'MIST_v1.2_feh_p0.50_afe_p0.0_vvcrit0.0_EEPS',
'MIST_v1.2_feh_p0.50_afe_p0.0_vvcrit0.4_EEPS']
for model in models:
print(model)
initial_mass = []
ms_age = []
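    # for every EEP track in this model directory, record the initial mass (row 0, column 1)
    # and the column-0 value of the last row, used as the main-sequence age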
for file in list(glob.glob(model+'/*.txt')):
table = np.loadtxt(file)
n = len(table[:,0])
initial_mass.append(table[0,1])
ms_age.append(table[n-1,0])
summary = Table()
summary['initial_mass'] = initial_mass
summary['ms_age'] = ms_age
summary.write(model+'_sum.csv')
|
[
"astropy.table.Table",
"numpy.loadtxt",
"glob.glob"
] |
[((820, 827), 'astropy.table.Table', 'Table', ([], {}), '()\n', (825, 827), False, 'from astropy.table import Table\n'), ((640, 667), 'glob.glob', 'glob.glob', (["(model + '/*.txt')"], {}), "(model + '/*.txt')\n", (649, 667), False, 'import glob\n'), ((684, 700), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (694, 700), True, 'import numpy as np\n')]
|
import json
import csv
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
import os
import shutil
def get_dtype_groups(data_types):
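    # split a {dtype: column names} mapping into float, object and integer column groups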
float_unis = []
object_unis = []
int_unis = []
for i, v in data_types.items():
if i == np.dtype('float64') or i == np.dtype('float32'):
float_unis.append(v)
if i == np.dtype('O'):
object_unis.append(v)
if i == np.dtype('int64') or i == np.dtype('int32') or i == np.dtype('int16'):
int_unis.append(v)
return float_unis, object_unis, int_unis
def findsubsets(s, n):
return list(itertools.permutations(s, n))
def plot_3d(data, headers, data_types, filename):
dirpath = 'saved_plots/{}_Misc_Plots'.format(filename)
sub_folders = ['scatter_3dPlots']
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
os.makedirs(dirpath)
for i in sub_folders:
if os.path.exists(i) and os.path.isdir(i):
shutil.rmtree(i)
os.makedirs(dirpath+'/'+i)
fig = plt.figure()
ax = plt.axes(projection='3d')
float_unis, object_unis, int_unis = get_dtype_groups(data_types)
palette = itertools.cycle(sns.color_palette())
if len(float_unis) > 0:
if len(int_unis) > 0:
cols = float_unis[0]+int_unis[0]
if len(cols) > 4:
cols = cols[:4]
pairs_3d = findsubsets(cols, 3)
else:
cols = float_unis[0]
if len(cols) > 4:
cols = cols[:4]
pairs_3d = findsubsets(cols, 3)
try:
for j in pairs_3d:
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(data[j[0]], data[j[1]],
data[j[2]], color=next(palette))
ax.legend()
x = j[0]
ax.set_xlabel(x, fontsize=20)
ax.set_ylabel(j[1], fontsize=20)
ax.set_zlabel(j[2], fontsize=20, rotation=0)
fig.set_size_inches(18.5, 10.5)
plt.savefig(
'./{}/scatter_3dPlots/{}_{}_{}_set.png'.format(dirpath, j[0], j[1], j[2]))
except Exception as e:
print(e)
print('error occured while plotting {} columns.'.format(j))
def plot_groupby(data, headers, data_types, filename):
dirpath = 'saved_plots/{}_Misc_Plots'.format(filename)
float_unis, object_unis, int_unis = get_dtype_groups(data_types)
try:
if len(object_unis) > 0:
for j in object_unis[0]:
df = data.groupby(j).mean()
# print(df)
fig, ax = plt.subplots()
df.plot(kind='bar')
fig.set_size_inches(18.5, 10.5)
unique_values = len(pd.unique(data[j]))
if unique_values > 30:
continue
plt.savefig('./{}/groupby_{}_bar_plot.png'.format(dirpath, j))
except Exception as e:
print(e)
print('error occured while plotting groupby by {} column.'.format(j))
|
[
"os.makedirs",
"matplotlib.pyplot.axes",
"os.path.isdir",
"itertools.permutations",
"numpy.dtype",
"os.path.exists",
"pandas.unique",
"matplotlib.pyplot.figure",
"seaborn.color_palette",
"shutil.rmtree",
"matplotlib.pyplot.subplots"
] |
[((933, 953), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (944, 953), False, 'import os\n'), ((1107, 1119), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1117, 1119), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1154), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1137, 1154), True, 'import matplotlib.pyplot as plt\n'), ((658, 686), 'itertools.permutations', 'itertools.permutations', (['s', 'n'], {}), '(s, n)\n', (680, 686), False, 'import itertools\n'), ((845, 868), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (859, 868), False, 'import os\n'), ((873, 895), 'os.path.isdir', 'os.path.isdir', (['dirpath'], {}), '(dirpath)\n', (886, 895), False, 'import os\n'), ((905, 927), 'shutil.rmtree', 'shutil.rmtree', (['dirpath'], {}), '(dirpath)\n', (918, 927), False, 'import shutil\n'), ((1069, 1099), 'os.makedirs', 'os.makedirs', (["(dirpath + '/' + i)"], {}), "(dirpath + '/' + i)\n", (1080, 1099), False, 'import os\n'), ((1255, 1274), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (1272, 1274), True, 'import seaborn as sns\n'), ((403, 416), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (411, 416), True, 'import numpy as np\n'), ((991, 1008), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (1005, 1008), False, 'import os\n'), ((1013, 1029), 'os.path.isdir', 'os.path.isdir', (['i'], {}), '(i)\n', (1026, 1029), False, 'import os\n'), ((1043, 1059), 'shutil.rmtree', 'shutil.rmtree', (['i'], {}), '(i)\n', (1056, 1059), False, 'import shutil\n'), ((305, 324), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (313, 324), True, 'import numpy as np\n'), ((333, 352), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (341, 352), True, 'import numpy as np\n'), ((468, 485), 'numpy.dtype', 'np.dtype', (['"""int64"""'], {}), "('int64')\n", (476, 485), True, 'import numpy as np\n'), ((494, 511), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (502, 511), True, 'import numpy as np\n'), ((520, 537), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (528, 537), True, 'import numpy as np\n'), ((1706, 1718), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1716, 1718), True, 'import matplotlib.pyplot as plt\n'), ((1740, 1765), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (1748, 1765), True, 'import matplotlib.pyplot as plt\n'), ((2744, 2758), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2756, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2897), 'pandas.unique', 'pd.unique', (['data[j]'], {}), '(data[j])\n', (2888, 2897), True, 'import pandas as pd\n')]
|
import numpy as np
from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward
def initialize_params(layer_dims):
"""Create and initialize the params of an L-layer neural network.
Parameters
----------
layer_dims : list or tuple of int
The number of neurons in each layer of the network.
Returns
-------
params : dict of {str: ndarray}
Initialized parameters for each layer, l, of the L-layer network.
Wl : ndarray
Weights matrix of shape (`layer_dims[l]`, `layer_dims[l-1]`).
bl : ndarray
Biases vector of shape (`layer_dims[l]`, 1).
"""
params = {}
L = len(layer_dims)
for l in range(1, L):
params['W' + str(l)] = (
np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1])
)
params['b' + str(l)] = np.zeros((layer_dims[l], 1))
return params
def linear_forward(A, W, b):
"""Calculate the linear part of forward propagation for the current layer.
.. math:: Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}, where A^{[0]} = X
Parameters
----------
A : ndarray
Activations from the previous layer, of shape (size of previous layer,
number of examples).
W : ndarray
Weights matrix of shape (size of current layer, size of previous layer).
b : ndarray
Bias vector of shape (size of current layer, 1).
Returns
-------
Z : ndarray
Input of the activation function, also called pre-activation parameter,
of shape (size of current layer, number of examples).
cache : tuple of ndarray
Store `A`, `W`, and `b` for computing the backward pass efficiently.
"""
Z = np.dot(W, A) + b
cache = (A, W, b)
return Z, cache
def layer_forward(A_prev, W, b, activation):
"""Compute forward propagation for a single layer.
Parameters
----------
A_prev : ndarray
Activations from the previous layer of shape (size of previous layer,
number of examples).
W : ndarray
Weights matrix of shape (size of current layer, size of previous layer).
b : ndarray
Bias vector of shape (size of the current layer, 1).
activation : str {"sigmoid", "relu"}
Activation function to be used in this layer.
Returns
-------
A : ndarray
Output of the activation function of shape (size of current layer,
number of examples).
cache : tuple of (tuple of ndarray, ndarray)
Stored for computing the backward pass efficiently.
linear_cache : tuple of ndarray
Stores `cache` returned by `linear_forward()`.
activation_cache : ndarray
Stores `Z` returned by 'linear_forward()`.
"""
Z, linear_cache = linear_forward(A_prev, W, b)
if activation == "sigmoid":
A, activation_cache = sigmoid(Z)
elif activation == "relu":
A, activation_cache = relu(Z)
cache = (linear_cache, activation_cache)
return A, cache
def model_forward(X, parameters):
"""Compute forward propagation for [LINEAR->RELU]*(L-1) -> [LINEAR->SIGMOID].
Parameters
----------
X : ndarray
Input data of shape (input size, number of examples)
parameters : dict of {str: ndarray}
Output of initialize_parameters_deep()
Returns
-------
Y_hat : ndarray
Vector of prediction probabilities of shape (1, number of
examples).
caches : list of (tuple of (tuple of ndarray, ndarray))
The L `cache` results from `layer_forward()`.
"""
caches = []
A = X
L = len(parameters) // 2
for l in range(1, L):
A_prev = A
A, cache = layer_forward(
A_prev,
parameters["W" + str(l)],
parameters["b" + str(l)],
"relu"
)
caches.append(cache)
Y_hat, cache = layer_forward(
A,
parameters["W" + str(L)],
parameters["b" + str(L)],
"sigmoid"
)
caches.append(cache)
return Y_hat, caches
def compute_cost(Y_hat, Y):
"""Compute the cross-entropy cost.
.. math:: -\frac{1}{m} \sum\limits_{i = 1}^{m} \left( y^{(i)} \log\left(a^{[L](i)}\right) + (1 - y^{(i)}) \log\left(1 - a^{[L](i)}\right) \right)
Parameters
----------
Y_hat : ndarray
Vector of prediction probabilities from `model_forward()` of shape
(1, number of examples).
Y : ndarray
Vector of true values of shape (1, number of examples).
Returns
-------
cost : float
Cross-entropy cost.
"""
m = Y.shape[1]
cost = (1./m) * (-np.dot(Y, np.log(Y_hat).T) - np.dot(1-Y, np.log(1-Y_hat).T))
cost = np.squeeze(cost)
return cost
def linear_backward(dZ, cache):
"""Calculate the linear portion of backward propagation for a single layer.
Parameters
----------
dZ : ndarray
Gradient of the cost with respect to the linear output of layer l.
cache : tuple of ndarray
Stored `A`, `W`, `b` from `linear_forward()`.
Returns
-------
dA_prev : ndarray
Gradient of the cost with respect to the activation of the previous
layer, l-1. Shape of `cache['A']`.
dW : ndarray
Gradient of the cost with respect to W for the current layer, l. Shape
of `cache['W']`.
db : ndarray
Gradient of the cost with respect to b for the current layer, l. Shape
of `cache['b']`.
"""
A_prev, W, b = cache
m = A_prev.shape[1]
dW = (1/m) * np.dot(dZ, A_prev.T)
db = (1/m) * np.sum(dZ, axis=1, keepdims=True)
dA_prev = np.dot(W.T, dZ)
return dA_prev, dW, db
def layer_backward(dA, cache, activation):
"""Compute backward propagation for a single layer.
Parameters
----------
dA: ndarray
Post-activation gradient for current layer, l.
cache : tuple of (tuple of ndarray, ndarray)
Stored `(linear_cache, activation_cache)` from `layer_forward()`.
activation : str {"relu", "sigmoid"}
Activation function to be used in this layer.
Returns
-------
dA_prev : ndarray
Gradient of the cost with respect to the activation of the previous
layer, l-1. Shape of `cache['A']`.
dW : ndarray
Gradient of the cost with respect to W for the current layer, l. Shape
of `cache['W']`.
db : ndarray
Gradient of the cost with respect to b for the current layer, l. Shape
of `cache['b']`.
"""
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
return dA_prev, dW, db
def model_backward(Y_hat, Y, caches):
"""Compute backward propagation for [LINEAR->RELU]*(L-1) -> [LINEAR->SIGMOID].
Parameters
----------
Y_hat : ndarray
Vector of prediction probabilities from `model_forward()` of shape
(1, number of examples).
Y : ndarray
Vector of true values of shape (1, number of examples).
caches : list of (tuple of (tuple of ndarray, ndarray))
Stored results of `model_forward()`.
Returns
-------
grads : dict of {str: ndarray}
Gradients for layer `l` in `range(L-1)`.
dAl : ndarray
Gradient of the activations for layer `l`.
dWl : ndarray
Gradient of the weights for layer `l`.
dbl : ndarray
Gradient of the biases for layer `l`.
"""
grads = {}
L = len(caches)
m = Y_hat.shape[1]
Y = Y.reshape(Y_hat.shape)
dY_hat = -(np.divide(Y, Y_hat) - np.divide(1-Y, 1-Y_hat))
current_cache = caches[L-1]
grads["dA" + str(L-1)], grads["dW" + str(L)], grads["db" + str(L)] = (
layer_backward(dY_hat, current_cache, "sigmoid")
)
for l in reversed(range(L-1)):
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = (
layer_backward(grads["dA" + str(l+1)], current_cache, "relu")
)
grads["dA" + str(l)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
def update_params(params, grads, learning_rate):
"""Update parameters using gradient descent.
Parameters
----------
params : dict of {str: ndarray}
Initialized parameters from `intialize_params()`.
grads : dict of {str: ndarray}
Gradients from `model_backward()`.
learning_rate : float in (0, 1)
Learning rate for the model.
Returns
-------
params : dict of {str: ndarray}
Updated parameters.
`Wl` : ndarray
Updated weights matrix.
`bl` : ndarray
Updated biases vector.
"""
L = len(params) // 2
for l in range(L):
params["W" + str(l+1)] -= learning_rate * grads["dW" + str(l+1)]
params["b" + str(l+1)] -= learning_rate * grads["db" + str(l+1)]
return params
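# Example usage (illustrative sketch): fit a tiny two-layer network on random
# binary labels with plain gradient descent, using only the helpers above.
# The layer sizes, iteration count and learning rate are arbitrary.
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.randn(4, 32)                      # (input size, number of examples)
    Y = (np.random.rand(1, 32) > 0.5).astype(float)  # (1, number of examples)
    params = initialize_params([4, 5, 1])
    for i in range(100):
        Y_hat, caches = model_forward(X, params)
        cost = compute_cost(Y_hat, Y)
        grads = model_backward(Y_hat, Y, caches)
        params = update_params(params, grads, learning_rate=0.1)
    print("final cost:", cost)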
|
[
"numpy.divide",
"deepen.activation.relu",
"numpy.sum",
"numpy.log",
"numpy.random.randn",
"numpy.zeros",
"deepen.activation.sigmoid",
"numpy.squeeze",
"numpy.dot",
"deepen.activation.relu_backward",
"deepen.activation.sigmoid_backward",
"numpy.sqrt"
] |
[((4728, 4744), 'numpy.squeeze', 'np.squeeze', (['cost'], {}), '(cost)\n', (4738, 4744), True, 'import numpy as np\n'), ((5651, 5666), 'numpy.dot', 'np.dot', (['W.T', 'dZ'], {}), '(W.T, dZ)\n', (5657, 5666), True, 'import numpy as np\n'), ((877, 905), 'numpy.zeros', 'np.zeros', (['(layer_dims[l], 1)'], {}), '((layer_dims[l], 1))\n', (885, 905), True, 'import numpy as np\n'), ((1739, 1751), 'numpy.dot', 'np.dot', (['W', 'A'], {}), '(W, A)\n', (1745, 1751), True, 'import numpy as np\n'), ((2896, 2906), 'deepen.activation.sigmoid', 'sigmoid', (['Z'], {}), '(Z)\n', (2903, 2906), False, 'from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward\n'), ((5565, 5585), 'numpy.dot', 'np.dot', (['dZ', 'A_prev.T'], {}), '(dZ, A_prev.T)\n', (5571, 5585), True, 'import numpy as np\n'), ((5603, 5636), 'numpy.sum', 'np.sum', (['dZ'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ, axis=1, keepdims=True)\n', (5609, 5636), True, 'import numpy as np\n'), ((6618, 6653), 'deepen.activation.relu_backward', 'relu_backward', (['dA', 'activation_cache'], {}), '(dA, activation_cache)\n', (6631, 6653), False, 'from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward\n'), ((761, 810), 'numpy.random.randn', 'np.random.randn', (['layer_dims[l]', 'layer_dims[l - 1]'], {}), '(layer_dims[l], layer_dims[l - 1])\n', (776, 810), True, 'import numpy as np\n'), ((811, 837), 'numpy.sqrt', 'np.sqrt', (['layer_dims[l - 1]'], {}), '(layer_dims[l - 1])\n', (818, 837), True, 'import numpy as np\n'), ((2968, 2975), 'deepen.activation.relu', 'relu', (['Z'], {}), '(Z)\n', (2972, 2975), False, 'from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward\n'), ((6761, 6799), 'deepen.activation.sigmoid_backward', 'sigmoid_backward', (['dA', 'activation_cache'], {}), '(dA, activation_cache)\n', (6777, 6799), False, 'from deepen.activation import relu, relu_backward, sigmoid, sigmoid_backward\n'), ((7800, 7819), 'numpy.divide', 'np.divide', (['Y', 'Y_hat'], {}), '(Y, Y_hat)\n', (7809, 7819), True, 'import numpy as np\n'), ((7822, 7849), 'numpy.divide', 'np.divide', (['(1 - Y)', '(1 - Y_hat)'], {}), '(1 - Y, 1 - Y_hat)\n', (7831, 7849), True, 'import numpy as np\n'), ((4697, 4714), 'numpy.log', 'np.log', (['(1 - Y_hat)'], {}), '(1 - Y_hat)\n', (4703, 4714), True, 'import numpy as np\n'), ((4666, 4679), 'numpy.log', 'np.log', (['Y_hat'], {}), '(Y_hat)\n', (4672, 4679), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import random
from collections import namedtuple
def plot_winsratio(
wins: list,
title: str,
start_idx: int = 0,
wsize_mean: int = 100,
wsize_means_mean: int = 1000,
opponent_update_idxs=None,
):
"""Winrate plotting function, plots both a the WR over the last wsize_mean episodes and
a WR mean over the last wsize_means_mean wsize_mean episodes
Args:
wins (list): Wins vector. Contains 0 or 1 for each loss or victory
title (str): Title to use in the plot
start_idx (int, optional): Start for the x labels. Defaults to 0.
wsize_mean (int, optional): Window size to compute the Winrate. Defaults to 100.
wsize_means_mean (int, optional): Window size to compute the mean over the winrates. Defaults to 1000.
opponent_updates_idxs (list, optional): List of indexes where the update of the opponent state dict has happened in self play. Default None.
"""
if len(wins) >= wsize_mean:
# Take 100 episode averages
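        # running mean via cumulative sums: cumsum[i] - cumsum[i - w] is the sum of the last
        # w wins, so dividing by w gives the window average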
means = np.cumsum(wins, dtype=float)
means[wsize_mean:] = means[wsize_mean:] - means[:-wsize_mean]
means = means[wsize_mean - 1 :] / wsize_mean
idxs = [i + start_idx + wsize_mean - 1 for i in range(len(means))]
plt.plot(idxs, means, label=f"Running {wsize_mean} average WR")
# Take 20 episode averages of the 100 running average
if len(means) >= wsize_means_mean:
means_mean = np.cumsum(means)
means_mean[wsize_means_mean:] = (
means_mean[wsize_means_mean:] - means_mean[:-wsize_means_mean]
)
means_mean = means_mean[wsize_means_mean - 1 :] / wsize_means_mean
idxs_mean = [
i + start_idx + wsize_mean + wsize_means_mean - 2
for i in range(len(means_mean))
]
plt.plot(
idxs_mean,
means_mean,
label=f"Running {wsize_mean} average WR mean",
)
# add vertical lines for opponent update during self play
if opponent_update_idxs != None:
for x in opponent_update_idxs:
if x >= wsize_mean:
plt.axvline(x=x, c="red")
plt.legend()
plt.title(f"Training {title}")
plt.savefig("imgs/train_ai.png")
plt.close()
def rgb2grayscale(rgb: np.ndarray) -> np.ndarray:
"""Transform RGB image to grayscale
Args:
rgb (np.ndarray): RGB image to transform
Returns:
np.ndarray: Grayscale image
"""
# transform to rgb
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
grayscale = 0.2989 * r + 0.5870 * g + 0.1140 * b
return grayscale
Transition = namedtuple("Transition", ("ob", "action", "next_ob", "rew", "done"))
class ReplayMemory(object):
"""
Replay memory used for experience replay.
It stores transitions.
"""
def __init__(
self,
memory_capacity: int,
train_buffer_capacity: int,
test_buffer_capacity: int,
) -> None:
"""Initialization of the replay memory
Args:
memory_capacity (int): Maximum number of elements to fit in the memory
train_buffer_capacity (int): Maximum number of elements to fit in the train buffer
test_buffer_capacity (int): Maximum number of elements to fit in the test buffer
"""
self.memory_capacity = memory_capacity
self.train_buffer_capacity = train_buffer_capacity
self.test_buffer_capacity = test_buffer_capacity
self.memory = []
self.train_buffer = []
self.test_buffer = []
self.memory_position = 0
def push_to_memory(self, *args) -> None:
"""Save a transition to memory"""
if len(self.memory) < self.memory_capacity:
self.memory.append(None)
self.memory[self.memory_position] = Transition(*args)
self.memory_position = (self.memory_position + 1) % self.memory_capacity
def push_to_train_buffer(self, *args) -> None:
"""Save a transition to train buffer"""
self.train_buffer.append(Transition(*args))
if len(self.train_buffer) > self.train_buffer_capacity:
raise Exception("Error: capacity of the train_buffer exceded")
def push_to_test_buffer(self, ob: np.ndarray) -> None:
"""Save an observation to test buffer
Args:
ob (np.ndarray): Observation/state to push into the buffer
"""
self.test_buffer.append(ob)
if len(self.test_buffer) > self.test_buffer_capacity:
raise Exception("Error: capacity of the test_buffer exceded")
def sample(self, batch_size: int) -> np.ndarray:
"""Sample batch_size random elements from memory
Args:
batch_size (int): Number of elements to sample
Returns:
np.ndarray: Sampled elements
"""
return random.sample(self.memory, batch_size)
def __len__(self) -> int:
"""Overwrite of the len function for the object
Returns:
int: Length of the memory
"""
return len(self.memory)
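# Example usage (illustrative sketch): fill the memory with a few dummy
# transitions and draw a random batch; shapes and values here are arbitrary.
if __name__ == "__main__":
    memory = ReplayMemory(
        memory_capacity=1000, train_buffer_capacity=100, test_buffer_capacity=100
    )
    for i in range(10):
        ob = np.zeros(4)
        next_ob = np.ones(4)
        memory.push_to_memory(ob, 1, next_ob, 0.5, False)  # (ob, action, next_ob, rew, done)
    batch = memory.sample(batch_size=4)
    print(len(memory), len(batch))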
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.plot",
"random.sample",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.cumsum",
"collections.namedtuple",
"matplotlib.pyplot.savefig"
] |
[((2784, 2852), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('ob', 'action', 'next_ob', 'rew', 'done')"], {}), "('Transition', ('ob', 'action', 'next_ob', 'rew', 'done'))\n", (2794, 2852), False, 'from collections import namedtuple\n'), ((1078, 1106), 'numpy.cumsum', 'np.cumsum', (['wins'], {'dtype': 'float'}), '(wins, dtype=float)\n', (1087, 1106), True, 'import numpy as np\n'), ((1313, 1376), 'matplotlib.pyplot.plot', 'plt.plot', (['idxs', 'means'], {'label': 'f"""Running {wsize_mean} average WR"""'}), "(idxs, means, label=f'Running {wsize_mean} average WR')\n", (1321, 1376), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2305), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2303, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2314, 2344), 'matplotlib.pyplot.title', 'plt.title', (['f"""Training {title}"""'], {}), "(f'Training {title}')\n", (2323, 2344), True, 'import matplotlib.pyplot as plt\n'), ((2353, 2385), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""imgs/train_ai.png"""'], {}), "('imgs/train_ai.png')\n", (2364, 2385), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2405), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2403, 2405), True, 'import matplotlib.pyplot as plt\n'), ((5006, 5044), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (5019, 5044), False, 'import random\n'), ((1508, 1524), 'numpy.cumsum', 'np.cumsum', (['means'], {}), '(means)\n', (1517, 1524), True, 'import numpy as np\n'), ((1909, 1987), 'matplotlib.pyplot.plot', 'plt.plot', (['idxs_mean', 'means_mean'], {'label': 'f"""Running {wsize_mean} average WR mean"""'}), "(idxs_mean, means_mean, label=f'Running {wsize_mean} average WR mean')\n", (1917, 1987), True, 'import matplotlib.pyplot as plt\n'), ((2258, 2283), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'x', 'c': '"""red"""'}), "(x=x, c='red')\n", (2269, 2283), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
def movingAverage(x, window):
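    # centered moving average: each output element averages up to window neighbouring
    # samples, with the window truncated at the array edges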
ret = np.zeros_like(x)
for i in range(len(x)):
idx1 = max(0, i - (window - 1) // 2)
idx2 = min(len(x), i + (window - 1) // 2 + (2 - (window % 2)))
ret[i] = np.mean(x[idx1:idx2])
return ret
def computeAverage(x, window, idx):
min_idx = max(0, idx - window - 1)
return np.mean(x[min_idx:idx])
def plot(predict_values, gt):
fig, ax = plt.subplots()
ax.plot(np.arange(len(gt)), gt, label='ground truth')
ax.plot(np.arange(len(predict_values)), np.array(predict_values), label='predict')
start, end = ax.get_xlim()
ax.yaxis.set_ticks(np.arange(0, max(gt) +10, 5.0))
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
ax.legend(loc='upper left')
plt.xlabel('Frame num.')
plt.ylabel('Speed [mph]')
# ax.figure.savefig('result.png', bbox_inches='tight')
plt.show()
|
[
"numpy.zeros_like",
"matplotlib.pyplot.show",
"numpy.mean",
"numpy.array",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] |
[((168, 184), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (181, 184), True, 'import numpy as np\n'), ((458, 481), 'numpy.mean', 'np.mean', (['x[min_idx:idx]'], {}), '(x[min_idx:idx])\n', (465, 481), True, 'import numpy as np\n'), ((527, 541), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (539, 541), True, 'from matplotlib import pyplot as plt\n'), ((879, 903), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frame num."""'], {}), "('Frame num.')\n", (889, 903), True, 'from matplotlib import pyplot as plt\n'), ((908, 933), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Speed [mph]"""'], {}), "('Speed [mph]')\n", (918, 933), True, 'from matplotlib import pyplot as plt\n'), ((997, 1007), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1005, 1007), True, 'from matplotlib import pyplot as plt\n'), ((334, 355), 'numpy.mean', 'np.mean', (['x[idx1:idx2]'], {}), '(x[idx1:idx2])\n', (341, 355), True, 'import numpy as np\n'), ((644, 668), 'numpy.array', 'np.array', (['predict_values'], {}), '(predict_values)\n', (652, 668), True, 'import numpy as np\n'), ((807, 841), 'matplotlib.ticker.FormatStrFormatter', 'ticker.FormatStrFormatter', (['"""%0.1f"""'], {}), "('%0.1f')\n", (832, 841), True, 'import matplotlib.ticker as ticker\n')]
|
# SPDX-License-Identifier: Apache-2.0
"""
Tests scikit-normalizer converter.
"""
import unittest
import numpy
from sklearn.preprocessing import Normalizer
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import (
Int64TensorType, FloatTensorType, DoubleTensorType)
from test_utils import dump_data_and_model, TARGET_OPSET
class TestSklearnNormalizerConverter(unittest.TestCase):
def test_model_normalizer(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", Int64TensorType([None, 1]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
def test_model_normalizer_blackop(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET,
black_op={"Normalizer"})
self.assertNotIn('op_type: "Normalizer', str(model_onnx))
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL1BlackOp-SkipDim1")
def test_model_normalizer_float_l1(self):
model = Normalizer(norm="l1")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL1-SkipDim1")
def test_model_normalizer_float_l2(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL2-SkipDim1")
def test_model_normalizer_double_l1(self):
model = Normalizer(norm="l1")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", DoubleTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64),
model, model_onnx,
basename="SklearnNormalizerL1Double-SkipDim1")
def test_model_normalizer_double_l2(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", DoubleTensorType([None, 3]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64),
model, model_onnx,
basename="SklearnNormalizerL2Double-SkipDim1")
def test_model_normalizer_float_noshape(self):
model = Normalizer(norm="l2")
model_onnx = convert_sklearn(
model, "scikit-learn normalizer",
[("input", FloatTensorType([]))],
target_opset=TARGET_OPSET)
self.assertTrue(model_onnx is not None)
self.assertTrue(len(model_onnx.graph.node) == 1)
dump_data_and_model(
numpy.array([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32),
model, model_onnx,
basename="SklearnNormalizerL2NoShape-SkipDim1")
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"skl2onnx.common.data_types.DoubleTensorType",
"skl2onnx.common.data_types.Int64TensorType",
"skl2onnx.common.data_types.FloatTensorType",
"numpy.array",
"sklearn.preprocessing.Normalizer"
] |
[((4006, 4021), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4019, 4021), False, 'import unittest\n'), ((459, 480), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (469, 480), False, 'from sklearn.preprocessing import Normalizer\n'), ((824, 845), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (834, 845), False, 'from sklearn.preprocessing import Normalizer\n'), ((1379, 1400), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l1"""'}), "(norm='l1')\n", (1389, 1400), False, 'from sklearn.preprocessing import Normalizer\n'), ((1929, 1950), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (1939, 1950), False, 'from sklearn.preprocessing import Normalizer\n'), ((2480, 2501), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l1"""'}), "(norm='l1')\n", (2490, 2501), False, 'from sklearn.preprocessing import Normalizer\n'), ((2981, 3002), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (2991, 3002), False, 'from sklearn.preprocessing import Normalizer\n'), ((3486, 3507), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (3496, 3507), False, 'from sklearn.preprocessing import Normalizer\n'), ((1166, 1223), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float32'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32)\n', (1177, 1223), False, 'import numpy\n'), ((1723, 1780), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float32'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32)\n', (1734, 1780), False, 'import numpy\n'), ((2273, 2330), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float32'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32)\n', (2284, 2330), False, 'import numpy\n'), ((2768, 2825), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float64'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64)\n', (2779, 2825), False, 'import numpy\n'), ((3269, 3326), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float64'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float64)\n', (3280, 3326), False, 'import numpy\n'), ((3823, 3880), 'numpy.array', 'numpy.array', (['[[1, -1, 3], [3, 1, 2]]'], {'dtype': 'numpy.float32'}), '([[1, -1, 3], [3, 1, 2]], dtype=numpy.float32)\n', (3834, 3880), False, 'import numpy\n'), ((588, 614), 'skl2onnx.common.data_types.Int64TensorType', 'Int64TensorType', (['[None, 1]'], {}), '([None, 1])\n', (603, 614), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((953, 979), 'skl2onnx.common.data_types.FloatTensorType', 'FloatTensorType', (['[None, 3]'], {}), '([None, 3])\n', (968, 979), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((1508, 1534), 'skl2onnx.common.data_types.FloatTensorType', 'FloatTensorType', (['[None, 3]'], {}), '([None, 3])\n', (1523, 1534), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((2058, 2084), 'skl2onnx.common.data_types.FloatTensorType', 'FloatTensorType', (['[None, 3]'], {}), '([None, 3])\n', (2073, 2084), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((2609, 2636), 'skl2onnx.common.data_types.DoubleTensorType', 
'DoubleTensorType', (['[None, 3]'], {}), '([None, 3])\n', (2625, 2636), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((3110, 3137), 'skl2onnx.common.data_types.DoubleTensorType', 'DoubleTensorType', (['[None, 3]'], {}), '([None, 3])\n', (3126, 3137), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n'), ((3615, 3634), 'skl2onnx.common.data_types.FloatTensorType', 'FloatTensorType', (['[]'], {}), '([])\n', (3630, 3634), False, 'from skl2onnx.common.data_types import Int64TensorType, FloatTensorType, DoubleTensorType\n')]
|
import numpy as np
from flask import Flask, session,abort,request, jsonify, render_template,redirect,url_for,flash
import pickle
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from keras.models import load_model
import os
import stripe
import datetime
import keras
from keras import optimizers
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input, Dense
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dropout, Activation
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/heartAttack',methods=['POST'])
def heartAttack():
model = load_model('models/heart_disease_model.h5')
int_features = [[int(x) for x in request.form.values()]]
final_features = [np.array(int_features)]
prediction_proba = model.predict(final_features)
prediction = (prediction_proba > 0.5)
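    # prediction_proba is the model's predicted probability; thresholding at 0.5
    # yields the boolean label that is interpolated into the response message below.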
return render_template('index.html', prediction_text='THANK YOU FOR YOUR PURCHASE, \n FOR THE DATA YOU ENTERED \n IT IS PREDICTED {} \n THAT THE PATIENT WILL HAVE A STROKE WITHIN \n THE NEXT 10 YEARS.'.format(prediction))
if __name__ == "__main__":
app.run(debug=True, port=8080) #debug=True,host="0.0.0.0",port=50000
|
[
"keras.models.load_model",
"flask.request.form.values",
"flask.Flask",
"numpy.array",
"flask.render_template"
] |
[((610, 625), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (615, 625), False, 'from flask import Flask, session, abort, request, jsonify, render_template, redirect, url_for, flash\n'), ((670, 699), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (685, 699), False, 'from flask import Flask, session, abort, request, jsonify, render_template, redirect, url_for, flash\n'), ((780, 823), 'keras.models.load_model', 'load_model', (['"""models/heart_disease_model.h5"""'], {}), "('models/heart_disease_model.h5')\n", (790, 823), False, 'from keras.models import load_model\n'), ((909, 931), 'numpy.array', 'np.array', (['int_features'], {}), '(int_features)\n', (917, 931), True, 'import numpy as np\n'), ((862, 883), 'flask.request.form.values', 'request.form.values', ([], {}), '()\n', (881, 883), False, 'from flask import Flask, session, abort, request, jsonify, render_template, redirect, url_for, flash\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/forks/forks_pow.csv')
x = np.arange(0.0, 100, 1)
data = df[['T1', 'T2','T3','T4', 'T5','T6','T7', 'T8','T9','T10', 'T11','T12','T13', 'T14','T15','T16', 'T17','T18','T19', 'T20','T21', 'T22','T23','T24', 'T25','T26','T27', 'T28','T29','T30']]
fig, ax = plt.subplots(figsize=(8,5))
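# Error bars: log10 of the 95% confidence half-width of the mean, 1.96 * std / sqrt(30),
# computed across the 30 repeated runs T1..T30.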
ax.errorbar(x, np.log10(data.mean(axis=1)), yerr=np.log10(data.std(axis=1)*1.96/np.sqrt(30)) , fmt='.')
plt.xlabel('# blocks', fontsize=16)
plt.ylabel('log (average # forks ' + '$f_b$)', fontsize=16)
plt.grid(linestyle=':',linewidth=1.5)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.tick_params(axis='both', which='major', labelsize=16)
ax.legend(loc=1,prop={'size': 16})
ax.set_xlim(xmin=0, xmax=100)
ax.set_ylim(ymin=-1, ymax=3)
plt.savefig('ev_forks_pow.png')
plt.show()
|
[
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.sqrt",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] |
[((78, 209), 'pandas.read_csv', 'pd.read_csv', (['"""/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/forks/forks_pow.csv"""'], {}), "(\n '/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/forks/forks_pow.csv'\n )\n", (89, 209), True, 'import pandas as pd\n'), ((205, 227), 'numpy.arange', 'np.arange', (['(0.0)', '(100)', '(1)'], {}), '(0.0, 100, 1)\n', (214, 227), True, 'import numpy as np\n'), ((440, 468), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (452, 468), True, 'import matplotlib.pyplot as plt\n'), ((575, 610), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""# blocks"""'], {'fontsize': '(16)'}), "('# blocks', fontsize=16)\n", (585, 610), True, 'import matplotlib.pyplot as plt\n'), ((611, 670), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('log (average # forks ' + '$f_b$)')"], {'fontsize': '(16)'}), "('log (average # forks ' + '$f_b$)', fontsize=16)\n", (621, 670), True, 'import matplotlib.pyplot as plt\n'), ((671, 709), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '""":"""', 'linewidth': '(1.5)'}), "(linestyle=':', linewidth=1.5)\n", (679, 709), True, 'import matplotlib.pyplot as plt\n'), ((818, 875), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'labelsize': '(16)'}), "(axis='both', which='major', labelsize=16)\n", (833, 875), True, 'import matplotlib.pyplot as plt\n'), ((972, 1003), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ev_forks_pow.png"""'], {}), "('ev_forks_pow.png')\n", (983, 1003), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1014), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1012, 1014), True, 'import matplotlib.pyplot as plt\n'), ((550, 561), 'numpy.sqrt', 'np.sqrt', (['(30)'], {}), '(30)\n', (557, 561), True, 'import numpy as np\n')]
|
import collections
import math
import numpy as np
import mlpy
class TermFrequencyAnalyzer(object):
def __init__(self, *documents):
self.idf = self.compute_idf(*documents)
def compute_idf(self, *documents):
# document frequency
df = collections.defaultdict(int)
for tokens in documents:
for token in set(tokens):
df[token] += 1
# idf
idf = dict()
        for token, count in df.items():
idf[token] = math.log(float(len(documents)) / float(count))
return idf
def get_similarity(self, *strings):
if len(strings) <= 1:
return 0.0
counts = [collections.defaultdict(int) for _ in strings]
for index, tokens in enumerate(strings):
for token in tokens:
counts[index][token] += 1
score = 0.0
        # intersection of the tokens
for token in set.intersection(*[set(tokens) for tokens in strings]):
# term frequency
tf = float(sum([count[token] for count in counts]))
score += tf * self.idf[token]
return score
class LongestAnalyzer(object):
def __init__(self, *documents):
pass
def get_similarity(self, a, b):
#return self.lcs(a, b)
a = np.array(list(a), dtype='U1').view(np.uint32)
b = np.array(list(b), dtype='U1').view(np.uint32)
length, path = mlpy.lcs_std(a, b)
return length
def lcs(self, a, b):
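        # Dynamic-programming LCS length: only two rows of the DP table are kept
        # (selected via i % 2), so memory stays O(min(len(a), len(b))) instead of
        # O(M * N); inputs are truncated to 200 characters to bound the running time.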
a = a[:200]
b = b[:200]
if (len(a) < len(b)):
a, b = b, a
M = len(a)
N = len(b)
arr = np.zeros((2, N + 1))
for i in range(1, M + 1):
curIdx = i % 2
prevIdx = 1 - curIdx
ai = a[i - 1]
for j in range(1, N + 1):
bj = b[j - 1]
if (ai == bj):
arr[curIdx][j] = 1 + arr[prevIdx][j - 1]
else:
arr[curIdx][j] = max(arr[curIdx][j - 1], arr[prevIdx][j])
return arr[M % 2][N]
|
[
"collections.defaultdict",
"numpy.zeros",
"mlpy.lcs_std"
] |
[((268, 296), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (291, 296), False, 'import collections\n'), ((1437, 1455), 'mlpy.lcs_std', 'mlpy.lcs_std', (['a', 'b'], {}), '(a, b)\n', (1449, 1455), False, 'import mlpy\n'), ((1660, 1680), 'numpy.zeros', 'np.zeros', (['(2, N + 1)'], {}), '((2, N + 1))\n', (1668, 1680), True, 'import numpy as np\n'), ((684, 712), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (707, 712), False, 'import collections\n')]
|
import os, glob, sys
from turbo_seti.find_event.plot_dat import plot_dat
from turbo_seti import find_event as find
import numpy as np
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir', default=os.getcwd())
parser.add_argument('--minHit', type=float, default=None)
parser.add_argument('--maxHit', type=float, default=None)
args = parser.parse_args()
path = args.dir
dat_files = glob.glob(path + "*.dat")
min_hit = 1e9
max_hit = 0
if args.minHit == None or args.maxHit == None:
for file in dat_files:
tbl = find.read_dat(file)
min_freq, max_freq = min(tbl["Freq"]), max(tbl["Freq"])
if min_freq < min_hit:
min_hit = min_freq
if max_freq > max_hit:
max_hit = max_freq
else:
min_hit = args.minHit
max_hit = args.maxHit # set min and max hits by hand just to get this image
print("Lowest frequency hit: ", min_hit)
print("Highext frequency hit: ", max_hit)
plot_range = 2000*1e-6 # a 2000Hz width, adjusted to be in units of MHz
freq_range = np.arange(np.round(min_hit, 2), np.round(max_hit), plot_range)
outDir = path + "bautista-analysis/"
if not os.path.exists(outDir):
os.mkdir(outDir)
for center in freq_range:
plot_dat(path + "dat-list.lst",
path + "h5-list.lst",
path + "events-list.csv",
outdir=outDir,
check_zero_drift=False,
alpha=0.65,
color="black",
window=(center-0.001, center+0.001))
if __name__ == '__main__':
sys.exit(main())
|
[
"os.mkdir",
"turbo_seti.find_event.read_dat",
"argparse.ArgumentParser",
"os.getcwd",
"os.path.exists",
"glob.glob",
"numpy.round",
"turbo_seti.find_event.plot_dat.plot_dat"
] |
[((180, 205), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (203, 205), False, 'import argparse\n'), ((453, 478), 'glob.glob', 'glob.glob', (["(path + '*.dat')"], {}), "(path + '*.dat')\n", (462, 478), False, 'import os, glob, sys\n'), ((1166, 1186), 'numpy.round', 'np.round', (['min_hit', '(2)'], {}), '(min_hit, 2)\n', (1174, 1186), True, 'import numpy as np\n'), ((1188, 1205), 'numpy.round', 'np.round', (['max_hit'], {}), '(max_hit)\n', (1196, 1205), True, 'import numpy as np\n'), ((1272, 1294), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (1286, 1294), False, 'import os, glob, sys\n'), ((1304, 1320), 'os.mkdir', 'os.mkdir', (['outDir'], {}), '(outDir)\n', (1312, 1320), False, 'import os, glob, sys\n'), ((1360, 1554), 'turbo_seti.find_event.plot_dat.plot_dat', 'plot_dat', (["(path + 'dat-list.lst')", "(path + 'h5-list.lst')", "(path + 'events-list.csv')"], {'outdir': 'outDir', 'check_zero_drift': '(False)', 'alpha': '(0.65)', 'color': '"""black"""', 'window': '(center - 0.001, center + 0.001)'}), "(path + 'dat-list.lst', path + 'h5-list.lst', path +\n 'events-list.csv', outdir=outDir, check_zero_drift=False, alpha=0.65,\n color='black', window=(center - 0.001, center + 0.001))\n", (1368, 1554), False, 'from turbo_seti.find_event.plot_dat import plot_dat\n'), ((247, 258), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (256, 258), False, 'import os, glob, sys\n'), ((615, 634), 'turbo_seti.find_event.read_dat', 'find.read_dat', (['file'], {}), '(file)\n', (628, 634), True, 'from turbo_seti import find_event as find\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `marble` package."""
import unittest
import marble
import numpy as np
import sympl as sp
test_era5_filename = '/home/twine/data/era5/era5-interp-2016.nc'
def get_test_state(pc_value=0.):
n_features = marble.components.marble.name_feature_counts
state = {
'time': sp.timedelta(0),
'liquid_water_static_energy_components': sp.DataArray(
np.ones([n_features['sl']]) * pc_value,
dims=('sl_latent',), attrs={'units': ''}),
'total_water_mixing_ratio_components': sp.DataArray(
np.ones([n_features['rt']]) * pc_value,
dims=('rt_latent',), attrs={'units': ''}),
'cloud_water_mixing_ratio_components': sp.DataArray(
np.ones([n_features['rcld']]) * pc_value,
dims=('rcld_latent',), attrs={'units': ''}),
'rain_water_mixing_ratio_components': sp.DataArray(
np.ones([n_features['rrain']]) * pc_value,
dims=('rrain_latent',), attrs={'units': ''}),
'cloud_fraction_components': sp.DataArray(
np.ones([n_features['cld']]) * pc_value,
dims=('cld_latent',), attrs={'units': ''}),
'liquid_water_static_energy_components_horizontal_advective_tendency': sp.DataArray(
np.ones([n_features['sl']]) * pc_value,
dims=('sl_latent',), attrs={'units': ''}),
'total_water_mixing_ratio_components_horizontal_advective_tendency': sp.DataArray(
np.ones([n_features['sl']]) * pc_value,
dims=('rt_latent',), attrs={'units': ''}),
'vertical_wind_components': sp.DataArray(
np.ones([n_features['w']]) * pc_value,
dims=('w_latent',), attrs={'units': ''}),
}
return state
class TestPrincipalComponentConversions(unittest.TestCase):
"""Tests for `marble` package."""
def test_convert_input_zero_latent_to_height_and_back(self):
state = get_test_state(pc_value=0.)
converter = marble.InputPrincipalComponentsToHeight()
inverse_converter = marble.InputHeightToPrincipalComponents()
intermediate = converter(state)
intermediate['time'] = state['time']
result = inverse_converter(intermediate)
for name in result.keys():
self.assertIn(name, state)
self.assertEqual(result[name].shape, state[name].shape, name)
self.assertTrue(np.allclose(result[name].values, state[name].values), name)
def test_convert_input_nonzero_latent_to_height_and_back(self):
state = get_test_state(pc_value=0.6)
converter = marble.InputPrincipalComponentsToHeight()
inverse_converter = marble.InputHeightToPrincipalComponents()
intermediate = converter(state)
intermediate['time'] = state['time']
result = inverse_converter(intermediate)
for name in result.keys():
self.assertIn(name, state)
self.assertEqual(result[name].shape, state[name].shape, name)
self.assertTrue(np.allclose(result[name].values, state[name].values), name)
def test_convert_diagnostic_zero_latent_to_height(self):
"""
This only tests that the conversion runs without errors, it does not
check anything about the output value.
"""
state = get_test_state(pc_value=0.)
converter = marble.DiagnosticPrincipalComponentsToHeight()
result = converter(state)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"marble.DiagnosticPrincipalComponentsToHeight",
"numpy.allclose",
"sympl.timedelta",
"numpy.ones",
"marble.InputPrincipalComponentsToHeight",
"marble.InputHeightToPrincipalComponents"
] |
[((3497, 3512), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3510, 3512), False, 'import unittest\n'), ((344, 359), 'sympl.timedelta', 'sp.timedelta', (['(0)'], {}), '(0)\n', (356, 359), True, 'import sympl as sp\n'), ((2010, 2051), 'marble.InputPrincipalComponentsToHeight', 'marble.InputPrincipalComponentsToHeight', ([], {}), '()\n', (2049, 2051), False, 'import marble\n'), ((2080, 2121), 'marble.InputHeightToPrincipalComponents', 'marble.InputHeightToPrincipalComponents', ([], {}), '()\n', (2119, 2121), False, 'import marble\n'), ((2626, 2667), 'marble.InputPrincipalComponentsToHeight', 'marble.InputPrincipalComponentsToHeight', ([], {}), '()\n', (2665, 2667), False, 'import marble\n'), ((2696, 2737), 'marble.InputHeightToPrincipalComponents', 'marble.InputHeightToPrincipalComponents', ([], {}), '()\n', (2735, 2737), False, 'import marble\n'), ((3382, 3428), 'marble.DiagnosticPrincipalComponentsToHeight', 'marble.DiagnosticPrincipalComponentsToHeight', ([], {}), '()\n', (3426, 3428), False, 'import marble\n'), ((436, 463), 'numpy.ones', 'np.ones', (["[n_features['sl']]"], {}), "([n_features['sl']])\n", (443, 463), True, 'import numpy as np\n'), ((604, 631), 'numpy.ones', 'np.ones', (["[n_features['rt']]"], {}), "([n_features['rt']])\n", (611, 631), True, 'import numpy as np\n'), ((772, 801), 'numpy.ones', 'np.ones', (["[n_features['rcld']]"], {}), "([n_features['rcld']])\n", (779, 801), True, 'import numpy as np\n'), ((943, 973), 'numpy.ones', 'np.ones', (["[n_features['rrain']]"], {}), "([n_features['rrain']])\n", (950, 973), True, 'import numpy as np\n'), ((1107, 1135), 'numpy.ones', 'np.ones', (["[n_features['cld']]"], {}), "([n_features['cld']])\n", (1114, 1135), True, 'import numpy as np\n'), ((1309, 1336), 'numpy.ones', 'np.ones', (["[n_features['sl']]"], {}), "([n_features['sl']])\n", (1316, 1336), True, 'import numpy as np\n'), ((1507, 1534), 'numpy.ones', 'np.ones', (["[n_features['sl']]"], {}), "([n_features['sl']])\n", (1514, 1534), True, 'import numpy as np\n'), ((1664, 1690), 'numpy.ones', 'np.ones', (["[n_features['w']]"], {}), "([n_features['w']])\n", (1671, 1690), True, 'import numpy as np\n'), ((2432, 2484), 'numpy.allclose', 'np.allclose', (['result[name].values', 'state[name].values'], {}), '(result[name].values, state[name].values)\n', (2443, 2484), True, 'import numpy as np\n'), ((3048, 3100), 'numpy.allclose', 'np.allclose', (['result[name].values', 'state[name].values'], {}), '(result[name].values, state[name].values)\n', (3059, 3100), True, 'import numpy as np\n')]
|
### Figure 5 C and E - Obenhaus et al.
# Figure S6 A, C, E and F - Obenhaus et al.
#
# NN distance analysis
# Pairwise distance analysis
#
import sys, os
import os.path
import numpy as np
import pandas as pd
import datajoint as dj
import cmasher as cmr
from tabulate import tabulate
import itertools
# Make plots pretty
import seaborn as sns
sns.set(style='white')
# Prevent bug in figure export as pdf:
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
##### IMPORTS ###########################################################################
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from general import print_mannwhitneyu, print_wilcoxon
from dj_plotter.helpers.plotting_helpers import make_linear_colormap
from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary
##### LOAD SCHEMA COMPONENTS #############################################################
from dj_schemas.dj_conn import *
##### EXPORT LOCATION ####################################################################
figure_dir = 'YOUR_EXPORT_DIRECTORY/'
def pairw_dist(animals,
col_dict,
param_hash_session='cf83e1357eefb8bd',
param_hash_id_cell='standard',
region='MEC',
pairwise_dist_param='A',
cutoff_n_starters=0,
plot=True
):
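    # Fetches pairwise-distance results (PairwDist.PairwD) for the selected animals
    # from the DataJoint schema, normalizes the columns in `col_dict` to the chosen
    # shuffle reference, optionally plots a summary figure, prints Mann-Whitney and
    # Wilcoxon statistics, and returns (normalized dataframe, #animals, #datasets).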
# Print col_dict
print(f'\nReceived the following column dictionary \n{col_dict}\n')
# Brain region filter
assert region in ['MEC','PAS'], f'Region "{region}" not understood. Choose "MEC" or "PAS"'
all_sessions = (Session.proj('animal_name') * FilteredSessions
& [f'animal_name = "{animal}"' for animal in animals]
& f'param_hash_session = "{param_hash_session}"'
)
# Print pairw dist. parameter
score_, score_cutoff_ = (PairwDistParams & f'pairwise_dist_param = "{pairwise_dist_param}"').fetch1('score','score_cutoff')
print(f'Filtering pairwise distances by {score_} > {score_cutoff_}')
pairw = (Session.proj('animal_name') * PairwDist.Cells * PairwDist.PairwD
& all_sessions.proj() \
& f'param_hash_id_cell = "{param_hash_id_cell}"'
& f'pairwise_dist_param = "{pairwise_dist_param}"'
& f'region = "{region}"'
& f'n_startr > {cutoff_n_starters}')
pairw_df = pd.DataFrame(pairw.fetch(as_dict=True))
pairw_df.dropna(inplace=True)
colors = make_linear_colormap(pairw_df.animal_name, categorical=True, cmap='cmr.guppy')
### COLS TO NORMALIZE #################################################################################
cols_to_norm = col_dict['cols_to_norm'] # ['mean_pairw_dist_shuffref', 'mean_pairw_dist']
cols_to_norm_label = col_dict['cols_to_norm_label'] # ['Ref', 'Data']
norm_to = col_dict['norm_to'] # mean_pairw_dist_shuffall'
cols = col_dict['cols'] # 'animal_name'
# Normalize
pairw_df_norm = norm_pairw_nn_df(pairw_df, cols_to_norm, cols, norm_to)
pairw_df_norm.reset_index(drop=True, inplace=True)
# Plot
if plot:
plot_pairw_nn_summary(pairw_df_norm,
cols_to_norm,
colors=colors,
xlabels=cols_to_norm_label,
save_path=figure_dir,
label='PairwD')
# Print statistics
print(f'Data over {len(pairw_df.session_name)} datasets (careful! Can be multiplane!) ({len(set(pairw_df.animal_name))} animals)')
print(f'{set(pairw_df.animal_name)}')
    # Calculate p values: Mann-Whitney and one-sample Wilcoxon signed-rank
pairw_df_norm_ = pairw_df_norm[cols_to_norm]
results = pd.DataFrame(columns = pairw_df_norm_.columns,
index = pairw_df_norm_.columns)
for (label1, column1), (label2, column2) in itertools.combinations(pairw_df_norm_.items(), 2):
_ ,results.loc[label1, label2] = _ ,results.loc[label2, label1] = print_mannwhitneyu(column1, column2, label_A=label1, label_B=label2)
#print(tabulate(results, headers='keys', tablefmt='psql'))
print('\nWilcoxon signed rank test (against 1.):')
for col in cols_to_norm:
try:
print_wilcoxon(pairw_df_norm[col] - 1., label=col)
except ValueError:
print(f'Skipping column {col} (all zero?)')
# Print some more stats
print('Mean and SEM for PairwDist results')
for col in cols_to_norm:
mean_col, sem_col = np.nanmean(pairw_df_norm[col]), np.std(pairw_df_norm[col]) / np.sqrt(len(pairw_df_norm[col]))
print(f'{col:<30} | Mean ± SEM: {mean_col:.2f} ± {sem_col:.2f}')
return pairw_df_norm, len(set(pairw_df.animal_name)), len(pairw_df.session_name)
def group_nn_dist(animals,
col_dict,
param_hash_session='cf83e1357eefb8bd',
param_hash_id_cell = 'standard',
region='MEC',
pairwise_dist_param='A',
cutoff_n_starters=0,
nn_group_number=5,
plot=True
):
'''
Like pairw_dist() but for PairwDist.NN instead of PairwDist.PairwD, i.e. grouped NN results
nn_group_number : default 5 : Number of NN to consider (group size).
Careful: Zero indexed! 0 = first nearest neighbour
'''
# Print col_dict
print(f'\nReceived the following column dictionary \n{col_dict}\n')
# Brain region filter
assert region in ['MEC','PAS'], f'Region "{region}" not understood. Choose "MEC" or "PAS"'
all_sessions = (Session.proj('animal_name') * FilteredSessions
& [f'animal_name = "{animal}"' for animal in animals]
& f'param_hash_session = "{param_hash_session}"'
)
# Print pairw dist. parameter
score_, score_cutoff_ = (PairwDistParams & f'pairwise_dist_param = "{pairwise_dist_param}"').fetch1('score','score_cutoff')
print(f'Filtering pairwise distances by {score_} > {score_cutoff_}')
nn = (Session.proj('animal_name') * PairwDist.Cells * PairwDist.NN
& all_sessions.proj()
& f'param_hash_id_cell = "{param_hash_id_cell}"'
& f'pairwise_dist_param = "{pairwise_dist_param}"'
& f'region = "{region}"'
& f'n_startr > {cutoff_n_starters}')
nn_df = pd.DataFrame(nn.fetch(as_dict=True))
nn_df.dropna(inplace=True) # Important here because apparently some of the stuff can be None
colors = make_linear_colormap(nn_df.animal_name, categorical=True, cmap='cmr.guppy')
# Subselect a specific nn_number = number of NN in result (group size)
data_cols_pairwDist_NN = ['mean_nn','mean_nn_shuff_all',
'mean_nn_shuff_ref','mean_nn_csr'] # All data columns in table
for col in data_cols_pairwDist_NN:
nn_df[col] = [res[nn_group_number] for res in nn_df[col]]
### COLS TO NORMALIZE #################################################################################
cols_to_norm = col_dict['cols_to_norm']
cols_to_norm_label = col_dict['cols_to_norm_label']
norm_to = col_dict['norm_to']
cols = col_dict['cols']
# Normalize
nn_df_norm = norm_pairw_nn_df(nn_df, cols_to_norm, cols, norm_to)
nn_df_norm.reset_index(drop=True, inplace=True)
# Plot
if plot:
plot_pairw_nn_summary(nn_df_norm,
cols_to_norm,
colors=colors,
xlabels=cols_to_norm_label,
save_path=figure_dir,
label='NN')
# Print statistics
print(f'Data over {len(nn_df.session_name)} datasets (careful! Can be multiplane!) ({len(set(nn_df.animal_name))} animals)')
print(f'{set(nn_df.animal_name)}')
    # Calculate p values: Mann-Whitney and one-sample Wilcoxon signed-rank
nn_df_norm_ = nn_df_norm[cols_to_norm]
results = pd.DataFrame(columns = nn_df_norm_.columns,
index = nn_df_norm_.columns)
for (label1, column1), (label2, column2) in itertools.combinations(nn_df_norm_.items(), 2):
_ ,results.loc[label1, label2] = _ ,results.loc[label2, label1] = print_mannwhitneyu(column1, column2, label_A=label1, label_B=label2)
#print(tabulate(results, headers='keys', tablefmt='psql'))
print('\nWilcoxon signed rank test (against 1.):')
for col in cols_to_norm:
#_, onesample_p_data = ttest_1samp(pairw_df_norm[col], 1.)
try:
print_wilcoxon(nn_df_norm[col] - 1., label=col)
except ValueError:
print(f'Skipping column {col} (all zero?)')
# Print some more stats
print('Mean and SEM for NN results')
for col in cols_to_norm:
mean_col, sem_col = np.nanmean(nn_df_norm[col]), np.std(nn_df_norm[col]) / np.sqrt(len(nn_df_norm[col]))
print(f'{col:<30} | Mean ± SEM: {mean_col:.2f} ± {sem_col:.2f}')
return nn_df_norm, len(set(nn_df.animal_name)), len(set(nn_df.session_name))
if __name__ == "__main__":
grid_mice = [
'82913','88592', '87244', '60480',
'97046','89841'
]
ov_mice = [
'87187','88106','87245','90222',
'94557','89622'
]
all_animals = [
'90222','90218','90647',
'82913','88592','89622',
'87244','89841','60480',
'87245','87187','88106',
'94557','97045','97046',
]
animals = grid_mice
pairwise_dist_param = "A"
param_hash_id_cell = 'standard'
region = 'MEC'
# Cutoff number of cells
cutoff_n_starters = 15.
# For NN
nn_group_number = 5
###### PAIRWISE DISTANCES ####################################################################################
# print(f'Creating pairwise distance figure for {len(animals)} animal(s)')
# print(animals)
# print('\n')
# # Create column dictionary
# col_dict = {}
# col_dict['cols_to_norm'] = ['mean_pairw_dist_shuffall', 'mean_pairw_dist_shuffref', 'mean_pairw_dist']
# #mean_pairw_dist_shuffref, mean_pairw_dist_shuffall
# col_dict['cols_to_norm_label'] = ['All', 'Ref', 'Data']
# col_dict['norm_to'] = 'mean_pairw_dist_shuffall'
# col_dict['cols'] = 'animal_name'
# pairw_dist(animals,
# col_dict,
# param_hash_session='cf83e1357eefb8bd',
# param_hash_id_cell=param_hash_id_cell,
# region=region,
# pairwise_dist_param=pairwise_dist_param,
# cutoff_n_starters=cutoff_n_starters,
# )
####### NN DISTANCES ###########################################################################################
print('\n########################################################################################################')
print(f'\nCreating NN distance figure for {len(animals)} animal(s)')
print(animals)
print('\n')
# Create column dictionary
col_dict = {}
col_dict['cols_to_norm'] = ['mean_nn_shuff_all', 'mean_nn_shuff_ref', 'mean_nn']
col_dict['cols_to_norm_label'] = ['All', 'Ref', 'Data']
col_dict['norm_to'] = 'mean_nn_shuff_all'
col_dict['cols'] = 'animal_name'
group_nn_dist(animals,
col_dict,
param_hash_session='cf83e1357eefb8bd',
param_hash_id_cell=param_hash_id_cell,
region=region,
pairwise_dist_param=pairwise_dist_param,
cutoff_n_starters=cutoff_n_starters,
nn_group_number=nn_group_number,
plot=True)
print(figure_dir)
print('Success.')
|
[
"pandas.DataFrame",
"helpers_topography.notebooks.pairw_distances.plot_pairw_nn_summary",
"numpy.std",
"helpers_topography.notebooks.pairw_distances.norm_pairw_nn_df",
"os.path.dirname",
"dj_plotter.helpers.plotting_helpers.make_linear_colormap",
"numpy.nanmean",
"seaborn.set",
"general.print_wilcoxon",
"general.print_mannwhitneyu"
] |
[((352, 374), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (359, 374), True, 'import seaborn as sns\n'), ((2598, 2676), 'dj_plotter.helpers.plotting_helpers.make_linear_colormap', 'make_linear_colormap', (['pairw_df.animal_name'], {'categorical': '(True)', 'cmap': '"""cmr.guppy"""'}), "(pairw_df.animal_name, categorical=True, cmap='cmr.guppy')\n", (2618, 2676), False, 'from dj_plotter.helpers.plotting_helpers import make_linear_colormap\n'), ((3136, 3191), 'helpers_topography.notebooks.pairw_distances.norm_pairw_nn_df', 'norm_pairw_nn_df', (['pairw_df', 'cols_to_norm', 'cols', 'norm_to'], {}), '(pairw_df, cols_to_norm, cols, norm_to)\n', (3152, 3191), False, 'from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary\n'), ((3899, 3973), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'pairw_df_norm_.columns', 'index': 'pairw_df_norm_.columns'}), '(columns=pairw_df_norm_.columns, index=pairw_df_norm_.columns)\n', (3911, 3973), True, 'import pandas as pd\n'), ((6797, 6872), 'dj_plotter.helpers.plotting_helpers.make_linear_colormap', 'make_linear_colormap', (['nn_df.animal_name'], {'categorical': '(True)', 'cmap': '"""cmr.guppy"""'}), "(nn_df.animal_name, categorical=True, cmap='cmr.guppy')\n", (6817, 6872), False, 'from dj_plotter.helpers.plotting_helpers import make_linear_colormap\n'), ((7556, 7608), 'helpers_topography.notebooks.pairw_distances.norm_pairw_nn_df', 'norm_pairw_nn_df', (['nn_df', 'cols_to_norm', 'cols', 'norm_to'], {}), '(nn_df, cols_to_norm, cols, norm_to)\n', (7572, 7608), False, 'from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary\n'), ((8292, 8360), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nn_df_norm_.columns', 'index': 'nn_df_norm_.columns'}), '(columns=nn_df_norm_.columns, index=nn_df_norm_.columns)\n', (8304, 8360), True, 'import pandas as pd\n'), ((3281, 3417), 'helpers_topography.notebooks.pairw_distances.plot_pairw_nn_summary', 'plot_pairw_nn_summary', (['pairw_df_norm', 'cols_to_norm'], {'colors': 'colors', 'xlabels': 'cols_to_norm_label', 'save_path': 'figure_dir', 'label': '"""PairwD"""'}), "(pairw_df_norm, cols_to_norm, colors=colors, xlabels=\n cols_to_norm_label, save_path=figure_dir, label='PairwD')\n", (3302, 3417), False, 'from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary\n'), ((4179, 4247), 'general.print_mannwhitneyu', 'print_mannwhitneyu', (['column1', 'column2'], {'label_A': 'label1', 'label_B': 'label2'}), '(column1, column2, label_A=label1, label_B=label2)\n', (4197, 4247), False, 'from general import print_mannwhitneyu, print_wilcoxon\n'), ((7696, 7825), 'helpers_topography.notebooks.pairw_distances.plot_pairw_nn_summary', 'plot_pairw_nn_summary', (['nn_df_norm', 'cols_to_norm'], {'colors': 'colors', 'xlabels': 'cols_to_norm_label', 'save_path': 'figure_dir', 'label': '"""NN"""'}), "(nn_df_norm, cols_to_norm, colors=colors, xlabels=\n cols_to_norm_label, save_path=figure_dir, label='NN')\n", (7717, 7825), False, 'from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary\n'), ((8563, 8631), 'general.print_mannwhitneyu', 'print_mannwhitneyu', (['column1', 'column2'], {'label_A': 'label1', 'label_B': 'label2'}), '(column1, column2, label_A=label1, label_B=label2)\n', (8581, 8631), False, 'from general import print_mannwhitneyu, print_wilcoxon\n'), ((611, 636), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', 
(626, 636), False, 'import sys, os\n'), ((4421, 4472), 'general.print_wilcoxon', 'print_wilcoxon', (['(pairw_df_norm[col] - 1.0)'], {'label': 'col'}), '(pairw_df_norm[col] - 1.0, label=col)\n', (4435, 4472), False, 'from general import print_mannwhitneyu, print_wilcoxon\n'), ((4689, 4719), 'numpy.nanmean', 'np.nanmean', (['pairw_df_norm[col]'], {}), '(pairw_df_norm[col])\n', (4699, 4719), True, 'import numpy as np\n'), ((8872, 8920), 'general.print_wilcoxon', 'print_wilcoxon', (['(nn_df_norm[col] - 1.0)'], {'label': 'col'}), '(nn_df_norm[col] - 1.0, label=col)\n', (8886, 8920), False, 'from general import print_mannwhitneyu, print_wilcoxon\n'), ((9138, 9165), 'numpy.nanmean', 'np.nanmean', (['nn_df_norm[col]'], {}), '(nn_df_norm[col])\n', (9148, 9165), True, 'import numpy as np\n'), ((4721, 4747), 'numpy.std', 'np.std', (['pairw_df_norm[col]'], {}), '(pairw_df_norm[col])\n', (4727, 4747), True, 'import numpy as np\n'), ((9167, 9190), 'numpy.std', 'np.std', (['nn_df_norm[col]'], {}), '(nn_df_norm[col])\n', (9173, 9190), True, 'import numpy as np\n')]
|
import base64
import json
import os
import zlib
from urllib.request import urlretrieve
import boto3
import mrcnn.model as modellib
import numpy as np
import pandas as pd
import skimage.io
from mrcnn import utils
from mrcnn.config import Config
from superai.meta_ai import BaseModel
s3 = boto3.client("s3")
_MODEL_PATH = os.path.join("sagify_base/local_test/test_dir/", "model")
# _MODEL_PATH = "s3://canotic-ai/model/mask-rcnn-model.tar.gz"
# _MODEL_PATH = 'Mask_RCNN' # Path for models
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = [
"BG",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
class ModelService(BaseModel):
def __init__(self):
super().__init__()
self.model = None
self.initialized = False
def initialize(self, context):
class InferenceConfig(Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME = "inference"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
config = InferenceConfig()
config.display()
print("Initialised class...")
self.initialized = True
properties = context.system_properties
_MODEL_PATH = properties.get("model_dir")
if self.model is None:
print("Model Content : ", os.listdir(_MODEL_PATH))
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(_MODEL_PATH, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
try:
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=os.path.join("logs"), config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
self.model = model
except RuntimeError:
raise MemoryError
return self.model
def predict_from_image(self, path, class_id=3):
image = skimage.io.imread(path)
# Run detection
clf = self.model
print("model retrieved.")
results = clf.detect([image], verbose=0)
print("detection on image done.")
# Visualize results
r = results[0]
# get indices corresponding to unwanted classes
indices_to_remove = np.where(r["class_ids"] != class_id)
# remove corresponding entries from `r`
new_masks = np.delete(r["masks"], indices_to_remove, axis=2)
scores = np.delete(r["scores"], indices_to_remove, axis=0)
aggregate_mask = np.logical_not(new_masks.any(axis=2))
class_ids = np.delete(r["class_ids"], indices_to_remove, axis=0)
return {
"new_masks": new_masks,
"aggregate_mask": aggregate_mask,
"scores": scores,
"class_ids": class_ids,
}
def predict_intermediate(self, input):
image_urls = input["image_url"]
predictions = []
for i, url in enumerate(image_urls):
image_path = f"image_{i}.jpg"
# download image
urlretrieve(url, image_path)
print("image retrieved")
image_path = os.getcwd() + "/" + image_path
prediction = self.predict_from_image(image_path)
print("predict from image done.")
new_masks = prediction["new_masks"]
aggregate_mask = prediction["aggregate_mask"]
n_masks = new_masks.shape[-1]
pred = []
for inst in range(n_masks):
pred.append(self._handle_mask(prediction, inst))
print(f"processing mask number {inst} done")
# num_workers = mp.cpu_count() // 4
# with Pool(num_workers) as pool:
# result = [pool.apply_async(_handle_mask, (prediction, i),) for i in range(n_masks)]
# pred = [res.get(timeout=15) for res in result]
print("everything done, uploading data.")
# data_uri = save_and_upload(aggregate_mask)
# pred.append({
# "category": "Background",
# "maskUrl": data_uri,
# "instance": 0
# })
predictions.append(pred)
return predictions
def predict(self, json_input):
"""
Prediction given the request input
:param json_input: [dict], request input
:return: [dict], prediction
"""
# transform json_input and assign the transformed value to model_input
print("json input", json_input)
json_input = json_input[0]["body"]
json_input = json_input.decode("utf-8")
print("Fixed json input", json_input)
try:
model_input = pd.read_json(json.loads(json_input))
except ValueError:
model_input = pd.read_json(json_input)
predictions = self.predict_intermediate(model_input)
print("Predictions: ", predictions)
# TODO If we have more than 1 model, then create additional classes similar to ModelService
# TODO where each of one will load one of your models
# # transform predictions to a list and assign and return it
# prediction_list = []
# output_keys = set([key.split("_")[0] for key in predictions.keys()])
# for index, row in predictions.iterrows():
# out_row = {key: {} for key in output_keys}
# for i, j in row.items():
# name, p_type = i.split("_")
# if p_type == "predictions":
# p_type = "prediction"
# if p_type == "probabilities":
# p_type = "probability"
# out_row[name][p_type] = j
# prediction_list.append(out_row)
return predictions
def train(self, input_data_path, model_save_path, hyperparams_path=None):
pass
@classmethod
def load_weights(cls, weights_path):
pass
@staticmethod
def get_encoding_string(mask):
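        # The boolean mask buffer is zlib-compressed and base64-encoded so it can be
        # returned as a compact string ("maskData") in the prediction payload.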
data = zlib.compress(mask)
encoded_string = base64.b64encode(data).decode("utf-8")
return encoded_string
def _handle_mask(self, prediction, inst):
new_masks = prediction["new_masks"]
scores = prediction["scores"]
class_ids = prediction["class_ids"]
print(f"processing mask number {inst}")
mask = new_masks[..., inst]
mask_data = self.get_encoding_string(mask)
class_id = class_ids[inst]
w, h = mask.shape[:2]
print(f"processing mask number {inst} done")
return {
"category": class_names[class_id],
"class_id": int(class_id),
"maskData": mask_data,
"instance": inst,
"score": float(scores[inst]),
"width": w,
"height": h,
}
|
[
"os.listdir",
"mrcnn.utils.download_trained_weights",
"json.loads",
"boto3.client",
"os.getcwd",
"os.path.exists",
"pandas.read_json",
"zlib.compress",
"urllib.request.urlretrieve",
"numpy.where",
"base64.b64encode",
"os.path.join",
"numpy.delete"
] |
[((290, 308), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (302, 308), False, 'import boto3\n'), ((324, 381), 'os.path.join', 'os.path.join', (['"""sagify_base/local_test/test_dir/"""', '"""model"""'], {}), "('sagify_base/local_test/test_dir/', 'model')\n", (336, 381), False, 'import os\n'), ((3886, 3922), 'numpy.where', 'np.where', (["(r['class_ids'] != class_id)"], {}), "(r['class_ids'] != class_id)\n", (3894, 3922), True, 'import numpy as np\n'), ((3992, 4040), 'numpy.delete', 'np.delete', (["r['masks']", 'indices_to_remove'], {'axis': '(2)'}), "(r['masks'], indices_to_remove, axis=2)\n", (4001, 4040), True, 'import numpy as np\n'), ((4058, 4107), 'numpy.delete', 'np.delete', (["r['scores']", 'indices_to_remove'], {'axis': '(0)'}), "(r['scores'], indices_to_remove, axis=0)\n", (4067, 4107), True, 'import numpy as np\n'), ((4191, 4243), 'numpy.delete', 'np.delete', (["r['class_ids']", 'indices_to_remove'], {'axis': '(0)'}), "(r['class_ids'], indices_to_remove, axis=0)\n", (4200, 4243), True, 'import numpy as np\n'), ((7594, 7613), 'zlib.compress', 'zlib.compress', (['mask'], {}), '(mask)\n', (7607, 7613), False, 'import zlib\n'), ((2816, 2862), 'os.path.join', 'os.path.join', (['_MODEL_PATH', '"""mask_rcnn_coco.h5"""'], {}), "(_MODEL_PATH, 'mask_rcnn_coco.h5')\n", (2828, 2862), False, 'import os\n'), ((4657, 4685), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'image_path'], {}), '(url, image_path)\n', (4668, 4685), False, 'from urllib.request import urlretrieve\n'), ((2712, 2735), 'os.listdir', 'os.listdir', (['_MODEL_PATH'], {}), '(_MODEL_PATH)\n', (2722, 2735), False, 'import os\n'), ((6316, 6338), 'json.loads', 'json.loads', (['json_input'], {}), '(json_input)\n', (6326, 6338), False, 'import json\n'), ((6393, 6417), 'pandas.read_json', 'pd.read_json', (['json_input'], {}), '(json_input)\n', (6405, 6417), True, 'import pandas as pd\n'), ((7639, 7661), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (7655, 7661), False, 'import base64\n'), ((2971, 3002), 'os.path.exists', 'os.path.exists', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (2985, 3002), False, 'import os\n'), ((3024, 3071), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (3054, 3071), False, 'from mrcnn import utils\n'), ((4748, 4759), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4757, 4759), False, 'import os\n'), ((3199, 3219), 'os.path.join', 'os.path.join', (['"""logs"""'], {}), "('logs')\n", (3211, 3219), False, 'import os\n')]
|
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
import numpy as np
import cv2
# load the trained image-captioning model
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from pickle import dump, load
from tensorflow.keras.preprocessing.image import load_img, img_to_array
base_model = InceptionV3(weights = 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
vgg_model = Model(base_model.input, base_model.layers[-2].output)
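# Feature extractor: dropping the final softmax layer leaves the 2048-dimensional
# pooled InceptionV3 features that are used to encode each uploaded image below.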
def preprocess_img(img_path):
    # InceptionV3 expects images of size 299*299
img = load_img(img_path, target_size = (299, 299))
x = img_to_array(img)
# Add one more dimension
x = np.expand_dims(x, axis = 0)
x = preprocess_input(x)
return x
def encode(image):
image = preprocess_img(image)
vec = vgg_model.predict(image)
vec = np.reshape(vec, (vec.shape[1]))
return vec
pickle_in = open("wordtoix.pkl", "rb")
wordtoix = load(pickle_in)
pickle_in = open("ixtoword.pkl", "rb")
ixtoword = load(pickle_in)
max_length = 74
def greedy_search(pic):
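    # Greedy decoding: at each step feed the image features plus the caption so far,
    # append the single most probable next word (argmax), and stop at 'endseq' or
    # after max_length words.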
start = 'startseq'
for i in range(max_length):
seq = [wordtoix[word] for word in start.split() if word in wordtoix]
seq = pad_sequences([seq], maxlen = max_length)
yhat = model.predict([pic, seq])
yhat = np.argmax(yhat)
word = ixtoword[yhat]
start += ' ' + word
if word == 'endseq':
break
final = start.split()
final = final[1:-1]
final = ' '.join(final)
return final
def beam_search(image, beam_index = 3):
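    # Beam-search decoding: keep the `beam_index` best partial captions, extend each
    # with its top `beam_index` candidate words, re-rank by accumulated probability,
    # and finally keep the best sequence up to its 'endseq' token.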
start = [wordtoix["startseq"]]
# start_word[0][0] = index of the starting word
# start_word[0][1] = probability of the word predicted
start_word = [[start, 0.0]]
while len(start_word[0][0]) < max_length:
temp = []
for s in start_word:
par_caps = pad_sequences([s[0]], maxlen=max_length)
e = image
preds = model.predict([e, np.array(par_caps)])
# Getting the top <beam_index>(n) predictions
word_preds = np.argsort(preds[0])[-beam_index:]
# creating a new list so as to put them via the model again
for w in word_preds:
next_cap, prob = s[0][:], s[1]
next_cap.append(w)
prob += preds[0][w]
temp.append([next_cap, prob])
start_word = temp
# Sorting according to the probabilities
start_word = sorted(start_word, reverse=False, key=lambda l: l[1])
# Getting the top words
start_word = start_word[-beam_index:]
start_word = start_word[-1][0]
intermediate_caption = [ixtoword[i] for i in start_word]
final_caption = []
for i in intermediate_caption:
if i != 'endseq':
final_caption.append(i)
else:
break
final_caption = ' '.join(final_caption[1:])
return final_caption
model = load_model('new-model-1.h5')
#initialise GUI
top=tk.Tk()
top.geometry('800x600')
top.title('Image Caption Generator')
top.configure(background='#CDCDCD')
label2=Label(top,background='#CDCDCD', font=('arial',15))
label1=Label(top,background='#CDCDCD', font=('arial',15))
label=Label(top,background='#CDCDCD', font=('arial',15))
sign_image = Label(top)
def classify(file_path):
global label_packed
enc = encode(file_path)
image = enc.reshape(1, 2048)
pred = greedy_search(image)
print(pred)
label.configure(foreground='#000', text= 'Greedy: ' + pred)
label.pack(side=BOTTOM,expand=True)
beam_3 = beam_search(image)
print(beam_3)
label1.configure(foreground='#011638', text = 'Beam_3: ' + beam_3)
label1.pack(side = BOTTOM, expand = True)
beam_5 = beam_search(image, 5)
print(beam_5)
label2.configure(foreground='#228B22', text = 'Beam_5: ' + beam_5)
label2.pack(side = BOTTOM, expand = True)
def show_classify_button(file_path):
classify_b=Button(top,text="Generate",command=lambda: classify(file_path),padx=10,pady=5)
classify_b.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
classify_b.place(relx=0.79,rely=0.46)
def upload_image():
try:
file_path=filedialog.askopenfilename()
uploaded=Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()/2.25),(top.winfo_height()/2.25)))
im=ImageTk.PhotoImage(uploaded)
sign_image.configure(image=im)
sign_image.image=im
label.configure(text='')
label1.configure(text='')
label2.configure(text='')
show_classify_button(file_path)
except:
pass
upload=Button(top,text="Upload an image",command=upload_image,padx=10,pady=5)
upload.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
upload.pack(side=BOTTOM,pady=50)
sign_image.pack(side=BOTTOM,expand=True)
#label2.pack(side = BOTTOM, expand = True)
heading = Label(top, text="Image Caption Generator",pady=20, font=('arial',22,'bold'))
heading.configure(background='#CDCDED',foreground='#FF6348')
heading.pack()
top.mainloop()
|
[
"PIL.ImageTk.PhotoImage",
"tensorflow.keras.models.load_model",
"tensorflow.keras.applications.inception_v3.preprocess_input",
"numpy.argmax",
"tensorflow.keras.applications.inception_v3.InceptionV3",
"tensorflow.keras.preprocessing.image.img_to_array",
"numpy.expand_dims",
"tkinter.filedialog.askopenfilename",
"PIL.Image.open",
"tensorflow.keras.models.Model",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"pickle.load",
"numpy.argsort",
"numpy.reshape",
"numpy.array",
"tkinter.Tk"
] |
[((533, 606), 'tensorflow.keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': '"""inception_v3_weights_tf_dim_ordering_tf_kernels.h5"""'}), "(weights='inception_v3_weights_tf_dim_ordering_tf_kernels.h5')\n", (544, 606), False, 'from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((621, 674), 'tensorflow.keras.models.Model', 'Model', (['base_model.input', 'base_model.layers[-2].output'], {}), '(base_model.input, base_model.layers[-2].output)\n', (626, 674), False, 'from tensorflow.keras.models import Model\n'), ((1132, 1147), 'pickle.load', 'load', (['pickle_in'], {}), '(pickle_in)\n', (1136, 1147), False, 'from pickle import dump, load\n'), ((1198, 1213), 'pickle.load', 'load', (['pickle_in'], {}), '(pickle_in)\n', (1202, 1213), False, 'from pickle import dump, load\n'), ((3191, 3219), 'tensorflow.keras.models.load_model', 'load_model', (['"""new-model-1.h5"""'], {}), "('new-model-1.h5')\n", (3201, 3219), False, 'from tensorflow.keras.models import load_model\n'), ((3241, 3248), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (3246, 3248), True, 'import tkinter as tk\n'), ((757, 799), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['img_path'], {'target_size': '(299, 299)'}), '(img_path, target_size=(299, 299))\n', (765, 799), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((810, 827), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (822, 827), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((865, 890), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (879, 890), True, 'import numpy as np\n'), ((901, 920), 'tensorflow.keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (917, 920), False, 'from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((1033, 1062), 'numpy.reshape', 'np.reshape', (['vec', 'vec.shape[1]'], {}), '(vec, vec.shape[1])\n', (1043, 1062), True, 'import numpy as np\n'), ((1401, 1440), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[seq]'], {'maxlen': 'max_length'}), '([seq], maxlen=max_length)\n', (1414, 1440), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1499, 1514), 'numpy.argmax', 'np.argmax', (['yhat'], {}), '(yhat)\n', (1508, 1514), True, 'import numpy as np\n'), ((4455, 4483), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (4481, 4483), False, 'from tkinter import filedialog\n'), ((4501, 4522), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (4511, 4522), False, 'from PIL import ImageTk, Image\n'), ((4615, 4643), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['uploaded'], {}), '(uploaded)\n', (4633, 4643), False, 'from PIL import ImageTk, Image\n'), ((2060, 2100), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[s[0]]'], {'maxlen': 'max_length'}), '([s[0]], maxlen=max_length)\n', (2073, 2100), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2278, 2298), 'numpy.argsort', 'np.argsort', (['preds[0]'], {}), '(preds[0])\n', (2288, 2298), True, 'import numpy as np\n'), ((2161, 2179), 'numpy.array', 'np.array', (['par_caps'], {}), '(par_caps)\n', (2169, 2179), True, 'import numpy as np\n')]
|
import pandas as pd
import argparse
import os
import mdtraj
import numpy as np
parser = argparse.ArgumentParser(description='Script to generate trajectories containing only top scoring frames as scored by RWPlus. These top scoring trajectories can then be averaged with Gromacs to produce an averaged structure.')
parser.add_argument('-p','--path',help='Path to directory containing all refinement trajectories and RWPlus score files.',required=True,dest='path')
parser.add_argument('--percent',help='Percent of top scoring structures to average over. Default: 15,5,40,1',nargs='*',default=[15,5,40,1],type=int)
args = parser.parse_args()
dir_path = args.path
percent = args.percent
all_trajs = dict()
rw_df = pd.DataFrame(columns = ['traj_idx','frame_idx','score'])
for file in os.listdir(dir_path):
if file.endswith('.dcd') and file.startswith('refinement_'):
print(f'Reading {file}')
traj_idx = int(file[file.rfind('_')+1:file.rfind('.')])
curr_traj = mdtraj.load(os.path.join(dir_path,file),top=os.path.join(dir_path,f'minimized_{traj_idx}.pdb'))
curr_traj.remove_solvent(inplace=True)
all_trajs[traj_idx] = curr_traj
elif file.endswith('.txt') and file.startswith('scorelist_'):
print(f'Reading {file}')
traj_idx = int(file[file.rfind('_')+1:file.rfind('.')])
with open(os.path.join(dir_path,file),'r') as f:
scores = f.readlines()
scores = np.array(scores,dtype=float)
num_frames = len(scores)
df = pd.DataFrame(list(zip([traj_idx]*num_frames,np.arange(num_frames),scores)),columns=['traj_idx','frame_idx','score'])
rw_df = rw_df.append(df)
rw_df.sort_values(by=['score'],inplace=True)
num_frames = len(rw_df)
for perc in percent:
num_top = round(perc*.01*num_frames)
print(perc)
print(num_top)
best_frames = rw_df.head(num_top)
for idx in all_trajs.keys():
traj_best = best_frames[best_frames['traj_idx'] == idx]
        try:
            newtraj = newtraj.join([all_trajs[idx][list(traj_best['frame_idx'])]])
        except NameError:
            # first trajectory for this percentage: start the joined trajectory
            newtraj = all_trajs[idx][list(traj_best['frame_idx'])]
print(len(newtraj))
print(f'Saving top {perc}% of frames to top_{perc}_percent.xtc')
newtraj.save(os.path.join(dir_path,f'top_{perc}_percent.xtc'),force_overwrite=True)
del newtraj
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"numpy.array",
"numpy.arange",
"os.path.join",
"os.listdir"
] |
[((90, 325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script to generate trajectories containing only top scoring frames as scored by RWPlus. These top scoring trajectories can then be averaged with Gromacs to produce an averaged structure."""'}), "(description=\n 'Script to generate trajectories containing only top scoring frames as scored by RWPlus. These top scoring trajectories can then be averaged with Gromacs to produce an averaged structure.'\n )\n", (113, 325), False, 'import argparse\n'), ((714, 770), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['traj_idx', 'frame_idx', 'score']"}), "(columns=['traj_idx', 'frame_idx', 'score'])\n", (726, 770), True, 'import pandas as pd\n'), ((783, 803), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (793, 803), False, 'import os\n'), ((2263, 2312), 'os.path.join', 'os.path.join', (['dir_path', 'f"""top_{perc}_percent.xtc"""'], {}), "(dir_path, f'top_{perc}_percent.xtc')\n", (2275, 2312), False, 'import os\n'), ((999, 1027), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (1011, 1027), False, 'import os\n'), ((1442, 1471), 'numpy.array', 'np.array', (['scores'], {'dtype': 'float'}), '(scores, dtype=float)\n', (1450, 1471), True, 'import numpy as np\n'), ((1031, 1082), 'os.path.join', 'os.path.join', (['dir_path', 'f"""minimized_{traj_idx}.pdb"""'], {}), "(dir_path, f'minimized_{traj_idx}.pdb')\n", (1043, 1082), False, 'import os\n'), ((1351, 1379), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (1363, 1379), False, 'import os\n'), ((1561, 1582), 'numpy.arange', 'np.arange', (['num_frames'], {}), '(num_frames)\n', (1570, 1582), True, 'import numpy as np\n')]
|
"""
{This script reads in the raw chain and plots times series for all parameters
in order to identify the burn-in}
"""
# Libs
from cosmo_utils.utils import work_paths as cwpaths
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib
import pandas as pd
import numpy as np
import math
import os
__author__ = '{<NAME>}'
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']},size=20)
rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
def find_nearest(array, value):
"""Finds the element in array that is closest to the value
Args:
array (numpy.array): Array of values
value (numpy.float): Value to find closest match to
Returns:
numpy.float: Closest match found in array
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
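# Example: find_nearest(np.asarray([0.1, 0.5, 0.9]), 0.42) returns 0.5.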
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_interim = dict_of_paths['int_dir']
path_to_figures = dict_of_paths['plot_dir']
survey = 'eco'
mf_type = 'smf'
quenching = 'hybrid'
nwalkers = 260
if mf_type == 'smf':
path_to_proc = path_to_proc + 'smhm_colour_run27/'
else:
path_to_proc = path_to_proc + 'bmhm_run3/'
chain_fname = path_to_proc + 'mcmc_{0}_colour_raw.txt'.format(survey)
if quenching == 'hybrid':
emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
names=['Mstar_q','Mhalo_q','mu','nu'],
header=None)
emcee_table = emcee_table[emcee_table.Mstar_q.values != '#']
emcee_table.Mstar_q = emcee_table.Mstar_q.astype(np.float64)
emcee_table.Mhalo_q = emcee_table.Mhalo_q.astype(np.float64)
emcee_table.mu = emcee_table.mu.astype(np.float64)
emcee_table.nu = emcee_table.nu.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
nu_val = emcee_table.values[idx+1][0]
row[3] = nu_val
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
# emcee_table.nu = np.log10(emcee_table.nu)
elif quenching == 'halo':
emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
names=['Mh_qc','Mh_qs','mu_c','mu_s'],
header=None)
emcee_table = emcee_table[emcee_table.Mh_qc.values != '#']
emcee_table.Mh_qc = emcee_table.Mh_qc.astype(np.float64)
emcee_table.Mh_qs = emcee_table.Mh_qs.astype(np.float64)
emcee_table.mu_c = emcee_table.mu_c.astype(np.float64)
emcee_table.mu_s = emcee_table.mu_s.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
mu_s_val = emcee_table.values[idx+1][0]
row[3] = mu_s_val
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
chi2_fname = path_to_proc + '{0}_colour_chi2.txt'.format(survey)
chi2_df = pd.read_csv(chi2_fname,header=None,names=['chisquared'])
chi2 = np.log10(chi2_df.chisquared.values)
emcee_table['chi2'] = chi2
# Each chunk is now a step and within each chunk, each row is a walker
# Different from what it used to be where each chunk was a walker and
# within each chunk, each row was a step
walker_id_arr = np.zeros(len(emcee_table))
iteration_id_arr = np.zeros(len(emcee_table))
counter_wid = 0
counter_stepid = 0
for idx,row in emcee_table.iterrows():
counter_wid += 1
if idx % nwalkers == 0:
counter_stepid += 1
counter_wid = 1
walker_id_arr[idx] = counter_wid
iteration_id_arr[idx] = counter_stepid
id_data = {'walker_id': walker_id_arr, 'iteration_id': iteration_id_arr}
id_df = pd.DataFrame(id_data, index=emcee_table.index)
emcee_table = emcee_table.assign(**id_df)
grps = emcee_table.groupby('iteration_id')
grp_keys = grps.groups.keys()
if quenching == 'hybrid':
Mstar_q = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
Mhalo_q = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
mu = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
nu = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
chi2 = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
for idx,key in enumerate(grp_keys):
group = grps.get_group(key)
Mstar_q_mean = np.mean(group.Mstar_q.values)
Mstar_q_std = np.std(group.Mstar_q.values)
Mstar_q[0][idx] = Mstar_q_mean
Mstar_q[1][idx] = Mstar_q_std
Mhalo_q_mean = np.mean(group.Mhalo_q.values)
Mhalo_q_std = np.std(group.Mhalo_q.values)
Mhalo_q[0][idx] = Mhalo_q_mean
Mhalo_q[1][idx] = Mhalo_q_std
mu_mean = np.mean(group.mu.values)
mu_std = np.std(group.mu.values)
mu[0][idx] = mu_mean
mu[1][idx] = mu_std
nu_mean = np.mean(group.nu.values)
nu_std = np.std(group.nu.values)
nu[0][idx] = nu_mean
nu[1][idx] = nu_std
chi2_mean = np.mean(group.chi2.values)
chi2_std = np.std(group.chi2.values)
chi2[0][idx] = chi2_mean
chi2[1][idx] = chi2_std
zumandelbaum_param_vals = [10.5, 13.76, 0.69, 0.15]
grp_keys = list(grp_keys)
fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
figsize=(10,10))
ax1.plot(grp_keys, Mstar_q[0],c='#941266',ls='--', marker='o')
ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
ax2.plot(grp_keys, Mhalo_q[0], c='#941266',ls='--', marker='o')
ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
ax3.plot(grp_keys, mu[0], c='#941266',ls='--', marker='o')
ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
ax4.plot(grp_keys, nu[0], c='#941266',ls='--', marker='o')
ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
ax1.fill_between(grp_keys, Mstar_q[0]-Mstar_q[1], Mstar_q[0]+Mstar_q[1],
alpha=0.3, color='#941266')
ax2.fill_between(grp_keys, Mhalo_q[0]-Mhalo_q[1], Mhalo_q[0]+Mhalo_q[1], \
alpha=0.3, color='#941266')
ax3.fill_between(grp_keys, mu[0]-mu[1], mu[0]+mu[1], \
alpha=0.3, color='#941266')
ax4.fill_between(grp_keys, nu[0]-nu[1], nu[0]+nu[1], \
alpha=0.3, color='#941266')
ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
alpha=0.3, color='#941266')
ax1.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{*}}$")
ax2.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{h}}$")
ax3.set_ylabel(r"$\boldsymbol{\mu}$")
# ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
ax4.set_ylabel(r"$\boldsymbol{\nu}$")
ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
# ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
# ax1.set_yscale('log')
# ax2.set_yscale('log')
ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax4.annotate(0.15, (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
plt.xlabel(r"$\mathbf{iteration\ number}$")
plt.show()
elif quenching == 'halo':
Mh_qc = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
Mh_qs = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
mu_c = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
mu_s = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
chi2 = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
for idx,key in enumerate(grp_keys):
group = grps.get_group(key)
Mh_qc_mean = np.mean(group.Mh_qc.values)
Mh_qc_std = np.std(group.Mh_qc.values)
Mh_qc[0][idx] = Mh_qc_mean
Mh_qc[1][idx] = Mh_qc_std
Mh_qs_mean = np.mean(group.Mh_qs.values)
Mh_qs_std = np.std(group.Mh_qs.values)
Mh_qs[0][idx] = Mh_qs_mean
Mh_qs[1][idx] = Mh_qs_std
mu_c_mean = np.mean(group.mu_c.values)
mu_c_std = np.std(group.mu_c.values)
mu_c[0][idx] = mu_c_mean
mu_c[1][idx] = mu_c_std
mu_s_mean = np.mean(group.mu_s.values)
mu_s_std = np.std(group.mu_s.values)
mu_s[0][idx] = mu_s_mean
mu_s[1][idx] = mu_s_std
chi2_mean = np.mean(group.chi2.values)
chi2_std = np.std(group.chi2.values)
chi2[0][idx] = chi2_mean
chi2[1][idx] = chi2_std
zumandelbaum_param_vals = [12.2, 12.17, 0.38, 0.15]
grp_keys = list(grp_keys)
fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
figsize=(10,10))
ax1.plot(grp_keys, Mh_qc[0],c='#941266',ls='--', marker='o')
ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
ax2.plot(grp_keys, Mh_qs[0], c='#941266',ls='--', marker='o')
ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
ax3.plot(grp_keys, mu_c[0], c='#941266',ls='--', marker='o')
ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
ax4.plot(grp_keys, mu_s[0], c='#941266',ls='--', marker='o')
ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
ax1.fill_between(grp_keys, Mh_qc[0]-Mh_qc[1], Mh_qc[0]+Mh_qc[1],
alpha=0.3, color='#941266')
ax2.fill_between(grp_keys, Mh_qs[0]-Mh_qs[1], Mh_qs[0]+Mh_qs[1], \
alpha=0.3, color='#941266')
ax3.fill_between(grp_keys, mu_c[0]-mu_c[1], mu_c[0]+mu_c[1], \
alpha=0.3, color='#941266')
ax4.fill_between(grp_keys, mu_s[0]-mu_s[1], mu_s[0]+mu_s[1], \
alpha=0.3, color='#941266')
ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
alpha=0.3, color='#941266')
ax1.set_ylabel(r"$\mathbf{log_{10}\ Mh_{qc}}$")
ax2.set_ylabel(r"$\mathbf{log_{10}\ Mh_{qs}}$")
ax3.set_ylabel(r"$\boldsymbol{\ mu_{c}}$")
# ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
ax4.set_ylabel(r"$\boldsymbol{\ mu_{s}}$")
ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
# ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
# ax1.set_yscale('log')
# ax2.set_yscale('log')
ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax4.annotate(0.15, (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
plt.xlabel(r"$\mathbf{iteration\ number}$")
plt.show()
######################## Calculate acceptance fraction ########################
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_proc = dict_of_paths['proc_dir']
if mf_type == 'smf':
path_to_proc = path_to_proc + 'smhm_colour_run21/'
else:
path_to_proc = path_to_proc + 'bmhm_run3/'
chain_fname = path_to_proc + 'mcmc_{0}_colour_raw.txt'.format(survey)
if quenching == 'hybrid':
emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
names=['Mstar_q','Mhalo_q','mu','nu'],
header=None)
emcee_table = emcee_table[emcee_table.Mstar_q.values != '#']
emcee_table.Mstar_q = emcee_table.Mstar_q.astype(np.float64)
emcee_table.Mhalo_q = emcee_table.Mhalo_q.astype(np.float64)
emcee_table.mu = emcee_table.mu.astype(np.float64)
emcee_table.nu = emcee_table.nu.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
nu_val = emcee_table.values[idx+1][0]
row[3] = nu_val
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
# emcee_table.nu = np.log10(emcee_table.nu)
elif quenching == 'halo':
emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
names=['Mh_qc','Mh_qs','mu_c','mu_s'],
header=None)
emcee_table = emcee_table[emcee_table.Mh_qc.values != '#']
emcee_table.Mh_qc = emcee_table.Mh_qc.astype(np.float64)
emcee_table.Mh_qs = emcee_table.Mh_qs.astype(np.float64)
emcee_table.mu_c = emcee_table.mu_c.astype(np.float64)
emcee_table.mu_s = emcee_table.mu_s.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
mu_s_val = emcee_table.values[idx+1][0]
row[3] = mu_s_val
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
# NOTE: these column names assume quenching == 'hybrid'
num_unique_rows = emcee_table[['Mstar_q','Mhalo_q','mu','nu']].drop_duplicates().shape[0]
num_rows = len(emcee_table)
acceptance_fraction = num_unique_rows / num_rows
print("Acceptance fraction: {0}%".format(np.round(acceptance_fraction,2)*100))
# For behroozi chains
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_proc = dict_of_paths['proc_dir']
chain_fname = path_to_proc + 'smhm_run6/mcmc_{0}_raw.txt'.\
format(survey)
emcee_table = pd.read_csv(chain_fname,
names=['mhalo_c','mstellar_c','lowmass_slope','highmass_slope',
'scatter'],header=None, delim_whitespace=True)
emcee_table = emcee_table[emcee_table.mhalo_c.values != '#']
emcee_table.mhalo_c = emcee_table.mhalo_c.astype(np.float64)
emcee_table.mstellar_c = emcee_table.mstellar_c.astype(np.float64)
emcee_table.lowmass_slope = emcee_table.lowmass_slope.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[4] == True and np.isnan(row)[3] == False:
scatter_val = emcee_table.values[idx+1][0]
row[4] = scatter_val
emcee_table = emcee_table.dropna(axis='index', how='any').reset_index(drop=True)
num_unique_rows = emcee_table[['mhalo_c','mstellar_c','lowmass_slope',\
'highmass_slope']].drop_duplicates().shape[0]
num_rows = len(emcee_table)
acceptance_fraction = num_unique_rows / num_rows
print("Acceptance fraction: {0}%".format(np.round(acceptance_fraction,2)*100))
################################################################################
def hybrid_quenching_model(theta, gals_df, mock, randint=None):
"""
Apply hybrid quenching model from Zu and Mandelbaum 2015
Parameters
----------
    theta: array
        Quenching model parameters [Mstar_q, Mhalo_q, mu, nu] (log10 masses)
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        'vishnu' for the Vishnu mock, anything else for a survey mock
    randint: int, optional
        Mock number used to pick the stellar-mass column for the Vishnu mock
Returns
---------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
"""
# parameter values from Table 1 of Zu and Mandelbaum 2015 "prior case"
    Mstar_q = theta[0] # log10(Msun/h)
    Mh_q = theta[1] # log10(Msun/h)
mu = theta[2]
nu = theta[3]
cen_hosthalo_mass_arr, sat_hosthalo_mass_arr = get_host_halo_mock(gals_df, \
mock)
cen_stellar_mass_arr, sat_stellar_mass_arr = get_stellar_mock(gals_df, mock, \
randint)
f_red_cen = 1 - np.exp(-((cen_stellar_mass_arr/(10**Mstar_q))**mu))
g_Mstar = np.exp(-((sat_stellar_mass_arr/(10**Mstar_q))**mu))
h_Mh = np.exp(-((sat_hosthalo_mass_arr/(10**Mh_q))**nu))
f_red_sat = 1 - (g_Mstar * h_Mh)
return f_red_cen, f_red_sat
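# The model implemented above follows Zu & Mandelbaum (2015):
#   f_red,cen = 1 - exp[-(M_*/10**Mstar_q)**mu]
#   f_red,sat = 1 - exp[-(M_*/10**Mstar_q)**mu] * exp[-(M_h/10**Mh_q)**nu]
# Illustrative call with the Table 1 "prior case" values (the mock dataframe is
# hypothetical here and must carry the columns read by get_host_halo_mock and
# get_stellar_mock, e.g. 'cs_flag', 'loghalom' and 'logmstar' for a survey mock):
# >>> f_red_c, f_red_s = hybrid_quenching_model([10.5, 13.76, 0.69, 0.15],
# ...     mock_df, 'nonvishnu')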
def assign_colour_label_mock(f_red_cen, f_red_sat, gals_df, drop_fred=False):
"""
Assign colour label to mock catalog
Parameters
----------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
gals_df: pandas Dataframe
Mock catalog
drop_fred: boolean
Whether or not to keep red fraction column after colour has been
assigned
Returns
---------
df: pandas Dataframe
Dataframe with colour label and random number assigned as
new columns
"""
# Copy of dataframe
df = gals_df.copy()
# Saving labels
color_label_arr = [[] for x in range(len(df))]
rng_arr = [[] for x in range(len(df))]
# Adding columns for f_red to df
df.loc[:, 'f_red'] = np.zeros(len(df))
df.loc[df['cs_flag'] == 1, 'f_red'] = f_red_cen
df.loc[df['cs_flag'] == 0, 'f_red'] = f_red_sat
# Converting to array
f_red_arr = df['f_red'].values
# Looping over galaxies
for ii, cs_ii in enumerate(df['cs_flag']):
# Draw a random number
rng = np.random.uniform()
# Comparing against f_red
if (rng >= f_red_arr[ii]):
color_label = 'B'
else:
color_label = 'R'
# Saving to list
color_label_arr[ii] = color_label
rng_arr[ii] = rng
## Assigning to DataFrame
df.loc[:, 'colour_label'] = color_label_arr
df.loc[:, 'rng'] = rng_arr
    # Dropping 'f_red' column
if drop_fred:
df.drop('f_red', axis=1, inplace=True)
return df
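# Illustrative usage (hypothetical dataframe): the input must carry a 'cs_flag'
# column; centrals (cs_flag == 1) receive f_red_cen and satellites (cs_flag == 0)
# f_red_sat, and each galaxy is labelled 'R' or 'B' by comparing a uniform random
# draw against its red fraction.
# >>> mock_labelled = assign_colour_label_mock(f_red_c, f_red_s, mock_df, drop_fred=True)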
def get_host_halo_mock(gals_df, mock):
"""
Get host halo mass from mock catalog
Parameters
----------
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        'vishnu' for the Vishnu mock, anything else for a survey mock
Returns
---------
cen_halos: array
Array of central host halo masses
sat_halos: array
Array of satellite host halo masses
"""
df = gals_df.copy()
# groups = df.groupby('halo_id')
# keys = groups.groups.keys()
# for key in keys:
# group = groups.get_group(key)
# for index, value in enumerate(group.cs_flag):
# if value == 1:
# cen_halos.append(group.loghalom.values[index])
# else:
# sat_halos.append(group.loghalom.values[index])
if mock == 'vishnu':
cen_halos = []
sat_halos = []
for index, value in enumerate(df.cs_flag):
if value == 1:
cen_halos.append(df.halo_mvir.values[index])
else:
sat_halos.append(df.halo_mvir.values[index])
else:
cen_halos = []
sat_halos = []
for index, value in enumerate(df.cs_flag):
if value == 1:
cen_halos.append(df.loghalom.values[index])
else:
sat_halos.append(df.loghalom.values[index])
cen_halos = np.array(cen_halos)
sat_halos = np.array(sat_halos)
return cen_halos, sat_halos
def get_stellar_mock(gals_df, mock, randint=None):
"""
Get stellar mass from mock catalog
Parameters
----------
    gals_df: pandas dataframe
        Mock catalog
    mock: string
        'vishnu' for the Vishnu mock, anything else for a survey mock
    randint: int, optional
        Mock number used to pick the stellar-mass column for the Vishnu mock
Returns
---------
cen_gals: array
Array of central stellar masses
sat_gals: array
Array of satellite stellar masses
"""
df = gals_df.copy()
if mock == 'vishnu':
cen_gals = []
sat_gals = []
for idx,value in enumerate(df.cs_flag):
if value == 1:
cen_gals.append(df['{0}'.format(randint)].values[idx])
elif value == 0:
sat_gals.append(df['{0}'.format(randint)].values[idx])
else:
cen_gals = []
sat_gals = []
for idx,value in enumerate(df.cs_flag):
if value == 1:
cen_gals.append(df.logmstar.values[idx])
elif value == 0:
sat_gals.append(df.logmstar.values[idx])
cen_gals = np.array(cen_gals)
sat_gals = np.array(sat_gals)
return cen_gals, sat_gals
def diff_smf(mstar_arr, volume, h1_bool, colour_flag=False):
"""
Calculates differential stellar mass function in units of h=1.0
Parameters
----------
mstar_arr: numpy array
Array of stellar masses
volume: float
Volume of survey or simulation
h1_bool: boolean
True if units of masses are h=1, False if units of masses are not h=1
Returns
---------
maxis: array
Array of x-axis mass values
phi: array
Array of y-axis values
err_tot: array
Array of error values per bin
bins: array
Array of bin edge values
"""
if not h1_bool:
# changing from h=0.7 to h=1 assuming h^-2 dependence
logmstar_arr = np.log10((10**mstar_arr) / 2.041)
else:
logmstar_arr = np.log10(mstar_arr)
if survey == 'eco' or survey == 'resolvea':
bin_min = np.round(np.log10((10**8.9) / 2.041), 1)
if survey == 'eco' and colour_flag == 'R':
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 6
elif survey == 'eco' and colour_flag == 'B':
bin_max = np.round(np.log10((10**11) / 2.041), 1)
bin_num = 6
elif survey == 'resolvea':
# different to avoid nan in inverse corr mat
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
else:
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
bins = np.linspace(bin_min, bin_max, bin_num)
elif survey == 'resolveb':
bin_min = np.round(np.log10((10**8.7) / 2.041), 1)
bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
bins = np.linspace(bin_min, bin_max, 7)
# Unnormalized histogram and bin edges
counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins
dm = edg[1] - edg[0] # Bin width
maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers
# Normalized to volume and bin width
err_poiss = np.sqrt(counts) / (volume * dm)
err_tot = err_poiss
phi = counts / (volume * dm) # not a log quantity
phi = np.log10(phi)
return maxis, phi, err_tot, bins, counts
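# Note on units: when h1_bool is False the input is log10(M*) in h=0.7 units and
# is converted to h=1; when h1_bool is True the input is linear M* already in h=1.
# Minimal sketch on synthetic masses (assuming survey == 'eco' and the ECO volume):
# >>> test_logmstar = np.random.normal(10.0, 0.5, 10000)   # log10(M*), h=0.7
# >>> maxis, phi, err, bins, counts = diff_smf(test_logmstar, 151829.26, False)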
def measure_all_smf(table, volume, data_bool, randint_logmstar=None):
"""
Calculates differential stellar mass function for all, red and blue galaxies
from mock/data
Parameters
----------
table: pandas Dataframe
Dataframe of either mock or data
volume: float
Volume of simulation/survey
    data_bool: boolean
        True if measuring the SMF from data, False if from a mock
    randint_logmstar: int, optional
        Mock number used to select the stellar-mass column when data_bool is False
Returns
---------
3 multidimensional arrays of stellar mass, phi, total error in SMF and
counts per bin for all, red and blue galaxies
"""
colour_col = 'colour_label'
if data_bool:
logmstar_col = 'logmstar'
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(table[logmstar_col], volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'R'],
volume, False, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'B'],
volume, False, 'B')
else:
# logmstar_col = 'stellar_mass'
logmstar_col = '{0}'.format(randint_logmstar)
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(table[logmstar_col], volume, True)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'R'],
volume, True, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'B'],
volume, True, 'B')
return [max_total, phi_total, err_total, counts_total] , \
[max_red, phi_red, err_red, counts_red] , \
[max_blue, phi_blue, err_blue, counts_blue]
def assign_colour_label_data(catl):
"""
Assign colour label to data
Parameters
----------
catl: pandas Dataframe
Data catalog
Returns
---------
catl: pandas Dataframe
Data catalog with colour label assigned as new column
"""
logmstar_arr = catl.logmstar.values
u_r_arr = catl.modelu_rcorr.values
colour_label_arr = np.empty(len(catl), dtype='str')
for idx, value in enumerate(logmstar_arr):
# Divisions taken from Moffett et al. 2015 equation 1
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
if value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
if value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
catl['colour_label'] = colour_label_arr
return catl
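# Worked example of the Moffett et al. (2015) divider used above: a galaxy with
# log M* = 9.5 lies in the intermediate-mass regime, so it is labelled red if
# (u-r) > 0.24*9.5 - 0.7 = 1.58 and blue otherwise.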
def read_data_catl(path_to_file, survey):
"""
Reads survey catalog from file
Parameters
----------
path_to_file: `string`
Path to survey catalog file
survey: `string`
Name of survey
Returns
---------
catl: `pandas.DataFrame`
Survey catalog with grpcz, abs rmag and stellar mass limits
volume: `float`
Volume of survey
z_median: `float`
Median redshift of survey
"""
if survey == 'eco':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s',
'fc', 'grpmb', 'grpms','modelu_rcorr']
# 13878 galaxies
eco_buff = pd.read_csv(path_to_file,delimiter=",", header=0, \
usecols=columns)
if mf_type == 'smf':
# 6456 galaxies
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
elif mf_type == 'bmf':
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
# cvar = 0.125
z_median = np.median(catl.grpcz.values) / (3 * 10**5)
elif survey == 'resolvea' or survey == 'resolveb':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh',
'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b']
# 2286 galaxies
resolve_live18 = pd.read_csv(path_to_file, delimiter=",", header=0, \
usecols=columns)
if survey == 'resolvea':
if mf_type == 'smf':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17.33)]
elif mf_type == 'bmf':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17.33)]
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
# cvar = 0.30
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
elif survey == 'resolveb':
if mf_type == 'smf':
# 487 - cz, 369 - grpcz
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17)]
elif mf_type == 'bmf':
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17)]
volume = 4709.8373 # *2.915 #Survey volume without buffer [Mpc/h]^3
# cvar = 0.58
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
return catl, volume, z_median
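# Illustrative call (the path is wherever eco_all.csv lives; see catl_file below):
# >>> catl, volume, z_median = read_data_catl(path_to_raw + 'eco/eco_all.csv', 'eco')
# For ECO this keeps 3000 <= grpcz <= 7000 and M_r <= -17.33 and returns the
# buffer-free survey volume of 151829.26 (Mpc/h)^3.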
def std_func(bins, mass_arr, vel_arr):
## Calculate std from mean=0
last_index = len(bins)-1
i = 0
std_arr = []
for index1, bin_edge in enumerate(bins):
if index1 == last_index:
break
cen_deltav_arr = []
for index2, stellar_mass in enumerate(mass_arr):
if stellar_mass >= bin_edge and stellar_mass < bins[index1+1]:
cen_deltav_arr.append(vel_arr[index2])
N = len(cen_deltav_arr)
mean = 0
diff_sqrd_arr = []
for value in cen_deltav_arr:
diff = value - mean
diff_sqrd = diff**2
diff_sqrd_arr.append(diff_sqrd)
mean_diff_sqrd = np.mean(diff_sqrd_arr)
std = np.sqrt(mean_diff_sqrd)
std_arr.append(std)
return std_arr
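# std_func returns, per stellar-mass bin, sqrt(mean(dv**2)): the RMS of the
# velocity offsets about zero rather than about the bin mean.  A vectorized
# equivalent (illustrative sketch only, not used below):
# >>> idx = np.digitize(mass_arr, bins) - 1
# >>> std_arr = [np.sqrt(np.mean(np.asarray(vel_arr)[idx == i]**2))
# ...            for i in range(len(bins) - 1)]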
def get_deltav_sigma_vishnu_qmcolour(gals_df, randint):
"""
Calculate spread in velocity dispersion from Vishnu mock (logmstar already
in h=1)
Parameters
----------
    gals_df: pandas dataframe
        Vishnu mock catalog with group finding and colour labels applied
    randint: int
        Mock number used to pick the stellar-mass and group-type columns
Returns
---------
std_red_arr: numpy array
Spread in velocity dispersion of red galaxies
centers_red_arr: numpy array
Bin centers of central stellar mass for red galaxies
std_blue_arr: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue_arr: numpy array
Bin centers of central stellar mass for blue galaxies
"""
mock_pd = gals_df.copy()
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
logmstar_col = '{0}'.format(randint)
g_galtype_col = 'g_galtype_{0}'.format(randint)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer except no M_r cut since vishnu mock has no M_r info
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd[logmstar_col].values >= (10**mstar_limit/2.041))]
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd[g_galtype_col] == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd[g_galtype_col] == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group['{0}'.format(randint)].loc[group[g_galtype_col].\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
red_cen_stellar_mass_arr = np.log10(red_cen_stellar_mass_arr)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group['{0}'.format(randint)].loc[group[g_galtype_col]\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
blue_cen_stellar_mass_arr = np.log10(blue_cen_stellar_mass_arr)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
return std_red, std_blue, centers_red, centers_blue
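# In the dispersion measurement above each member's deltav is its cz minus the
# group's mean cz (rounded to 2 decimals); groups are tagged by the central's
# stellar mass, and std_func then returns the RMS of those offsets per mass bin.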
def get_deltav_sigma_mocks_qmcolour(survey, path):
"""
Calculate spread in velocity dispersion from survey mocks (logmstar converted
to h=1 units before analysis)
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
std_red_arr: numpy array
Spread in velocity dispersion of red galaxies
centers_red_arr: numpy array
Bin centers of central stellar mass for red galaxies
std_blue_arr: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue_arr: numpy array
Bin centers of central stellar mass for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
std_red_arr = []
centers_red_arr = []
std_blue_arr = []
centers_blue_arr = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd.M_r.values <= mag_limit) & \
(mock_pd.logmstar.values >= mstar_limit)]
Mstar_q = 10.5 # Msun/h
Mh_q = 13.76 # Msun/h
mu = 0.69
nu = 0.15
theta = [Mstar_q, Mh_q, mu, nu]
f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
std_red_arr.append(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
std_blue_arr.append(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
centers_red_arr.append(centers_red)
centers_blue_arr.append(centers_blue)
std_red_arr = np.array(std_red_arr)
centers_red_arr = np.array(centers_red_arr)
std_blue_arr = np.array(std_blue_arr)
centers_blue_arr = np.array(centers_blue_arr)
return std_red_arr, std_blue_arr, centers_red_arr, centers_blue_arr
def get_deltav_sigma_data(df):
"""
Measure spread in velocity dispersion separately for red and blue galaxies
by binning up central stellar mass (changes logmstar units from h=0.7 to h=1)
Parameters
----------
df: pandas Dataframe
Data catalog
Returns
---------
std_red: numpy array
Spread in velocity dispersion of red galaxies
centers_red: numpy array
Bin centers of central stellar mass for red galaxies
std_blue: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue: numpy array
Bin centers of central stellar mass for blue galaxies
"""
catl = df.copy()
if survey == 'eco' or survey == 'resolvea':
catl = catl.loc[catl.logmstar >= 8.9]
elif survey == 'resolveb':
catl = catl.loc[catl.logmstar >= 8.7]
catl.logmstar = np.log10((10**catl.logmstar) / 2.041)
red_subset_grpids = np.unique(catl.grp.loc[(catl.\
colour_label == 'R') & (catl.fc == 1)].values)
blue_subset_grpids = np.unique(catl.grp.loc[(catl.\
colour_label == 'B') & (catl.fc == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups with a
# red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = catl.loc[catl.grp == key]
cen_stellar_mass = group.logmstar.loc[group.fc.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
# Calculating spread in velocity dispersion for galaxies in groups with a
# blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = catl.loc[catl.grp == key]
cen_stellar_mass = group.logmstar.loc[group.fc\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, blue_cen_stellar_mass_arr,
blue_deltav_arr)
std_blue = np.array(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
return std_red, centers_red, std_blue, centers_blue
def reading_catls(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
    >>> mock_pd = reading_catls(filename, catl_format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
else:
msg = '`catl_format` ({0}) not supported! Exiting...'.format(catl_format)
raise ValueError(msg)
return mock_pd
def get_err_data(survey, path):
"""
Calculate error in data SMF from mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
    err_colour: array
        Standard deviation, across all mocks, of the combined red and blue SMF
        and velocity-dispersion measurements
    corr_mat_inv_colour: array
        Inverse of the correlation matrix of those combined measurements
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
phi_arr_total = []
phi_arr_red = []
phi_arr_blue = []
# logmstar_red_max_arr = []
# logmstar_blue_max_arr = []
# colour_err_arr = []
# colour_corr_mat_inv = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & (mock_pd.M_r.values <= mag_limit) &\
(mock_pd.logmstar.values >= mstar_limit)]
Mstar_q = 10.5 # Msun/h
Mh_q = 13.76 # Msun/h
mu = 0.69
nu = 0.15
theta = [Mstar_q, Mh_q, mu, nu]
f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
# logmstar_red_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'R'].max()
# logmstar_red_max_arr.append(logmstar_red_max)
# logmstar_blue_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'B'].max()
# logmstar_blue_max_arr.append(logmstar_blue_max)
logmstar_arr = mock_pd.logmstar.values
#Measure SMF of mock using diff_smf function
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(logmstar_arr, volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'R'],
volume, False, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'B'],
volume, False, 'B')
phi_arr_total.append(phi_total)
phi_arr_red.append(phi_red)
phi_arr_blue.append(phi_blue)
phi_arr_total = np.array(phi_arr_total)
phi_arr_red = np.array(phi_arr_red)
phi_arr_blue = np.array(phi_arr_blue)
# phi_arr_colour = np.append(phi_arr_red, phi_arr_blue, axis = 0)
# Covariance matrix for total phi (all galaxies)
# cov_mat = np.cov(phi_arr_total, rowvar=False) # default norm is N-1
# err_total = np.sqrt(cov_mat.diagonal())
# cov_mat_red = np.cov(phi_arr_red, rowvar=False) # default norm is N-1
# err_red = np.sqrt(cov_mat_red.diagonal())
# colour_err_arr.append(err_red)
# cov_mat_blue = np.cov(phi_arr_blue, rowvar=False) # default norm is N-1
# err_blue = np.sqrt(cov_mat_blue.diagonal())
# colour_err_arr.append(err_blue)
# corr_mat_red = cov_mat_red / np.outer(err_red , err_red)
# corr_mat_inv_red = np.linalg.inv(corr_mat_red)
# colour_corr_mat_inv.append(corr_mat_inv_red)
# corr_mat_blue = cov_mat_blue / np.outer(err_blue , err_blue)
# corr_mat_inv_blue = np.linalg.inv(corr_mat_blue)
# colour_corr_mat_inv.append(corr_mat_inv_blue)
deltav_sig_red, deltav_sig_blue, deltav_sig_cen_red, deltav_sig_cen_blue = \
get_deltav_sigma_mocks_qmcolour(survey, path)
phi_red_0 = phi_arr_red[:,0]
phi_red_1 = phi_arr_red[:,1]
phi_red_2 = phi_arr_red[:,2]
phi_red_3 = phi_arr_red[:,3]
phi_red_4 = phi_arr_red[:,4]
phi_blue_0 = phi_arr_blue[:,0]
phi_blue_1 = phi_arr_blue[:,1]
phi_blue_2 = phi_arr_blue[:,2]
phi_blue_3 = phi_arr_blue[:,3]
phi_blue_4 = phi_arr_blue[:,4]
dv_red_0 = deltav_sig_red[:,0]
dv_red_1 = deltav_sig_red[:,1]
dv_red_2 = deltav_sig_red[:,2]
dv_red_3 = deltav_sig_red[:,3]
dv_red_4 = deltav_sig_red[:,4]
dv_blue_0 = deltav_sig_blue[:,0]
dv_blue_1 = deltav_sig_blue[:,1]
dv_blue_2 = deltav_sig_blue[:,2]
dv_blue_3 = deltav_sig_blue[:,3]
dv_blue_4 = deltav_sig_blue[:,4]
combined_df = pd.DataFrame({'phi_red_0':phi_red_0, 'phi_red_1':phi_red_1,\
'phi_red_2':phi_red_2, 'phi_red_3':phi_red_3, 'phi_red_4':phi_red_4, \
'phi_blue_0':phi_blue_0, 'phi_blue_1':phi_blue_1,
'phi_blue_2':phi_blue_2, 'phi_blue_3':phi_blue_3,
'phi_blue_4':phi_blue_4, \
'dv_red_0':dv_red_0, 'dv_red_1':dv_red_1, 'dv_red_2':dv_red_2, \
'dv_red_3':dv_red_3, 'dv_red_4':dv_red_4, \
'dv_blue_0':dv_blue_0, 'dv_blue_1':dv_blue_1, 'dv_blue_2':dv_blue_2, \
'dv_blue_3':dv_blue_3, 'dv_blue_4':dv_blue_4})
# Correlation matrix of phi and deltav colour measurements combined
corr_mat_colour = combined_df.corr()
corr_mat_inv_colour = np.linalg.inv(corr_mat_colour.values)
err_colour = np.sqrt(np.diag(combined_df.cov()))
# deltav_sig_colour = np.append(deltav_sig_red, deltav_sig_blue, axis = 0)
# cov_mat_colour = np.cov(phi_arr_colour,deltav_sig_colour, rowvar=False)
# err_colour = np.sqrt(cov_mat_colour.diagonal())
# corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
# corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
# cov_mat_colour = np.cov(phi_arr_red,phi_arr_blue, rowvar=False)
# err_colour = np.sqrt(cov_mat_colour.diagonal())
# corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
# corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
return err_colour, corr_mat_inv_colour
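# The 20 entries of err_colour follow the column order of combined_df above:
# indices 0-4 are the red SMF bins, 5-9 the blue SMF bins, 10-14 the red
# velocity-dispersion bins and 15-19 the blue ones; this is the ordering assumed
# when slicing `sigma` in the plotting section below.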
def debug_within_outside_1sig(emcee_table, grp_keys, Mstar_q, Mhalo_q, mu, nu, chi2):
zumandelbaum_param_vals = [10.5, 13.76, 0.69, 0.15]
iteration = 600.0
emcee_table_it600 = emcee_table.loc[emcee_table.iteration_id == iteration]
chi2_std_it600 = np.std(emcee_table_it600.chi2)
chi2_mean_it600 = np.mean(emcee_table_it600.chi2)
# selecting value from within one sigma
df_within_sig = emcee_table_it600.loc[(emcee_table_it600.chi2 < chi2_mean_it600 + chi2_std_it600)&(emcee_table_it600.chi2 > chi2_mean_it600 - chi2_std_it600)]
chi2_within_sig = df_within_sig.chi2.values[3]
mstar_within_sig = df_within_sig.Mstar_q.values[3]
mhalo_within_sig = df_within_sig.Mhalo_q.values[3]
mu_within_sig = df_within_sig.mu.values[3]
nu_within_sig = df_within_sig.nu.values[3]
# # selecting value from outside one sigma
df_outside_sig = emcee_table_it600.loc[emcee_table_it600.chi2 > chi2_mean_it600 + chi2_std_it600]
chi2_outside_sig = df_outside_sig.chi2.values[3]
mstar_outside_sig = df_outside_sig.Mstar_q.values[3]
mhalo_outside_sig = df_outside_sig.Mhalo_q.values[3]
mu_outside_sig = df_outside_sig.mu.values[3]
nu_outside_sig = df_outside_sig.nu.values[3]
fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
figsize=(10,10))
ax1.plot(grp_keys, Mstar_q[0],c='#941266',ls='--', marker='o')
ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
ax1.scatter(iteration, mstar_outside_sig, marker='*', c='k', s=70)
ax1.scatter(iteration, mstar_within_sig, marker='o', c='k', s=70)
ax2.plot(grp_keys, Mhalo_q[0], c='#941266',ls='--', marker='o')
ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
ax2.scatter(iteration, mhalo_outside_sig, marker='*', c='k', s=70)
ax2.scatter(iteration, mhalo_within_sig, marker='o', c='k', s=70)
ax3.plot(grp_keys, mu[0], c='#941266',ls='--', marker='o')
ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
ax3.scatter(iteration, mu_outside_sig, marker='*', c='k', s=70)
ax3.scatter(iteration, mu_within_sig, marker='o', c='k', s=70)
ax4.plot(grp_keys, nu[0], c='#941266',ls='--', marker='o')
ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
ax4.scatter(iteration, nu_outside_sig, marker='*', c='k', s=70)
ax4.scatter(iteration, nu_within_sig, marker='o', c='k', s=70)
ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
ax5.scatter(iteration, chi2_outside_sig, marker='*', c='k', s=70)
ax5.scatter(iteration, chi2_within_sig, marker='o', c='k', s=70)
ax1.fill_between(grp_keys, Mstar_q[0]-Mstar_q[1], Mstar_q[0]+Mstar_q[1],
alpha=0.3, color='#941266')
ax2.fill_between(grp_keys, Mhalo_q[0]-Mhalo_q[1], Mhalo_q[0]+Mhalo_q[1], \
alpha=0.3, color='#941266')
ax3.fill_between(grp_keys, mu[0]-mu[1], mu[0]+mu[1], \
alpha=0.3, color='#941266')
ax4.fill_between(grp_keys, nu[0]-nu[1], nu[0]+nu[1], \
alpha=0.3, color='#941266')
ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
alpha=0.3, color='#941266')
ax1.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{*}}$")
ax2.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{h}}$")
ax3.set_ylabel(r"$\boldsymbol{\mu}$")
# ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
ax4.set_ylabel(r"$\boldsymbol{\nu}$")
ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
# ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
# ax1.set_yscale('log')
# ax2.set_yscale('log')
ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax4.annotate(zumandelbaum_param_vals[3], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
plt.xlabel(r"$\mathbf{iteration\ number}$")
plt.show()
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_interim = dict_of_paths['int_dir']
path_to_figures = dict_of_paths['plot_dir']
path_to_data = dict_of_paths['data_dir']
catl_file = path_to_raw + "eco/eco_all.csv"
path_to_mocks = path_to_data + 'mocks/m200b/eco/'
randint_logmstar_file = pd.read_csv("/Users/asadm2/Desktop/randint_logmstar.txt",
header=None)
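# NOTE: `iteration` and the *_within_sig / *_outside_sig values used below are
# defined inside debug_within_outside_1sig; this block assumes that function body
# has been run interactively (with the hybrid-quenching chain loaded) so those
# names exist in the global namespace.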
mock_num = randint_logmstar_file[0].values[int(iteration)-1]
gals_df_ = reading_catls(path_to_proc + "gal_group.hdf5")
theta_within = [mstar_within_sig, mhalo_within_sig, mu_within_sig, nu_within_sig]
f_red_cen, f_red_sat = hybrid_quenching_model(theta_within, gals_df_, 'vishnu', \
mock_num)
gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, gals_df_)
v_sim = 130**3
total_model, red_model, blue_model = measure_all_smf(gals_df, v_sim
, False, mock_num)
sig_red_within, sig_blue_within, cen_red_within, cen_blue_within = \
get_deltav_sigma_vishnu_qmcolour(gals_df, mock_num)
total_model_within, red_model_within, blue_model_within = total_model, \
red_model, blue_model
theta_outside = [mstar_outside_sig, mhalo_outside_sig, mu_outside_sig, \
nu_outside_sig]
f_red_cen, f_red_sat = hybrid_quenching_model(theta_outside, gals_df_, 'vishnu', \
mock_num)
gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, gals_df_)
v_sim = 130**3
total_model, red_model, blue_model = measure_all_smf(gals_df, v_sim
, False, mock_num)
sig_red_outside, sig_blue_outside, cen_red_outside, cen_blue_outside = \
get_deltav_sigma_vishnu_qmcolour(gals_df, mock_num)
total_model_outside, red_model_outside, blue_model_outside = total_model, \
red_model, blue_model
catl, volume, z_median = read_data_catl(catl_file, survey)
catl = assign_colour_label_data(catl)
total_data, red_data, blue_data = measure_all_smf(catl, volume, True)
std_red, centers_red, std_blue, centers_blue = get_deltav_sigma_data(catl)
sigma, corr_mat_inv = get_err_data(survey, path_to_mocks)
plt.clf()
plt.plot(total_model_within[0], total_model_within[1], c='k', linestyle='-', \
label='total within 1sig')
plt.plot(total_model_outside[0], total_model_outside[1], c='k', linestyle='--',\
label='total outside 1sig')
plt.plot(red_model_within[0], red_model_within[1], color='maroon',
linestyle='--', label='within 1sig')
plt.plot(blue_model_within[0], blue_model_within[1], color='mediumblue',
linestyle='--', label='within 1sig')
plt.plot(red_model_outside[0], red_model_outside[1], color='indianred',
linestyle='--', label='outside 1sig')
plt.plot(blue_model_outside[0], blue_model_outside[1], color='cornflowerblue',
linestyle='--', label='outside 1sig')
plt.errorbar(x=red_data[0], y=red_data[1], yerr=sigma[0:5], xerr=None,
color='r', label='data')
plt.errorbar(x=blue_data[0], y=blue_data[1], yerr=sigma[5:10], xerr=None,
color='b', label='data')
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,\mathrm{h}^{3} \right]$', fontsize=20)
plt.legend(loc='best')
plt.title('ECO SMF')
plt.show()
plt.clf()
plt.plot(total_data[0], total_data[1], c='k') # total SMF measured from the ECO data above
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,\mathrm{h}^{3} \right]$', fontsize=20)
plt.legend(loc='best')
plt.title('ECO SMF')
plt.show()
plt.clf()
plt.scatter(cen_red_within, sig_red_within, c='maroon', label='within 1sig')
plt.scatter(cen_red_outside, sig_red_outside, c='indianred', label='outside 1sig')
plt.scatter(cen_blue_within, sig_blue_within, c='mediumblue', label='within 1sig')
plt.scatter(cen_blue_outside, sig_blue_outside, c='cornflowerblue', \
label='outside 1sig')
plt.errorbar(x=centers_red, y=std_red, yerr=sigma[10:15], xerr=None, color='r',\
label='data', fmt='')
plt.errorbar(x=centers_blue, y=std_blue, yerr=sigma[15:20], xerr=None, \
color='b', label='data', fmt='')
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'$\sigma$')
plt.legend(loc='best')
plt.title(r'ECO spread in $\delta v$')
plt.show()
'plt.scatter', (['cen_blue_outside', 'sig_blue_outside'], {'c': '"""cornflowerblue"""', 'label': '"""outside 1sig"""'}), "(cen_blue_outside, sig_blue_outside, c='cornflowerblue', label=\n 'outside 1sig')\n", (61243, 61326), True, 'import matplotlib.pyplot as plt\n'), ((61336, 61442), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'centers_red', 'y': 'std_red', 'yerr': 'sigma[10:15]', 'xerr': 'None', 'color': '"""r"""', 'label': '"""data"""', 'fmt': '""""""'}), "(x=centers_red, y=std_red, yerr=sigma[10:15], xerr=None, color=\n 'r', label='data', fmt='')\n", (61348, 61442), True, 'import matplotlib.pyplot as plt\n'), ((61451, 61558), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'centers_blue', 'y': 'std_blue', 'yerr': 'sigma[15:20]', 'xerr': 'None', 'color': '"""b"""', 'label': '"""data"""', 'fmt': '""""""'}), "(x=centers_blue, y=std_blue, yerr=sigma[15:20], xerr=None,\n color='b', label='data', fmt='')\n", (61463, 61558), True, 'import matplotlib.pyplot as plt\n'), ((61569, 61694), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\boldmath$\\\\log_{10}\\\\ M_\\\\star \\\\left[\\\\mathrm{M_\\\\odot}\\\\, \\\\mathrm{h}^{-1} \\\\right]$"""'], {'fontsize': '(20)'}), "(\n '\\\\boldmath$\\\\log_{10}\\\\ M_\\\\star \\\\left[\\\\mathrm{M_\\\\odot}\\\\, \\\\mathrm{h}^{-1} \\\\right]$'\n , fontsize=20)\n", (61579, 61694), True, 'import matplotlib.pyplot as plt\n'), ((61680, 61703), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (61690, 61703), True, 'import matplotlib.pyplot as plt\n'), ((61708, 61730), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (61718, 61730), True, 'import matplotlib.pyplot as plt\n'), ((61735, 61773), 'matplotlib.pyplot.title', 'plt.title', (['"""ECO spread in $\\\\delta v$"""'], {}), "('ECO spread in $\\\\delta v$')\n", (61744, 61773), True, 'import matplotlib.pyplot as plt\n'), ((61778, 61788), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (61786, 61788), True, 'import matplotlib.pyplot as plt\n'), ((2322, 2428), 'pandas.read_csv', 'pd.read_csv', (['chain_fname'], {'delim_whitespace': '(True)', 'names': "['Mh_qc', 'Mh_qs', 'mu_c', 'mu_s']", 'header': 'None'}), "(chain_fname, delim_whitespace=True, names=['Mh_qc', 'Mh_qs',\n 'mu_c', 'mu_s'], header=None)\n", (2333, 2428), True, 'import pandas as pd\n'), ((4451, 4480), 'numpy.mean', 'np.mean', (['group.Mstar_q.values'], {}), '(group.Mstar_q.values)\n', (4458, 4480), True, 'import numpy as np\n'), ((4503, 4531), 'numpy.std', 'np.std', (['group.Mstar_q.values'], {}), '(group.Mstar_q.values)\n', (4509, 4531), True, 'import numpy as np\n'), ((4632, 4661), 'numpy.mean', 'np.mean', (['group.Mhalo_q.values'], {}), '(group.Mhalo_q.values)\n', (4639, 4661), True, 'import numpy as np\n'), ((4684, 4712), 'numpy.std', 'np.std', (['group.Mhalo_q.values'], {}), '(group.Mhalo_q.values)\n', (4690, 4712), True, 'import numpy as np\n'), ((4808, 4832), 'numpy.mean', 'np.mean', (['group.mu.values'], {}), '(group.mu.values)\n', (4815, 4832), True, 'import numpy as np\n'), ((4850, 4873), 'numpy.std', 'np.std', (['group.mu.values'], {}), '(group.mu.values)\n', (4856, 4873), True, 'import numpy as np\n'), ((4949, 4973), 'numpy.mean', 'np.mean', (['group.nu.values'], {}), '(group.nu.values)\n', (4956, 4973), True, 'import numpy as np\n'), ((4991, 5014), 'numpy.std', 'np.std', (['group.nu.values'], {}), '(group.nu.values)\n', (4997, 5014), True, 'import numpy as np\n'), ((5092, 5118), 'numpy.mean', 'np.mean', (['group.chi2.values'], 
{}), '(group.chi2.values)\n', (5099, 5118), True, 'import numpy as np\n'), ((5138, 5163), 'numpy.std', 'np.std', (['group.chi2.values'], {}), '(group.chi2.values)\n', (5144, 5163), True, 'import numpy as np\n'), ((8991, 9040), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 10)'}), '(5, 1, sharex=True, figsize=(10, 10))\n', (9003, 9040), True, 'import matplotlib.pyplot as plt\n'), ((11233, 11277), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathbf{iteration\\\\ number}$"""'], {}), "('$\\\\mathbf{iteration\\\\ number}$')\n", (11243, 11277), True, 'import matplotlib.pyplot as plt\n'), ((11281, 11291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11289, 11291), True, 'import matplotlib.pyplot as plt\n'), ((12516, 12622), 'pandas.read_csv', 'pd.read_csv', (['chain_fname'], {'delim_whitespace': '(True)', 'names': "['Mh_qc', 'Mh_qs', 'mu_c', 'mu_s']", 'header': 'None'}), "(chain_fname, delim_whitespace=True, names=['Mh_qc', 'Mh_qs',\n 'mu_c', 'mu_s'], header=None)\n", (12527, 12622), True, 'import pandas as pd\n'), ((15493, 15546), 'numpy.exp', 'np.exp', (['(-(cen_stellar_mass_arr / 10 ** Mstar_q) ** mu)'], {}), '(-(cen_stellar_mass_arr / 10 ** Mstar_q) ** mu)\n', (15499, 15546), True, 'import numpy as np\n'), ((16862, 16881), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (16879, 16881), True, 'import numpy as np\n'), ((20509, 20542), 'numpy.log10', 'np.log10', (['(10 ** mstar_arr / 2.041)'], {}), '(10 ** mstar_arr / 2.041)\n', (20517, 20542), True, 'import numpy as np\n'), ((20576, 20595), 'numpy.log10', 'np.log10', (['mstar_arr'], {}), '(mstar_arr)\n', (20584, 20595), True, 'import numpy as np\n'), ((21279, 21317), 'numpy.linspace', 'np.linspace', (['bin_min', 'bin_max', 'bin_num'], {}), '(bin_min, bin_max, bin_num)\n', (21290, 21317), True, 'import numpy as np\n'), ((21801, 21816), 'numpy.sqrt', 'np.sqrt', (['counts'], {}), '(counts)\n', (21808, 21816), True, 'import numpy as np\n'), ((25760, 25827), 'pandas.read_csv', 'pd.read_csv', (['path_to_file'], {'delimiter': '""","""', 'header': '(0)', 'usecols': 'columns'}), "(path_to_file, delimiter=',', header=0, usecols=columns)\n", (25771, 25827), True, 'import pandas as pd\n'), ((29249, 29271), 'numpy.mean', 'np.mean', (['diff_sqrd_arr'], {}), '(diff_sqrd_arr)\n', (29256, 29271), True, 'import numpy as np\n'), ((29286, 29309), 'numpy.sqrt', 'np.sqrt', (['mean_diff_sqrd'], {}), '(mean_diff_sqrd)\n', (29293, 29309), True, 'import numpy as np\n'), ((32322, 32347), 'numpy.linspace', 'np.linspace', (['(8.6)', '(11.2)', '(6)'], {}), '(8.6, 11.2, 6)\n', (32333, 32347), True, 'import numpy as np\n'), ((33436, 33461), 'numpy.linspace', 'np.linspace', (['(8.6)', '(10.7)', '(6)'], {}), '(8.6, 10.7, 6)\n', (33447, 33461), True, 'import numpy as np\n'), ((41787, 41812), 'numpy.linspace', 'np.linspace', (['(8.6)', '(11.2)', '(6)'], {}), '(8.6, 11.2, 6)\n', (41798, 41812), True, 'import numpy as np\n'), ((42753, 42778), 'numpy.linspace', 'np.linspace', (['(8.6)', '(10.7)', '(6)'], {}), '(8.6, 10.7, 6)\n', (42764, 42778), True, 'import numpy as np\n'), ((45506, 45530), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (45520, 45530), False, 'import os\n'), ((45699, 45720), 'pandas.read_hdf', 'pd.read_hdf', (['filename'], {}), '(filename)\n', (45710, 45720), True, 'import pandas as pd\n'), ((929, 950), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (935, 950), True, 'import numpy as np\n'), ((8085, 8112), 'numpy.mean', 
'np.mean', (['group.Mh_qc.values'], {}), '(group.Mh_qc.values)\n', (8092, 8112), True, 'import numpy as np\n'), ((8133, 8159), 'numpy.std', 'np.std', (['group.Mh_qc.values'], {}), '(group.Mh_qc.values)\n', (8139, 8159), True, 'import numpy as np\n'), ((8250, 8277), 'numpy.mean', 'np.mean', (['group.Mh_qs.values'], {}), '(group.Mh_qs.values)\n', (8257, 8277), True, 'import numpy as np\n'), ((8298, 8324), 'numpy.std', 'np.std', (['group.Mh_qs.values'], {}), '(group.Mh_qs.values)\n', (8304, 8324), True, 'import numpy as np\n'), ((8414, 8440), 'numpy.mean', 'np.mean', (['group.mu_c.values'], {}), '(group.mu_c.values)\n', (8421, 8440), True, 'import numpy as np\n'), ((8460, 8485), 'numpy.std', 'np.std', (['group.mu_c.values'], {}), '(group.mu_c.values)\n', (8466, 8485), True, 'import numpy as np\n'), ((8571, 8597), 'numpy.mean', 'np.mean', (['group.mu_s.values'], {}), '(group.mu_s.values)\n', (8578, 8597), True, 'import numpy as np\n'), ((8617, 8642), 'numpy.std', 'np.std', (['group.mu_s.values'], {}), '(group.mu_s.values)\n', (8623, 8642), True, 'import numpy as np\n'), ((8728, 8754), 'numpy.mean', 'np.mean', (['group.chi2.values'], {}), '(group.chi2.values)\n', (8735, 8754), True, 'import numpy as np\n'), ((8774, 8799), 'numpy.std', 'np.std', (['group.chi2.values'], {}), '(group.chi2.values)\n', (8780, 8799), True, 'import numpy as np\n'), ((13443, 13475), 'numpy.round', 'np.round', (['acceptance_fraction', '(2)'], {}), '(acceptance_fraction, 2)\n', (13451, 13475), True, 'import numpy as np\n'), ((14609, 14641), 'numpy.round', 'np.round', (['acceptance_fraction', '(2)'], {}), '(acceptance_fraction, 2)\n', (14617, 14641), True, 'import numpy as np\n'), ((20672, 20699), 'numpy.log10', 'np.log10', (['(10 ** 8.9 / 2.041)'], {}), '(10 ** 8.9 / 2.041)\n', (20680, 20699), True, 'import numpy as np\n'), ((21483, 21515), 'numpy.linspace', 'np.linspace', (['bin_min', 'bin_max', '(7)'], {}), '(bin_min, bin_max, 7)\n', (21494, 21515), True, 'import numpy as np\n'), ((26407, 26435), 'numpy.median', 'np.median', (['catl.grpcz.values'], {}), '(catl.grpcz.values)\n', (26416, 26435), True, 'import numpy as np\n'), ((26785, 26852), 'pandas.read_csv', 'pd.read_csv', (['path_to_file'], {'delimiter': '""","""', 'header': '(0)', 'usecols': 'columns'}), "(path_to_file, delimiter=',', header=0, usecols=columns)\n", (26796, 26852), True, 'import pandas as pd\n'), ((31851, 31875), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (31858, 31875), True, 'import numpy as np\n'), ((32409, 32434), 'numpy.linspace', 'np.linspace', (['(8.4)', '(11.0)', '(6)'], {}), '(8.4, 11.0, 6)\n', (32420, 32434), True, 'import numpy as np\n'), ((32959, 32983), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (32966, 32983), True, 'import numpy as np\n'), ((33524, 33549), 'numpy.linspace', 'np.linspace', (['(8.4)', '(10.4)', '(6)'], {}), '(8.4, 10.4, 6)\n', (33535, 33549), True, 'import numpy as np\n'), ((36507, 36547), 'numpy.log10', 'np.log10', (['(10 ** mock_pd.logmstar / 2.041)'], {}), '(10 ** mock_pd.logmstar / 2.041)\n', (36515, 36547), True, 'import numpy as np\n'), ((36580, 36680), 'numpy.unique', 'np.unique', (["mock_pd.groupid.loc[(mock_pd.colour_label == 'R') & (mock_pd.g_galtype == 1)\n ].values"], {}), "(mock_pd.groupid.loc[(mock_pd.colour_label == 'R') & (mock_pd.\n g_galtype == 1)].values)\n", (36589, 36680), True, 'import numpy as np\n'), ((36729, 36829), 'numpy.unique', 'np.unique', (["mock_pd.groupid.loc[(mock_pd.colour_label == 'B') & (mock_pd.g_galtype == 1)\n 
].values"], {}), "(mock_pd.groupid.loc[(mock_pd.colour_label == 'B') & (mock_pd.\n g_galtype == 1)].values)\n", (36738, 36829), True, 'import numpy as np\n'), ((38019, 38036), 'numpy.array', 'np.array', (['std_red'], {}), '(std_red)\n', (38027, 38036), True, 'import numpy as np\n'), ((39273, 39291), 'numpy.array', 'np.array', (['std_blue'], {}), '(std_blue)\n', (39281, 39291), True, 'import numpy as np\n'), ((41425, 41449), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (41432, 41449), True, 'import numpy as np\n'), ((41874, 41899), 'numpy.linspace', 'np.linspace', (['(8.4)', '(11.0)', '(6)'], {}), '(8.4, 11.0, 6)\n', (41885, 41899), True, 'import numpy as np\n'), ((42388, 42412), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (42395, 42412), True, 'import numpy as np\n'), ((42841, 42866), 'numpy.linspace', 'np.linspace', (['(8.4)', '(10.4)', '(6)'], {}), '(8.4, 10.4, 6)\n', (42852, 42866), True, 'import numpy as np\n'), ((14146, 14159), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (14154, 14159), True, 'import numpy as np\n'), ((14175, 14188), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (14183, 14188), True, 'import numpy as np\n'), ((20786, 20814), 'numpy.log10', 'np.log10', (['(10 ** 11.5 / 2.041)'], {}), '(10 ** 11.5 / 2.041)\n', (20794, 20814), True, 'import numpy as np\n'), ((21376, 21403), 'numpy.log10', 'np.log10', (['(10 ** 8.7 / 2.041)'], {}), '(10 ** 8.7 / 2.041)\n', (21384, 21403), True, 'import numpy as np\n'), ((21435, 21463), 'numpy.log10', 'np.log10', (['(10 ** 11.8 / 2.041)'], {}), '(10 ** 11.8 / 2.041)\n', (21443, 21463), True, 'import numpy as np\n'), ((37757, 37782), 'numpy.linspace', 'np.linspace', (['(8.6)', '(11.2)', '(6)'], {}), '(8.6, 11.2, 6)\n', (37768, 37782), True, 'import numpy as np\n'), ((39000, 39025), 'numpy.linspace', 'np.linspace', (['(8.6)', '(10.7)', '(6)'], {}), '(8.6, 10.7, 6)\n', (39011, 39025), True, 'import numpy as np\n'), ((1998, 2011), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2006, 2011), True, 'import numpy as np\n'), ((2027, 2040), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2035, 2040), True, 'import numpy as np\n'), ((12192, 12205), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (12200, 12205), True, 'import numpy as np\n'), ((12221, 12234), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (12229, 12234), True, 'import numpy as np\n'), ((20927, 20953), 'numpy.log10', 'np.log10', (['(10 ** 11 / 2.041)'], {}), '(10 ** 11 / 2.041)\n', (20935, 20953), True, 'import numpy as np\n'), ((27618, 27656), 'numpy.median', 'np.median', (['resolve_live18.grpcz.values'], {}), '(resolve_live18.grpcz.values)\n', (27627, 27656), True, 'import numpy as np\n'), ((37288, 37312), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (37295, 37312), True, 'import numpy as np\n'), ((37860, 37885), 'numpy.linspace', 'np.linspace', (['(8.4)', '(11.0)', '(6)'], {}), '(8.4, 11.0, 6)\n', (37871, 37885), True, 'import numpy as np\n'), ((38527, 38551), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (38534, 38551), True, 'import numpy as np\n'), ((39104, 39129), 'numpy.linspace', 'np.linspace', (['(8.4)', '(10.4)', '(6)'], {}), '(8.4, 10.4, 6)\n', (39115, 39129), True, 'import numpy as np\n'), ((2806, 2819), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2814, 2819), True, 'import numpy as np\n'), ((2835, 2848), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2843, 2848), True, 'import numpy as np\n'), 
((13000, 13013), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (13008, 13013), True, 'import numpy as np\n'), ((13029, 13042), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (13037, 13042), True, 'import numpy as np\n'), ((21105, 21133), 'numpy.log10', 'np.log10', (['(10 ** 11.5 / 2.041)'], {}), '(10 ** 11.5 / 2.041)\n', (21113, 21133), True, 'import numpy as np\n'), ((21207, 21235), 'numpy.log10', 'np.log10', (['(10 ** 11.5 / 2.041)'], {}), '(10 ** 11.5 / 2.041)\n', (21215, 21235), True, 'import numpy as np\n'), ((28474, 28512), 'numpy.median', 'np.median', (['resolve_live18.grpcz.values'], {}), '(resolve_live18.grpcz.values)\n', (28483, 28512), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def plot_tsp(parameters, rank):
rank = np.concatenate([rank, rank[0:1]], axis=0)
plt.figure()
plt.plot(parameters[:, 0], parameters[:, 1], 'ro', color='red')
plt.plot(parameters[:, 0][rank], parameters[:, 1][rank], 'r-', color='blue')
plt.show()
|
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.concatenate"
] |
[((96, 137), 'numpy.concatenate', 'np.concatenate', (['[rank, rank[0:1]]'], {'axis': '(0)'}), '([rank, rank[0:1]], axis=0)\n', (110, 137), True, 'import numpy as np\n'), ((143, 155), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (153, 155), True, 'import matplotlib.pyplot as plt\n'), ((160, 223), 'matplotlib.pyplot.plot', 'plt.plot', (['parameters[:, 0]', 'parameters[:, 1]', '"""ro"""'], {'color': '"""red"""'}), "(parameters[:, 0], parameters[:, 1], 'ro', color='red')\n", (168, 223), True, 'import matplotlib.pyplot as plt\n'), ((228, 304), 'matplotlib.pyplot.plot', 'plt.plot', (['parameters[:, 0][rank]', 'parameters[:, 1][rank]', '"""r-"""'], {'color': '"""blue"""'}), "(parameters[:, 0][rank], parameters[:, 1][rank], 'r-', color='blue')\n", (236, 304), True, 'import matplotlib.pyplot as plt\n'), ((309, 319), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (317, 319), True, 'import matplotlib.pyplot as plt\n')]
|
"""build_test_dataset.py -- The functions to build simulated data sets.
"""
import pickle
import numpy as np
from scipy import stats
# import matplotlib.pyplot as plt
# import corner
DATA_NAME = 'simple' # default
DATA_NAME = '3_gaus'
MB_HOST = 'indirect' # default
MB_HOST = 'step' # todo implement this
MB_HOST = 'direct'
np.random.seed(13048293)
N_SNE = 300
YOUNG_FRAC = 0.95
N_YOUNG = int(N_SNE*YOUNG_FRAC)
N_OLD = N_SNE - N_YOUNG
# TRUE VALUES
c_true = np.random.randn(N_SNE)*0.1
mass_young = np.random.randn(N_YOUNG) + 11 - np.random.exponential(0.5, N_YOUNG)
mass_old = np.random.randn(N_OLD)*0.75 + 11
mass_true = np.concatenate((mass_young, mass_old))
x1_true = np.random.randn(N_SNE)*((mass_true>10)*0.75 + (mass_true<=10)*0.9) + ((mass_true>10)*-0.5 + (mass_true<=10)*0.1)
age_young = (np.random.triangular(0.25, 0.5, 6, size=N_YOUNG)*(mass_young/4)
+ np.random.exponential(size=N_YOUNG)*x1_true[:N_YOUNG]/3)
age_old = np.random.randn(N_OLD)*0.75 + 10
age_true = np.append(age_young, age_old)
COFF = [-0.1, 3, 0.05/0.5, 0.05/2]
if MB_HOST == 'direct':
mb_true = COFF[0]*x1_true + COFF[1]*c_true + COFF[2]*mass_true + COFF[3]*age_true - 20
else:
mb_true = COFF[0]*x1_true + COFF[1]*c_true - 20
# corner.corner(np.array([x1_true, c_true, mass_true, age_true, mb_true]).T,
# labels=['x1', 'c', 'mass', 'age', 'M'])
# plt.show()
# OBSERVATIONAL
x1_obs = x1_true + np.random.randn(N_SNE)*0.3
c_obs = c_true + np.random.randn(N_SNE)*0.04
mass_obs = mass_true + np.random.randn(N_SNE)*0.5
# todo add obs systematic to ages
if DATA_NAME == '3_gaus':
AGE_STD = 0.2
# each should be shape (N_SNE, 3) for the 3_gaus model
# tile works if the input array is shape (N_SNE, 1)
age_gaus_mean = np.abs(np.tile(age_true.reshape(N_SNE, 1), 3) +
np.random.randn(N_SNE, 3)*AGE_STD*np.tile(age_true.reshape(N_SNE, 1), 3))
age_gaus_mean = np.expand_dims(age_gaus_mean, 0)
# only apply 1/3 of the uncertainty to each Gaussian
age_gaus_std = np.random.randn(N_SNE, 3)*(AGE_STD*np.tile(age_true.reshape(N_SNE, 1), 3))/3
age_gaus_std = np.expand_dims(age_gaus_std, 0)
# it just works, test it with .sum(axis=1).
age_gaus_A = np.random.dirichlet((1, 1, 1), (N_SNE))
age_gaus_A = np.expand_dims(age_gaus_A, 0)
else:
# defaults to simple model
AGE_STD = 0.2
age_obs = np.abs(age_true + np.random.randn(N_SNE)*AGE_STD*age_true)
age_gaus_std = [np.array([AGE_STD*np.abs(age_true)]).T]
age_gaus_A = np.ones((1, N_SNE, 1), dtype=np.float)
mb_obs = mb_true + np.random.randn(N_SNE)*0.15
# corner.corner(np.array([x1_obs, c_obs, mass_obs, age_obs, mb_obs]).T,
# labels=['x1', 'c', 'mass', 'age', 'M'], show_titles=True)
# plt.show()
# SAVE DATA
if DATA_NAME == '3_gaus':
n_age_mix = 3
else:
n_age_mix = 1
pickle.dump(dict( # general properties
n_sne=N_SNE, n_props=5, n_non_gaus_props=1, n_sn_set=1,
sn_set_inds=[0]*N_SNE,
# redshifts
z_helio=[0.1]*N_SNE, z_CMB=[0.1]*N_SNE,
# Gaussian defined properties
obs_mBx1c=[[mb_obs[i], x1_obs[i], c_obs[i], mass_obs[i]] for i in range(N_SNE)],
obs_mBx1c_cov=[np.diag([0.05**2, 0.3**2, 0.04**2, 0.3**2])]*N_SNE,
# Non-Gaussian properties, aka age
n_age_mix=n_age_mix, age_gaus_mean=age_gaus_mean,
age_gaus_std=age_gaus_std, age_gaus_A=age_gaus_A,
# Other stuff that does not really need to change
do_fullDint=0, outl_frac_prior_lnmean=-4.6, outl_frac_prior_lnwidth=1,
lognormal_intr_prior=0, allow_alpha_S_N=0),
open(f'test_{DATA_NAME}_{N_SNE}_obs.pkl', 'wb'))
pickle.dump({'x1': x1_true, 'c': c_true, 'mass': mass_true,
'age': age_true, 'mb': mb_true},
open(f'test_{DATA_NAME}_{N_SNE}_true.pkl', 'wb'))
|
[
"numpy.random.dirichlet",
"numpy.random.triangular",
"numpy.random.seed",
"numpy.abs",
"numpy.random.randn",
"numpy.random.exponential",
"numpy.expand_dims",
"numpy.ones",
"numpy.append",
"numpy.diag",
"numpy.concatenate"
] |
[((340, 364), 'numpy.random.seed', 'np.random.seed', (['(13048293)'], {}), '(13048293)\n', (354, 364), True, 'import numpy as np\n'), ((644, 682), 'numpy.concatenate', 'np.concatenate', (['(mass_young, mass_old)'], {}), '((mass_young, mass_old))\n', (658, 682), True, 'import numpy as np\n'), ((1011, 1040), 'numpy.append', 'np.append', (['age_young', 'age_old'], {}), '(age_young, age_old)\n', (1020, 1040), True, 'import numpy as np\n'), ((479, 501), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (494, 501), True, 'import numpy as np\n'), ((552, 587), 'numpy.random.exponential', 'np.random.exponential', (['(0.5)', 'N_YOUNG'], {}), '(0.5, N_YOUNG)\n', (573, 587), True, 'import numpy as np\n'), ((1940, 1972), 'numpy.expand_dims', 'np.expand_dims', (['age_gaus_mean', '(0)'], {}), '(age_gaus_mean, 0)\n', (1954, 1972), True, 'import numpy as np\n'), ((2145, 2176), 'numpy.expand_dims', 'np.expand_dims', (['age_gaus_std', '(0)'], {}), '(age_gaus_std, 0)\n', (2159, 2176), True, 'import numpy as np\n'), ((2242, 2279), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(1, 1, 1)', 'N_SNE'], {}), '((1, 1, 1), N_SNE)\n', (2261, 2279), True, 'import numpy as np\n'), ((2299, 2328), 'numpy.expand_dims', 'np.expand_dims', (['age_gaus_A', '(0)'], {}), '(age_gaus_A, 0)\n', (2313, 2328), True, 'import numpy as np\n'), ((2534, 2572), 'numpy.ones', 'np.ones', (['(1, N_SNE, 1)'], {'dtype': 'np.float'}), '((1, N_SNE, 1), dtype=np.float)\n', (2541, 2572), True, 'import numpy as np\n'), ((520, 544), 'numpy.random.randn', 'np.random.randn', (['N_YOUNG'], {}), '(N_YOUNG)\n', (535, 544), True, 'import numpy as np\n'), ((599, 621), 'numpy.random.randn', 'np.random.randn', (['N_OLD'], {}), '(N_OLD)\n', (614, 621), True, 'import numpy as np\n'), ((694, 716), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (709, 716), True, 'import numpy as np\n'), ((821, 869), 'numpy.random.triangular', 'np.random.triangular', (['(0.25)', '(0.5)', '(6)'], {'size': 'N_YOUNG'}), '(0.25, 0.5, 6, size=N_YOUNG)\n', (841, 869), True, 'import numpy as np\n'), ((967, 989), 'numpy.random.randn', 'np.random.randn', (['N_OLD'], {}), '(N_OLD)\n', (982, 989), True, 'import numpy as np\n'), ((1435, 1457), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (1450, 1457), True, 'import numpy as np\n'), ((1479, 1501), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (1494, 1501), True, 'import numpy as np\n'), ((1530, 1552), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (1545, 1552), True, 'import numpy as np\n'), ((2592, 2614), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (2607, 2614), True, 'import numpy as np\n'), ((900, 935), 'numpy.random.exponential', 'np.random.exponential', ([], {'size': 'N_YOUNG'}), '(size=N_YOUNG)\n', (921, 935), True, 'import numpy as np\n'), ((2049, 2074), 'numpy.random.randn', 'np.random.randn', (['N_SNE', '(3)'], {}), '(N_SNE, 3)\n', (2064, 2074), True, 'import numpy as np\n'), ((1846, 1871), 'numpy.random.randn', 'np.random.randn', (['N_SNE', '(3)'], {}), '(N_SNE, 3)\n', (1861, 1871), True, 'import numpy as np\n'), ((2416, 2438), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (2431, 2438), True, 'import numpy as np\n'), ((3294, 3345), 'numpy.diag', 'np.diag', (['[0.05 ** 2, 0.3 ** 2, 0.04 ** 2, 0.3 ** 2]'], {}), '([0.05 ** 2, 0.3 ** 2, 0.04 ** 2, 0.3 ** 2])\n', (3301, 3345), True, 'import numpy as np\n'), ((2495, 2511), 'numpy.abs', 'np.abs', (['age_true'], {}), 
'(age_true)\n', (2501, 2511), True, 'import numpy as np\n')]
|
"""Run model ensemble
The canonical form of `job run` is:
job run [OPTIONS] -- EXECUTABLE [OPTIONS]
where `EXECUTABLE` is your model executable or a command, followed by its
arguments. Note the `--` that separates `job run` arguments `OPTIONS` from the
executable. When there is no ambiguity in the command-line arguments (as seen
by Python's argparse) the `--` may be dropped. `job run` options determine in which
manner to run the model, which parameter values to vary (the ensemble), and how
to communicate these parameter values to the model.
"""
examples="""
Examples
--------
job run -p a=2,3,4 b=0,1 -o out --shell -- echo --a {a} --b {b} --out {}
--a 2 --b 0 --out out/0
--a 2 --b 1 --out out/1
--a 3 --b 0 --out out/2
--a 3 --b 1 --out out/3
--a 4 --b 0 --out out/4
--a 4 --b 1 --out out/5
The command above runs an ensemble of 6 model versions, by calling `echo --a {a}
--b {b} --out {}` where `{a}`, `{b}` and `{}` are formatted at runtime with the
parameter values and the run directory, as displayed in the output above. Parameters can also be provided as a file:
job run -p a=2,3,4 b=0,1 -o out --file-name "params.txt" --file-type "linesep" --line-sep " " --shell cat {}/params.txt
a 2
b 0
a 2
b 1
a 3
b 0
a 3
b 1
a 4
b 0
a 4
b 1
Where the UNIX `cat` command displays the file contents in the terminal. File types
that involve grouping, such as namelist, require a group prefix with a `.`
separator in the parameter name:
job run -p g1.a=0,1 g2.b=2. -o out --file-name "params.txt" --file-type "namelist" --shell cat {}/params.txt
&g1
a = 0
/
&g2
b = 2.0
/
&g1
a = 1
/
&g2
b = 2.0
/
"""
import argparse
import tempfile
import numpy as np
from runner.param import MultiParam, DiscreteParam
from runner.model import Model
#from runner.xparams import XParams
from runner.xrun import XParams, XRun, XPARAM
from runner.job.model import interface
from runner.job.config import ParserIO, program
import os
EXPCONFIG = 'experiment.json'
EXPDIR = 'out'
# run
# ---
def parse_slurm_array_indices(a):
indices = []
for i in a.split(","):
if '-' in i:
if ':' in i:
i, step = i.split(':')
step = int(step)
else:
step = 1
start, stop = i.split('-')
start = int(start)
stop = int(stop) + 1 # last index is ignored in python
indices.extend(range(start, stop, step))
else:
indices.append(int(i))
return indices
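# Illustrative usage (added; the calls below are hypothetical, mirroring the
# slurm sbatch --array syntax described in the --id help text):
#   parse_slurm_array_indices("0-4:2,7")  ->  [0, 2, 4, 7]
#   parse_slurm_array_indices("1,3-5")    ->  [1, 3, 4, 5]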
def _typechecker(type):
def check(string):
try:
type(string) # just a check
except Exception as error:
print('ERROR:', str(error))
raise
        return string
    return check
submit = argparse.ArgumentParser(add_help=False)
grp = submit.add_argument_group("simulation modes")
#grp.add_argument('--batch-script', help='')
#x = grp.add_mutually_exclusive_group()
grp.add_argument('--max-workers', type=int,
help="number of workers for parallel processing (need to be allocated, e.g. via sbatch) -- default to the number of runs")
grp.add_argument('-t', '--timeout', type=float, default=31536000, help='timeout in seconds (default to %(default)s)')
grp.add_argument('--shell', action='store_true',
help='print output to terminal instead of log file, run sequentially, mostly useful for testing/debugging')
grp.add_argument('--echo', action='store_true',
help='display commands instead of running them (but does setup output directory). Alias for --shell --force echo [model args ...]')
#grp.add_argument('-b', '--array', action='store_true',
# help='submit using sbatch --array (faster!), EXPERIMENTAL)')
grp.add_argument('-f', '--force', action='store_true',
help='perform run even if params.txt already exists directory')
folders = argparse.ArgumentParser(add_help=False)
grp = folders.add_argument_group("simulation settings")
grp.add_argument('-o','--out-dir', default=EXPDIR, dest='expdir',
help='experiment directory \
(params.txt and logs/ will be created, as well as individual model output directories')
grp.add_argument('-a','--auto-dir', action='store_true',
help='run directory named according to parameter values instead of run `id`')
params_parser = argparse.ArgumentParser(add_help=False)
x = params_parser.add_mutually_exclusive_group()
x.add_argument('-p', '--params',
type=DiscreteParam.parse,
help="""Param values to combine.
SPEC specifies discrete parameter values
as a comma-separated list `VALUE[,VALUE...]`
or a range `START:STOP:N`.""",
metavar="NAME=SPEC",
nargs='*')
x.add_argument('-i','--params-file', help='ensemble parameters file')
x.add_argument('--continue', dest="continue_simu", action='store_true',
help=argparse.SUPPRESS)
#help='load params.txt from simulation directory')
params_parser.add_argument('-j','--id', type=_typechecker(parse_slurm_array_indices), dest='runid',
metavar="I,J...,START-STOP:STEP,...",
help='select one or several ensemble members (0-based !), \
slurm sbatch --array syntax, e.g. `0,2,4` or `0-4:2` \
or a combination of these, `0,2,4,5` <==> `0-4:2,5`')
params_parser.add_argument('--include-default',
action='store_true',
help='also run default model version (with no parameters)')
#grp = output_parser.add_argument_group("model output",
# description='model output variables')
#grp.add_argument("-v", "--output-variables", nargs='+', default=[],
# help='list of state variables to include in output.txt')
#
#grp.add_argument('-l', '--likelihood',
# type=ScipyParam.parse,
# help='distribution, to compute weights',
# metavar="NAME=DIST",
# default = [],
# nargs='+')
parser = argparse.ArgumentParser(parents=[interface.parser, params_parser, folders, submit], epilog=examples, description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
runio = interface.join(ParserIO(folders)) # interface + folder: saveit
@program(parser)
def main(o):
if o.echo:
o.model = ['echo'] + o.model
o.shell = True
o.force = True
model = Model(interface.get(o))
pfile = os.path.join(o.expdir, XPARAM)
if o.continue_simu:
o.params_file = pfile
o.force = True
if o.params_file:
xparams = XParams.read(o.params_file)
elif o.params:
prior = MultiParam(o.params)
xparams = prior.product() # only product allowed as direct input
#update = {p.name:p.value for p in o.params}
else:
xparams = XParams(np.empty((0,0)), names=[])
o.include_default = True
xrun = XRun(model, xparams, expdir=o.expdir, autodir=o.auto_dir, max_workers=o.max_workers, timeout=o.timeout)
# create dir, write params.txt file, as well as experiment configuration
try:
if not o.continue_simu:
xrun.setup(force=o.force)
except RuntimeError as error:
print("ERROR :: "+str(error))
print("Use -f/--force to bypass this check")
parser.exit(1)
#write_config(vars(o), os.path.join(o.expdir, EXPCONFIG), parser=experiment)
runio.dump(o, open(os.path.join(o.expdir, EXPCONFIG),'w'))
if o.runid:
indices = parse_slurm_array_indices(o.runid)
else:
indices = np.arange(xparams.size)
if o.include_default:
indices = list(indices) + [None]
# test: run everything serially
if o.shell:
for i in indices:
xrun[i].run(background=False)
# the default
else:
xrun.run(indices=indices)
return
main.register('run', help='run model (single version or ensemble)')
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"numpy.empty",
"runner.param.MultiParam",
"runner.xrun.XParams.read",
"runner.job.config.program",
"runner.job.config.ParserIO",
"numpy.arange",
"runner.job.model.interface.get",
"os.path.join",
"runner.xrun.XRun"
] |
[((2929, 2968), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (2952, 2968), False, 'import argparse\n'), ((4063, 4102), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (4086, 4102), False, 'import argparse\n'), ((4548, 4587), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (4571, 4587), False, 'import argparse\n'), ((6271, 6455), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'parents': '[interface.parser, params_parser, folders, submit]', 'epilog': 'examples', 'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(parents=[interface.parser, params_parser, folders,\n submit], epilog=examples, description=__doc__, formatter_class=argparse\n .RawDescriptionHelpFormatter)\n', (6294, 6455), False, 'import argparse\n'), ((6522, 6537), 'runner.job.config.program', 'program', (['parser'], {}), '(parser)\n', (6529, 6537), False, 'from runner.job.config import ParserIO, program\n'), ((6471, 6488), 'runner.job.config.ParserIO', 'ParserIO', (['folders'], {}), '(folders)\n', (6479, 6488), False, 'from runner.job.config import ParserIO, program\n'), ((6700, 6730), 'os.path.join', 'os.path.join', (['o.expdir', 'XPARAM'], {}), '(o.expdir, XPARAM)\n', (6712, 6730), False, 'import os\n'), ((7169, 7277), 'runner.xrun.XRun', 'XRun', (['model', 'xparams'], {'expdir': 'o.expdir', 'autodir': 'o.auto_dir', 'max_workers': 'o.max_workers', 'timeout': 'o.timeout'}), '(model, xparams, expdir=o.expdir, autodir=o.auto_dir, max_workers=o.\n max_workers, timeout=o.timeout)\n', (7173, 7277), False, 'from runner.xrun import XParams, XRun, XPARAM\n'), ((6669, 6685), 'runner.job.model.interface.get', 'interface.get', (['o'], {}), '(o)\n', (6682, 6685), False, 'from runner.job.model import interface\n'), ((6850, 6877), 'runner.xrun.XParams.read', 'XParams.read', (['o.params_file'], {}), '(o.params_file)\n', (6862, 6877), False, 'from runner.xrun import XParams, XRun, XPARAM\n'), ((7822, 7845), 'numpy.arange', 'np.arange', (['xparams.size'], {}), '(xparams.size)\n', (7831, 7845), True, 'import numpy as np\n'), ((6914, 6934), 'runner.param.MultiParam', 'MultiParam', (['o.params'], {}), '(o.params)\n', (6924, 6934), False, 'from runner.param import MultiParam, DiscreteParam\n'), ((7684, 7717), 'os.path.join', 'os.path.join', (['o.expdir', 'EXPCONFIG'], {}), '(o.expdir, EXPCONFIG)\n', (7696, 7717), False, 'import os\n'), ((7097, 7113), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (7105, 7113), True, 'import numpy as np\n')]
|
import os
import h5py
import torch
import numpy as np
import scipy
import json
class CorresPondenceNet(torch.utils.data.Dataset):
def __init__(self, cfg, flag='train'):
super().__init__()
with open(os.path.join(cfg['data_path'], 'name2id.json'), 'r') as f:
self.name2id = json.load(f)
try:
self.catg = self.name2id[cfg['class_name'].capitalize()]
except:
raise ValueError
self.task = cfg['task_type']
with h5py.File(os.path.join(cfg['data_path'], 'corr_mean_dist_geo', '{}_mean_distance.h5'.format(self.catg)), 'r') as f:
self.mean_distance = f['mean_distance'][:]
if self.task == 'embedding':
self.users = {}
self.pcds = []
self.keypoints = []
self.num_annos = 0
filename = os.path.join(
cfg['data_path'], '{}.h5'.format(self.catg))
with h5py.File(filename, 'r') as f:
self.pcds = f['point_clouds'][:]
self.keypoints = f['keypoints'][:]
self.mesh_names = f['mesh_names'][:]
num_train = int(self.pcds.shape[0] * 0.7)
num_divide = int(self.pcds.shape[0] * 0.85)
if flag == 'train':
self.pcds = self.pcds[:num_train]
self.keypoints = self.keypoints[:num_train]
self.mesh_names = self.mesh_names[:num_train]
elif flag == 'val':
self.pcds = self.pcds[num_train:num_divide]
self.keypoints = self.keypoints[num_train:num_divide]
self.mesh_names = self.mesh_names[num_train:num_divide]
elif flag == 'test':
self.pcds = self.pcds[num_divide:]
self.keypoints = self.keypoints[num_divide:]
self.mesh_names = self.mesh_names[num_divide:]
else:
raise ValueError
self.num_annos = self.pcds.shape[0]
else:
raise ValueError
def __getitem__(self, item):
if self.task == 'embedding':
pcd = self.pcds[item]
keypoint_index = np.array(self.keypoints[item], dtype=np.int32)
return torch.tensor(pcd).float(), torch.tensor(keypoint_index).int(), torch.tensor(self.mean_distance).float(), 0
else:
raise ValueError
def __len__(self):
return self.num_annos
|
[
"h5py.File",
"json.load",
"numpy.array",
"os.path.join",
"torch.tensor"
] |
[((306, 318), 'json.load', 'json.load', (['f'], {}), '(f)\n', (315, 318), False, 'import json\n'), ((2186, 2232), 'numpy.array', 'np.array', (['self.keypoints[item]'], {'dtype': 'np.int32'}), '(self.keypoints[item], dtype=np.int32)\n', (2194, 2232), True, 'import numpy as np\n'), ((219, 265), 'os.path.join', 'os.path.join', (["cfg['data_path']", '"""name2id.json"""'], {}), "(cfg['data_path'], 'name2id.json')\n", (231, 265), False, 'import os\n'), ((954, 978), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (963, 978), False, 'import h5py\n'), ((2252, 2269), 'torch.tensor', 'torch.tensor', (['pcd'], {}), '(pcd)\n', (2264, 2269), False, 'import torch\n'), ((2279, 2307), 'torch.tensor', 'torch.tensor', (['keypoint_index'], {}), '(keypoint_index)\n', (2291, 2307), False, 'import torch\n'), ((2315, 2347), 'torch.tensor', 'torch.tensor', (['self.mean_distance'], {}), '(self.mean_distance)\n', (2327, 2347), False, 'import torch\n')]
|
import os
import time
import torch
import random
import numpy as np
from tqdm import tqdm
import torch.nn as nn
from util import epoch_time
import torch.optim as optim
from model.neural_network import RandomlyWiredNeuralNetwork
from data.data_util import fetch_dataloader, test_voc, test_imagenet
SEED = 981126
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
class Trainer:
def __init__(self, num_epoch, lr,
batch_size, num_node,
p, k, m, channel,
in_channels, path,
graph_mode, dataset,
is_small_regime,
checkpoint_path, load):
super(Trainer, self).__init__()
self.params = {'num_epoch': num_epoch,
'batch_size': batch_size,
'lr': lr,
'node_num': num_node,
'p': p,
'k': k,
'm': m,
'in_channels': in_channels,
'channel': channel,
'classes': 21 if dataset == 'voc' else 1000,
'graph_mode': graph_mode,
'load': load,
'path': path,
'dataset': dataset,
'is_small_regime': is_small_regime,
'checkpoint_path': checkpoint_path
}
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
self.train_data, self.val_data, self.test_data = fetch_dataloader(
self.params['dataset'],
self.params['path'],
self.params['batch_size'])
self.rwnn = RandomlyWiredNeuralNetwork(
self.params['channel'],
self.params['in_channels'],
self.params['p'],
self.params['k'],
self.params['m'],
self.params['graph_mode'],
self.params['classes'],
self.params['node_num'],
self.params['checkpoint_path'],
self.params['load'],
self.params['is_small_regime']
).to(self.device)
self.optimizer = optim.SGD(
self.rwnn.parameters(), self.params['lr'], 0.9, weight_decay=5e-5)
self.best_loss = float('inf')
self.step_num = 0
if load:
checkpoint = torch.load(os.path.join(
self.params['checkpoint_path'], 'train.tar'))
self.rwnn.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(
checkpoint['optimizer_state_dict'])
self.epoch = checkpoint['epoch']
self.best_loss = checkpoint['best_loss']
self.scheduler = checkpoint['scheduler']
self.step_num = checkpoint['step_num']
else:
self.epoch = 0
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
self.optimizer, self.params['num_epoch'])
self.criterion = nn.CrossEntropyLoss()
pytorch_total_params = sum(p.numel() for p in self.rwnn.parameters())
print(f"Number of parameters {pytorch_total_params}")
def train(self):
print("\nbegin training...")
for epoch in range(self.epoch, self.params['num_epoch']):
print(
f"\nEpoch: {epoch+1} out of {self.params['num_epoch']}, step: {self.step_num}")
start_time = time.perf_counter()
epoch_loss, step = train_loop(
self.train_data, self.rwnn, self.optimizer, self.criterion, self.device)
val_loss = val_loop(self.val_data, self.rwnn,
self.criterion, self.device)
if val_loss < self.best_loss:
self.best_loss = val_loss
with open(os.path.join(self.params['checkpoint_path'], 'best_model.txt'), 'w') as f:
f.write(
f"epoch: {epoch+1}, 'validation loss: {val_loss}, step: {self.step_num}")
torch.save(
self.rwnn,
os.path.join(self.params['checkpoint_path'], 'best.pt'))
if (epoch + 1) % 15 == 0:
if self.params['dataset'] == 'voc':
test_voc(self.test_data, self.rwnn, self.device)
self.step_num += step
self.scheduler.step()
end_time = time.perf_counter()
minutes, seconds, time_left_min, time_left_sec = epoch_time(
end_time-start_time, epoch, self.params['num_epoch'])
torch.save({
'epoch': epoch,
'model_state_dict': self.rwnn.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'best_loss': self.best_loss,
'scheduler': self.scheduler,
'step_num': self.step_num
}, os.path.join(self.params['checkpoint_path'], 'train.tar'))
print(
f"Train_loss: {round(epoch_loss, 3)} - Val_loss: {round(val_loss, 3)}")
print(
f"Epoch time: {minutes}m {seconds}s - Time left for training: {time_left_min}m {time_left_sec}s")
def train_loop(train_iter, model, optimizer, criterion, device):
epoch_loss = 0
step_num = 0
model.train()
print("Training...")
for src, tgt in tqdm(train_iter):
src = src.to(device)
tgt = tgt.to(device)
optimizer.zero_grad()
logits = model(src)
loss = criterion(logits, tgt)
loss.backward()
optimizer.step()
step_num += 1
epoch_loss += loss.item()
return epoch_loss / len(train_iter), step_num
def val_loop(val_iter, model, criterion, device):
model.eval()
val_loss = 0
with torch.no_grad():
print("Validating...")
for src, tgt in tqdm(val_iter):
src = src.to(device)
tgt = tgt.to(device)
logits = model(src)
loss = criterion(logits, tgt)
val_loss += loss.item()
return val_loss / len(val_iter)
|
[
"util.epoch_time",
"tqdm.tqdm",
"numpy.random.seed",
"torch.manual_seed",
"data.data_util.fetch_dataloader",
"model.neural_network.RandomlyWiredNeuralNetwork",
"torch.cuda.manual_seed",
"torch.nn.CrossEntropyLoss",
"time.perf_counter",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"data.data_util.test_voc",
"random.seed",
"torch.cuda.is_available",
"torch.no_grad",
"os.path.join"
] |
[((314, 331), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (325, 331), False, 'import random\n'), ((332, 352), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (346, 352), True, 'import numpy as np\n'), ((353, 376), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (370, 376), False, 'import torch\n'), ((377, 405), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (399, 405), False, 'import torch\n'), ((5502, 5518), 'tqdm.tqdm', 'tqdm', (['train_iter'], {}), '(train_iter)\n', (5506, 5518), False, 'from tqdm import tqdm\n'), ((1660, 1753), 'data.data_util.fetch_dataloader', 'fetch_dataloader', (["self.params['dataset']", "self.params['path']", "self.params['batch_size']"], {}), "(self.params['dataset'], self.params['path'], self.params[\n 'batch_size'])\n", (1676, 1753), False, 'from data.data_util import fetch_dataloader, test_voc, test_imagenet\n'), ((3131, 3152), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3150, 3152), True, 'import torch.nn as nn\n'), ((5932, 5947), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5945, 5947), False, 'import torch\n'), ((6005, 6019), 'tqdm.tqdm', 'tqdm', (['val_iter'], {}), '(val_iter)\n', (6009, 6019), False, 'from tqdm import tqdm\n'), ((3009, 3087), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'optim.lr_scheduler.CosineAnnealingLR', (['self.optimizer', "self.params['num_epoch']"], {}), "(self.optimizer, self.params['num_epoch'])\n", (3045, 3087), True, 'import torch.optim as optim\n'), ((3560, 3579), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3577, 3579), False, 'import time\n'), ((4536, 4555), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4553, 4555), False, 'import time\n'), ((4618, 4684), 'util.epoch_time', 'epoch_time', (['(end_time - start_time)', 'epoch', "self.params['num_epoch']"], {}), "(end_time - start_time, epoch, self.params['num_epoch'])\n", (4628, 4684), False, 'from util import epoch_time\n'), ((1564, 1589), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1587, 1589), False, 'import torch\n'), ((1807, 2119), 'model.neural_network.RandomlyWiredNeuralNetwork', 'RandomlyWiredNeuralNetwork', (["self.params['channel']", "self.params['in_channels']", "self.params['p']", "self.params['k']", "self.params['m']", "self.params['graph_mode']", "self.params['classes']", "self.params['node_num']", "self.params['checkpoint_path']", "self.params['load']", "self.params['is_small_regime']"], {}), "(self.params['channel'], self.params[\n 'in_channels'], self.params['p'], self.params['k'], self.params['m'],\n self.params['graph_mode'], self.params['classes'], self.params[\n 'node_num'], self.params['checkpoint_path'], self.params['load'], self.\n params['is_small_regime'])\n", (1833, 2119), False, 'from model.neural_network import RandomlyWiredNeuralNetwork\n'), ((2495, 2552), 'os.path.join', 'os.path.join', (["self.params['checkpoint_path']", '"""train.tar"""'], {}), "(self.params['checkpoint_path'], 'train.tar')\n", (2507, 2552), False, 'import os\n'), ((5034, 5091), 'os.path.join', 'os.path.join', (["self.params['checkpoint_path']", '"""train.tar"""'], {}), "(self.params['checkpoint_path'], 'train.tar')\n", (5046, 5091), False, 'import os\n'), ((4225, 4280), 'os.path.join', 'os.path.join', (["self.params['checkpoint_path']", '"""best.pt"""'], {}), "(self.params['checkpoint_path'], 'best.pt')\n", (4237, 4280), False, 'import os\n'), ((4393, 4441), 'data.data_util.test_voc', 
'test_voc', (['self.test_data', 'self.rwnn', 'self.device'], {}), '(self.test_data, self.rwnn, self.device)\n', (4401, 4441), False, 'from data.data_util import fetch_dataloader, test_voc, test_imagenet\n'), ((3944, 4006), 'os.path.join', 'os.path.join', (["self.params['checkpoint_path']", '"""best_model.txt"""'], {}), "(self.params['checkpoint_path'], 'best_model.txt')\n", (3956, 4006), False, 'import os\n')]
|
import trimesh
import numpy as np
import cv2
import copy
import pickle
import torch
import pdb
def depth2normal(depth, f_pix_x, f_pix_y=None):
'''
To compute a normal map from the depth map
Input:
- depth: torch.Tensor (H, W)
- f_pix_x: K[0, 0]
- f_pix_y: K[1, 1]
Return:
- normal: torch.Tensor (H, W, 3)
'''
if f_pix_y is None:
f_pix_y = f_pix_x
h, w = depth.shape
eps = 1e-12
bg_flag = (depth > 1e5) | (depth == 0)
depth[bg_flag] = 0.0
depth_left, depth_right, depth_up, depth_down = torch.zeros(h, w), torch.zeros(h, w), torch.zeros(h, w), torch.zeros(h, w)
if depth.get_device() != -1:
device_id = depth.get_device()
depth_left, depth_right, depth_up, depth_down = depth_left.to(device_id), depth_right.to(device_id), depth_up.to(device_id), depth_down.to(device_id)
depth_left[:, 1:w-1] = depth[:, :w-2].clone()
depth_right[:, 1:w-1] = depth[:, 2:].clone()
depth_up[1:h-1, :] = depth[:h-2, :].clone()
depth_down[1:h-1, :] = depth[2:, :].clone()
dzdx = (depth_right - depth_left) * f_pix_x / 2.0
dzdy = (depth_down - depth_up) * f_pix_y / 2.0
normal = torch.stack([dzdx, dzdy, -torch.ones_like(dzdx)]).permute(1, 2, 0)
normal_length = torch.norm(normal, p=2, dim=2)
normal = normal / (normal_length + 1e-12)[:,:,None]
normal[bg_flag] = 0.0
return normal
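# Illustrative usage sketch (added, not part of the original module). A pinhole
# camera with focal length given in pixels is assumed and the depth map is a
# (H, W) torch.Tensor, as described in the docstring above:
#   depth = torch.ones(480, 640) * 2.0            # flat plane 2 m from the camera
#   normal = depth2normal(depth, f_pix_x=525.0)   # f_pix_x = K[0, 0]
#   normal.shape                                  # -> torch.Size([480, 640, 3])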
def quad2rotation(quad):
'''
    input: quad -- torch.Tensor of shape (N, 4), quaternions ordered as (qr, qi, qj, qk)
'''
bs = quad.shape[0]
qr, qi, qj, qk = quad[:,0], quad[:,1], quad[:,2], quad[:,3]
rot_mat = torch.zeros(bs, 3, 3).to(quad.get_device())
rot_mat[:,0,0] = 1 - 2 * (qj ** 2 + qk ** 2)
rot_mat[:,0,1] = 2 * (qi * qj - qk * qr)
rot_mat[:,0,2] = 2 * (qi * qk + qj * qr)
rot_mat[:,1,0] = 2 * (qi * qj + qk * qr)
rot_mat[:,1,1] = 1 - 2 * (qi ** 2 + qk ** 2)
rot_mat[:,1,2] = 2 * (qj * qk - qi * qr)
rot_mat[:,2,0] = 2 * (qi * qk - qj * qr)
rot_mat[:,2,1] = 2 * (qj * qk + qi * qr)
rot_mat[:,2,2] = 1 - 2 * (qi ** 2 + qj ** 2)
return rot_mat
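# Illustrative check (added): the identity quaternion (qr, qi, qj, qk) = (1, 0, 0, 0)
# maps to the identity rotation. Note the function allocates its output with
# quad.get_device(), so a CUDA tensor is assumed:
#   q = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device='cuda')
#   quad2rotation(q)   # -> tensor of shape (1, 3, 3), equal to the identity matrix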
def get_camera_from_tensor(inputs):
N = len(inputs.shape)
if N == 1:
inputs = inputs.unsqueeze(0)
quad, T = inputs[:,:4], inputs[:,4:]
R = quad2rotation(quad)
RT = torch.cat([R, T[:,:,None]], 2)
if N == 1:
RT = RT[0]
return RT
def get_tensor_from_camera(RT):
gpu_id = -1
if type(RT) == torch.Tensor:
if RT.get_device() != -1:
RT = RT.detach().cpu()
gpu_id = RT.get_device()
RT = RT.numpy()
from mathutils import Matrix
R, T = RT[:,:3], RT[:,3]
rot = Matrix(R)
quad = rot.to_quaternion()
tensor = np.concatenate([quad, T], 0)
tensor = torch.from_numpy(tensor).float()
if gpu_id != -1:
tensor = tensor.to(gpu_id)
return tensor
def downsize_camera_intrinsic(intrinsic, factor):
'''
Input:
- intrinsic type: np.array (3,3)
- factor int
'''
img_h, img_w = int(2 * intrinsic[1,2]), int(2 * intrinsic[0,2])
img_h_new, img_w_new = img_h / factor, img_w / factor
if (img_h_new - round(img_h_new)) > 1e-12 or (img_w_new - round(img_w_new)) > 1e-12:
raise ValueError('The image size {0} should be divisible by the factor {1}.'.format((img_h, img_w), factor))
intrinsic_new = copy.deepcopy(intrinsic)
intrinsic_new[0,:] = intrinsic[0,:] / factor
intrinsic_new[1,:] = intrinsic[1,:] / factor
return intrinsic_new
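# Illustrative usage (added): halving a 640x480 intrinsic matrix. The principal
# point is assumed to sit at exactly half the image size, as the function requires:
#   K = np.array([[525.0, 0.0, 320.0], [0.0, 525.0, 240.0], [0.0, 0.0, 1.0]])
#   downsize_camera_intrinsic(K, 2)   # focal lengths and principal point halved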
def sample_points_from_mesh(mesh, N=30000):
'''
Return:
-- points: np.array (N, 3)
'''
points = trimesh.sample.sample_surface(mesh, N)[0]
return points
def transform_point_cloud(points):
'''
    Resolve the coordinate-frame mismatch between the sampled point cloud and the mesh obj.
'''
points_new = copy.deepcopy(points)
points_new[:,1] = -points[:,2]
points_new[:,2] = points[:,1]
return points_new
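# Illustrative check (added): each point (x, y, z) is mapped to (x, -z, y),
# i.e. a +90 degree rotation about the x-axis:
#   transform_point_cloud(np.array([[1.0, 2.0, 3.0]]))   # -> array([[1., -3., 2.]])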
def read_pickle(fname):
with open(fname, 'rb') as f:
data = pickle.load(f, encoding='latin1')
return data
def save_render_output(render_output, fname):
depth_rendered, normal_rendered, valid_mask_rendered, _ = render_output
output = {}
output['depth'] = depth_rendered.detach().cpu().numpy()
output['normal'] = normal_rendered.detach().cpu().numpy()
output['valid_mask'] = valid_mask_rendered.detach().cpu().numpy()
save_pkl(output, fname)
def save_pkl(data, fname):
with open(fname, 'wb') as f:
pickle.dump(data, f)
|
[
"torch.ones_like",
"copy.deepcopy",
"trimesh.sample.sample_surface",
"pickle.dump",
"torch.norm",
"torch.cat",
"pickle.load",
"torch.zeros",
"mathutils.Matrix",
"numpy.concatenate",
"torch.from_numpy"
] |
[((1268, 1298), 'torch.norm', 'torch.norm', (['normal'], {'p': '(2)', 'dim': '(2)'}), '(normal, p=2, dim=2)\n', (1278, 1298), False, 'import torch\n'), ((2244, 2276), 'torch.cat', 'torch.cat', (['[R, T[:, :, None]]', '(2)'], {}), '([R, T[:, :, None]], 2)\n', (2253, 2276), False, 'import torch\n'), ((2607, 2616), 'mathutils.Matrix', 'Matrix', (['R'], {}), '(R)\n', (2613, 2616), False, 'from mathutils import Matrix\n'), ((2661, 2689), 'numpy.concatenate', 'np.concatenate', (['[quad, T]', '(0)'], {}), '([quad, T], 0)\n', (2675, 2689), True, 'import numpy as np\n'), ((3298, 3322), 'copy.deepcopy', 'copy.deepcopy', (['intrinsic'], {}), '(intrinsic)\n', (3311, 3322), False, 'import copy\n'), ((3768, 3789), 'copy.deepcopy', 'copy.deepcopy', (['points'], {}), '(points)\n', (3781, 3789), False, 'import copy\n'), ((561, 578), 'torch.zeros', 'torch.zeros', (['h', 'w'], {}), '(h, w)\n', (572, 578), False, 'import torch\n'), ((580, 597), 'torch.zeros', 'torch.zeros', (['h', 'w'], {}), '(h, w)\n', (591, 597), False, 'import torch\n'), ((599, 616), 'torch.zeros', 'torch.zeros', (['h', 'w'], {}), '(h, w)\n', (610, 616), False, 'import torch\n'), ((618, 635), 'torch.zeros', 'torch.zeros', (['h', 'w'], {}), '(h, w)\n', (629, 635), False, 'import torch\n'), ((3563, 3601), 'trimesh.sample.sample_surface', 'trimesh.sample.sample_surface', (['mesh', 'N'], {}), '(mesh, N)\n', (3592, 3601), False, 'import trimesh\n'), ((3954, 3987), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (3965, 3987), False, 'import pickle\n'), ((4433, 4453), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (4444, 4453), False, 'import pickle\n'), ((1571, 1592), 'torch.zeros', 'torch.zeros', (['bs', '(3)', '(3)'], {}), '(bs, 3, 3)\n', (1582, 1592), False, 'import torch\n'), ((2703, 2727), 'torch.from_numpy', 'torch.from_numpy', (['tensor'], {}), '(tensor)\n', (2719, 2727), False, 'import torch\n'), ((1207, 1228), 'torch.ones_like', 'torch.ones_like', (['dzdx'], {}), '(dzdx)\n', (1222, 1228), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2020 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test Fraunhofer estimation.
"""
import numpy as np
from shabanipy.jj.fraunhofer.estimation import guess_current_distribution
def create_fraunhofer_like():
fields = np.linspace(-1, 1, 1001)
return fields, np.abs(np.sinc(8 * (fields - 0.1)))
def create_squid_like():
fields = np.linspace(-1, 1, 1001)
return (
fields,
2 + np.cos(8 * np.pi * (fields + 0.1)) * np.sinc(0.1 * (fields + 0.1)),
)
def validate_fraunhofer(offset, first_node, amplitude, c_dis):
np.testing.assert_almost_equal(offset, 0.1)
assert abs(first_node + 0.025) < 0.05
np.testing.assert_almost_equal(amplitude, 1.0)
np.testing.assert_array_equal(c_dis, np.ones(5) / 20)
def validate_squid(offset, first_node, amplitude, c_dis):
np.testing.assert_almost_equal(offset, -0.1)
assert abs(first_node - 0.025) < 0.05
np.testing.assert_almost_equal(amplitude, 3.0)
np.testing.assert_array_equal(c_dis, np.array([0.625, 0, 0, 0, 0.625]))
def test_guess_current_distribution_fraunhofer():
"""Test identifying a fraunhofer like pattern.
"""
fields, fraunhofer_like_ics = create_fraunhofer_like()
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, fraunhofer_like_ics, 5, 4
)
validate_fraunhofer(offsets, first_nodes, amplitudes, c_dis)
def test_guess_current_distribution_squid():
"""Test identifying a SQUID like pattern.
"""
fields, squid_like_ics = create_squid_like()
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, squid_like_ics, 5, 4
)
validate_squid(offsets, first_nodes, amplitudes, c_dis)
def test_guess_current_distribution_too_small_data():
"""Test handling data which do not comport enough points.
"""
fields = np.linspace(-1, 1, 201)
fraunhofer_like_ics = np.abs(np.sinc(2 * (fields - 0.1)))
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, fraunhofer_like_ics, 5, 4
)
np.testing.assert_almost_equal(offsets, 0.1)
assert amplitudes == 1.0
def test_2D_inputs():
"""Test that we can handle properly 2D inputs."""
fields_f, fraunhofer_like_ics = create_fraunhofer_like()
fields_s, squid_like_ics = create_squid_like()
# 2D inputs
fields = np.empty((2, len(fields_f)))
fields[0] = fields_f
fields[1] = fields_s
ics = np.empty_like(fields)
ics[0] = fraunhofer_like_ics
ics[1] = squid_like_ics
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, ics, 5, 4
)
for o, f, a, cd, validator in zip(
offsets, first_nodes, amplitudes, c_dis, (validate_fraunhofer, validate_squid)
):
validator(o, f, a, cd)
def test_3D_inputs():
"""Test that we can handle properly 3D inputs."""
fields_f, fraunhofer_like_ics = create_fraunhofer_like()
fields_s, squid_like_ics = create_squid_like()
# 3D inputs
fields = np.empty((2, 2, len(fields_f)))
fields[0, :] = fields_f
fields[1, :] = fields_s
ics = np.empty_like(fields)
ics[0, :] = fraunhofer_like_ics
ics[1, :] = squid_like_ics
offsets, first_nodes, amplitudes, c_dis = guess_current_distribution(
fields, ics, 5, 4
)
for o, f, a, cd, validator in zip(
offsets, first_nodes, amplitudes, c_dis, (validate_fraunhofer, validate_squid)
):
validator(o[0], f[0], a[0], cd[0])
validator(o[1], f[1], a[1], cd[1])
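# --- Illustrative sketch (added for clarity; not part of the original tests) ---
# Standalone use of the estimator on the synthetic Fraunhofer-like pattern
# defined above; the positional arguments 5 and 4 are copied verbatim from the
# tests. Never called here.
def _example_guess_on_fraunhofer():
    fields, ics = create_fraunhofer_like()
    offset, first_node, amplitude, c_dis = guess_current_distribution(
        fields, ics, 5, 4
    )
    # For this pattern the tests expect an offset near 0.1 and an amplitude
    # near 1.0 (see validate_fraunhofer above).
    return offset, first_node, amplitude, c_dis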
|
[
"numpy.testing.assert_almost_equal",
"shabanipy.jj.fraunhofer.estimation.guess_current_distribution",
"numpy.empty_like",
"numpy.ones",
"numpy.sinc",
"numpy.array",
"numpy.linspace",
"numpy.cos"
] |
[((556, 580), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1001)'], {}), '(-1, 1, 1001)\n', (567, 580), True, 'import numpy as np\n'), ((676, 700), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1001)'], {}), '(-1, 1, 1001)\n', (687, 700), True, 'import numpy as np\n'), ((885, 928), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['offset', '(0.1)'], {}), '(offset, 0.1)\n', (915, 928), True, 'import numpy as np\n'), ((975, 1021), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['amplitude', '(1.0)'], {}), '(amplitude, 1.0)\n', (1005, 1021), True, 'import numpy as np\n'), ((1144, 1188), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['offset', '(-0.1)'], {}), '(offset, -0.1)\n', (1174, 1188), True, 'import numpy as np\n'), ((1235, 1281), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['amplitude', '(3.0)'], {}), '(amplitude, 3.0)\n', (1265, 1281), True, 'import numpy as np\n'), ((1576, 1637), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'fraunhofer_like_ics', '(5)', '(4)'], {}), '(fields, fraunhofer_like_ics, 5, 4)\n', (1602, 1637), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((1916, 1972), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'squid_like_ics', '(5)', '(4)'], {}), '(fields, squid_like_ics, 5, 4)\n', (1942, 1972), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((2188, 2211), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(201)'], {}), '(-1, 1, 201)\n', (2199, 2211), True, 'import numpy as np\n'), ((2321, 2382), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'fraunhofer_like_ics', '(5)', '(4)'], {}), '(fields, fraunhofer_like_ics, 5, 4)\n', (2347, 2382), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((2401, 2445), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['offsets', '(0.1)'], {}), '(offsets, 0.1)\n', (2431, 2445), True, 'import numpy as np\n'), ((2785, 2806), 'numpy.empty_like', 'np.empty_like', (['fields'], {}), '(fields)\n', (2798, 2806), True, 'import numpy as np\n'), ((2915, 2960), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'ics', '(5)', '(4)'], {}), '(fields, ics, 5, 4)\n', (2941, 2960), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((3459, 3480), 'numpy.empty_like', 'np.empty_like', (['fields'], {}), '(fields)\n', (3472, 3480), True, 'import numpy as np\n'), ((3595, 3640), 'shabanipy.jj.fraunhofer.estimation.guess_current_distribution', 'guess_current_distribution', (['fields', 'ics', '(5)', '(4)'], {}), '(fields, ics, 5, 4)\n', (3621, 3640), False, 'from shabanipy.jj.fraunhofer.estimation import guess_current_distribution\n'), ((1323, 1356), 'numpy.array', 'np.array', (['[0.625, 0, 0, 0, 0.625]'], {}), '([0.625, 0, 0, 0, 0.625])\n', (1331, 1356), True, 'import numpy as np\n'), ((2245, 2272), 'numpy.sinc', 'np.sinc', (['(2 * (fields - 0.1))'], {}), '(2 * (fields - 0.1))\n', (2252, 2272), True, 'import numpy as np\n'), ((607, 634), 'numpy.sinc', 'np.sinc', (['(8 * (fields - 0.1))'], {}), '(8 * (fields - 0.1))\n', (614, 634), True, 'import numpy as np\n'), ((1063, 1073), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (1070, 1073), 
True, 'import numpy as np\n'), ((742, 776), 'numpy.cos', 'np.cos', (['(8 * np.pi * (fields + 0.1))'], {}), '(8 * np.pi * (fields + 0.1))\n', (748, 776), True, 'import numpy as np\n'), ((779, 808), 'numpy.sinc', 'np.sinc', (['(0.1 * (fields + 0.1))'], {}), '(0.1 * (fields + 0.1))\n', (786, 808), True, 'import numpy as np\n')]
|
"""
Author :
<NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from keras import backend as K
from tqdm.keras import TqdmCallback
from scipy.stats import spearmanr
from tensorflow.keras import Input
from tensorflow.keras import optimizers
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from tensorflow.keras.models import Model
from statistics import mean
from sklearn.utils import shuffle
from tensorflow import keras
from tensorflow.keras.optimizers import Adam
import pandas as pd
import datetime
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau ,Callback,TensorBoard
from keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras import applications
import PIL
from keras.activations import softmax,sigmoid
import h5py
from PIL import Image
from keras.layers import Layer
from scipy.stats import spearmanr,pearsonr
import sklearn
import tensorflow as tf
from tensorflow.keras.layers import MaxPooling2D ,Dense,Concatenate ,Dropout ,Input,concatenate,Conv2D,Reshape,GlobalMaxPooling2D,Flatten,GlobalAveragePooling2D,AveragePooling2D,Lambda,MaxPooling2D,TimeDistributed, Bidirectional, LSTM
import argparse
import random
from tqdm import tqdm
tf.keras.backend.clear_session()
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
#os.environ['CUDA_VISIBLE_DEVICES']=""
def data_generator(data,batch_size=16):
num_samples = len(data)
random.shuffle(data)
while True:
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = data[offset:offset+batch_size]
X_train = np.zeros((batch_size, 30,25,2560))
y_train = np.zeros((batch_size,1))
for i in range(batch_size):
X_train[i,:,:,:] = np.load(batch_samples[i][0])
y_train[i,:] = np.load(batch_samples[i][1])
y_train[i,:] = y_train[i,:]
yield X_train, y_train
def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
# 4-parameter logistic function
logisticPart = 1 + np.exp(np.negative(np.divide(X - bayta3, np.abs(bayta4))))
yhat = bayta2 + np.divide(bayta1 - bayta2, logisticPart)
return yhat
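# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The 4-parameter logistic above is typically fitted between predicted and
# subjective quality scores before computing correlations. A minimal fit,
# assuming scipy.optimize is available; y_pred and y_true are hypothetical
# 1-D arrays. Never called here.
def _example_fit_logistic(y_pred, y_true):
    from scipy.optimize import curve_fit
    # crude initial guesses: span of the targets and the mean prediction
    p0 = [np.max(y_true), np.min(y_true), np.mean(y_pred), 1.0]
    popt, _ = curve_fit(logistic_func, y_pred, y_true, p0=p0, maxfev=20000)
    return logistic_func(y_pred, *popt)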
'''
def data_generator_1(data,batch_size=4):
num_samples = len(data)
while True:
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = data[offset:offset+batch_size]
X_train = np.zeros((batch_size, 30,25,2560))
y_train = np.zeros((batch_size,1))
for i in range(batch_size):
X_train[i,:,:,:] = np.load(batch_samples[i][0])
y_train[i,:] = np.load(batch_samples[i][1])
yield X_train
def data_generator_2(data,batch_size=1):
num_samples = len(data)
while True:
for offset in range(0, num_samples, batch_size):
# Get the samples you'll use in this batch
batch_samples = data[offset:offset+batch_size]
X_train = np.zeros((batch_size, 30,25,2560))
y_train = np.zeros((batch_size,1))
for i in range(batch_size):
X_train[i,:,:,:] = np.load(batch_samples[i][0])
y_train[i,:] = np.load(batch_samples[i][1])
yield y_train
'''
def build_model(batch_shape, model_final):
model = models.Sequential()
model.add(TimeDistributed(model_final,input_shape = batch_shape))
model.add(Bidirectional(LSTM(64,return_sequences=True,kernel_initializer='random_normal',
recurrent_initializer='random_normal',
dropout=0.4,recurrent_dropout=0)))
model.add(Bidirectional(LSTM(64,return_sequences=True,
kernel_initializer='random_normal',
recurrent_initializer='random_normal', dropout=0.4,recurrent_dropout=0)))
model.add(Flatten())
model.add(Dense(256,activation='relu', kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001)))
model.add(layers.Dropout(rate=0.5))
model.add(layers.Dense(1))
model.add(layers.Activation('linear'))
model.compile(optimizer=optimizers.Adam(),loss='mse',metrics=['mae'])
model.summary()
return model
def data_prepare():
x = os.listdir('features_X')
li = []
for i in range(len(x)):
tem = []
x_f = './features_X/' + x[i]
y_f = './features_y/' + x[i]
tem.append(x_f)
tem.append(y_f)
li.append(tem)
li.sort()
return (li)
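# --- Illustrative sketch (added for clarity; not part of the original script) ---
# How the pieces above fit together: data_prepare() pairs the feature/label
# .npy files and data_generator() turns them into (batch, 30, 25, 2560) /
# (batch, 1) arrays for Keras. Never called here; assumes './features_X' and
# './features_y' exist on disk.
def _example_inspect_one_batch():
    pairs = data_prepare()
    gen = data_generator(pairs, batch_size=4)
    X_batch, y_batch = next(gen)
    return X_batch.shape, y_batch.shape   # ((4, 30, 25, 2560), (4, 1))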
if __name__ == '__main__':
parser = argparse.ArgumentParser("End2End_train")
parser.add_argument('-nf',
'--num_frames',
default=30,
type=int,
help='Number of cropped frames per video.'
)
parser.add_argument('-m',
'--pretrained_model',
default='/models/res-bi-sp_koniq.h5',
type=str,
help='path to pretrained spatial pooling module.'
)
parser.add_argument('-b',
'--batch_size',
default=16,
type=int,
help='batch_size.'
)
if not os.path.exists('./models'):
os.makedirs('./models')
args = parser.parse_args()
md = ModelCheckpoint(filepath='./models/trained_model.h5',monitor='val_loss', mode='min',save_weights_only=True,save_best_only=True,verbose=1)
rd = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=20,min_lr=1e-7, verbose=2, mode='min')
ear = EarlyStopping(monitor='val_loss',mode ='min', patience=80, verbose=2,restore_best_weights=False)
callbacks_k = [md,rd,TqdmCallback(verbose=2),ear]
li = data_prepare()
li.sort()
num_patch = 25
nb = args.num_frames
batch_size = args.batch_size
sp_pretrained = args.pretrained_model
sep = int(len(li)/5)
train_l = li[0:sep*4]
test_l = li[sep*4:]
train_gen = data_generator(train_l,batch_size= batch_size)
val_gen = data_generator(test_l,batch_size= batch_size)
In = Input((nb,num_patch,2048))
model = load_model(sp_pretrained)
for layer in model.layers:
layer.trainable = True
model_final = Model(inputs=model.input,outputs=model.layers[-3].output )
model = build_model((nb,num_patch,2048), model_final)
history = model.fit_generator(train_gen,steps_per_epoch = int(len(train_l)/ batch_size),
epochs=200,validation_data=val_gen,validation_steps =
int(len(test_l)/batch_size) ,verbose=0,callbacks=callbacks_k)
|
[
"keras.models.load_model",
"numpy.load",
"numpy.abs",
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dense",
"random.shuffle",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"os.path.exists",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"keras.callbacks.ReduceLROnPlateau",
"numpy.divide",
"tensorflow.keras.layers.Dropout",
"keras.callbacks.ModelCheckpoint",
"tensorflow.keras.backend.clear_session",
"tensorflow.keras.models.Model",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.layers.TimeDistributed",
"os.listdir",
"os.makedirs",
"numpy.zeros",
"keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.LSTM",
"tqdm.keras.TqdmCallback"
] |
[((1330, 1362), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (1360, 1362), True, 'import tensorflow as tf\n'), ((1559, 1579), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (1573, 1579), False, 'import random\n'), ((3628, 3647), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (3645, 3647), False, 'from tensorflow.keras import models\n'), ((4530, 4554), 'os.listdir', 'os.listdir', (['"""features_X"""'], {}), "('features_X')\n", (4540, 4554), False, 'import os\n'), ((4781, 4821), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""End2End_train"""'], {}), "('End2End_train')\n", (4804, 4821), False, 'import argparse\n'), ((5380, 5525), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""./models/trained_model.h5"""', 'monitor': '"""val_loss"""', 'mode': '"""min"""', 'save_weights_only': '(True)', 'save_best_only': '(True)', 'verbose': '(1)'}), "(filepath='./models/trained_model.h5', monitor='val_loss',\n mode='min', save_weights_only=True, save_best_only=True, verbose=1)\n", (5395, 5525), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, TensorBoard\n'), ((5524, 5627), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(20)', 'min_lr': '(1e-07)', 'verbose': '(2)', 'mode': '"""min"""'}), "(monitor='val_loss', factor=0.5, patience=20, min_lr=1e-07,\n verbose=2, mode='min')\n", (5541, 5627), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, TensorBoard\n'), ((5629, 5730), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'patience': '(80)', 'verbose': '(2)', 'restore_best_weights': '(False)'}), "(monitor='val_loss', mode='min', patience=80, verbose=2,\n restore_best_weights=False)\n", (5642, 5730), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, Callback, TensorBoard\n'), ((6105, 6133), 'tensorflow.keras.layers.Input', 'Input', (['(nb, num_patch, 2048)'], {}), '((nb, num_patch, 2048))\n', (6110, 6133), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((6141, 6166), 'keras.models.load_model', 'load_model', (['sp_pretrained'], {}), '(sp_pretrained)\n', (6151, 6166), False, 'from keras.models import load_model\n'), ((6235, 6293), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.input', 'outputs': 'model.layers[-3].output'}), '(inputs=model.input, outputs=model.layers[-3].output)\n', (6240, 6293), False, 'from tensorflow.keras.models import Model\n'), ((2329, 2369), 'numpy.divide', 'np.divide', (['(bayta1 - bayta2)', 'logisticPart'], {}), '(bayta1 - bayta2, logisticPart)\n', (2338, 2369), True, 'import numpy as np\n'), ((3660, 3713), 'tensorflow.keras.layers.TimeDistributed', 'TimeDistributed', (['model_final'], {'input_shape': 'batch_shape'}), '(model_final, input_shape=batch_shape)\n', (3675, 3713), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((4166, 4175), 
'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4173, 4175), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((4301, 4325), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (4315, 4325), False, 'from tensorflow.keras import layers\n'), ((4340, 4355), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (4352, 4355), False, 'from tensorflow.keras import layers\n'), ((4369, 4396), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""linear"""'], {}), "('linear')\n", (4386, 4396), False, 'from tensorflow.keras import layers\n'), ((5290, 5316), 'os.path.exists', 'os.path.exists', (['"""./models"""'], {}), "('./models')\n", (5304, 5316), False, 'import os\n'), ((5320, 5343), 'os.makedirs', 'os.makedirs', (['"""./models"""'], {}), "('./models')\n", (5331, 5343), False, 'import os\n'), ((5748, 5771), 'tqdm.keras.TqdmCallback', 'TqdmCallback', ([], {'verbose': '(2)'}), '(verbose=2)\n', (5760, 5771), False, 'from tqdm.keras import TqdmCallback\n'), ((1803, 1839), 'numpy.zeros', 'np.zeros', (['(batch_size, 30, 25, 2560)'], {}), '((batch_size, 30, 25, 2560))\n', (1811, 1839), True, 'import numpy as np\n'), ((1860, 1885), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (1868, 1885), True, 'import numpy as np\n'), ((3744, 3888), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)', 'kernel_initializer': '"""random_normal"""', 'recurrent_initializer': '"""random_normal"""', 'dropout': '(0.4)', 'recurrent_dropout': '(0)'}), "(64, return_sequences=True, kernel_initializer='random_normal',\n recurrent_initializer='random_normal', dropout=0.4, recurrent_dropout=0)\n", (3748, 3888), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((3946, 4090), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)', 'kernel_initializer': '"""random_normal"""', 'recurrent_initializer': '"""random_normal"""', 'dropout': '(0.4)', 'recurrent_dropout': '(0)'}), "(64, return_sequences=True, kernel_initializer='random_normal',\n recurrent_initializer='random_normal', dropout=0.4, recurrent_dropout=0)\n", (3950, 4090), False, 'from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, Dropout, Input, concatenate, Conv2D, Reshape, GlobalMaxPooling2D, Flatten, GlobalAveragePooling2D, AveragePooling2D, Lambda, MaxPooling2D, TimeDistributed, Bidirectional, LSTM\n'), ((4425, 4442), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (4440, 4442), False, 'from tensorflow.keras import optimizers\n'), ((1958, 1986), 'numpy.load', 'np.load', (['batch_samples[i][0]'], {}), '(batch_samples[i][0])\n', (1965, 1986), True, 'import numpy as np\n'), ((2016, 2044), 'numpy.load', 'np.load', (['batch_samples[i][1]'], {}), '(batch_samples[i][1])\n', (2023, 2044), True, 'import numpy as np\n'), ((4238, 4286), 'tensorflow.keras.initializers.RandomNormal', 'tf.keras.initializers.RandomNormal', ([], {'stddev': '(0.001)'}), '(stddev=0.001)\n', (4272, 4286), True, 'import tensorflow as tf\n'), ((2293, 2307), 'numpy.abs', 'np.abs', 
(['bayta4'], {}), '(bayta4)\n', (2299, 2307), True, 'import numpy as np\n')]
|
#
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
gpu_num = 0 # Number of the GPU to be used
try:
tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
print('Only GPU number', gpu_num, 'used.')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
except RuntimeError as e:
print(e)
import unittest
import pytest # for pytest filterwarnings
import numpy as np
from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder
from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder
from sionna.fec.polar.decoding import Polar5GDecoder
from sionna.fec.crc import CRCEncoder
from sionna.fec.utils import GaussianPriorSource
from sionna.utils import BinarySource
from sionna.fec.polar.utils import generate_5g_ranking
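# --- Illustrative sketch (added for clarity; not part of the original tests) ---
# The noiseless encode/decode round trip exercised by the test_identity cases
# below, written out once as a plain function (never called here).
def _example_sc_round_trip(batch_size=10, k=32, n=64):
    frozen_pos, _ = generate_5g_ranking(k, n)
    enc = PolarEncoder(frozen_pos, n)
    dec = PolarSCDecoder(frozen_pos, n)
    u = BinarySource()([batch_size, k])
    c = enc(u)
    llr_ch = 20. * (2. * c - 1.)   # BPSK demapping without noise
    u_hat = dec(llr_ch)
    return np.array_equal(u.numpy(), u_hat.numpy())   # expected: True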
class TestPolarDecodingSC(unittest.TestCase):
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
        # frozen vec too long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarSCDecoder(frozen_pos, n)
        # n not a power of 2
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarSCDecoder(frozen_pos, n+1)
# test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
PolarSCDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarSCDecoder(frozen_pos, 64, output_dtype=tf.complex64)
def test_output_dim(self):
"""Test that output dims are correct (=n) and output equals all-zero
codeword."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
c = -10. * np.ones([bs, p[1]]) # all-zero with BPSK (no noise);logits
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==p[0])
# also check that all-zero input yields all-zero output
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
def test_numerical_stab(self):
"""Test for numerical stability (no nan or infty as output)."""
bs = 10
# (k,n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = GaussianPriorSource()
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
# case 1: extremely large inputs
c = source([[bs, p[1]], 0.0001])
# llrs
u1 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u1)))
            # no infty
self.assertFalse(np.any(np.isinf(u1)))
self.assertFalse(np.any(np.isneginf(u1)))
# case 2: zero llr input
c = tf.zeros([bs, p[1]])
# llrs
u2 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u2)))
            # no infty
self.assertFalse(np.any(np.isinf(u2)))
self.assertFalse(np.any(np.isneginf(u2)))
def test_identity(self):
"""test that info bits can be recovered if no noise is added."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
enc = PolarEncoder(frozen_pos, p[1])
dec = PolarSCDecoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 20.*(2.*c-1) # demod BPSK without noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarSCDecoder(frozen_pos, n)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs, n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1, n])
model(b2)
model.summary()
def test_multi_dimensional(self):
"""Test against arbitrary shapes.
"""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
# encode 2D Tensor
c = dec(b).numpy()
# encode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
# both version should yield same result
self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCDecoder(frozen_pos, n)
b = source([1,15,n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that graph mode works and xla is supported."""
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
dec = PolarSCDecoder(frozen_pos, n)
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
# run same test for XLA (jit_compile=True)
u = source([bs, n])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([bs+1, n])
x = run_graph_xla(u).numpy()
def test_ref_implementation(self):
"""Test against pre-calculated results from internal implementation.
"""
ref_path = '../test/codes/polar/'
filename = ["P_128_37", "P_128_110", "P_256_128"]
for f in filename:
A = np.load(ref_path + f + "_Avec.npy")
llr_ch = np.load(ref_path + f + "_Lch.npy")
u_hat = np.load(ref_path + f + "_uhat.npy")
frozen_pos = np.array(np.where(A==0)[0])
info_pos = np.array(np.where(A==1)[0])
n = len(frozen_pos) + len(info_pos)
k = len(info_pos)
dec = PolarSCDecoder(frozen_pos, n)
l_in = -1. * llr_ch # logits
u_hat_tf = dec(l_in).numpy()
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_tf, u_hat))
def test_dtype_flexible(self):
"""Test that output_dtype can be flexible."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarSCDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex-valued inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarSCDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
class TestPolarDecodingSCL(unittest.TestCase):
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
        # frozen vec too long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarSCLDecoder(frozen_pos, n)
        # n not a power of 2
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarSCLDecoder(frozen_pos, n+1)
# also test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
PolarSCLDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarSCLDecoder(frozen_pos, 64, output_dtype=tf.complex64)
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_output_dim(self):
"""Test that output dims are correct (=n) and output is the all-zero
codeword."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
# use_hybrid, use_fast_scl, cpu_only, use_scatter
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, p[1]])
u = dec(c).numpy()
# check shape
self.assertTrue(u.shape[-1]==p[0])
# also check that all-zero input yields all-zero
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
# also test different list sizes
n = 32
k = 16
frozen_pos, _ = generate_5g_ranking(k, n)
list_sizes = [1, 2, 8, 32]
for list_size in list_sizes:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, n])
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==k)
# also check that all-zero input yields all-zero
u_hat = np.zeros([bs, k])
self.assertTrue(np.array_equal(u, u_hat))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_numerical_stab(self):
"""Test for numerical stability (no nan or infty as output)"""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = GaussianPriorSource()
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
# case 1: extremely large inputs
c = source([[bs, p[1]], 0.0001])
# llrs
u1 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u1)))
                        # no infty
self.assertFalse(np.any(np.isinf(u1)))
self.assertFalse(np.any(np.isneginf(u1)))
# case 2: zero input
c = tf.zeros([bs, p[1]])
# llrs
u2 = dec(c).numpy()
# no nan
self.assertFalse(np.any(np.isnan(u2)))
                        # no infty
self.assertFalse(np.any(np.isinf(u2)))
self.assertFalse(np.any(np.isneginf(u2)))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_identity(self):
"""Test that info bits can be recovered if no noise is added."""
bs = 10
# (k,n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256]]
source = BinarySource()
# use_hybrid, use_fast_scl, cpu_only, use_scatter
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
enc = PolarEncoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 200.*(2.*c-1) # demod BPSK without noise
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
p[1],
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(),
u_hat.numpy()))
# also test different list sizes
n = 32
k = 16
crc_degree = "CRC11"
frozen_pos, _ = generate_5g_ranking(k, n)
enc = PolarEncoder(frozen_pos, n)
enc_crc = CRCEncoder(crc_degree)
u = source([bs, k-enc_crc.crc_length])
u_crc = enc_crc(u)
c = enc(u_crc)
        llr_ch = 200.*(2.*c-1) # demod BPSK without noise
list_sizes = [1, 2, 8, 32]
for list_size in list_sizes:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter,
crc_degree=crc_degree)
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u_crc.numpy(),
u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 16
n = 32
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs,n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1,n])
model(b2)
model.summary()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_multi_dimensional(self):
"""Test against multi-dimensional input shapes.
As reshaping is done before calling the actual decoder, no exhaustive
testing against all decoder options is required.
"""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarSCLDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
# encode 2D Tensor
c = dec(b).numpy()
# encode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
# both version should yield same result
self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 78
n = 128
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
b = source([1,15,n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that graph mode works and XLA is supported."""
bs = 10
k = 16
n = 32
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
crc_degrees = [None, "CRC11"]
for crc_degree in crc_degrees:
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
dec = PolarSCLDecoder(frozen_pos,
n,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter,
crc_degree=crc_degree)
# test that for arbitrary input only binary values are
# returned
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
if not cpu_only: # cpu only does not support XLA
# run same test for XLA (jit_compile=True)
u = source([bs, n])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([bs+1, n])
x = run_graph_xla(u).numpy()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_ref_implementation(self):
"""Test against pre-calculated results from internal implementation.
Also verifies that all decoding options yield same results.
Remark: results are for SC only, i.e., list_size=1.
"""
ref_path = '../test/codes/polar/'
filename = ["P_128_37", "P_128_110", "P_256_128"]
for f in filename:
A = np.load(ref_path + f + "_Avec.npy")
llr_ch = np.load(ref_path + f + "_Lch.npy")
u_hat = np.load(ref_path + f + "_uhat.npy")
frozen_pos = np.array(np.where(A==0)[0])
info_pos = np.array(np.where(A==1)[0])
n = len(frozen_pos) + len(info_pos)
k = len(info_pos)
for use_fast_scl in [False, True]:
for cpu_only in [False, True]:
for use_scatter in [False, True]:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=1,
use_fast_scl=use_fast_scl,
cpu_only=cpu_only,
use_scatter=use_scatter)
l_in = -1. * llr_ch # logits
u_hat_tf = dec(l_in).numpy()
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_tf, u_hat))
def test_hybrid_scl(self):
"""Verify hybrid SC decoding option.
Remark: XLA is currently not supported.
"""
bs = 10
n = 32
k = 16
crc_degree = "CRC11"
list_sizes = [1, 2, 8, 32]
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
enc = PolarEncoder(frozen_pos, n)
enc_crc = CRCEncoder(crc_degree)
k_crc = enc_crc.crc_length
u = source([bs, k-k_crc])
u_crc = enc_crc(u)
c = enc(u_crc)
        llr_ch = 20.*(2.*c-1) # demod BPSK without noise
for list_size in list_sizes:
dec = PolarSCLDecoder(frozen_pos,
n,
list_size=list_size,
use_hybrid_sc=True,
crc_degree=crc_degree)
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u_crc.numpy(), u_hat.numpy()))
# verify that graph can be executed
@tf.function
def run_graph(u):
return dec(u)
u = source([bs, n])
# execute the graph twice
x = run_graph(u).numpy()
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
def test_dtype_flexible(self):
"""Test that output_dtype is variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarSCLDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex-valued inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarSCLDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
class TestPolarDecodingBP(unittest.TestCase):
"""Test Polar BP decoder."""
def test_invalid_inputs(self):
"""Test against invalid values of n and frozen_pos."""
        # frozen vec too long
n = 32
frozen_pos = np.arange(n+1)
with self.assertRaises(AssertionError):
PolarBPDecoder(frozen_pos, n)
        # n not a power of 2
n = 32
k = 12
frozen_pos,_ = generate_5g_ranking(k, n)
with self.assertRaises(AssertionError):
PolarBPDecoder(frozen_pos, n+1)
        # also test valid shapes
# (k, n)
param_valid = [[0, 32], [10, 32], [32, 32], [100, 256],
[123, 1024], [1024, 1024]]
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
PolarBPDecoder(frozen_pos, p[1])
# no complex-valued input allowed
with self.assertRaises(ValueError):
frozen_pos,_ = generate_5g_ranking(32, 64)
PolarBPDecoder(frozen_pos, 64, output_dtype=tf.complex64)
def test_output_dim(self):
"""Test that output dims are correct (=n) and output is all-zero
codeword."""
# batch size
bs = 10
# (k, n)
param_valid = [[1, 32],[10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for hard_out in [True, False]:
for p in param_valid:
frozen_pos, _ = generate_5g_ranking(p[0],p[1])
dec = PolarBPDecoder(frozen_pos,
p[1],
hard_out=hard_out)
# all-zero with BPSK (no noise);logits
c = -10. * np.ones([bs, p[1]])
u = dec(c).numpy()
self.assertTrue(u.shape[-1]==p[0])
if hard_out:
# also check that all-zero input yields all-zero output
u_hat = np.zeros([bs, p[0]])
self.assertTrue(np.array_equal(u, u_hat))
def test_identity(self):
"""Test that info bits can be recovered if no noise is added."""
bs = 10
# (k, n)
param_valid = [[1, 32], [10, 32], [32, 32], [100, 256], [123, 1024],
[1024, 1024]]
for p in param_valid:
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(p[0], p[1])
enc = PolarEncoder(frozen_pos, p[1])
dec = PolarBPDecoder(frozen_pos, p[1])
u = source([bs, p[0]])
c = enc(u)
            llr_ch = 20.*(2.*c-1) # demod BPSK without noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 128
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = PolarBPDecoder(frozen_pos, n)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs, n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1, n])
model(b2)
model.summary()
def test_multi_dimensional(self):
"""Test against arbitrary shapes."""
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarBPDecoder(frozen_pos, n)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
# encode 2D Tensor
c = dec(b).numpy()
# encode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
# both version should yield same result
self.assertTrue(np.array_equal(c, c_res))
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 120
n = 256
frozen_pos, _ = generate_5g_ranking(k, n)
source = BinarySource()
dec = PolarBPDecoder(frozen_pos, n)
b = source([1, 15, n])
b_rep = tf.tile(b, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_numerics(self):
"""Test for numerical stability with large llrs and many iterations.
"""
bs = 100
k = 120
n = 256
num_iter = 200
for hard_out in [False, True]:
frozen_pos, _ = generate_5g_ranking(k, n)
source = GaussianPriorSource()
dec = PolarBPDecoder(frozen_pos,
n,
hard_out=hard_out,
num_iter=num_iter)
b = source([[bs,n], 0.001]) # very large llrs
c = dec(b).numpy()
# all values are finite (not nan and not inf)
self.assertTrue(np.sum(np.abs(1 - np.isfinite(c)))==0)
def test_tf_fun(self):
"""Test that graph mode works and XLA is supported."""
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
bs = 10
k = 32
n = 64
num_iter = 10
source = BinarySource()
frozen_pos, _ = generate_5g_ranking(k, n)
dec = PolarBPDecoder(frozen_pos, n, num_iter=num_iter)
# test that for arbitrary input only 0,1 values are returned
u = source([bs, n])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([bs+1, n])
x = run_graph(u).numpy()
x = run_graph(u).numpy()
# Currently not supported
# run same test for XLA (jit_compile=True)
#u = source([bs, n])
#x = run_graph_xla(u).numpy()
#x = run_graph_xla(u).numpy()
#u = source([bs+1, n])
#x = run_graph_xla(u).numpy()
def test_ref_implementation(self):
"""Test against Numpy reference implementation.
Test hard and soft output.
"""
def boxplus_np(x, y):
"""Check node update (boxplus) for LLRs in numpy.
See [Stimming_LLR]_ and [Hashemi_SSCL]_ for detailed equations.
"""
x_in = np.maximum(np.minimum(x, llr_max), -llr_max)
y_in = np.maximum(np.minimum(y, llr_max), -llr_max)
# avoid division for numerical stability
llr_out = np.log(1 + np.exp(x_in + y_in))
llr_out -= np.log(np.exp(x_in) + np.exp(y_in))
return llr_out
def decode_bp(llr_ch, n_iter, frozen_pos, info_pos):
n = llr_ch.shape[-1]
bs = llr_ch.shape[0]
n_stages = int(np.log2(n))
msg_r = np.zeros([bs, n_stages+1, n])
msg_l = np.zeros([bs, n_stages+1, n])
# init llr_ch
msg_l[:, n_stages, :] = -1*llr_ch.numpy()
# init frozen positions with infty
msg_r[:, 0, frozen_pos] = llr_max
# and decode
for iter in range(n_iter):
# update r messages
for s in range(n_stages):
# calc indices
ind_range = np.arange(int(n/2))
ind_1 = ind_range * 2 - np.mod(ind_range, 2**(s))
ind_2 = ind_1 + 2**s
# load messages
l1_in = msg_l[:, s+1, ind_1]
l2_in = msg_l[:, s+1, ind_2]
r1_in = msg_r[:, s, ind_1]
r2_in = msg_r[:, s, ind_2]
# r1_out
msg_r[:, s+1, ind_1] = boxplus_np(r1_in, l2_in + r2_in)
# r2_out
msg_r[:, s+1, ind_2] = boxplus_np(r1_in, l1_in) + r2_in
# update l messages
for s in range(n_stages-1, -1, -1):
ind_range = np.arange(int(n/2))
ind_1 = ind_range * 2 - np.mod(ind_range, 2**(s))
ind_2 = ind_1 + 2**s
l1_in = msg_l[:, s+1, ind_1]
l2_in = msg_l[:, s+1, ind_2]
r1_in = msg_r[:, s, ind_1]
r2_in = msg_r[:, s, ind_2]
# l1_out
msg_l[:, s, ind_1] = boxplus_np(l1_in, l2_in + r2_in)
# l2_out
msg_l[:, s, ind_2] = boxplus_np(r1_in, l1_in) + l2_in
# recover u_hat
u_hat_soft = msg_l[:, 0, info_pos]
u_hat = 0.5 * (1 - np.sign(u_hat_soft))
return u_hat, u_hat_soft
# generate llr_ch
noise_var = 0.3
num_iters = [5, 10, 20, 40]
llr_max = 19.3
bs = 100
n = 128
k = 64
frozen_pos, info_pos = generate_5g_ranking(k, n)
for num_iter in num_iters:
source = GaussianPriorSource()
llr_ch = source([[bs, n], noise_var])
# and decode
dec_bp = PolarBPDecoder(frozen_pos, n,
hard_out=True, num_iter=num_iter)
dec_bp_soft = PolarBPDecoder(frozen_pos, n,
hard_out=False, num_iter=num_iter)
u_hat_bp = dec_bp(llr_ch).numpy()
u_hat_bp_soft = dec_bp_soft(llr_ch,).numpy()
            # and run the numpy reference BP decoder
u_hat_ref, u_hat_ref_soft = decode_bp(llr_ch,
num_iter,
frozen_pos,
info_pos)
# the output should be equal to the reference
self.assertTrue(np.array_equal(u_hat_bp, u_hat_ref))
self.assertTrue(np.allclose(-u_hat_bp_soft,
u_hat_ref_soft,
rtol=5e-2,
atol=5e-3))
def test_dtype_flexible(self):
"""Test that output dtype is variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
frozen_pos, _ = generate_5g_ranking(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = PolarBPDecoder(frozen_pos, n, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = PolarBPDecoder(frozen_pos, n, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
class TestPolarDecoding5G(unittest.TestCase):
def test_invalid_inputs(self):
"""Test against invalid input values.
Note: consistency of code parameters is already checked by the encoder.
"""
enc = Polar5GEncoder(40, 60)
with self.assertRaises(AssertionError):
Polar5GDecoder(enc, dec_type=1)
with self.assertRaises(ValueError):
Polar5GDecoder(enc, dec_type="ABC")
with self.assertRaises(AssertionError):
Polar5GDecoder("SC")
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_identity_de_ratematching(self):
"""Test that info bits can be recovered if no noise is added and
dimensions are correct."""
bs = 10
# (k,n)
param_valid = [[12, 32], [20, 32], [100, 257], [123, 897],
[1013, 1088]]
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for p in param_valid:
for dec_type in dec_types:
source = BinarySource()
enc = Polar5GEncoder(p[0], p[1])
dec = Polar5GDecoder(enc, dec_type=dec_type)
u = source([bs, p[0]])
c = enc(u)
self.assertTrue(c.numpy().shape[-1]==p[1])
                llr_ch = 20.*(2.*c-1) # demod BPSK without noise
u_hat = dec(llr_ch)
self.assertTrue(np.array_equal(u.numpy(), u_hat.numpy()))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_keras(self):
"""Test that Keras model can be compiled (supports dynamic shapes)."""
bs = 10
k = 100
n = 145
source = BinarySource()
enc = Polar5GEncoder(k, n)
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
inputs = tf.keras.Input(shape=(n), dtype=tf.float32)
x = Polar5GDecoder(enc, dec_type=dec_type)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs,n])
model(b)
# call twice to see that bs can change
b2 = source([bs+1,n])
model(b2)
model.summary()
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_multi_dimensional(self):
"""Test against arbitrary shapes."""
k = 120
n = 237
enc = Polar5GEncoder(k, n)
source = BinarySource()
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
dec = Polar5GDecoder(enc, dec_type=dec_type)
b = source([100, n])
b_res = tf.reshape(b, [4, 5, 5, n])
# encode 2D Tensor
c = dec(b).numpy()
# encode 4D Tensor
c_res = dec(b_res).numpy()
# and reshape to 2D shape
c_res = tf.reshape(c_res, [100, k])
# both version should yield same result
self.assertTrue(np.array_equal(c, c_res))
    # Filter warnings related to large resource allocation
@pytest.mark.filterwarnings("ignore: Required ressource allocation")
def test_batch(self):
"""Test that all samples in batch yield same output (for same input).
"""
bs = 100
k = 95
n = 145
enc = Polar5GEncoder(k, n)
source = GaussianPriorSource()
dec_types = ["SC", "SCL", "hybSCL", "BP"]
for dec_type in dec_types:
dec = Polar5GDecoder(enc, dec_type=dec_type)
llr = source([[1,4,n], 0.5])
llr_rep = tf.tile(llr, [bs, 1, 1])
# and run tf version (to be tested)
c = dec(llr_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_tf_fun(self):
"""Test that tf.function decorator works
include xla compiler test."""
bs = 10
k = 45
n = 67
enc = Polar5GEncoder(k, n)
source = GaussianPriorSource()
# hybSCL does not support graph mode!
dec_types = ["SC", "SCL", "BP"]
for dec_type in dec_types:
print(dec_type)
dec = Polar5GDecoder(enc, dec_type=dec_type)
@tf.function
def run_graph(u):
return dec(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return dec(u)
# test that for arbitrary input only binary values are returned
u = source([[bs, n], 0.5])
x = run_graph(u).numpy()
# execute the graph twice
x = run_graph(u).numpy()
# and change batch_size
u = source([[bs+1, n], 0.5])
x = run_graph(u).numpy()
# run same test for XLA (jit_compile=True)
# BP does currently not support XLA
if dec_type != "BP":
u = source([[bs, n], 0.5])
x = run_graph_xla(u).numpy()
x = run_graph_xla(u).numpy()
u = source([[bs+1, n], 0.5])
x = run_graph_xla(u).numpy()
def test_dtype_flexible(self):
"""Test that output dtype can be variable."""
batch_size = 100
k = 30
n = 64
source = GaussianPriorSource()
enc = Polar5GEncoder(k, n)
dtypes_supported = (tf.float16, tf.float32, tf.float64)
for dt_in in dtypes_supported:
for dt_out in dtypes_supported:
llr = source([[batch_size, n], 0.5])
llr = tf.cast(llr, dt_in)
dec = Polar5GDecoder(enc, output_dtype=dt_out)
x = dec(llr)
self.assertTrue(x.dtype==dt_out)
# test that complex inputs raise error
llr = source([[batch_size, n], 0.5])
llr_c = tf.complex(llr, tf.zeros_like(llr))
dec = Polar5GDecoder(enc, output_dtype=tf.float32)
with self.assertRaises(TypeError):
x = dec(llr_c)
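# --- Illustrative sketch (added for clarity; not part of the original tests) ---
# Same round trip as _example_sc_round_trip above, but through the 5G
# rate-matched encoder/decoder pair (mirrors test_identity_de_ratematching;
# never called here).
def _example_5g_round_trip(batch_size=10, k=100, n=145, dec_type="SCL"):
    enc = Polar5GEncoder(k, n)
    dec = Polar5GDecoder(enc, dec_type=dec_type)
    u = BinarySource()([batch_size, k])
    c = enc(u)
    llr_ch = 20. * (2. * c - 1.)   # BPSK demapping without noise
    u_hat = dec(llr_ch)
    return np.array_equal(u.numpy(), u_hat.numpy())   # expected: True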
|
[
"numpy.load",
"numpy.array_equal",
"sionna.fec.polar.utils.generate_5g_ranking",
"numpy.allclose",
"tensorflow.reshape",
"tensorflow.zeros_like",
"numpy.ones",
"numpy.isnan",
"sionna.fec.polar.decoding.Polar5GDecoder",
"numpy.arange",
"numpy.exp",
"sionna.fec.polar.decoding.PolarSCLDecoder",
"sionna.fec.polar.decoding.PolarSCDecoder",
"sys.path.append",
"tensorflow.keras.Input",
"sionna.fec.polar.encoding.PolarEncoder",
"numpy.isfinite",
"tensorflow.cast",
"sionna.fec.polar.encoding.Polar5GEncoder",
"numpy.minimum",
"numpy.log2",
"numpy.isneginf",
"sionna.fec.polar.decoding.PolarBPDecoder",
"tensorflow.config.experimental.set_memory_growth",
"numpy.isinf",
"numpy.mod",
"tensorflow.keras.Model",
"tensorflow.tile",
"sionna.utils.BinarySource",
"sionna.fec.crc.CRCEncoder",
"sionna.fec.utils.GaussianPriorSource",
"tensorflow.config.set_visible_devices",
"tensorflow.config.list_physical_devices",
"numpy.zeros",
"tensorflow.zeros",
"numpy.where",
"numpy.sign",
"tensorflow.function",
"pytest.mark.filterwarnings"
] |
[((267, 305), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (298, 305), True, 'import tensorflow as tf\n'), ((9161, 9228), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (9187, 9228), False, 'import pytest\n'), ((10308, 10375), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (10334, 10375), False, 'import pytest\n'), ((12882, 12949), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (12908, 12949), False, 'import pytest\n'), ((14689, 14756), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (14715, 14756), False, 'import pytest\n'), ((18282, 18349), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (18308, 18349), False, 'import pytest\n'), ((22085, 22152), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (22111, 22152), False, 'import pytest\n'), ((37736, 37803), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (37762, 37803), False, 'import pytest\n'), ((38730, 38797), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (38756, 38797), False, 'import pytest\n'), ((39548, 39615), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (39574, 39615), False, 'import pytest\n'), ((40416, 40483), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore: Required ressource allocation"""'], {}), "('ignore: Required ressource allocation')\n", (40442, 40483), False, 'import pytest\n'), ((213, 235), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (228, 235), False, 'import sys\n'), ((426, 477), 'tensorflow.config.set_visible_devices', 'tf.config.set_visible_devices', (['gpus[gpu_num]', '"""GPU"""'], {}), "(gpus[gpu_num], 'GPU')\n", (455, 477), True, 'import tensorflow as tf\n'), ((537, 598), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[gpu_num]', '(True)'], {}), '(gpus[gpu_num], True)\n', (577, 598), True, 'import tensorflow as tf\n'), ((1323, 1339), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (1332, 1339), True, 'import numpy as np\n'), ((1538, 1563), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (1557, 1563), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((3129, 3150), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (3148, 3150), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((4831, 4845), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (4843, 4845), False, 'from sionna.utils import 
BinarySource\n'), ((4870, 4895), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (4889, 4895), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((4913, 4954), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'n', 'dtype': 'tf.float32'}), '(shape=n, dtype=tf.float32)\n', (4927, 4954), True, 'import tensorflow as tf\n'), ((5023, 5063), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (5037, 5063), True, 'import tensorflow as tf\n'), ((5381, 5406), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (5400, 5406), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((5424, 5438), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (5436, 5438), False, 'from sionna.utils import BinarySource\n'), ((5453, 5482), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (5467, 5482), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((5529, 5556), 'tensorflow.reshape', 'tf.reshape', (['b', '[4, 5, 5, n]'], {}), '(b, [4, 5, 5, n])\n', (5539, 5556), True, 'import tensorflow as tf\n'), ((5724, 5751), 'tensorflow.reshape', 'tf.reshape', (['c_res', '[100, k]'], {}), '(c_res, [100, k])\n', (5734, 5751), True, 'import tensorflow as tf\n'), ((6042, 6067), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (6061, 6067), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((6085, 6099), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (6097, 6099), False, 'from sionna.utils import BinarySource\n'), ((6114, 6143), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (6128, 6143), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((6190, 6212), 'tensorflow.tile', 'tf.tile', (['b', '[bs, 1, 1]'], {}), '(b, [bs, 1, 1])\n', (6197, 6212), True, 'import tensorflow as tf\n'), ((6558, 6587), 'tensorflow.function', 'tf.function', ([], {'jit_compile': '(True)'}), '(jit_compile=True)\n', (6569, 6587), True, 'import tensorflow as tf\n'), ((6710, 6724), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (6722, 6724), False, 'from sionna.utils import BinarySource\n'), ((6749, 6774), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (6768, 6774), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((6789, 6818), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (6803, 6818), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((8285, 8306), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (8304, 8306), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((8331, 8356), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (8350, 8356), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((8920, 8974), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {'output_dtype': 'tf.float32'}), '(frozen_pos, n, output_dtype=tf.float32)\n', (8934, 8974), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, 
PolarSCLDecoder, PolarBPDecoder\n'), ((9393, 9409), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (9402, 9409), True, 'import numpy as np\n'), ((9609, 9634), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (9628, 9634), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((11808, 11833), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (11827, 11833), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((13172, 13193), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (13191, 13193), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((14974, 14988), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (14986, 14988), False, 'from sionna.utils import BinarySource\n'), ((16059, 16084), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (16078, 16084), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((16099, 16126), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (16111, 16126), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((16145, 16167), 'sionna.fec.crc.CRCEncoder', 'CRCEncoder', (['crc_degree'], {}), '(crc_degree)\n', (16155, 16167), False, 'from sionna.fec.crc import CRCEncoder\n'), ((18649, 18674), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (18668, 18674), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((18692, 18706), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (18704, 18706), False, 'from sionna.utils import BinarySource\n'), ((18721, 18751), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (18736, 18751), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((18798, 18825), 'tensorflow.reshape', 'tf.reshape', (['b', '[4, 5, 5, n]'], {}), '(b, [4, 5, 5, n])\n', (18808, 18825), True, 'import tensorflow as tf\n'), ((18993, 19020), 'tensorflow.reshape', 'tf.reshape', (['c_res', '[100, k]'], {}), '(c_res, [100, k])\n', (19003, 19020), True, 'import tensorflow as tf\n'), ((19309, 19334), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (19328, 19334), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((19352, 19366), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (19364, 19366), False, 'from sionna.utils import BinarySource\n'), ((20244, 20258), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (20256, 20258), False, 'from sionna.utils import BinarySource\n'), ((20283, 20308), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (20302, 20308), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((23910, 23935), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (23929, 23935), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((23953, 23967), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (23965, 23967), False, 'from sionna.utils import BinarySource\n'), ((23982, 24009), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (23994, 24009), False, 
'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((24028, 24050), 'sionna.fec.crc.CRCEncoder', 'CRCEncoder', (['crc_degree'], {}), '(crc_degree)\n', (24038, 24050), False, 'from sionna.fec.crc import CRCEncoder\n'), ((25157, 25178), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (25176, 25178), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((25203, 25228), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (25222, 25228), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((25793, 25848), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'output_dtype': 'tf.float32'}), '(frozen_pos, n, output_dtype=tf.float32)\n', (25808, 25848), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((26165, 26181), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (26174, 26181), True, 'import numpy as np\n'), ((26380, 26405), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (26399, 26405), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((28865, 28879), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (28877, 28879), False, 'from sionna.utils import BinarySource\n'), ((28904, 28929), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (28923, 28929), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((28947, 28988), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'n', 'dtype': 'tf.float32'}), '(shape=n, dtype=tf.float32)\n', (28961, 28988), True, 'import tensorflow as tf\n'), ((29057, 29097), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (29071, 29097), True, 'import tensorflow as tf\n'), ((29406, 29431), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (29425, 29431), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((29449, 29463), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (29461, 29463), False, 'from sionna.utils import BinarySource\n'), ((29478, 29507), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (29492, 29507), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((29554, 29581), 'tensorflow.reshape', 'tf.reshape', (['b', '[4, 5, 5, n]'], {}), '(b, [4, 5, 5, n])\n', (29564, 29581), True, 'import tensorflow as tf\n'), ((29749, 29776), 'tensorflow.reshape', 'tf.reshape', (['c_res', '[100, k]'], {}), '(c_res, [100, k])\n', (29759, 29776), True, 'import tensorflow as tf\n'), ((30067, 30092), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (30086, 30092), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((30110, 30124), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (30122, 30124), False, 'from sionna.utils import BinarySource\n'), ((30139, 30168), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (30153, 30168), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((30217, 30239), 'tensorflow.tile', 'tf.tile', (['b', '[bs, 1, 1]'], {}), '(b, [bs, 1, 1])\n', 
(30224, 30239), True, 'import tensorflow as tf\n'), ((31316, 31345), 'tensorflow.function', 'tf.function', ([], {'jit_compile': '(True)'}), '(jit_compile=True)\n', (31327, 31345), True, 'import tensorflow as tf\n'), ((31488, 31502), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (31500, 31502), False, 'from sionna.utils import BinarySource\n'), ((31527, 31552), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (31546, 31552), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((31567, 31615), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'num_iter': 'num_iter'}), '(frozen_pos, n, num_iter=num_iter)\n', (31581, 31615), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((35087, 35112), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (35106, 35112), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((36392, 36413), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (36411, 36413), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((36438, 36463), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (36457, 36463), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((37020, 37074), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'output_dtype': 'tf.float32'}), '(frozen_pos, n, output_dtype=tf.float32)\n', (37034, 37074), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((37382, 37404), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['(40)', '(60)'], {}), '(40, 60)\n', (37396, 37404), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((38969, 38983), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (38981, 38983), False, 'from sionna.utils import BinarySource\n'), ((38998, 39018), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (39012, 39018), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((39747, 39767), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (39761, 39767), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((39785, 39799), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (39797, 39799), False, 'from sionna.utils import BinarySource\n'), ((40663, 40683), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (40677, 40683), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((40701, 40722), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (40720, 40722), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((41319, 41339), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (41333, 41339), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((41357, 41378), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (41376, 41378), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((42648, 42669), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (42667, 42669), False, 'from sionna.fec.utils import 
GaussianPriorSource\n'), ((42684, 42704), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['k', 'n'], {}), '(k, n)\n', (42698, 42704), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((43251, 43295), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'output_dtype': 'tf.float32'}), '(enc, output_dtype=tf.float32)\n', (43265, 43295), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((1398, 1427), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (1412, 1427), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((1624, 1657), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', '(n + 1)'], {}), '(frozen_pos, n + 1)\n', (1638, 1657), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((1875, 1906), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (1894, 1906), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((1919, 1951), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (1933, 1951), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((2066, 2093), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['(32)', '(64)'], {}), '(32, 64)\n', (2085, 2093), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((2106, 2163), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', '(64)'], {'output_dtype': 'tf.complex64'}), '(frozen_pos, 64, output_dtype=tf.complex64)\n', (2120, 2163), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((2502, 2533), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (2521, 2533), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((2551, 2583), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (2565, 2583), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((2832, 2852), 'numpy.zeros', 'np.zeros', (['[bs, p[0]]'], {}), '([bs, p[0]])\n', (2840, 2852), True, 'import numpy as np\n'), ((3210, 3241), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (3229, 3241), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((3259, 3291), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (3273, 3291), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((3688, 3708), 'tensorflow.zeros', 'tf.zeros', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (3696, 3708), True, 'import tensorflow as tf\n'), ((4263, 4277), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (4275, 4277), False, 'from sionna.utils import BinarySource\n'), ((4306, 4337), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (4325, 4337), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((4355, 4385), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, 
p[1])\n', (4367, 4385), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((4404, 4436), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (4418, 4436), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((4969, 4998), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (4983, 4998), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((5824, 5848), 'numpy.array_equal', 'np.array_equal', (['c', 'c_res'], {}), '(c, c_res)\n', (5838, 5848), True, 'import numpy as np\n'), ((7540, 7575), 'numpy.load', 'np.load', (["(ref_path + f + '_Avec.npy')"], {}), "(ref_path + f + '_Avec.npy')\n", (7547, 7575), True, 'import numpy as np\n'), ((7597, 7631), 'numpy.load', 'np.load', (["(ref_path + f + '_Lch.npy')"], {}), "(ref_path + f + '_Lch.npy')\n", (7604, 7631), True, 'import numpy as np\n'), ((7652, 7687), 'numpy.load', 'np.load', (["(ref_path + f + '_uhat.npy')"], {}), "(ref_path + f + '_uhat.npy')\n", (7659, 7687), True, 'import numpy as np\n'), ((7890, 7919), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (7904, 7919), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((8886, 8904), 'tensorflow.zeros_like', 'tf.zeros_like', (['llr'], {}), '(llr)\n', (8899, 8904), True, 'import tensorflow as tf\n'), ((9468, 9498), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (9483, 9498), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((9695, 9729), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', '(n + 1)'], {}), '(frozen_pos, n + 1)\n', (9710, 9729), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((9952, 9983), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (9971, 9983), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((9995, 10028), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (10010, 10028), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((10143, 10170), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['(32)', '(64)'], {}), '(32, 64)\n', (10162, 10170), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((10183, 10241), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', '(64)'], {'output_dtype': 'tf.complex64'}), '(frozen_pos, 64, output_dtype=tf.complex64)\n', (10198, 10241), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((10770, 10801), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (10789, 10801), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((13253, 13284), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (13272, 13284), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((15106, 15137), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 
'p[1]'], {}), '(p[0], p[1])\n', (15125, 15137), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((15156, 15186), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (15168, 15186), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((19093, 19117), 'numpy.array_equal', 'np.array_equal', (['c', 'c_res'], {}), '(c, c_res)\n', (19107, 19117), True, 'import numpy as np\n'), ((22556, 22591), 'numpy.load', 'np.load', (["(ref_path + f + '_Avec.npy')"], {}), "(ref_path + f + '_Avec.npy')\n", (22563, 22591), True, 'import numpy as np\n'), ((22613, 22647), 'numpy.load', 'np.load', (["(ref_path + f + '_Lch.npy')"], {}), "(ref_path + f + '_Lch.npy')\n", (22620, 22647), True, 'import numpy as np\n'), ((22668, 22703), 'numpy.load', 'np.load', (["(ref_path + f + '_uhat.npy')"], {}), "(ref_path + f + '_uhat.npy')\n", (22675, 22703), True, 'import numpy as np\n'), ((24283, 24381), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'list_size': 'list_size', 'use_hybrid_sc': '(True)', 'crc_degree': 'crc_degree'}), '(frozen_pos, n, list_size=list_size, use_hybrid_sc=True,\n crc_degree=crc_degree)\n', (24298, 24381), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((25759, 25777), 'tensorflow.zeros_like', 'tf.zeros_like', (['llr'], {}), '(llr)\n', (25772, 25777), True, 'import tensorflow as tf\n'), ((26240, 26269), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (26254, 26269), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((26466, 26499), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', '(n + 1)'], {}), '(frozen_pos, n + 1)\n', (26480, 26499), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((26722, 26753), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (26741, 26753), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((26765, 26797), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (26779, 26797), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((26912, 26939), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['(32)', '(64)'], {}), '(32, 64)\n', (26931, 26939), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((26952, 27009), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', '(64)'], {'output_dtype': 'tf.complex64'}), '(frozen_pos, 64, output_dtype=tf.complex64)\n', (26966, 27009), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((28296, 28310), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (28308, 28310), False, 'from sionna.utils import BinarySource\n'), ((28339, 28370), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (28358, 28370), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((28389, 28419), 'sionna.fec.polar.encoding.PolarEncoder', 'PolarEncoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (28401, 28419), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((28438, 
28470), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'p[1]'], {}), '(frozen_pos, p[1])\n', (28452, 28470), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((29003, 29032), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {}), '(frozen_pos, n)\n', (29017, 29032), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((29849, 29873), 'numpy.array_equal', 'np.array_equal', (['c', 'c_res'], {}), '(c, c_res)\n', (29863, 29873), True, 'import numpy as np\n'), ((30669, 30694), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (30688, 30694), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((30716, 30737), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (30735, 30737), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((30756, 30823), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'hard_out': 'hard_out', 'num_iter': 'num_iter'}), '(frozen_pos, n, hard_out=hard_out, num_iter=num_iter)\n', (30770, 30823), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((33045, 33076), 'numpy.zeros', 'np.zeros', (['[bs, n_stages + 1, n]'], {}), '([bs, n_stages + 1, n])\n', (33053, 33076), True, 'import numpy as np\n'), ((33095, 33126), 'numpy.zeros', 'np.zeros', (['[bs, n_stages + 1, n]'], {}), '([bs, n_stages + 1, n])\n', (33103, 33126), True, 'import numpy as np\n'), ((35171, 35192), 'sionna.fec.utils.GaussianPriorSource', 'GaussianPriorSource', ([], {}), '()\n', (35190, 35192), False, 'from sionna.fec.utils import GaussianPriorSource\n'), ((35290, 35353), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'hard_out': '(True)', 'num_iter': 'num_iter'}), '(frozen_pos, n, hard_out=True, num_iter=num_iter)\n', (35304, 35353), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((35416, 35480), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'hard_out': '(False)', 'num_iter': 'num_iter'}), '(frozen_pos, n, hard_out=False, num_iter=num_iter)\n', (35430, 35480), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((36986, 37004), 'tensorflow.zeros_like', 'tf.zeros_like', (['llr'], {}), '(llr)\n', (36999, 37004), True, 'import tensorflow as tf\n'), ((37465, 37496), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': '(1)'}), '(enc, dec_type=1)\n', (37479, 37496), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((37553, 37588), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': '"""ABC"""'}), "(enc, dec_type='ABC')\n", (37567, 37588), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((37649, 37669), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['"""SC"""'], {}), "('SC')\n", (37663, 37669), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((39126, 39167), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'n', 'dtype': 'tf.float32'}), '(shape=n, dtype=tf.float32)\n', (39140, 39167), True, 'import tensorflow as tf\n'), ((39253, 39293), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', 
(39267, 39293), True, 'import tensorflow as tf\n'), ((39904, 39942), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (39918, 39942), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((39997, 40024), 'tensorflow.reshape', 'tf.reshape', (['b', '[4, 5, 5, n]'], {}), '(b, [4, 5, 5, n])\n', (40007, 40024), True, 'import tensorflow as tf\n'), ((40216, 40243), 'tensorflow.reshape', 'tf.reshape', (['c_res', '[100, k]'], {}), '(c_res, [100, k])\n', (40226, 40243), True, 'import tensorflow as tf\n'), ((40827, 40865), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (40841, 40865), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((40930, 40954), 'tensorflow.tile', 'tf.tile', (['llr', '[bs, 1, 1]'], {}), '(llr, [bs, 1, 1])\n', (40937, 40954), True, 'import tensorflow as tf\n'), ((41547, 41585), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (41561, 41585), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((41686, 41715), 'tensorflow.function', 'tf.function', ([], {'jit_compile': '(True)'}), '(jit_compile=True)\n', (41697, 41715), True, 'import tensorflow as tf\n'), ((43217, 43235), 'tensorflow.zeros_like', 'tf.zeros_like', (['llr'], {}), '(llr)\n', (43230, 43235), True, 'import tensorflow as tf\n'), ((2607, 2626), 'numpy.ones', 'np.ones', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (2614, 2626), True, 'import numpy as np\n'), ((2881, 2905), 'numpy.array_equal', 'np.array_equal', (['u', 'u_hat'], {}), '(u, u_hat)\n', (2895, 2905), True, 'import numpy as np\n'), ((6346, 6384), 'numpy.array_equal', 'np.array_equal', (['c[0, :, :]', 'c[i, :, :]'], {}), '(c[0, :, :], c[i, :, :])\n', (6360, 6384), True, 'import numpy as np\n'), ((8089, 8120), 'numpy.array_equal', 'np.array_equal', (['u_hat_tf', 'u_hat'], {}), '(u_hat_tf, u_hat)\n', (8103, 8120), True, 'import numpy as np\n'), ((8580, 8599), 'tensorflow.cast', 'tf.cast', (['llr', 'dt_in'], {}), '(llr, dt_in)\n', (8587, 8599), True, 'import tensorflow as tf\n'), ((8623, 8673), 'sionna.fec.polar.decoding.PolarSCDecoder', 'PolarSCDecoder', (['frozen_pos', 'n'], {'output_dtype': 'dt_out'}), '(frozen_pos, n, output_dtype=dt_out)\n', (8637, 8673), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((25452, 25471), 'tensorflow.cast', 'tf.cast', (['llr', 'dt_in'], {}), '(llr, dt_in)\n', (25459, 25471), True, 'import tensorflow as tf\n'), ((25495, 25546), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'output_dtype': 'dt_out'}), '(frozen_pos, n, output_dtype=dt_out)\n', (25510, 25546), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((27410, 27441), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (27429, 27441), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((27463, 27514), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'p[1]'], {'hard_out': 'hard_out'}), '(frozen_pos, p[1], hard_out=hard_out)\n', (27477, 27514), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((30373, 30411), 'numpy.array_equal', 'np.array_equal', (['c[0, :, :]', 'c[i, :, :]'], {}), '(c[0, :, :], c[i, :, :])\n', 
(30387, 30411), True, 'import numpy as np\n'), ((32564, 32586), 'numpy.minimum', 'np.minimum', (['x', 'llr_max'], {}), '(x, llr_max)\n', (32574, 32586), True, 'import numpy as np\n'), ((32628, 32650), 'numpy.minimum', 'np.minimum', (['y', 'llr_max'], {}), '(y, llr_max)\n', (32638, 32650), True, 'import numpy as np\n'), ((33012, 33022), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (33019, 33022), True, 'import numpy as np\n'), ((35981, 36016), 'numpy.array_equal', 'np.array_equal', (['u_hat_bp', 'u_hat_ref'], {}), '(u_hat_bp, u_hat_ref)\n', (35995, 36016), True, 'import numpy as np\n'), ((36046, 36112), 'numpy.allclose', 'np.allclose', (['(-u_hat_bp_soft)', 'u_hat_ref_soft'], {'rtol': '(0.05)', 'atol': '(0.005)'}), '(-u_hat_bp_soft, u_hat_ref_soft, rtol=0.05, atol=0.005)\n', (36057, 36112), True, 'import numpy as np\n'), ((36687, 36706), 'tensorflow.cast', 'tf.cast', (['llr', 'dt_in'], {}), '(llr, dt_in)\n', (36694, 36706), True, 'import tensorflow as tf\n'), ((36730, 36780), 'sionna.fec.polar.decoding.PolarBPDecoder', 'PolarBPDecoder', (['frozen_pos', 'n'], {'output_dtype': 'dt_out'}), '(frozen_pos, n, output_dtype=dt_out)\n', (36744, 36780), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((38238, 38252), 'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (38250, 38252), False, 'from sionna.utils import BinarySource\n'), ((38275, 38301), 'sionna.fec.polar.encoding.Polar5GEncoder', 'Polar5GEncoder', (['p[0]', 'p[1]'], {}), '(p[0], p[1])\n', (38289, 38301), False, 'from sionna.fec.polar.encoding import PolarEncoder, Polar5GEncoder\n'), ((38324, 38362), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (38338, 38362), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((39186, 39224), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'dec_type': 'dec_type'}), '(enc, dec_type=dec_type)\n', (39200, 39224), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((40324, 40348), 'numpy.array_equal', 'np.array_equal', (['c', 'c_res'], {}), '(c, c_res)\n', (40338, 40348), True, 'import numpy as np\n'), ((42928, 42947), 'tensorflow.cast', 'tf.cast', (['llr', 'dt_in'], {}), '(llr, dt_in)\n', (42935, 42947), True, 'import tensorflow as tf\n'), ((42971, 43011), 'sionna.fec.polar.decoding.Polar5GDecoder', 'Polar5GDecoder', (['enc'], {'output_dtype': 'dt_out'}), '(enc, output_dtype=dt_out)\n', (42985, 43011), False, 'from sionna.fec.polar.decoding import Polar5GDecoder\n'), ((3491, 3503), 'numpy.isnan', 'np.isnan', (['u1'], {}), '(u1)\n', (3499, 3503), True, 'import numpy as np\n'), ((3565, 3577), 'numpy.isinf', 'np.isinf', (['u1'], {}), '(u1)\n', (3573, 3577), True, 'import numpy as np\n'), ((3616, 3631), 'numpy.isneginf', 'np.isneginf', (['u1'], {}), '(u1)\n', (3627, 3631), True, 'import numpy as np\n'), ((3817, 3829), 'numpy.isnan', 'np.isnan', (['u2'], {}), '(u2)\n', (3825, 3829), True, 'import numpy as np\n'), ((3891, 3903), 'numpy.isinf', 'np.isinf', (['u2'], {}), '(u2)\n', (3899, 3903), True, 'import numpy as np\n'), ((3942, 3957), 'numpy.isneginf', 'np.isneginf', (['u2'], {}), '(u2)\n', (3953, 3957), True, 'import numpy as np\n'), ((7722, 7738), 'numpy.where', 'np.where', (['(A == 0)'], {}), '(A == 0)\n', (7730, 7738), True, 'import numpy as np\n'), ((7773, 7789), 'numpy.where', 'np.where', (['(A == 1)'], {}), '(A == 1)\n', (7781, 7789), True, 'import numpy as np\n'), ((17467, 17481), 
'sionna.utils.BinarySource', 'BinarySource', ([], {}), '()\n', (17479, 17481), False, 'from sionna.utils import BinarySource\n'), ((17518, 17543), 'sionna.fec.polar.utils.generate_5g_ranking', 'generate_5g_ranking', (['k', 'n'], {}), '(k, n)\n', (17537, 17543), False, 'from sionna.fec.polar.utils import generate_5g_ranking\n'), ((17573, 17614), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'n', 'dtype': 'tf.float32'}), '(shape=n, dtype=tf.float32)\n', (17587, 17614), True, 'import tensorflow as tf\n'), ((17939, 17979), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), '(inputs=inputs, outputs=x)\n', (17953, 17979), True, 'import tensorflow as tf\n'), ((19529, 19634), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, n, use_fast_scl=use_fast_scl, cpu_only=cpu_only,\n use_scatter=use_scatter)\n', (19544, 19634), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((19849, 19871), 'tensorflow.tile', 'tf.tile', (['b', '[bs, 1, 1]'], {}), '(b, [bs, 1, 1])\n', (19856, 19871), True, 'import tensorflow as tf\n'), ((22738, 22754), 'numpy.where', 'np.where', (['(A == 0)'], {}), '(A == 0)\n', (22746, 22754), True, 'import numpy as np\n'), ((22789, 22805), 'numpy.where', 'np.where', (['(A == 1)'], {}), '(A == 1)\n', (22797, 22805), True, 'import numpy as np\n'), ((27671, 27690), 'numpy.ones', 'np.ones', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (27678, 27690), True, 'import numpy as np\n'), ((27910, 27930), 'numpy.zeros', 'np.zeros', (['[bs, p[0]]'], {}), '([bs, p[0]])\n', (27918, 27930), True, 'import numpy as np\n'), ((32748, 32767), 'numpy.exp', 'np.exp', (['(x_in + y_in)'], {}), '(x_in + y_in)\n', (32754, 32767), True, 'import numpy as np\n'), ((32799, 32811), 'numpy.exp', 'np.exp', (['x_in'], {}), '(x_in)\n', (32805, 32811), True, 'import numpy as np\n'), ((32814, 32826), 'numpy.exp', 'np.exp', (['y_in'], {}), '(y_in)\n', (32820, 32826), True, 'import numpy as np\n'), ((34840, 34859), 'numpy.sign', 'np.sign', (['u_hat_soft'], {}), '(u_hat_soft)\n', (34847, 34859), True, 'import numpy as np\n'), ((41106, 41144), 'numpy.array_equal', 'np.array_equal', (['c[0, :, :]', 'c[i, :, :]'], {}), '(c[0, :, :], c[i, :, :])\n', (41120, 41144), True, 'import numpy as np\n'), ((10980, 11089), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'p[1]'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, p[1], use_fast_scl=use_fast_scl, cpu_only=\n cpu_only, use_scatter=use_scatter)\n', (10995, 11089), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((11625, 11645), 'numpy.zeros', 'np.zeros', (['[bs, p[0]]'], {}), '([bs, p[0]])\n', (11633, 11645), True, 'import numpy as np\n'), ((12084, 12211), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'list_size': 'list_size', 'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, n, list_size=list_size, use_fast_scl=\n use_fast_scl, cpu_only=cpu_only, use_scatter=use_scatter)\n', (12099, 12211), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((12732, 12749), 'numpy.zeros', 'np.zeros', (['[bs, k]'], {}), '([bs, k])\n', (12740, 12749), True, 'import numpy as np\n'), ((13463, 
13572), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'p[1]'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, p[1], use_fast_scl=use_fast_scl, cpu_only=\n cpu_only, use_scatter=use_scatter)\n', (13478, 13572), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((14267, 14287), 'tensorflow.zeros', 'tf.zeros', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (14275, 14287), True, 'import tensorflow as tf\n'), ((15484, 15593), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'p[1]'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, p[1], use_fast_scl=use_fast_scl, cpu_only=\n cpu_only, use_scatter=use_scatter)\n', (15499, 15593), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((16572, 16727), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'list_size': 'list_size', 'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter', 'crc_degree': 'crc_degree'}), '(frozen_pos, n, list_size=list_size, use_fast_scl=\n use_fast_scl, cpu_only=cpu_only, use_scatter=use_scatter, crc_degree=\n crc_degree)\n', (16587, 16727), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((17641, 17746), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, n, use_fast_scl=use_fast_scl, cpu_only=cpu_only,\n use_scatter=use_scatter)\n', (17656, 17746), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((20681, 20710), 'tensorflow.function', 'tf.function', ([], {'jit_compile': '(True)'}), '(jit_compile=True)\n', (20692, 20710), True, 'import tensorflow as tf\n'), ((20829, 20957), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter', 'crc_degree': 'crc_degree'}), '(frozen_pos, n, use_fast_scl=use_fast_scl, cpu_only=cpu_only,\n use_scatter=use_scatter, crc_degree=crc_degree)\n', (20844, 20957), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((23066, 23184), 'sionna.fec.polar.decoding.PolarSCLDecoder', 'PolarSCLDecoder', (['frozen_pos', 'n'], {'list_size': '(1)', 'use_fast_scl': 'use_fast_scl', 'cpu_only': 'cpu_only', 'use_scatter': 'use_scatter'}), '(frozen_pos, n, list_size=1, use_fast_scl=use_fast_scl,\n cpu_only=cpu_only, use_scatter=use_scatter)\n', (23081, 23184), False, 'from sionna.fec.polar.decoding import PolarSCDecoder, PolarSCLDecoder, PolarBPDecoder\n'), ((27967, 27991), 'numpy.array_equal', 'np.array_equal', (['u', 'u_hat'], {}), '(u, u_hat)\n', (27981, 27991), True, 'import numpy as np\n'), ((33575, 33600), 'numpy.mod', 'np.mod', (['ind_range', '(2 ** s)'], {}), '(ind_range, 2 ** s)\n', (33581, 33600), True, 'import numpy as np\n'), ((34266, 34291), 'numpy.mod', 'np.mod', (['ind_range', '(2 ** s)'], {}), '(ind_range, 2 ** s)\n', (34272, 34291), True, 'import numpy as np\n'), ((11360, 11379), 'numpy.ones', 'np.ones', (['[bs, p[1]]'], {}), '([bs, p[1]])\n', (11367, 11379), True, 'import numpy as np\n'), ((11686, 11710), 'numpy.array_equal', 'np.array_equal', (['u', 'u_hat'], {}), '(u, 
u_hat)\n', (11700, 11710), True, 'import numpy as np\n'), ((12511, 12527), 'numpy.ones', 'np.ones', (['[bs, n]'], {}), '([bs, n])\n', (12518, 12527), True, 'import numpy as np\n'), ((12790, 12814), 'numpy.array_equal', 'np.array_equal', (['u', 'u_hat'], {}), '(u, u_hat)\n', (12804, 12814), True, 'import numpy as np\n'), ((20053, 20091), 'numpy.array_equal', 'np.array_equal', (['c[0, :, :]', 'c[i, :, :]'], {}), '(c[0, :, :], c[i, :, :])\n', (20067, 20091), True, 'import numpy as np\n'), ((23603, 23634), 'numpy.array_equal', 'np.array_equal', (['u_hat_tf', 'u_hat'], {}), '(u_hat_tf, u_hat)\n', (23617, 23634), True, 'import numpy as np\n'), ((14014, 14026), 'numpy.isnan', 'np.isnan', (['u1'], {}), '(u1)\n', (14022, 14026), True, 'import numpy as np\n'), ((14112, 14124), 'numpy.isinf', 'np.isinf', (['u1'], {}), '(u1)\n', (14120, 14124), True, 'import numpy as np\n'), ((14175, 14190), 'numpy.isneginf', 'np.isneginf', (['u1'], {}), '(u1)\n', (14186, 14190), True, 'import numpy as np\n'), ((14444, 14456), 'numpy.isnan', 'np.isnan', (['u2'], {}), '(u2)\n', (14452, 14456), True, 'import numpy as np\n'), ((14542, 14554), 'numpy.isinf', 'np.isinf', (['u2'], {}), '(u2)\n', (14550, 14554), True, 'import numpy as np\n'), ((14605, 14620), 'numpy.isneginf', 'np.isneginf', (['u2'], {}), '(u2)\n', (14616, 14620), True, 'import numpy as np\n'), ((31119, 31133), 'numpy.isfinite', 'np.isfinite', (['c'], {}), '(c)\n', (31130, 31133), True, 'import numpy as np\n')]
|
from scipy.integrate import odeint
from scipy.optimize import fsolve
import numpy as np
import itertools
import matplotlib.pyplot as plt
from colorlines import colorline
from matplotlib import style
class PhaseDiagram:
def __init__(self, system):
self.system = system
self.fig, self.ax = plt.subplots(1, 1)
def steady_states(self, search_space, discretization=5):
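        # Scan a grid of initial guesses over search_space, run fsolve on the
        # steady-state system (dx/dt = 0) from each guess, and keep only the
        # distinct converged roots (duplicates within atol=1e-2 are dropped).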
linspaces = [np.linspace(axis[0], axis[1], discretization) for axis in search_space]
guesses = list(itertools.product(*linspaces))
ss_system = lambda x: self.system(x, 0)
results = []
for guess in guesses:
calc_result, _, convergence_success, info = fsolve(ss_system, guess, full_output=True)
if convergence_success:
if len(results) == 0:
results.append(calc_result)
else:
new_guess = True
for result in results:
if all(np.isclose(calc_result, result, atol=1e-2)):
new_guess = False
if new_guess:
results.append(calc_result)
else:
print('convergence failure')
return results
def plot_trajectory(self, x0, time_sequence, ax, fade=0.1, linewidth=1):
        # integrate the system supplied to the class, not the module-level f
        r = odeint(self.system, x0, time_sequence)
colorline(x=r[:,0], y=r[:,1], ax=ax, cmap='bone_r', fade=fade, linewidth=linewidth)
# plt.plot(r[:,0], r[:,1])
def random_paths(self, n, time_sequence, x_rand_interval, y_rand_interval, fade=0.1, linewidth=1):
self.fig.subplots_adjust(
top=0.981,
bottom=0.043,
left=0.029,
right=0.981,
hspace=0.2,
wspace=0.2
)
for _ in range(n):
x_random = np.random.uniform(x_rand_interval[0], x_rand_interval[1])
y_random = np.random.uniform(y_rand_interval[0], y_rand_interval[1])
self.plot_trajectory([x_random, y_random], time_sequence=time_sequence, ax=self.ax, fade=fade, linewidth=linewidth)
plt.show()
def f(x, t):
y = np.zeros(shape=2)
y[0] = x[0] - x[1]*x[0]
y[1] = x[0]*x[1] - x[1]
return y
PD = PhaseDiagram(f)
steady_states = PD.steady_states(search_space=[[-10,40],[-10,40]])
print(steady_states)
time_sequence=np.linspace(0.1,2.5,1000)
PD.random_paths(n=150, time_sequence=time_sequence, x_rand_interval=[-.4, 1.5], y_rand_interval=[0, 2], fade=1.0)
# PD.fig.savefig('PD1.png', dpi=300)
|
[
"numpy.random.uniform",
"matplotlib.pyplot.show",
"scipy.integrate.odeint",
"numpy.zeros",
"scipy.optimize.fsolve",
"numpy.isclose",
"numpy.linspace",
"itertools.product",
"matplotlib.pyplot.subplots",
"colorlines.colorline"
] |
[((2448, 2475), 'numpy.linspace', 'np.linspace', (['(0.1)', '(2.5)', '(1000)'], {}), '(0.1, 2.5, 1000)\n', (2459, 2475), True, 'import numpy as np\n'), ((2227, 2244), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2)'}), '(shape=2)\n', (2235, 2244), True, 'import numpy as np\n'), ((324, 342), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (336, 342), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1424), 'scipy.integrate.odeint', 'odeint', (['f', 'x0', 'time_sequence'], {}), '(f, x0, time_sequence)\n', (1402, 1424), False, 'from scipy.integrate import odeint\n'), ((1434, 1524), 'colorlines.colorline', 'colorline', ([], {'x': 'r[:, 0]', 'y': 'r[:, 1]', 'ax': 'ax', 'cmap': '"""bone_r"""', 'fade': 'fade', 'linewidth': 'linewidth'}), "(x=r[:, 0], y=r[:, 1], ax=ax, cmap='bone_r', fade=fade, linewidth=\n linewidth)\n", (1443, 1524), False, 'from colorlines import colorline\n'), ((2189, 2199), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2197, 2199), True, 'import matplotlib.pyplot as plt\n'), ((429, 474), 'numpy.linspace', 'np.linspace', (['axis[0]', 'axis[1]', 'discretization'], {}), '(axis[0], axis[1], discretization)\n', (440, 474), True, 'import numpy as np\n'), ((527, 556), 'itertools.product', 'itertools.product', (['*linspaces'], {}), '(*linspaces)\n', (544, 556), False, 'import itertools\n'), ((719, 761), 'scipy.optimize.fsolve', 'fsolve', (['ss_system', 'guess'], {'full_output': '(True)'}), '(ss_system, guess, full_output=True)\n', (725, 761), False, 'from scipy.optimize import fsolve\n'), ((1911, 1968), 'numpy.random.uniform', 'np.random.uniform', (['x_rand_interval[0]', 'x_rand_interval[1]'], {}), '(x_rand_interval[0], x_rand_interval[1])\n', (1928, 1968), True, 'import numpy as np\n'), ((1993, 2050), 'numpy.random.uniform', 'np.random.uniform', (['y_rand_interval[0]', 'y_rand_interval[1]'], {}), '(y_rand_interval[0], y_rand_interval[1])\n', (2010, 2050), True, 'import numpy as np\n'), ((1030, 1072), 'numpy.isclose', 'np.isclose', (['calc_result', 'result'], {'atol': '(0.01)'}), '(calc_result, result, atol=0.01)\n', (1040, 1072), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
from torchvision import models
import numpy as np
from torch.autograd import Variable
import os
class Model:
def __init__(self, key = 'abnormal'):
self.INPUT_DIM = 224
self.MAX_PIXEL_VAL = 255
self.MEAN = 58.09
self.STDDEV = 49.73
self.model_ab=MRI_alex(False)
if key == 'abnormal':
self.model_ab.load_state_dict(torch.load(r"models/abnormal.pt", map_location='cpu'))
elif key =='acl':
self.model_ab.load_state_dict(torch.load(r"models/acl.pt", map_location='cpu'))
else:
self.model_ab.load_state_dict(torch.load(r"models/men.pt", map_location='cpu'))
self.model_ab.cuda()
def preprocess(self, series):
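        # Centre-crop each slice to INPUT_DIM, rescale intensities to
        # [0, MAX_PIXEL_VAL], standardize with MEAN/STDDEV, and stack the
        # single channel three times to match AlexNet's RGB input.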
pad = int((series.shape[2] - self.INPUT_DIM)/2)
series = series[:,pad:-pad,pad:-pad]
series = (series-np.min(series))/(np.max(series)-np.min(series))*self.MAX_PIXEL_VAL
series = (series - self.MEAN) / self.STDDEV
series = np.stack((series,)*3, axis=1)
series_float = torch.FloatTensor(series)
return series_float
def study(self, axial_path, sagit_path, coron_path):
vol_axial = np.load(axial_path)
vol_sagit = np.load(sagit_path)
vol_coron = np.load(coron_path)
vol_axial_tensor = self.preprocess(vol_axial)
vol_sagit_tensor = self.preprocess(vol_sagit)
vol_coron_tensor = self.preprocess(vol_coron)
return {"axial": vol_axial_tensor,
"sagit": vol_sagit_tensor,
"coron": vol_coron_tensor}
def predict(self, model, tensors, abnormality_prior=None):
vol_axial = tensors["axial"].cuda()
vol_sagit = tensors["sagit"].cuda()
vol_coron = tensors["coron"].cuda()
vol_axial = Variable(vol_axial)
vol_sagit = Variable(vol_sagit)
vol_coron = Variable(vol_coron)
logit = model.forward(vol_axial, vol_sagit, vol_coron)
pred = torch.sigmoid(logit)
pred_npy = pred.data.cpu().numpy()[0][0]
if abnormality_prior:
pred_npy = pred_npy * abnormality_prior
return pred_npy
    def get_prediction(self, axial_path, sagit_path, coron_path):
        # the .npy paths must be passed in; they are not attributes of the class
        return self.predict(self.model_ab, self.study(axial_path, sagit_path, coron_path))
class MRI_alex(nn.Module):
def __init__(self, training=True):
super().__init__()
self.axial_net = models.alexnet(pretrained=training)
self.sagit_net = models.alexnet(pretrained=training)
self.coron_net = models.alexnet(pretrained=training)
self.gap_axial = nn.AdaptiveAvgPool2d(1)
self.gap_sagit = nn.AdaptiveAvgPool2d(1)
self.gap_coron = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(3*256, 1)
return
def forward(self,vol_axial, vol_sagit, vol_coron):
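        # Each plane goes through its own AlexNet feature extractor, is globally
        # average-pooled per slice, max-pooled across slices, and the three
        # resulting 256-d vectors are concatenated for the final linear classifier.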
vol_axial = torch.squeeze(vol_axial, dim=0)
vol_sagit = torch.squeeze(vol_sagit, dim=0)
vol_coron = torch.squeeze(vol_coron, dim=0)
vol_axial = self.axial_net.features(vol_axial)
vol_sagit = self.sagit_net.features(vol_sagit)
vol_coron = self.coron_net.features(vol_coron)
vol_axial = self.gap_axial(vol_axial).view(vol_axial.size(0), -1)
x = torch.max(vol_axial, 0, keepdim=True)[0]
vol_sagit = self.gap_sagit(vol_sagit).view(vol_sagit.size(0), -1)
y = torch.max(vol_sagit, 0, keepdim=True)[0]
vol_coron = self.gap_coron(vol_coron).view(vol_coron.size(0), -1)
z = torch.max(vol_coron, 0, keepdim=True)[0]
w = torch.cat((x, y, z), 1)
out = self.classifier(w)
return out
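# A hypothetical usage sketch (my addition; the .npy paths are placeholders):
#   m = Model(key='acl')
#   tensors = m.study('axial.npy', 'sagit.npy', 'coron.npy')
#   prob = m.predict(m.model_ab, tensors)   # requires a CUDA device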
|
[
"numpy.stack",
"torch.nn.AdaptiveAvgPool2d",
"numpy.load",
"torch.autograd.Variable",
"torch.load",
"torchvision.models.alexnet",
"torch.FloatTensor",
"torch.cat",
"torch.squeeze",
"torch.sigmoid",
"numpy.min",
"torch.max",
"numpy.max",
"torch.nn.Linear"
] |
[((1031, 1062), 'numpy.stack', 'np.stack', (['((series,) * 3)'], {'axis': '(1)'}), '((series,) * 3, axis=1)\n', (1039, 1062), True, 'import numpy as np\n'), ((1084, 1109), 'torch.FloatTensor', 'torch.FloatTensor', (['series'], {}), '(series)\n', (1101, 1109), False, 'import torch\n'), ((1220, 1239), 'numpy.load', 'np.load', (['axial_path'], {}), '(axial_path)\n', (1227, 1239), True, 'import numpy as np\n'), ((1260, 1279), 'numpy.load', 'np.load', (['sagit_path'], {}), '(sagit_path)\n', (1267, 1279), True, 'import numpy as np\n'), ((1300, 1319), 'numpy.load', 'np.load', (['coron_path'], {}), '(coron_path)\n', (1307, 1319), True, 'import numpy as np\n'), ((1835, 1854), 'torch.autograd.Variable', 'Variable', (['vol_axial'], {}), '(vol_axial)\n', (1843, 1854), False, 'from torch.autograd import Variable\n'), ((1875, 1894), 'torch.autograd.Variable', 'Variable', (['vol_sagit'], {}), '(vol_sagit)\n', (1883, 1894), False, 'from torch.autograd import Variable\n'), ((1915, 1934), 'torch.autograd.Variable', 'Variable', (['vol_coron'], {}), '(vol_coron)\n', (1923, 1934), False, 'from torch.autograd import Variable\n'), ((2013, 2033), 'torch.sigmoid', 'torch.sigmoid', (['logit'], {}), '(logit)\n', (2026, 2033), False, 'import torch\n'), ((2448, 2483), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': 'training'}), '(pretrained=training)\n', (2462, 2483), False, 'from torchvision import models\n'), ((2509, 2544), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': 'training'}), '(pretrained=training)\n', (2523, 2544), False, 'from torchvision import models\n'), ((2570, 2605), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': 'training'}), '(pretrained=training)\n', (2584, 2605), False, 'from torchvision import models\n'), ((2631, 2654), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (2651, 2654), True, 'import torch.nn as nn\n'), ((2680, 2703), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (2700, 2703), True, 'import torch.nn as nn\n'), ((2729, 2752), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (2749, 2752), True, 'import torch.nn as nn\n'), ((2779, 2800), 'torch.nn.Linear', 'nn.Linear', (['(3 * 256)', '(1)'], {}), '(3 * 256, 1)\n', (2788, 2800), True, 'import torch.nn as nn\n'), ((2894, 2925), 'torch.squeeze', 'torch.squeeze', (['vol_axial'], {'dim': '(0)'}), '(vol_axial, dim=0)\n', (2907, 2925), False, 'import torch\n'), ((2946, 2977), 'torch.squeeze', 'torch.squeeze', (['vol_sagit'], {'dim': '(0)'}), '(vol_sagit, dim=0)\n', (2959, 2977), False, 'import torch\n'), ((2998, 3029), 'torch.squeeze', 'torch.squeeze', (['vol_coron'], {'dim': '(0)'}), '(vol_coron, dim=0)\n', (3011, 3029), False, 'import torch\n'), ((3588, 3611), 'torch.cat', 'torch.cat', (['(x, y, z)', '(1)'], {}), '((x, y, z), 1)\n', (3597, 3611), False, 'import torch\n'), ((3281, 3318), 'torch.max', 'torch.max', (['vol_axial', '(0)'], {'keepdim': '(True)'}), '(vol_axial, 0, keepdim=True)\n', (3290, 3318), False, 'import torch\n'), ((3408, 3445), 'torch.max', 'torch.max', (['vol_sagit', '(0)'], {'keepdim': '(True)'}), '(vol_sagit, 0, keepdim=True)\n', (3417, 3445), False, 'import torch\n'), ((3535, 3572), 'torch.max', 'torch.max', (['vol_coron', '(0)'], {'keepdim': '(True)'}), '(vol_coron, 0, keepdim=True)\n', (3544, 3572), False, 'import torch\n'), ((422, 474), 'torch.load', 'torch.load', (['"""models/abnormal.pt"""'], {'map_location': '"""cpu"""'}), "('models/abnormal.pt', 
map_location='cpu')\n", (432, 474), False, 'import torch\n'), ((545, 592), 'torch.load', 'torch.load', (['"""models/acl.pt"""'], {'map_location': '"""cpu"""'}), "('models/acl.pt', map_location='cpu')\n", (555, 592), False, 'import torch\n'), ((651, 698), 'torch.load', 'torch.load', (['"""models/men.pt"""'], {'map_location': '"""cpu"""'}), "('models/men.pt', map_location='cpu')\n", (661, 698), False, 'import torch\n'), ((895, 909), 'numpy.min', 'np.min', (['series'], {}), '(series)\n', (901, 909), True, 'import numpy as np\n'), ((912, 926), 'numpy.max', 'np.max', (['series'], {}), '(series)\n', (918, 926), True, 'import numpy as np\n'), ((927, 941), 'numpy.min', 'np.min', (['series'], {}), '(series)\n', (933, 941), True, 'import numpy as np\n')]
|
from scipy import optimize
import matplotlib.pyplot as plt
import numpy as np
x = np.array([1, 1.1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ,11, 12, 13, 14, 15], dtype=float)
y = np.array([5, 3, 7, 9, 11, 13, 15, 28.92, 42.81, 56.7, 70.59,
84.47, 98.36, 112.25, 126.14, 140.03])
# one input sequence, 4 unknown parameters, 2 piecewise functions
def piecewise_linear(x, x0, y0, k1, k2):
# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0
# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0
return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0,
lambda x:k2*x + y0-k2*x0])
def gauss(mean, scale, x=np.linspace(1,22,22), sigma=4):
return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))
# # Fit the piecewise_linear function to the observed (x, y) data
# p , e = optimize.curve_fit(piecewise_linear, x, y)
# xd = np.linspace(0, 15, 100)
# plt.plot(x, y, "o")
# plt.plot(xd, piecewise_linear(xd, *p))
# plt.savefig('123.png')
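# Build an "information" profile by summing Gaussian bumps (gauss() above)
# centred at the selected x positions over the 22-point grid.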
xi = np.linspace(1,22,22)
information_matrix = np.zeros((22))
x = [1, 13]
for i in range(len(x)):
information_matrix += gauss(x[i],1)
# plt.plot(xi, information_matrix)
plt.plot(xi, information_matrix)
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.square",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"numpy.piecewise"
] |
[((87, 166), 'numpy.array', 'np.array', (['[1, 1.1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]'], {'dtype': 'float'}), '([1, 1.1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], dtype=float)\n', (95, 166), True, 'import numpy as np\n'), ((172, 276), 'numpy.array', 'np.array', (['[5, 3, 7, 9, 11, 13, 15, 28.92, 42.81, 56.7, 70.59, 84.47, 98.36, 112.25, \n 126.14, 140.03]'], {}), '([5, 3, 7, 9, 11, 13, 15, 28.92, 42.81, 56.7, 70.59, 84.47, 98.36, \n 112.25, 126.14, 140.03])\n', (180, 276), True, 'import numpy as np\n'), ((935, 957), 'numpy.linspace', 'np.linspace', (['(1)', '(22)', '(22)'], {}), '(1, 22, 22)\n', (946, 957), True, 'import numpy as np\n'), ((978, 990), 'numpy.zeros', 'np.zeros', (['(22)'], {}), '(22)\n', (986, 990), True, 'import numpy as np\n'), ((1107, 1139), 'matplotlib.pyplot.plot', 'plt.plot', (['xi', 'information_matrix'], {}), '(xi, information_matrix)\n', (1115, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1149, 1151), True, 'import matplotlib.pyplot as plt\n'), ((451, 557), 'numpy.piecewise', 'np.piecewise', (['x', '[x < x0, x >= x0]', '[lambda x: k1 * x + y0 - k1 * x0, lambda x: k2 * x + y0 - k2 * x0]'], {}), '(x, [x < x0, x >= x0], [lambda x: k1 * x + y0 - k1 * x0, lambda\n x: k2 * x + y0 - k2 * x0])\n', (463, 557), True, 'import numpy as np\n'), ((605, 627), 'numpy.linspace', 'np.linspace', (['(1)', '(22)', '(22)'], {}), '(1, 22, 22)\n', (616, 627), True, 'import numpy as np\n'), ((665, 684), 'numpy.square', 'np.square', (['(x - mean)'], {}), '(x - mean)\n', (674, 684), True, 'import numpy as np\n')]
|
# %%
"""
<NAME> loves French baguettes. A French baguette is 1 metre long.
In a single gulp <NAME> swallows a piece of random length, uniformly
distributed on the interval [0, 1]. Eating the whole baguette therefore
takes the boa a random number N of gulps.
Estimate P(N=2), P(N=3), E(N).
"""
# %%
import numpy as np
import pandas as pd
from random import uniform
# %%
uniform(a=0, b=1)
list(range(7))
# %%
def eat_baget():
"""
    Simulator of <NAME> eating a baguette.
    Returns the number of bites needed to finish one baguette.
"""
n_ukusov = 0
baget = 1
while baget > 0:
zaglot = uniform(a=0, b=1)
baget -= zaglot
n_ukusov += 1
return n_ukusov
# %%
eat_baget()
# %%
n_exp = 1000
udaff_life = [eat_baget() for i in range(n_exp)]
udaff_life
EN_hat = np.mean(udaff_life)
EN_hat
PNeq2_hat = udaff_life.count(2) / n_exp
PNeq2_hat
PNeq3_hat = udaff_life.count(3) / n_exp
PNeq3_hat
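# For comparison (standard result, not computed in this script): the exact
# values are P(N=2) = 1/2, P(N=3) = 1/3 and E(N) = e ≈ 2.718, since
# P(N > n) = P(U1 + ... + Un <= 1) = 1/n!.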
# %%
"""
<NAME> rolls a die until the first six comes up.
Let N denote the number of rolls.
Let A be the event that only even faces appeared during the rolls.
Estimate P(N=2), P(N=3), E(N).
Estimate P(A), P(N=2|A), P(A|N=2), P(A OR N=2), P(A AND N=2).
"""
# %%
from random import randint
# %%
randint(a=1, b=2)
# %%
7 // 2
# %%
7 % 2
def throw_until_six():
"""
    Roll a die until the first six.
    Count the number of rolls and track whether only even faces came up.
    Returns: (number of rolls, True/False)
"""
n_broskov = 0
tolko_chet = True
    brosok = -1  # dummy roll, just to enter the loop
while brosok < 6:
brosok = randint(1, 6)
n_broskov += 1
if brosok % 2 == 1:
tolko_chet = False
return (n_broskov, tolko_chet)
# %%
throw_until_six()
n_exp = 1000
throw_list = [throw_until_six() for i in range(n_exp)]
throw_list
# %%
throw_df = pd.DataFrame(throw_list, columns=['n_throw', 'only_even'])
throw_df.describe()
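# %%
# Hedged sketch (added for illustration, not in the original notebook):
# estimating the requested quantities from the simulated table above.
EN_hat = throw_df['n_throw'].mean()
PNeq2_hat = (throw_df['n_throw'] == 2).mean()
PNeq3_hat = (throw_df['n_throw'] == 3).mean()
PA_hat = throw_df['only_even'].mean()
PA_and_Neq2_hat = ((throw_df['n_throw'] == 2) & throw_df['only_even']).mean()
PNeq2_given_A_hat = PA_and_Neq2_hat / PA_hat
PA_given_Neq2_hat = PA_and_Neq2_hat / PNeq2_hat
PA_or_Neq2_hat = PA_hat + PNeq2_hat - PA_and_Neq2_hat
# For comparison, the exact values are E(N) = 6, P(N=2) = 5/36, P(N=3) = 25/216
# and P(A) = 1/4.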
# %%
"""
On the eve of a war, the Cruel Tyrant of a very large country issued a
decree. From now on, a family receives a cash bonus for every newborn boy,
but if a second girl is born into a family, the whole family is killed.
The country's poor citizens are terrified and desperately need money, so
every family will keep having children until their first girl is born.
a) What will the average number of children per family be?
b) What will the share of boys in the country be?
c) What will the average share of boys in a random family be?
d) On average, how many boys are there in a randomly chosen family?
"""
|
[
"pandas.DataFrame",
"numpy.mean",
"random.randint",
"random.uniform"
] |
[((381, 398), 'random.uniform', 'uniform', ([], {'a': '(0)', 'b': '(1)'}), '(a=0, b=1)\n', (388, 398), False, 'from random import uniform\n'), ((795, 814), 'numpy.mean', 'np.mean', (['udaff_life'], {}), '(udaff_life)\n', (802, 814), True, 'import numpy as np\n'), ((1218, 1235), 'random.randint', 'randint', ([], {'a': '(1)', 'b': '(2)'}), '(a=1, b=2)\n', (1225, 1235), False, 'from random import randint\n'), ((1862, 1920), 'pandas.DataFrame', 'pd.DataFrame', (['throw_list'], {'columns': "['n_throw', 'only_even']"}), "(throw_list, columns=['n_throw', 'only_even'])\n", (1874, 1920), True, 'import pandas as pd\n'), ((604, 621), 'random.uniform', 'uniform', ([], {'a': '(0)', 'b': '(1)'}), '(a=0, b=1)\n', (611, 621), False, 'from random import uniform\n'), ((1610, 1623), 'random.randint', 'randint', (['(1)', '(6)'], {}), '(1, 6)\n', (1617, 1623), False, 'from random import randint\n')]
|
import pandas as pd
import numpy as np
from matplotlib.collections import PatchCollection, LineCollection
from descartes.patch import PolygonPatch
try:
import geopandas # noqa: F401
except ImportError:
HAS_GEOPANDAS = False
else:
HAS_GEOPANDAS = True
from ..doctools import document
from ..exceptions import PlotnineError
from ..utils import to_rgba, SIZE_FACTOR
from .geom import geom
from .geom_point import geom_point
@document
class geom_map(geom):
"""
Draw map feature
    Map features are drawn without any special projections.
{usage}
Parameters
----------
{common_parameters}
Notes
-----
    This geom is best suited for plotting a shapefile read into a
    geopandas dataframe. The dataframe should have a ``geometry``
column.
"""
DEFAULT_AES = {'alpha': 1, 'color': '#111111', 'fill': '#333333',
'linetype': 'solid', 'shape': 'o', 'size': 0.5,
'stroke': 0.5}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False}
REQUIRED_AES = {'geometry'}
legend_geom = 'polygon'
def __init__(self, mapping=None, data=None, **kwargs):
if not HAS_GEOPANDAS:
raise PlotnineError(
"geom_map requires geopandas. "
"Please install geopandas."
)
geom.__init__(self, mapping, data, **kwargs)
# Almost all geodataframes loaded from shapefiles
# have a geometry column.
if 'geometry' not in self.mapping:
self.mapping['geometry'] = 'geometry'
def setup_data(self, data):
if not len(data):
return data
        # Remove any NULL geometries. Note that all the non-Null shapes
        # in a shapefile are required to be of the same shape type.
bool_idx = np.array([g is not None for g in data['geometry']])
if not np.all(bool_idx):
data = data.loc[bool_idx]
# Add polygon limits. Scale training uses them
try:
bounds = data['geometry'].bounds
except AttributeError:
# The geometry is not a GeoSeries
# Bounds calculation is extracted from
# geopandas.base.GeoPandasBase.bounds
bounds = pd.DataFrame(
np.array([x.bounds for x in data['geometry']]),
columns=['xmin', 'ymin', 'xmax', 'ymax'],
index=data.index)
else:
bounds.rename(
columns={
'minx': 'xmin',
'maxx': 'xmax',
'miny': 'ymin',
'maxy': 'ymax'
},
inplace=True)
data = pd.concat([data, bounds], axis=1)
return data
def draw_panel(self, data, panel_params, coord, ax, **params):
if not len(data):
return data
data.loc[data['color'].isnull(), 'color'] = 'none'
data.loc[data['fill'].isnull(), 'fill'] = 'none'
data['fill'] = to_rgba(data['fill'], data['alpha'])
geom_type = data.geometry.iloc[0].geom_type
if geom_type in ('Polygon', 'MultiPolygon'):
data['size'] *= SIZE_FACTOR
patches = [PolygonPatch(g) for g in data['geometry']]
coll = PatchCollection(
patches,
edgecolor=data['color'],
facecolor=data['fill'],
linestyle=data['linetype'],
linewidth=data['size'],
zorder=params['zorder'],
)
ax.add_collection(coll)
elif geom_type == 'Point':
# Extract point coordinates from shapely geom
# and plot with geom_point
arr = np.array([list(g.coords)[0] for g in data['geometry']])
data['x'] = arr[:, 0]
data['y'] = arr[:, 1]
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True, drop=True)
gdata.is_copy = None
geom_point.draw_group(
gdata, panel_params, coord, ax, **params)
elif geom_type == 'LineString':
data['size'] *= SIZE_FACTOR
data['color'] = to_rgba(data['color'], data['alpha'])
segments = [list(g.coords) for g in data['geometry']]
coll = LineCollection(
segments,
edgecolor=data['color'],
linewidth=data['size'],
linestyle=data['linetype'],
zorder=params['zorder'])
ax.add_collection(coll)
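# Hedged usage sketch (added for illustration, not part of the original module):
# the geom is typically combined with a geopandas GeoDataFrame that already has
# a 'geometry' column; 'naturalearth_lowres' and 'pop_est' are assumed example
# names, not guaranteed by this module.
#
#   import geopandas as gpd
#   from plotnine import ggplot, aes
#   world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
#   p = ggplot(world) + geom_map(aes(fill='pop_est'))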
|
[
"matplotlib.collections.LineCollection",
"descartes.patch.PolygonPatch",
"numpy.array",
"matplotlib.collections.PatchCollection",
"pandas.concat",
"numpy.all"
] |
[((1860, 1913), 'numpy.array', 'np.array', (["[(g is not None) for g in data['geometry']]"], {}), "([(g is not None) for g in data['geometry']])\n", (1868, 1913), True, 'import numpy as np\n'), ((2741, 2774), 'pandas.concat', 'pd.concat', (['[data, bounds]'], {'axis': '(1)'}), '([data, bounds], axis=1)\n', (2750, 2774), True, 'import pandas as pd\n'), ((1927, 1943), 'numpy.all', 'np.all', (['bool_idx'], {}), '(bool_idx)\n', (1933, 1943), True, 'import numpy as np\n'), ((3321, 3480), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['patches'], {'edgecolor': "data['color']", 'facecolor': "data['fill']", 'linestyle': "data['linetype']", 'linewidth': "data['size']", 'zorder': "params['zorder']"}), "(patches, edgecolor=data['color'], facecolor=data['fill'],\n linestyle=data['linetype'], linewidth=data['size'], zorder=params['zorder']\n )\n", (3336, 3480), False, 'from matplotlib.collections import PatchCollection, LineCollection\n'), ((3259, 3274), 'descartes.patch.PolygonPatch', 'PolygonPatch', (['g'], {}), '(g)\n', (3271, 3274), False, 'from descartes.patch import PolygonPatch\n'), ((2326, 2372), 'numpy.array', 'np.array', (["[x.bounds for x in data['geometry']]"], {}), "([x.bounds for x in data['geometry']])\n", (2334, 2372), True, 'import numpy as np\n'), ((4372, 4502), 'matplotlib.collections.LineCollection', 'LineCollection', (['segments'], {'edgecolor': "data['color']", 'linewidth': "data['size']", 'linestyle': "data['linetype']", 'zorder': "params['zorder']"}), "(segments, edgecolor=data['color'], linewidth=data['size'],\n linestyle=data['linetype'], zorder=params['zorder'])\n", (4386, 4502), False, 'from matplotlib.collections import PatchCollection, LineCollection\n')]
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import random
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, StratifiedKFold
class DataLoader:
def __init__(self, config, logger):
# Initialize Dataloader Configuration
logging.info('[DATALOADER]: Initializing Spectrometer Dataloader')
self.config = config
self.dl_cfg = self.config['dataloader']
# Initialize PRNG Seed Values
if self.dl_cfg.enable_seed:
random.seed(self.dl_cfg.seed)
np.random.seed(self.dl_cfg.seed)
# Load Dataset
logging.info('[DATALOADER]: Loading Dataset Files')
logging.info('[DATALOADER]: > Loading File: ' + self.dl_cfg.data_path)
# Preprocess Data
raw_data = open(self.dl_cfg.data_path, 'r').read().split('\n\n')
data = []
for row in raw_data:
if len(row) == 0: continue
row = row.replace('(', '').replace(')', '').strip()
row = row.replace('\n', ' ').split(' ')[2:]
row = list(map(lambda x: float(x), row))
if row[0] == 0: row[0] = 1.0 # Replace Encoding
data.append(row)
self.data = np.array(data)
logging.info('[DATALOADER]: > Loaded: ' + self.dl_cfg.data_path + '\t' + \
'Data Shape: ' + str(self.data.shape))
def get_data(self):
# Initialize Crossfold Validation
if self.dl_cfg.crossval.stratified:
kf = StratifiedKFold(n_splits=self.dl_cfg.crossval.folds,
shuffle=self.dl_cfg.crossval.shuffle,
random_state=self.dl_cfg.crossval.random_state)
else:
kf = KFold(n_splits=self.dl_cfg.crossval.folds,
shuffle=self.dl_cfg.crossval.shuffle,
random_state=self.dl_cfg.crossval.random_state)
for fold, (train_index, test_index) in enumerate(kf.split(self.data)):
# Initialize Data Features and Labels
X_train = self.data[train_index, 1:]
y_train = self.data[train_index, 0].astype(int) - 1
X_test = self.data[test_index, 1:]
y_test = self.data[test_index, 0].astype(int) - 1
# Set Dataloader Attributes
self.num_class = len(np.unique(y_train))
yield fold, X_train, y_train, X_test, y_test
|
[
"numpy.random.seed",
"sklearn.model_selection.KFold",
"logging.info",
"random.seed",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"numpy.unique"
] |
[((854, 920), 'logging.info', 'logging.info', (['"""[DATALOADER]: Initializing Spectrometer Dataloader"""'], {}), "('[DATALOADER]: Initializing Spectrometer Dataloader')\n", (866, 920), False, 'import logging\n'), ((1192, 1243), 'logging.info', 'logging.info', (['"""[DATALOADER]: Loading Dataset Files"""'], {}), "('[DATALOADER]: Loading Dataset Files')\n", (1204, 1243), False, 'import logging\n'), ((1252, 1322), 'logging.info', 'logging.info', (["('[DATALOADER]: > Loading File: ' + self.dl_cfg.data_path)"], {}), "('[DATALOADER]: > Loading File: ' + self.dl_cfg.data_path)\n", (1264, 1322), False, 'import logging\n'), ((1793, 1807), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1801, 1807), True, 'import numpy as np\n'), ((1085, 1114), 'random.seed', 'random.seed', (['self.dl_cfg.seed'], {}), '(self.dl_cfg.seed)\n', (1096, 1114), False, 'import random\n'), ((1127, 1159), 'numpy.random.seed', 'np.random.seed', (['self.dl_cfg.seed'], {}), '(self.dl_cfg.seed)\n', (1141, 1159), True, 'import numpy as np\n'), ((2079, 2222), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'self.dl_cfg.crossval.folds', 'shuffle': 'self.dl_cfg.crossval.shuffle', 'random_state': 'self.dl_cfg.crossval.random_state'}), '(n_splits=self.dl_cfg.crossval.folds, shuffle=self.dl_cfg.\n crossval.shuffle, random_state=self.dl_cfg.crossval.random_state)\n', (2094, 2222), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((2315, 2448), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.dl_cfg.crossval.folds', 'shuffle': 'self.dl_cfg.crossval.shuffle', 'random_state': 'self.dl_cfg.crossval.random_state'}), '(n_splits=self.dl_cfg.crossval.folds, shuffle=self.dl_cfg.crossval.\n shuffle, random_state=self.dl_cfg.crossval.random_state)\n', (2320, 2448), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((2916, 2934), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (2925, 2934), True, 'import numpy as np\n')]
|
#!/home/roberto/anaconda3/envs/tensorflow/bin/python
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import sys
import multiprocessing
import time
import pandas as pd
import tensorflow as tf
import cv2
from utils import detector_utils as detector_utils
from utils import label_map_util
class FasterRCNN(multiprocessing.Process):
def __init__(self, input_pipe, kcf_pipe, gpu_id, num_classes, jump, video_name, player, model_name):
multiprocessing.Process.__init__(self)
self.input_pipe = input_pipe
self.kcf_pipe = kcf_pipe
self.gpu_id = gpu_id
self.num_classes = num_classes
self.jump = jump
self.video_name = video_name
self.player = player
self.model_name = model_name
def run(self):
cwd_path = os.getcwd()
path_to_ckpt = os.path.join(cwd_path, self.model_name,'frozen_inference_graph.pb')
path_to_labels = os.path.join(cwd_path,'training','labelmap.pbtxt')
path_to_video = os.path.join(cwd_path,self.video_name)
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(self.gpu_id)
label_map = label_map_util.load_labelmap(path_to_labels)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=self.num_classes, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(path_to_ckpt, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.4
sess = tf.Session(config=config, graph=detection_graph)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
print(detection_classes)
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
video = cv2.VideoCapture(path_to_video)
num_iter = 0
while(video.isOpened()):
_, frame = video.read()
if not (num_iter % self.jump):
if frame is None:
break
frame_expanded = np.expand_dims(frame, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: frame_expanded})
(box, score) = self.best_score_box(boxes, scores, classes)
# Send info to both threads
self.input_pipe.send((num_iter, box, score))
self.kcf_pipe.send((num_iter, box, score))
num_iter+=1
return
def best_score_box(self, boxes, scores, classes):
pos_max = np.where(scores==np.amax(scores[np.where(classes==self.player)]))
return boxes[pos_max], scores[pos_max]
|
[
"os.getcwd",
"utils.label_map_util.create_category_index",
"utils.label_map_util.load_labelmap",
"tensorflow.GraphDef",
"tensorflow.Session",
"numpy.expand_dims",
"utils.label_map_util.convert_label_map_to_categories",
"cv2.VideoCapture",
"tensorflow.ConfigProto",
"numpy.where",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"multiprocessing.Process.__init__",
"os.path.join"
] |
[((996, 1034), 'multiprocessing.Process.__init__', 'multiprocessing.Process.__init__', (['self'], {}), '(self)\n', (1028, 1034), False, 'import multiprocessing\n'), ((1340, 1351), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1349, 1351), False, 'import os\n'), ((1375, 1443), 'os.path.join', 'os.path.join', (['cwd_path', 'self.model_name', '"""frozen_inference_graph.pb"""'], {}), "(cwd_path, self.model_name, 'frozen_inference_graph.pb')\n", (1387, 1443), False, 'import os\n'), ((1468, 1520), 'os.path.join', 'os.path.join', (['cwd_path', '"""training"""', '"""labelmap.pbtxt"""'], {}), "(cwd_path, 'training', 'labelmap.pbtxt')\n", (1480, 1520), False, 'import os\n'), ((1543, 1582), 'os.path.join', 'os.path.join', (['cwd_path', 'self.video_name'], {}), '(cwd_path, self.video_name)\n', (1555, 1582), False, 'import os\n'), ((1682, 1726), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['path_to_labels'], {}), '(path_to_labels)\n', (1710, 1726), False, 'from utils import label_map_util\n'), ((1748, 1867), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'self.num_classes', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n self.num_classes, use_display_name=True)\n', (1794, 1867), False, 'from utils import label_map_util\n'), ((1888, 1936), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1924, 1936), False, 'from utils import label_map_util\n'), ((1972, 1982), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1980, 1982), True, 'import tensorflow as tf\n'), ((2983, 3014), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path_to_video'], {}), '(path_to_video)\n', (2999, 3014), False, 'import cv2\n'), ((2053, 2066), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2064, 2066), True, 'import tensorflow as tf\n'), ((2317, 2333), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2331, 2333), True, 'import tensorflow as tf\n'), ((2473, 2521), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config', 'graph': 'detection_graph'}), '(config=config, graph=detection_graph)\n', (2483, 2521), True, 'import tensorflow as tf\n'), ((2084, 2118), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['path_to_ckpt', '"""rb"""'], {}), "(path_to_ckpt, 'rb')\n", (2098, 2118), True, 'import tensorflow as tf\n'), ((2252, 2294), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (2271, 2294), True, 'import tensorflow as tf\n'), ((3252, 3281), 'numpy.expand_dims', 'np.expand_dims', (['frame'], {'axis': '(0)'}), '(frame, axis=0)\n', (3266, 3281), True, 'import numpy as np\n'), ((3914, 3946), 'numpy.where', 'np.where', (['(classes == self.player)'], {}), '(classes == self.player)\n', (3922, 3946), True, 'import numpy as np\n')]
|
import objax
from jax import vmap, grad, jacrev
import jax.numpy as np
from jax.scipy.linalg import cholesky, cho_factor
from .utils import inv, solve, gaussian_first_derivative_wrt_mean, gaussian_second_derivative_wrt_mean
from numpy.polynomial.hermite import hermgauss
import numpy as onp
import itertools
class Cubature(objax.Module):
def __init__(self, dim=None):
if dim is None: # dimension of cubature not known upfront
self.store = False
else: # dimension known, store sigma points and weights
self.store = True
self.x, self.w = self.get_cubature_points_and_weights(dim)
def __call__(self, dim):
if self.store:
return self.x, self.w
else:
return self.get_cubature_points_and_weights(dim)
def get_cubature_points_and_weights(self, dim):
raise NotImplementedError
class GaussHermite(Cubature):
def __init__(self, dim=None, num_cub_points=20):
self.num_cub_points = num_cub_points
super().__init__(dim)
def get_cubature_points_and_weights(self, dim):
return gauss_hermite(dim, self.num_cub_points)
class UnscentedThirdOrder(Cubature):
def get_cubature_points_and_weights(self, dim):
return symmetric_cubature_third_order(dim)
class UnscentedFifthOrder(Cubature):
def get_cubature_points_and_weights(self, dim):
return symmetric_cubature_fifth_order(dim)
class Unscented(UnscentedFifthOrder):
pass
def mvhermgauss(H: int, D: int):
"""
This function is adapted from GPflow: https://github.com/GPflow/GPflow
    Return the evaluation locations 'x' and weights 'w' for a multivariate
    Gauss-Hermite quadrature.
    The outputs can be used to approximate the following type of integral:
    int exp(-x'x) f(x) dx ~ sum_i w[i] * f(x[i,:])
:param H: Number of Gauss-Hermite evaluation points.
:param D: Number of input dimensions. Needs to be known at call-time.
:return: eval_locations 'x' (H**DxD), weights 'w' (H**D)
"""
gh_x, gh_w = hermgauss(H)
x = np.array(list(itertools.product(*(gh_x,) * D))) # H**DxD
w = np.prod(np.array(list(itertools.product(*(gh_w,) * D))), 1) # H**D
return x, w
def gauss_hermite(dim=1, num_quad_pts=20):
"""
Return weights and sigma-points for Gauss-Hermite cubature
"""
# sigma_pts, weights = hermgauss(num_quad_pts) # Gauss-Hermite sigma points and weights
sigma_pts, weights = mvhermgauss(num_quad_pts, dim)
sigma_pts = np.sqrt(2) * sigma_pts.T
weights = weights.T * np.pi ** (-0.5 * dim) # scale weights by 1/√π
return sigma_pts, weights
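def _gauss_hermite_example(mean=0.0, var=1.0, num_quad_pts=20):
    """
    Hedged usage sketch (added for illustration, not part of the original module):
    the points/weights above approximate Gaussian expectations as
        E[f] = ∫ f(x) N(x|mean, var) dx ≈ Σᵢ wᵢ f(mean + √var · xᵢ).
    With f(x) = x² the returned value should be close to var + mean².
    """
    x, w = gauss_hermite(dim=1, num_quad_pts=num_quad_pts)
    fsig = mean + np.sqrt(var) * x  # scale the unit sigma points to N(mean, var)
    return np.sum(w * fsig ** 2)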
def symmetric_cubature_third_order(dim=1, kappa=None):
"""
Return weights and sigma-points for the symmetric cubature rule of order 3.
Uses 2dim+1 sigma-points
"""
if kappa is None:
# kappa = 1 - dim
kappa = 0 # CKF
w0 = kappa / (dim + kappa)
wm = 1 / (2 * (dim + kappa))
u = onp.sqrt(dim + kappa)
if (dim == 1) and (kappa == 0):
weights = onp.array([w0, wm, wm])
sigma_pts = onp.array([0., u, -u])
# sigma_pts = onp.array([-u, 0., u])
# weights = onp.array([wm, w0, wm])
elif (dim == 2) and (kappa == 0):
weights = onp.array([w0, wm, wm, wm, wm])
sigma_pts = onp.block([[0., u, 0., -u, 0.],
[0., 0., u, 0., -u]])
elif (dim == 3) and (kappa == 0):
weights = onp.array([w0, wm, wm, wm, wm, wm, wm])
sigma_pts = onp.block([[0., u, 0., 0., -u, 0., 0.],
[0., 0., u, 0., 0., -u, 0.],
[0., 0., 0., u, 0., 0., -u]])
else:
weights = onp.concatenate([onp.array([[kappa / (dim + kappa)]]), wm * onp.ones([1, 2*dim])], axis=1)
sigma_pts = onp.sqrt(dim + kappa) * onp.block([onp.zeros([dim, 1]), onp.eye(dim), -onp.eye(dim)])
return sigma_pts, weights
def symmetric_cubature_fifth_order(dim=1):
"""
Return weights and sigma-points for the symmetric cubature rule of order 5
Uses 2(dim**2)+1 sigma-points
"""
# The weights and sigma-points from McNamee & Stenger
I0 = 1
I2 = 1
I4 = 3
I22 = 1
u = onp.sqrt(I4 / I2)
A0 = I0 - dim * (I2 / I4) ** 2 * (I4 - 0.5 * (dim - 1) * I22)
A1 = 0.5 * (I2 / I4) ** 2 * (I4 - (dim - 1) * I22)
A2 = 0.25 * (I2 / I4) ** 2 * I22
# we implement specific cases manually to save compute
if dim == 1:
weights = onp.array([A0, A1, A1])
sigma_pts = onp.array([0., u, -u])
elif dim == 2:
weights = onp.array([A0, A1, A1, A1, A1, A2, A2, A2, A2])
sigma_pts = onp.block([[0., u, -u, 0., 0., u, -u, u, -u],
[0., 0., 0., u, -u, u, -u, -u, u]])
elif dim == 3:
weights = onp.array([A0, A1, A1, A1, A1, A1, A1, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2])
sigma_pts = onp.block([[0., u, -u, 0., 0., 0., 0., u, -u, u, -u, u, -u, u, -u, 0., 0., 0., 0.],
[0., 0., 0., u, -u, 0., 0., u, -u, -u, u, 0., 0., 0., 0., u, -u, u, -u],
[0., 0., 0., 0., 0., u, -u, 0., 0., 0., 0., u, -u, -u, u, u, -u, -u, u]])
else:
# general case
U0 = sym_set(dim, [])
U1 = sym_set(dim, [u])
U2 = sym_set(dim, [u, u])
sigma_pts = onp.concatenate([U0, U1, U2], axis=1)
weights = onp.concatenate([A0 * onp.ones(U0.shape[1]),
A1 * onp.ones(U1.shape[1]),
A2 * onp.ones(U2.shape[1])])
return sigma_pts, weights
def sym_set(n, gen=None):
if (gen is None) or (len(gen) == 0):
U = onp.zeros([n, 1])
else:
lengen = len(gen)
if lengen == 1:
U = onp.zeros([n, 2 * n])
elif lengen == 2:
U = onp.zeros([n, 2 * n * (n - 1)])
else:
raise NotImplementedError
ind = 0
for i in range(n):
u = onp.zeros(n)
u[i] = gen[0]
if lengen > 1:
if abs(gen[0] - gen[1]) < 1e-10:
V = sym_set(n-i-1, gen[1:])
for j in range(V.shape[1]):
u[i+1:] = V[:, j]
U[:, 2*ind] = u
U[:, 2*ind + 1] = -u
ind += 1
else:
raise NotImplementedError
# V = sym_set(n-1, gen[1:])
# for j in range(V.shape[1]):
# u[:i-1, i+1:] = V[:, j]
# U = onp.concatenate([U, u, -u])
# ind += 1
else:
U[:, 2*i] = u
U[:, 2*i+1] = -u
return U
def variational_expectation_cubature(likelihood, y, post_mean, post_cov, cubature=None):
"""
Computes the "variational expectation" via cubature, i.e. the
expected log-likelihood, and its derivatives w.r.t. the posterior mean
E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
:param likelihood: the likelihood model
:param y: observed data (yₙ) [scalar]
:param post_mean: posterior mean (mₙ) [scalar]
:param post_cov: posterior variance (vₙ) [scalar]
:param cubature: the function to compute sigma points and weights to use during cubature
:return:
exp_log_lik: the expected log likelihood, E[log p(yₙ|fₙ)] [scalar]
dE_dm: derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ [scalar]
d2E_dm2: second derivative of E[log p(yₙ|fₙ)] w.r.t. mₙ [scalar]
"""
if cubature is None:
x, w = gauss_hermite(post_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(post_mean.shape[0])
# fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
sigma_points = cholesky(post_cov) @ np.atleast_2d(x) + post_mean
# pre-compute wᵢ log p(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_log_likelihood_eval = w * likelihood.evaluate_log_likelihood(y, sigma_points)
# Compute expected log likelihood via cubature:
# E[log p(yₙ|fₙ)] = ∫ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ p(yₙ|fsigᵢ)
exp_log_lik = np.sum(
weighted_log_likelihood_eval
)
# Compute first derivative via cubature:
# dE[log p(yₙ|fₙ)]/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ log p(yₙ|fsigᵢ)
invv = np.diag(post_cov)[:, None] ** -1
dE_dm = np.sum(
invv * (sigma_points - post_mean)
* weighted_log_likelihood_eval, axis=-1
)[:, None]
# Compute second derivative via cubature (deriv. w.r.t. var = 0.5 * 2nd deriv. w.r.t. mean):
# dE[log p(yₙ|fₙ)]/dvₙ = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹]/2 log p(yₙ|fsigᵢ)
dE_dv = np.sum(
(0.5 * (invv ** 2 * (sigma_points - post_mean) ** 2) - 0.5 * invv)
* weighted_log_likelihood_eval, axis=-1
)
dE_dv = np.diag(dE_dv)
d2E_dm2 = 2 * dE_dv
return exp_log_lik, dE_dm, d2E_dm2
def log_density_cubature(likelihood, y, mean, cov, cubature=None):
"""
logZₙ = log ∫ p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
:param likelihood: the likelihood model
:param y: observed data (yₙ) [scalar]
:param mean: cavity mean (mₙ) [scalar]
:param cov: cavity covariance (cₙ) [scalar]
:param cubature: the function to compute sigma points and weights to use during cubature
:return:
lZ: the log density, logZₙ [scalar]
"""
if cubature is None:
x, w = gauss_hermite(mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(mean.shape[0])
cav_cho, low = cho_factor(cov)
# fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to cavity dist.
sigma_points = cav_cho @ np.atleast_2d(x) + mean
# pre-compute wᵢ p(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_likelihood_eval = w * likelihood.evaluate_likelihood(y, sigma_points)
# Compute partition function via cubature:
# Zₙ = ∫ p(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ ≈ ∑ᵢ wᵢ p(yₙ|fsigᵢ)
Z = np.sum(
weighted_likelihood_eval, axis=-1
)
lZ = np.log(np.maximum(Z, 1e-8))
return lZ
def moment_match_cubature(likelihood, y, cav_mean, cav_cov, power=1.0, cubature=None):
"""
TODO: N.B. THIS VERSION ALLOWS MULTI-DIMENSIONAL MOMENT MATCHING, BUT CAN BE UNSTABLE
Perform moment matching via cubature.
Moment matching involves computing the log partition function, logZₙ, and its derivatives w.r.t. the cavity mean
logZₙ = log ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
with EP power a.
:param likelihood: the likelihood model
:param y: observed data (yₙ) [scalar]
:param cav_mean: cavity mean (mₙ) [scalar]
:param cav_cov: cavity covariance (cₙ) [scalar]
:param power: EP power / fraction (a) [scalar]
:param cubature: the function to compute sigma points and weights to use during cubature
:return:
lZ: the log partition function, logZₙ [scalar]
dlZ: first derivative of logZₙ w.r.t. mₙ (if derivatives=True) [scalar]
d2lZ: second derivative of logZₙ w.r.t. mₙ (if derivatives=True) [scalar]
"""
if cubature is None:
x, w = gauss_hermite(cav_mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(cav_mean.shape[0])
cav_cho, low = cho_factor(cav_cov)
# fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to cavity dist.
sigma_points = cav_cho @ np.atleast_2d(x) + cav_mean
# pre-compute wᵢ pᵃ(yₙ|xᵢ√(2vₙ) + mₙ)
weighted_likelihood_eval = w * likelihood.evaluate_likelihood(y, sigma_points) ** power
# Compute partition function via cubature:
# Zₙ = ∫ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ pᵃ(yₙ|fsigᵢ)
Z = np.sum(
weighted_likelihood_eval, axis=-1
)
lZ = np.log(np.maximum(Z, 1e-8))
Zinv = 1.0 / np.maximum(Z, 1e-8)
# Compute derivative of partition function via cubature:
# dZₙ/dmₙ = ∫ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fₙ-mₙ) vₙ⁻¹ pᵃ(yₙ|fsigᵢ)
d1 = vmap(
gaussian_first_derivative_wrt_mean, (1, None, None, 1)
)(sigma_points[..., None], cav_mean, cav_cov, weighted_likelihood_eval)
dZ = np.sum(d1, axis=0)
# dlogZₙ/dmₙ = (dZₙ/dmₙ) / Zₙ
dlZ = Zinv * dZ
# Compute second derivative of partition function via cubature:
# d²Zₙ/dmₙ² = ∫ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ [(fₙ-mₙ)² vₙ⁻² - vₙ⁻¹] pᵃ(yₙ|fsigᵢ)
d2 = vmap(
gaussian_second_derivative_wrt_mean, (1, None, None, 1)
)(sigma_points[..., None], cav_mean, cav_cov, weighted_likelihood_eval)
d2Z = np.sum(d2, axis=0)
# d²logZₙ/dmₙ² = d[(dZₙ/dmₙ) / Zₙ]/dmₙ
# = (d²Zₙ/dmₙ² * Zₙ - (dZₙ/dmₙ)²) / Zₙ²
# = d²Zₙ/dmₙ² / Zₙ - (dlogZₙ/dmₙ)²
d2lZ = -dlZ @ dlZ.T + Zinv * d2Z
return lZ, dlZ, d2lZ
# def statistical_linear_regression_cubature(likelihood, mean, cov, cubature=None):
# """
# Perform statistical linear regression (SLR) using cubature.
# We aim to find a likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω).
# TODO: this currently assumes an additive noise model (ok for our current applications), make more general
# """
# if cubature is None:
# x, w = gauss_hermite(mean.shape[0], 20) # Gauss-Hermite sigma points and weights
# else:
# x, w = cubature(mean.shape[0])
# # fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
# sigma_points = cholesky(cov) @ np.atleast_2d(x) + mean
# lik_expectation, lik_covariance = likelihood.conditional_moments(sigma_points)
# # Compute muₙ via cubature:
# # muₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
# mu = np.sum(
# w * lik_expectation, axis=-1
# )[:, None]
# # Compute variance S via cubature:
# # S = ∫ [(E[yₙ|fₙ]-muₙ) (E[yₙ|fₙ]-muₙ)' + Cov[yₙ|fₙ]] 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ [(E[yₙ|fsigᵢ]-muₙ) (E[yₙ|fsigᵢ]-muₙ)' + Cov[yₙ|fₙ]]
# # TODO: allow for multi-dim cubature
# S = np.sum(
# w * ((lik_expectation - mu) * (lik_expectation - mu) + lik_covariance), axis=-1
# )[:, None]
# # Compute cross covariance C via cubature:
# # C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-muₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ (fsigᵢ -mₙ) (E[yₙ|fsigᵢ]-muₙ)'
# C = np.sum(
# w * (sigma_points - mean) * (lik_expectation - mu), axis=-1
# )[:, None]
# # compute equivalent likelihood noise, omega
# omega = S - C.T @ solve(cov, C)
# # Compute derivative of z via cubature:
# # d_mu = ∫ E[yₙ|fₙ] vₙ⁻¹ (fₙ-mₙ) 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ] vₙ⁻¹ (fsigᵢ-mₙ)
# prec = inv(cov)
# d_mu = np.sum(
# # w * lik_expectation * (solve(cov, sigma_points - mean)), axis=-1
# w * lik_expectation * (prec @ (sigma_points - mean)), axis=-1
# )[None, :]
# # Second derivative:
# # d2_mu = -∫ E[yₙ|fₙ] vₙ⁻¹ 𝓝(fₙ|mₙ,vₙ) dfₙ + ∫ E[yₙ|fₙ] (vₙ⁻¹ (fₙ-mₙ))² 𝓝(fₙ|mₙ,vₙ) dfₙ
# # ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ] ((vₙ⁻¹ (fsigᵢ-mₙ))² - vₙ⁻¹)
# d2_mu = np.sum(
# w * lik_expectation * (prec @ (sigma_points - mean) ** 2 - prec), axis=-1
# )[None, :]
# return mu, omega, d_mu, d2_mu
def statistical_linear_regression_cubature(likelihood, mean, cov, cubature=None):
"""
Perform statistical linear regression (SLR) using cubature.
We aim to find a likelihood approximation p(yₙ|fₙ) ≈ 𝓝(yₙ|Afₙ+b,Ω).
TODO: this currently assumes an additive noise model (ok for our current applications), make more general
"""
mu, omega = expected_conditional_mean(likelihood, mean, cov, cubature)
dmu_dm = expected_conditional_mean_dm(likelihood, mean, cov, cubature)
d2mu_dm2 = expected_conditional_mean_dm2(likelihood, mean, cov, cubature)
return mu.reshape(-1, 1), omega, dmu_dm.reshape(1, -1), d2mu_dm2
# return mu.reshape(-1, 1), omega, dmu_dm[None], np.swapaxes(d2mu_dm2, axis1=0, axis2=2)
def expected_conditional_mean(likelihood, mean, cov, cubature=None):
"""
    Compute Eq[E[y|f]] = ∫ E[y|f] 𝓝(f|mean,cov) df
"""
if cubature is None:
x, w = gauss_hermite(mean.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(mean.shape[0])
# fsigᵢ=xᵢ√(vₙ) + mₙ: scale locations according to cavity dist.
sigma_points = cholesky(cov) @ np.atleast_2d(x) + mean
lik_expectation, lik_covariance = likelihood.conditional_moments(sigma_points)
# Compute muₙ via cubature:
# muₙ = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fsigᵢ]
mu = np.sum(
w * lik_expectation, axis=-1
)[:, None]
S = np.sum(
# w * ((lik_expectation - mu) @ (lik_expectation - mu).T + lik_covariance), axis=-1 # TODO: CHECK MULTI-DIM
w * ((lik_expectation - mu) ** 2 + lik_covariance), axis=-1
)[:, None]
# Compute cross covariance C via cubature:
# C = ∫ (fₙ-mₙ) (E[yₙ|fₙ]-muₙ)' 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (fsigᵢ -mₙ) (E[yₙ|fsigᵢ]-muₙ)'
C = np.sum(
w * (sigma_points - mean) * (lik_expectation - mu), axis=-1
)[:, None]
# compute equivalent likelihood noise, omega
omega = S - C.T @ solve(cov, C)
return np.squeeze(mu), omega
def expected_conditional_mean_dm(likelihood, mean, cov, cubature=None):
"""
"""
dmu_dm, _ = grad(expected_conditional_mean, argnums=1, has_aux=True)(likelihood, mean, cov, cubature)
return np.squeeze(dmu_dm)
def expected_conditional_mean_dm2(likelihood, mean, cov, cubature=None):
"""
"""
d2mu_dm2 = jacrev(expected_conditional_mean_dm, argnums=1)(likelihood, mean, cov, cubature)
return d2mu_dm2
def predict_cubature(likelihood, mean_f, var_f, cubature=None):
"""
predict in data space given predictive mean and var of the latent function
"""
if cubature is None:
x, w = gauss_hermite(mean_f.shape[0], 20) # Gauss-Hermite sigma points and weights
else:
x, w = cubature(mean_f.shape[0])
chol_f, low = cho_factor(var_f)
# fsigᵢ=xᵢ√cₙ + mₙ: scale locations according to latent dist.
sigma_points = chol_f @ np.atleast_2d(x) + mean_f
# Compute moments via cubature:
# E[y] = ∫ E[yₙ|fₙ] 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ E[yₙ|fₙ]
# E[y^2] = ∫ (Cov[yₙ|fₙ] + E[yₙ|fₙ]^2) 𝓝(fₙ|mₙ,vₙ) dfₙ
# ≈ ∑ᵢ wᵢ (Cov[yₙ|fₙ] + E[yₙ|fₙ]^2)
conditional_expectation, conditional_covariance = likelihood.conditional_moments(sigma_points)
expected_y = np.sum(w * conditional_expectation, axis=-1)
expected_y_squared = np.sum(w * (conditional_covariance + conditional_expectation ** 2), axis=-1)
# Cov[y] = E[y^2] - E[y]^2
covariance_y = expected_y_squared - expected_y ** 2
return expected_y, covariance_y
|
[
"numpy.polynomial.hermite.hermgauss",
"jax.numpy.atleast_2d",
"numpy.ones",
"jax.numpy.squeeze",
"itertools.product",
"jax.numpy.diag",
"jax.numpy.sum",
"jax.vmap",
"jax.numpy.maximum",
"jax.scipy.linalg.cho_factor",
"numpy.concatenate",
"numpy.block",
"jax.jacrev",
"jax.scipy.linalg.cholesky",
"numpy.zeros",
"numpy.array",
"jax.grad",
"numpy.eye",
"jax.numpy.sqrt",
"numpy.sqrt"
] |
[((2062, 2074), 'numpy.polynomial.hermite.hermgauss', 'hermgauss', (['H'], {}), '(H)\n', (2071, 2074), False, 'from numpy.polynomial.hermite import hermgauss\n'), ((2977, 2998), 'numpy.sqrt', 'onp.sqrt', (['(dim + kappa)'], {}), '(dim + kappa)\n', (2985, 2998), True, 'import numpy as onp\n'), ((4231, 4248), 'numpy.sqrt', 'onp.sqrt', (['(I4 / I2)'], {}), '(I4 / I2)\n', (4239, 4248), True, 'import numpy as onp\n'), ((8243, 8279), 'jax.numpy.sum', 'np.sum', (['weighted_log_likelihood_eval'], {}), '(weighted_log_likelihood_eval)\n', (8249, 8279), True, 'import jax.numpy as np\n'), ((8917, 9035), 'jax.numpy.sum', 'np.sum', (['((0.5 * (invv ** 2 * (sigma_points - post_mean) ** 2) - 0.5 * invv) *\n weighted_log_likelihood_eval)'], {'axis': '(-1)'}), '((0.5 * (invv ** 2 * (sigma_points - post_mean) ** 2) - 0.5 * invv) *\n weighted_log_likelihood_eval, axis=-1)\n', (8923, 9035), True, 'import jax.numpy as np\n'), ((9066, 9080), 'jax.numpy.diag', 'np.diag', (['dE_dv'], {}), '(dE_dv)\n', (9073, 9080), True, 'import jax.numpy as np\n'), ((9783, 9798), 'jax.scipy.linalg.cho_factor', 'cho_factor', (['cov'], {}), '(cov)\n', (9793, 9798), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((10155, 10196), 'jax.numpy.sum', 'np.sum', (['weighted_likelihood_eval'], {'axis': '(-1)'}), '(weighted_likelihood_eval, axis=-1)\n', (10161, 10196), True, 'import jax.numpy as np\n'), ((11438, 11457), 'jax.scipy.linalg.cho_factor', 'cho_factor', (['cav_cov'], {}), '(cav_cov)\n', (11448, 11457), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((11840, 11881), 'jax.numpy.sum', 'np.sum', (['weighted_likelihood_eval'], {'axis': '(-1)'}), '(weighted_likelihood_eval, axis=-1)\n', (11846, 11881), True, 'import jax.numpy as np\n'), ((12300, 12318), 'jax.numpy.sum', 'np.sum', (['d1'], {'axis': '(0)'}), '(d1, axis=0)\n', (12306, 12318), True, 'import jax.numpy as np\n'), ((12736, 12754), 'jax.numpy.sum', 'np.sum', (['d2'], {'axis': '(0)'}), '(d2, axis=0)\n', (12742, 12754), True, 'import jax.numpy as np\n'), ((17507, 17525), 'jax.numpy.squeeze', 'np.squeeze', (['dmu_dm'], {}), '(dmu_dm)\n', (17517, 17525), True, 'import jax.numpy as np\n'), ((18080, 18097), 'jax.scipy.linalg.cho_factor', 'cho_factor', (['var_f'], {}), '(var_f)\n', (18090, 18097), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((18544, 18588), 'jax.numpy.sum', 'np.sum', (['(w * conditional_expectation)'], {'axis': '(-1)'}), '(w * conditional_expectation, axis=-1)\n', (18550, 18588), True, 'import jax.numpy as np\n'), ((18614, 18690), 'jax.numpy.sum', 'np.sum', (['(w * (conditional_covariance + conditional_expectation ** 2))'], {'axis': '(-1)'}), '(w * (conditional_covariance + conditional_expectation ** 2), axis=-1)\n', (18620, 18690), True, 'import jax.numpy as np\n'), ((2522, 2532), 'jax.numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2529, 2532), True, 'import jax.numpy as np\n'), ((3053, 3076), 'numpy.array', 'onp.array', (['[w0, wm, wm]'], {}), '([w0, wm, wm])\n', (3062, 3076), True, 'import numpy as onp\n'), ((3097, 3120), 'numpy.array', 'onp.array', (['[0.0, u, -u]'], {}), '([0.0, u, -u])\n', (3106, 3120), True, 'import numpy as onp\n'), ((4501, 4524), 'numpy.array', 'onp.array', (['[A0, A1, A1]'], {}), '([A0, A1, A1])\n', (4510, 4524), True, 'import numpy as onp\n'), ((4545, 4568), 'numpy.array', 'onp.array', (['[0.0, u, -u]'], {}), '([0.0, u, -u])\n', (4554, 4568), True, 'import numpy as onp\n'), ((5714, 5731), 'numpy.zeros', 'onp.zeros', (['[n, 1]'], {}), '([n, 1])\n', (5723, 5731), True, 'import numpy as 
onp\n'), ((8532, 8617), 'jax.numpy.sum', 'np.sum', (['(invv * (sigma_points - post_mean) * weighted_log_likelihood_eval)'], {'axis': '(-1)'}), '(invv * (sigma_points - post_mean) * weighted_log_likelihood_eval,\n axis=-1)\n', (8538, 8617), True, 'import jax.numpy as np\n'), ((10227, 10247), 'jax.numpy.maximum', 'np.maximum', (['Z', '(1e-08)'], {}), '(Z, 1e-08)\n', (10237, 10247), True, 'import jax.numpy as np\n'), ((11912, 11932), 'jax.numpy.maximum', 'np.maximum', (['Z', '(1e-08)'], {}), '(Z, 1e-08)\n', (11922, 11932), True, 'import jax.numpy as np\n'), ((11950, 11970), 'jax.numpy.maximum', 'np.maximum', (['Z', '(1e-08)'], {}), '(Z, 1e-08)\n', (11960, 11970), True, 'import jax.numpy as np\n'), ((12146, 12206), 'jax.vmap', 'vmap', (['gaussian_first_derivative_wrt_mean', '(1, None, None, 1)'], {}), '(gaussian_first_derivative_wrt_mean, (1, None, None, 1))\n', (12150, 12206), False, 'from jax import vmap, grad, jacrev\n'), ((12580, 12641), 'jax.vmap', 'vmap', (['gaussian_second_derivative_wrt_mean', '(1, None, None, 1)'], {}), '(gaussian_second_derivative_wrt_mean, (1, None, None, 1))\n', (12584, 12641), False, 'from jax import vmap, grad, jacrev\n'), ((16661, 16697), 'jax.numpy.sum', 'np.sum', (['(w * lik_expectation)'], {'axis': '(-1)'}), '(w * lik_expectation, axis=-1)\n', (16667, 16697), True, 'import jax.numpy as np\n'), ((16729, 16796), 'jax.numpy.sum', 'np.sum', (['(w * ((lik_expectation - mu) ** 2 + lik_covariance))'], {'axis': '(-1)'}), '(w * ((lik_expectation - mu) ** 2 + lik_covariance), axis=-1)\n', (16735, 16796), True, 'import jax.numpy as np\n'), ((17091, 17158), 'jax.numpy.sum', 'np.sum', (['(w * (sigma_points - mean) * (lik_expectation - mu))'], {'axis': '(-1)'}), '(w * (sigma_points - mean) * (lik_expectation - mu), axis=-1)\n', (17097, 17158), True, 'import jax.numpy as np\n'), ((17278, 17292), 'jax.numpy.squeeze', 'np.squeeze', (['mu'], {}), '(mu)\n', (17288, 17292), True, 'import jax.numpy as np\n'), ((17406, 17462), 'jax.grad', 'grad', (['expected_conditional_mean'], {'argnums': '(1)', 'has_aux': '(True)'}), '(expected_conditional_mean, argnums=1, has_aux=True)\n', (17410, 17462), False, 'from jax import vmap, grad, jacrev\n'), ((17632, 17679), 'jax.jacrev', 'jacrev', (['expected_conditional_mean_dm'], {'argnums': '(1)'}), '(expected_conditional_mean_dm, argnums=1)\n', (17638, 17679), False, 'from jax import vmap, grad, jacrev\n'), ((2097, 2130), 'itertools.product', 'itertools.product', (['*((gh_x,) * D)'], {}), '(*((gh_x,) * D))\n', (2114, 2130), False, 'import itertools\n'), ((3265, 3296), 'numpy.array', 'onp.array', (['[w0, wm, wm, wm, wm]'], {}), '([w0, wm, wm, wm, wm])\n', (3274, 3296), True, 'import numpy as onp\n'), ((3317, 3376), 'numpy.block', 'onp.block', (['[[0.0, u, 0.0, -u, 0.0], [0.0, 0.0, u, 0.0, -u]]'], {}), '([[0.0, u, 0.0, -u, 0.0], [0.0, 0.0, u, 0.0, -u]])\n', (3326, 3376), True, 'import numpy as onp\n'), ((4605, 4652), 'numpy.array', 'onp.array', (['[A0, A1, A1, A1, A1, A2, A2, A2, A2]'], {}), '([A0, A1, A1, A1, A1, A2, A2, A2, A2])\n', (4614, 4652), True, 'import numpy as onp\n'), ((4673, 4765), 'numpy.block', 'onp.block', (['[[0.0, u, -u, 0.0, 0.0, u, -u, u, -u], [0.0, 0.0, 0.0, u, -u, u, -u, -u, u]]'], {}), '([[0.0, u, -u, 0.0, 0.0, u, -u, u, -u], [0.0, 0.0, 0.0, u, -u, u, \n -u, -u, u]])\n', (4682, 4765), True, 'import numpy as onp\n'), ((5809, 5830), 'numpy.zeros', 'onp.zeros', (['[n, 2 * n]'], {}), '([n, 2 * n])\n', (5818, 5830), True, 'import numpy as onp\n'), ((6017, 6029), 'numpy.zeros', 'onp.zeros', (['n'], {}), '(n)\n', (6026, 6029), 
True, 'import numpy as onp\n'), ((7890, 7908), 'jax.scipy.linalg.cholesky', 'cholesky', (['post_cov'], {}), '(post_cov)\n', (7898, 7908), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((7911, 7927), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (7924, 7927), True, 'import jax.numpy as np\n'), ((8487, 8504), 'jax.numpy.diag', 'np.diag', (['post_cov'], {}), '(post_cov)\n', (8494, 8504), True, 'import jax.numpy as np\n'), ((9894, 9910), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (9907, 9910), True, 'import jax.numpy as np\n'), ((11553, 11569), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (11566, 11569), True, 'import jax.numpy as np\n'), ((16429, 16442), 'jax.scipy.linalg.cholesky', 'cholesky', (['cov'], {}), '(cov)\n', (16437, 16442), False, 'from jax.scipy.linalg import cholesky, cho_factor\n'), ((16445, 16461), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (16458, 16461), True, 'import jax.numpy as np\n'), ((18192, 18208), 'jax.numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (18205, 18208), True, 'import jax.numpy as np\n'), ((2171, 2204), 'itertools.product', 'itertools.product', (['*((gh_w,) * D)'], {}), '(*((gh_w,) * D))\n', (2188, 2204), False, 'import itertools\n'), ((3459, 3498), 'numpy.array', 'onp.array', (['[w0, wm, wm, wm, wm, wm, wm]'], {}), '([w0, wm, wm, wm, wm, wm, wm])\n', (3468, 3498), True, 'import numpy as onp\n'), ((3519, 3637), 'numpy.block', 'onp.block', (['[[0.0, u, 0.0, 0.0, -u, 0.0, 0.0], [0.0, 0.0, u, 0.0, 0.0, -u, 0.0], [0.0, \n 0.0, 0.0, u, 0.0, 0.0, -u]]'], {}), '([[0.0, u, 0.0, 0.0, -u, 0.0, 0.0], [0.0, 0.0, u, 0.0, 0.0, -u, \n 0.0], [0.0, 0.0, 0.0, u, 0.0, 0.0, -u]])\n', (3528, 3637), True, 'import numpy as onp\n'), ((4823, 4914), 'numpy.array', 'onp.array', (['[A0, A1, A1, A1, A1, A1, A1, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2, A2]'], {}), '([A0, A1, A1, A1, A1, A1, A1, A2, A2, A2, A2, A2, A2, A2, A2, A2,\n A2, A2, A2])\n', (4832, 4914), True, 'import numpy as onp\n'), ((4931, 5200), 'numpy.block', 'onp.block', (['[[0.0, u, -u, 0.0, 0.0, 0.0, 0.0, u, -u, u, -u, u, -u, u, -u, 0.0, 0.0, 0.0,\n 0.0], [0.0, 0.0, 0.0, u, -u, 0.0, 0.0, u, -u, -u, u, 0.0, 0.0, 0.0, 0.0,\n u, -u, u, -u], [0.0, 0.0, 0.0, 0.0, 0.0, u, -u, 0.0, 0.0, 0.0, 0.0, u, \n -u, -u, u, u, -u, -u, u]]'], {}), '([[0.0, u, -u, 0.0, 0.0, 0.0, 0.0, u, -u, u, -u, u, -u, u, -u, 0.0,\n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, u, -u, 0.0, 0.0, u, -u, -u, u, 0.0, 0.0,\n 0.0, 0.0, u, -u, u, -u], [0.0, 0.0, 0.0, 0.0, 0.0, u, -u, 0.0, 0.0, 0.0,\n 0.0, u, -u, -u, u, u, -u, -u, u]])\n', (4940, 5200), True, 'import numpy as onp\n'), ((5373, 5410), 'numpy.concatenate', 'onp.concatenate', (['[U0, U1, U2]'], {'axis': '(1)'}), '([U0, U1, U2], axis=1)\n', (5388, 5410), True, 'import numpy as onp\n'), ((5873, 5904), 'numpy.zeros', 'onp.zeros', (['[n, 2 * n * (n - 1)]'], {}), '([n, 2 * n * (n - 1)])\n', (5882, 5904), True, 'import numpy as onp\n'), ((3830, 3851), 'numpy.sqrt', 'onp.sqrt', (['(dim + kappa)'], {}), '(dim + kappa)\n', (3838, 3851), True, 'import numpy as onp\n'), ((3736, 3772), 'numpy.array', 'onp.array', (['[[kappa / (dim + kappa)]]'], {}), '([[kappa / (dim + kappa)]])\n', (3745, 3772), True, 'import numpy as onp\n'), ((3779, 3801), 'numpy.ones', 'onp.ones', (['[1, 2 * dim]'], {}), '([1, 2 * dim])\n', (3787, 3801), True, 'import numpy as onp\n'), ((3865, 3884), 'numpy.zeros', 'onp.zeros', (['[dim, 1]'], {}), '([dim, 1])\n', (3874, 3884), True, 'import numpy as onp\n'), ((3886, 3898), 'numpy.eye', 'onp.eye', 
(['dim'], {}), '(dim)\n', (3893, 3898), True, 'import numpy as onp\n'), ((5451, 5472), 'numpy.ones', 'onp.ones', (['U0.shape[1]'], {}), '(U0.shape[1])\n', (5459, 5472), True, 'import numpy as onp\n'), ((5514, 5535), 'numpy.ones', 'onp.ones', (['U1.shape[1]'], {}), '(U1.shape[1])\n', (5522, 5535), True, 'import numpy as onp\n'), ((5577, 5598), 'numpy.ones', 'onp.ones', (['U2.shape[1]'], {}), '(U2.shape[1])\n', (5585, 5598), True, 'import numpy as onp\n'), ((3901, 3913), 'numpy.eye', 'onp.eye', (['dim'], {}), '(dim)\n', (3908, 3913), True, 'import numpy as onp\n')]
|
from RandomGenerator.randomInt import randomInt
from numpy import random
def randomIntSeed (start, end, seed):
state = random.get_state()
random.seed(seed)
try:
randIntSeeded = randomInt(start, end)
return randIntSeeded
finally:
random.set_state(state)
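# Hedged usage sketch (added for illustration, not in the original): assuming
# randomInt draws from numpy's global RNG, the same seed reproduces the same
# value, while the caller's RNG state is restored by the finally block.
#
#   a = randomIntSeed(1, 100, seed=42)
#   b = randomIntSeed(1, 100, seed=42)
#   assert a == b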
|
[
"numpy.random.get_state",
"numpy.random.seed",
"RandomGenerator.randomInt.randomInt",
"numpy.random.set_state"
] |
[((124, 142), 'numpy.random.get_state', 'random.get_state', ([], {}), '()\n', (140, 142), False, 'from numpy import random\n'), ((147, 164), 'numpy.random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (158, 164), False, 'from numpy import random\n'), ((198, 219), 'RandomGenerator.randomInt.randomInt', 'randomInt', (['start', 'end'], {}), '(start, end)\n', (207, 219), False, 'from RandomGenerator.randomInt import randomInt\n'), ((270, 293), 'numpy.random.set_state', 'random.set_state', (['state'], {}), '(state)\n', (286, 293), False, 'from numpy import random\n')]
|
from numpy import random, pi
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
Ntrials, Nhits = 1_000_000, 0
for n in range(Ntrials):
    x, y, z = random.uniform(-1, 1, 3) # draw 3 samples, each uniformly distributed over (-1,1)
if x**2 + y**2 + z**2 < 1:
Nhits += 1
print("Monte Carlo estimator of V(3): %.5f" % ((2**3)*(Nhits / Ntrials)))
print("Actual value of V(3) up to 5 decimal digits: %.5f" % (4*pi/3))
print("The relative error is %.5f%%" % (100 * abs((2**3)*(Nhits / Ntrials) - (4*pi/3))))
|
[
"numpy.random.uniform"
] |
[((171, 195), 'numpy.random.uniform', 'random.uniform', (['(-1)', '(1)', '(3)'], {}), '(-1, 1, 3)\n', (185, 195), False, 'from numpy import random, pi\n')]
|
# Author: <NAME>
# License: BSD
import warnings
from nilearn.input_data import NiftiMasker
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
from os.path import expanduser, join
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from joblib import Memory, dump
from joblib import Parallel, delayed
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from modl.datasets import fetch_adhd
from modl.decomposition.fmri import fMRIDictFact
from modl.decomposition.stability import mean_amari_discrepency
from modl.plotting.fmri import display_maps
from nilearn.datasets import fetch_atlas_smith_2009
from modl.utils.system import get_cache_dirs
batch_size = 200
learning_rate = .92
method = 'masked'
step_size = 0.01
reduction_ = 8
alpha = 1e-3
n_epochs = 4
verbose = 15
n_jobs = 70
smoothing_fwhm = 6
components_list = [20, 40, 80, 120, 200, 300, 500]
n_runs = 20
dict_init = fetch_atlas_smith_2009().rsn20
dataset = fetch_adhd(n_subjects=40)
data = dataset.rest.values
train_data, test_data = train_test_split(data, test_size=2, random_state=0)
train_imgs, train_confounds = zip(*train_data)
test_imgs, test_confounds = zip(*test_data)
mask = dataset.mask
mem = Memory(location=get_cache_dirs()[0])
masker = NiftiMasker(mask_img=mask).fit()
def fit_single(train_imgs, test_imgs, n_components, random_state):
dict_fact = fMRIDictFact(smoothing_fwhm=smoothing_fwhm,
method=method,
step_size=step_size,
mask=mask,
memory=mem,
memory_level=2,
verbose=verbose,
n_epochs=n_epochs,
n_jobs=1,
random_state=random_state,
n_components=n_components,
positive=True,
learning_rate=learning_rate,
batch_size=batch_size,
reduction=reduction_,
alpha=alpha,
callback=None,
)
dict_fact.fit(train_imgs, confounds=train_confounds)
score = dict_fact.score(test_imgs)
return dict_fact.components_, score
def fit_many_runs(train_imgs, test_imgs, components_list, n_runs=10, n_jobs=1):
random_states = check_random_state(0).randint(0, int(1e7), size=n_runs)
cached_fit = mem.cache(fit_single)
res = Parallel(n_jobs=n_jobs)(delayed(cached_fit)(
train_imgs, test_imgs, n_components, random_state)
for n_components in components_list
for random_state in random_states
)
components, scores = zip(*res)
shape = (len(components_list), len(random_states))
components = np.array(components).reshape(shape).tolist()
scores = np.array(scores).reshape(shape).tolist()
discrepencies = []
var_discrepencies = []
best_components = []
for n_components, these_components, these_scores in zip(components_list,
components,
scores):
discrepency, var_discrepency = mean_amari_discrepency(
these_components)
best_estimator = these_components[np.argmin(these_scores)]
        discrepencies.append(discrepency)
var_discrepencies.append(var_discrepency)
best_components.append(best_estimator)
discrepencies = np.array(discrepencies)
var_discrepencies = np.array(var_discrepencies)
best_components = np.array(best_components)
components = best_components[np.argmin(discrepencies)]
return discrepencies, var_discrepencies, components
output_dir = expanduser('~/output_drago4/modl/fmri_stability2')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
discrepencies, var_discrepencies, components = fit_many_runs(
train_imgs, test_imgs,
components_list,
n_jobs=n_jobs,
n_runs=n_runs)
components_img = masker.inverse_transform(components)
components_img.to_filename(
join(output_dir, 'components.nii.gz'))
dump((components_list, discrepencies, var_discrepencies),
join(output_dir, 'discrepencies.pkl'))
fig = plt.figure()
display_maps(fig, components_img)
plt.savefig(join(output_dir, 'components.pdf'))
fig, ax = plt.subplots(1, 1)
ax.fill_between(components_list, discrepencies - var_discrepencies,
discrepencies + var_discrepencies, alpha=0.5)
ax.plot(components_list, discrepencies, marker='o')
ax.set_xlabel('Number of components')
ax.set_ylabel('Mean Amari discrepency')
sns.despine(fig)
fig.suptitle('Stability selection using DL')
plt.savefig(join(output_dir, 'discrepencies.pdf'))
|
[
"sklearn.utils.check_random_state",
"modl.decomposition.fmri.fMRIDictFact",
"sklearn.model_selection.train_test_split",
"numpy.argmin",
"matplotlib.pyplot.figure",
"modl.plotting.fmri.display_maps",
"os.path.join",
"os.path.exists",
"nilearn.datasets.fetch_atlas_smith_2009",
"nilearn.input_data.NiftiMasker",
"matplotlib.pyplot.subplots",
"modl.datasets.fetch_adhd",
"modl.decomposition.stability.mean_amari_discrepency",
"modl.utils.system.get_cache_dirs",
"os.makedirs",
"warnings.filterwarnings",
"seaborn.despine",
"numpy.array",
"joblib.Parallel",
"joblib.delayed",
"os.path.expanduser"
] |
[((93, 155), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (116, 155), False, 'import warnings\n'), ((1015, 1040), 'modl.datasets.fetch_adhd', 'fetch_adhd', ([], {'n_subjects': '(40)'}), '(n_subjects=40)\n', (1025, 1040), False, 'from modl.datasets import fetch_adhd\n'), ((1092, 1143), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(2)', 'random_state': '(0)'}), '(data, test_size=2, random_state=0)\n', (1108, 1143), False, 'from sklearn.model_selection import train_test_split\n'), ((3957, 4007), 'os.path.expanduser', 'expanduser', (['"""~/output_drago4/modl/fmri_stability2"""'], {}), "('~/output_drago4/modl/fmri_stability2')\n", (3967, 4007), False, 'from os.path import expanduser, join\n'), ((4456, 4468), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4466, 4468), True, 'import matplotlib.pyplot as plt\n'), ((4469, 4502), 'modl.plotting.fmri.display_maps', 'display_maps', (['fig', 'components_img'], {}), '(fig, components_img)\n', (4481, 4502), False, 'from modl.plotting.fmri import display_maps\n'), ((4561, 4579), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (4573, 4579), True, 'import matplotlib.pyplot as plt\n'), ((4840, 4856), 'seaborn.despine', 'sns.despine', (['fig'], {}), '(fig)\n', (4851, 4856), True, 'import seaborn as sns\n'), ((973, 997), 'nilearn.datasets.fetch_atlas_smith_2009', 'fetch_atlas_smith_2009', ([], {}), '()\n', (995, 997), False, 'from nilearn.datasets import fetch_atlas_smith_2009\n'), ((1425, 1779), 'modl.decomposition.fmri.fMRIDictFact', 'fMRIDictFact', ([], {'smoothing_fwhm': 'smoothing_fwhm', 'method': 'method', 'step_size': 'step_size', 'mask': 'mask', 'memory': 'mem', 'memory_level': '(2)', 'verbose': 'verbose', 'n_epochs': 'n_epochs', 'n_jobs': '(1)', 'random_state': 'random_state', 'n_components': 'n_components', 'positive': '(True)', 'learning_rate': 'learning_rate', 'batch_size': 'batch_size', 'reduction': 'reduction_', 'alpha': 'alpha', 'callback': 'None'}), '(smoothing_fwhm=smoothing_fwhm, method=method, step_size=\n step_size, mask=mask, memory=mem, memory_level=2, verbose=verbose,\n n_epochs=n_epochs, n_jobs=1, random_state=random_state, n_components=\n n_components, positive=True, learning_rate=learning_rate, batch_size=\n batch_size, reduction=reduction_, alpha=alpha, callback=None)\n', (1437, 1779), False, 'from modl.decomposition.fmri import fMRIDictFact\n'), ((3702, 3725), 'numpy.array', 'np.array', (['discrepencies'], {}), '(discrepencies)\n', (3710, 3725), True, 'import numpy as np\n'), ((3750, 3777), 'numpy.array', 'np.array', (['var_discrepencies'], {}), '(var_discrepencies)\n', (3758, 3777), True, 'import numpy as np\n'), ((3800, 3825), 'numpy.array', 'np.array', (['best_components'], {}), '(best_components)\n', (3808, 3825), True, 'import numpy as np\n'), ((4016, 4042), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (4030, 4042), False, 'import os\n'), ((4048, 4071), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (4059, 4071), False, 'import os\n'), ((4308, 4345), 'os.path.join', 'join', (['output_dir', '"""components.nii.gz"""'], {}), "(output_dir, 'components.nii.gz')\n", (4312, 4345), False, 'from os.path import expanduser, join\n'), ((4410, 4447), 'os.path.join', 'join', (['output_dir', '"""discrepencies.pkl"""'], {}), "(output_dir, 'discrepencies.pkl')\n", (4414, 4447), False, 'from 
os.path import expanduser, join\n'), ((4515, 4549), 'os.path.join', 'join', (['output_dir', '"""components.pdf"""'], {}), "(output_dir, 'components.pdf')\n", (4519, 4549), False, 'from os.path import expanduser, join\n'), ((4914, 4951), 'os.path.join', 'join', (['output_dir', '"""discrepencies.pdf"""'], {}), "(output_dir, 'discrepencies.pdf')\n", (4918, 4951), False, 'from os.path import expanduser, join\n'), ((1307, 1333), 'nilearn.input_data.NiftiMasker', 'NiftiMasker', ([], {'mask_img': 'mask'}), '(mask_img=mask)\n', (1318, 1333), False, 'from nilearn.input_data import NiftiMasker\n'), ((2600, 2623), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (2608, 2623), False, 'from joblib import Parallel, delayed\n'), ((3417, 3457), 'modl.decomposition.stability.mean_amari_discrepency', 'mean_amari_discrepency', (['these_components'], {}), '(these_components)\n', (3439, 3457), False, 'from modl.decomposition.stability import mean_amari_discrepency\n'), ((3859, 3883), 'numpy.argmin', 'np.argmin', (['discrepencies'], {}), '(discrepencies)\n', (3868, 3883), True, 'import numpy as np\n'), ((1277, 1293), 'modl.utils.system.get_cache_dirs', 'get_cache_dirs', ([], {}), '()\n', (1291, 1293), False, 'from modl.utils.system import get_cache_dirs\n'), ((2494, 2515), 'sklearn.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (2512, 2515), False, 'from sklearn.utils import check_random_state\n'), ((3513, 3536), 'numpy.argmin', 'np.argmin', (['these_scores'], {}), '(these_scores)\n', (3522, 3536), True, 'import numpy as np\n'), ((2624, 2643), 'joblib.delayed', 'delayed', (['cached_fit'], {}), '(cached_fit)\n', (2631, 2643), False, 'from joblib import Parallel, delayed\n'), ((2985, 3005), 'numpy.array', 'np.array', (['components'], {}), '(components)\n', (2993, 3005), True, 'import numpy as np\n'), ((3043, 3059), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3051, 3059), True, 'import numpy as np\n')]
|
"""
stateinterpreter
Interpretation of metastable states from MD simulations
"""
import sys
from setuptools import setup, find_packages, Extension
import versioneer
import numpy
os_name = sys.platform
compile_args = ["-O3", "-ffast-math", "-march=native", "-fopenmp" ]
libraries = ["m"]
link_args = ['-fopenmp']
if os_name.startswith('darwin'):
#clang compilation
compile_args.insert(-1, "-Xpreprocessor")
libraries.append("omp")
link_args.insert(-1, "-Xpreprocessor")
__cython__ = False # command line option, try-import, ...
try:
import Cython
__cython__ = True
except ModuleNotFoundError:
__cython__ = False
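# Without Cython we build from the '.c' file instead of the '.pyx'
# (presumably a pre-generated source shipped with the package).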
ext = '.pyx' if __cython__ else '.c'
short_description = "Interpretation of metastable states from MD simulations".split("\n")[0]
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except:
long_description = None
ext_modules=[
Extension("stateinterpreter.utils._compiled_numerics",
["stateinterpreter/utils/_compiled_numerics"+ext],
libraries=libraries,
include_dirs=[numpy.get_include()],
extra_compile_args = compile_args,
extra_link_args= link_args
)
]
if __cython__:
from Cython.Build import cythonize
ext_modules = cythonize(ext_modules)
setup(
# Self-descriptive entries which should always be present
name='stateinterpreter',
author='<NAME> <<EMAIL>>, <NAME> <pietro.<EMAIL>li.iit>"',
description=short_description,
long_description=long_description,
long_description_content_type="text/markdown",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='MIT',
# Which Python importable modules should be included when your package is installed
# Handled automatically by setuptools. Use 'exclude' to prevent some specific
# subpackage(s) from being added, if needed
packages=find_packages(),
# Optional include package data to ship with your package
# Customize MANIFEST.in if the general case does not suit your needs
# Comment out this line to prevent the files from being packaged with your software
include_package_data=True,
# Allows `setup.py test` to work correctly with pytest
setup_requires=[] + pytest_runner,
ext_modules = ext_modules,
zip_safe = False,
    # Additional entries you may want: simply uncomment the lines you need and fill in the data
# url='http://www.my_package.com', # Website
# install_requires=[], # Required packages, pulls from pip if needed; do not use for Conda deployment
# platforms=['Linux',
# 'Mac OS-X',
# 'Unix',
# 'Windows'], # Valid platforms your code works on, adjust to your flavor
# python_requires=">=3.5", # Python version restrictions
# Manual control if final package is compressible or not, set False to prevent the .egg from being made
# zip_safe=False,
)
|
[
"versioneer.get_version",
"Cython.Build.cythonize",
"versioneer.get_cmdclass",
"numpy.get_include",
"setuptools.find_packages"
] |
[((1487, 1509), 'Cython.Build.cythonize', 'cythonize', (['ext_modules'], {}), '(ext_modules)\n', (1496, 1509), False, 'from Cython.Build import cythonize\n'), ((1809, 1833), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (1831, 1833), False, 'import versioneer\n'), ((1848, 1873), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (1871, 1873), False, 'import versioneer\n'), ((2126, 2141), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2139, 2141), False, 'from setuptools import setup, find_packages, Extension\n'), ((1297, 1316), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1314, 1316), False, 'import numpy\n')]
|
# Here we provide the key functions for tile-coding. To avoid a huge dimensionality expansion, we tile
# each feature variable separately; with feature-column crossing, pairs of feature variables (and
# higher-order combinations) can be tiled as well. A short usage sketch is appended at the end of this file.
from typing import List
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
class Tilings(object):
def __init__(self, tile_strategy_boundaries, num_tilings):
self.num_tilings = num_tilings
self.tile_strategy_boundaries = tile_strategy_boundaries
def _get_stack_tiling_boundaries(self, boundaries) -> List[List[float]]:
boundaries = np.array(boundaries)
each_bucket_resolution = np.array(
[float(boundaries[i + 1] - boundaries[i]) / self.num_tilings for i in range(len(boundaries) - 1)] + [0])
return [list(boundaries + i * each_bucket_resolution) for i in range(self.num_tilings)]
@staticmethod
def _get_tiles(input_data, list_boundaries: List[List[float]]):
all_tiles = []
input_tensor = tf.cast(input_data, tf.float64)
for i, boundaries in enumerate(list_boundaries):
bucketized_tensor = math_ops.bucketize(input_tensor, boundaries)
bucketized_tensor = tf.reshape(bucketized_tensor, (-1, 1))
bucketized_tensor = tf.math.add(bucketized_tensor, i * (len(boundaries) - 1))
all_tiles.append(bucketized_tensor)
return tf.concat(all_tiles, axis=1)
def get_features_tiles(self, features):
features_tiles = dict()
for feature_name, boundaries in self.tile_strategy_boundaries.items():
list_boundaries = self._get_stack_tiling_boundaries(boundaries)
features_tiles[feature_name] = Tilings._get_tiles(features[feature_name], list_boundaries)
return features_tiles
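# --- Hedged usage sketch (not part of the original module; the feature name and
# --- bucket boundaries below are illustrative assumptions only) ---
if __name__ == "__main__":
    # One feature, five buckets, four stacked (offset) tilings.
    boundaries = {"age": [0.0, 18.0, 35.0, 50.0, 65.0, 100.0]}
    tilings = Tilings(tile_strategy_boundaries=boundaries, num_tilings=4)
    features = {"age": tf.constant([12.0, 40.0, 70.0])}
    tiles = tilings.get_features_tiles(features)
    # Shape (3, 4): one tile index per tiling, offset into disjoint index ranges.
    print(tiles["age"])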
|
[
"tensorflow.python.ops.math_ops.bucketize",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.cast",
"numpy.array"
] |
[((652, 672), 'numpy.array', 'np.array', (['boundaries'], {}), '(boundaries)\n', (660, 672), True, 'import numpy as np\n'), ((1062, 1093), 'tensorflow.cast', 'tf.cast', (['input_data', 'tf.float64'], {}), '(input_data, tf.float64)\n', (1069, 1093), True, 'import tensorflow as tf\n'), ((1452, 1480), 'tensorflow.concat', 'tf.concat', (['all_tiles'], {'axis': '(1)'}), '(all_tiles, axis=1)\n', (1461, 1480), True, 'import tensorflow as tf\n'), ((1183, 1227), 'tensorflow.python.ops.math_ops.bucketize', 'math_ops.bucketize', (['input_tensor', 'boundaries'], {}), '(input_tensor, boundaries)\n', (1201, 1227), False, 'from tensorflow.python.ops import math_ops\n'), ((1260, 1298), 'tensorflow.reshape', 'tf.reshape', (['bucketized_tensor', '(-1, 1)'], {}), '(bucketized_tensor, (-1, 1))\n', (1270, 1298), True, 'import tensorflow as tf\n')]
|
import tensorflow as tf
import numpy as np
import src.utils as utils
"""
Implementation of InfoVAE
https://arxiv.org/abs/1706.02262
"""
def reparameterise(x, n, stddev):
"""
    Model each output as being Gaussian distributed.
Use the reparameterisation trick so we can sample while remaining
differentiable.
"""
with tf.name_scope('reparameterise'):
z_mean = x[:,:,:,:n]
z_stddev = x[:,:,:,n:]
e = tf.random_normal(tf.shape(z_mean), stddev=stddev)
# TODO log_var or stddev?
return z_mean + tf.square(z_stddev)*e
def compute_kernel(x, y):
"""
    Compute the distance between x and y using a Gaussian kernel.
"""
x_size = tf.shape(x)[0]
y_size = tf.shape(y)[0]
dim = tf.shape(x)[1]
tiled_x = tf.tile(tf.reshape(x, [x_size, 1, dim]), [1, y_size, 1])
tiled_y = tf.tile(tf.reshape(y, [1, y_size, dim]), [x_size, 1, 1])
return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))
def compute_mmd(x, y):
"""
    Calculate the maximum mean discrepancy (MMD).
"""
x_kernel = compute_kernel(x, x)
y_kernel = compute_kernel(y, y)
xy_kernel = compute_kernel(x, y)
return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)
def gaussian_d(x, y):
"""
    Note: a conceptual gap here.
    Do I need a dx to calculate this over?
    It doesn't make sense for a single point!?
"""
d = tf.norm(x - y, axis=1)
return tf.exp(-0.5*d)/(tf.sqrt(2*tf.constant(np.pi)))
def pz(z):
"""
Estimate p(z) using our prior on z.
"""
z = tf.layers.flatten(z)
return gaussian_d(z , tf.zeros_like(z))
def px_z(x_, y):
# the added noise in the hidden layer.
return gaussian_d(tf.layers.flatten(y[:,:,:,:1]),
tf.layers.flatten(x_))
def pz_x(h, z):
# the added noise in the final layer.
shape = h.get_shape().as_list()
return gaussian_d(tf.layers.flatten(h[:,:,:,:shape[-1]//2]),
tf.layers.flatten(z))
def p_bayes(x_, y, h, z):
"""
If p(z | x) is far away from p(z) then p(x) is low
p(x) = p(x | z) p(z) / p(z | x)
"""
return px_z(x_, y) * pz(z) / pz_x(h, z)
# def KL_divergence(p, q):
# return tf.reduce_sum(p * tf.log(p/q), axis=-1)
#
# def bayesian_surprise(z):
# """
#
# """
# return kl(z, prior)
class InfoVAE():
def __init__(self, n_hidden, width, depth, stddev=0.0001):
"""
Args:
"""
self.n_hidden = n_hidden
self.width = width
self.depth = depth
self.n_channels = 1
self.stddev = stddev
self.construct()
def construct(self):
"""
Constructs:
encoder (tf.keras.Model): encode the gradient into the hidden space
decoder (tf.keras.Model): decodes a hidden state into an image
"""
layers = []
layers.append(tf.keras.layers.Conv2D(self.width, 4, strides=(2, 2),
padding='same',
# input_shape=(28,28,1)
))
layers.append(tf.keras.layers.Activation(tf.keras.activations.selu))
for i in range(self.depth):
layers.append(tf.keras.layers.Conv2D(self.width,
4,
strides=(2, 2),
padding='same'),)
layers.append(tf.keras.layers.Activation(tf.keras.activations.selu))
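        # Final conv outputs 2*n_hidden channels: the first half is the mean, the
        # second half the spread used by reparameterise.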
layers.append(tf.keras.layers.Conv2D(self.n_hidden*2,
1,
strides=(1, 1),
padding='same'))
self.encoder = tf.keras.Sequential(layers)
# decoder
layers = []
layers.append(tf.keras.layers.Conv2DTranspose(self.width, 4, strides=(2, 2),
padding='same',
# input_shape=(1,1,self.n_hidden)
))
layers.append(tf.keras.layers.Activation(tf.keras.activations.selu))
for _ in range(self.depth):
layers.append(tf.keras.layers.Conv2DTranspose(self.width, 4, strides=(2, 2), padding='same'))
layers.append(tf.keras.layers.Activation(tf.keras.activations.selu))
layers.append(tf.keras.layers.Conv2DTranspose(self.n_channels*2, 1, strides=(1, 1), padding='same'))
self.decoder = tf.keras.Sequential(layers)
def __call__(self, x):
"""
Args:
x (tf.tensor): the input
shape is [None, width, height, channels],
dtype is tf.float32
"""
with tf.name_scope('infovae'):
self.h = self.encoder(x)
self.z = reparameterise(self.h, self.n_hidden, self.stddev)
self.y = self.decoder(self.z)
self.x_ = reparameterise(self.y, self.n_channels, self.stddev)
return self.x_
def make_losses(self, x, y=None):
self.x = x
if y is None:
print('...')
y = self.__call__(self.x)
with tf.name_scope('loss'):
recon_loss = tf.losses.sigmoid_cross_entropy(
logits=tf.layers.flatten(y),
multi_class_labels=tf.layers.flatten(self.x))
latent_loss = compute_mmd(tf.layers.flatten(self.z),
tf.layers.flatten(tf.random_normal(shape=tf.shape(self.z))))
return recon_loss, latent_loss
def make_contractive_loss(self):
# assumes make_losses has already been called
print(self.h, self.x)
dhdx = tf.gradients(self.h, self.x)[0]
print(dhdx)
if dhdx is None:
raise ValueError()
return tf.reduce_mean(tf.reduce_sum(tf.square(dhdx), axis=[1,2,3]))
def estimate_density(self, x):
x_ = self.__call__(x)
return p_bayes(x_, self.y, self.h, self.z)
@staticmethod
def preprocess(x):
im = np.reshape(x, [-1, 28, 28, 1])
im = np.round(im).astype(np.float32) # NOTE important !?
return np.pad(im, [(0,0), (2,2), (2,2), (0,0)], 'constant', constant_values=0)
if __name__ == '__main__':
tf.enable_eager_execution()
x = tf.random_normal((100, 28, 28, 1))
nn = InfoVAE(12, 16, 3)
x_ = nn(x)
# loss = nn.make_losses(x)
assert x_.shape == x.shape
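    # --- Hedged addition: a quick MMD sanity check with illustrative shapes ---
    # Two samples drawn from the same distribution should give a small discrepancy.
    mmd = compute_mmd(tf.random_normal((64, 8)), tf.random_normal((64, 8)))
    print('MMD between two standard-normal samples:', float(mmd))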
|
[
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.keras.Sequential",
"numpy.round",
"numpy.pad",
"tensorflow.cast",
"tensorflow.keras.layers.Activation",
"tensorflow.exp",
"numpy.reshape",
"tensorflow.gradients",
"tensorflow.name_scope",
"tensorflow.norm",
"tensorflow.layers.flatten",
"tensorflow.reduce_mean",
"tensorflow.constant",
"tensorflow.random_normal",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.enable_eager_execution",
"tensorflow.keras.layers.Conv2D",
"tensorflow.shape",
"tensorflow.square"
] |
[((1473, 1495), 'tensorflow.norm', 'tf.norm', (['(x - y)'], {'axis': '(1)'}), '(x - y, axis=1)\n', (1480, 1495), True, 'import tensorflow as tf\n'), ((1630, 1650), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['z'], {}), '(z)\n', (1647, 1650), True, 'import tensorflow as tf\n'), ((6352, 6379), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (6377, 6379), True, 'import tensorflow as tf\n'), ((6388, 6422), 'tensorflow.random_normal', 'tf.random_normal', (['(100, 28, 28, 1)'], {}), '((100, 28, 28, 1))\n', (6404, 6422), True, 'import tensorflow as tf\n'), ((339, 370), 'tensorflow.name_scope', 'tf.name_scope', (['"""reparameterise"""'], {}), "('reparameterise')\n", (352, 370), True, 'import tensorflow as tf\n'), ((697, 708), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (705, 708), True, 'import tensorflow as tf\n'), ((725, 736), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (733, 736), True, 'import tensorflow as tf\n'), ((750, 761), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (758, 761), True, 'import tensorflow as tf\n'), ((787, 818), 'tensorflow.reshape', 'tf.reshape', (['x', '[x_size, 1, dim]'], {}), '(x, [x_size, 1, dim])\n', (797, 818), True, 'import tensorflow as tf\n'), ((858, 889), 'tensorflow.reshape', 'tf.reshape', (['y', '[1, y_size, dim]'], {}), '(y, [1, y_size, dim])\n', (868, 889), True, 'import tensorflow as tf\n'), ((1507, 1523), 'tensorflow.exp', 'tf.exp', (['(-0.5 * d)'], {}), '(-0.5 * d)\n', (1513, 1523), True, 'import tensorflow as tf\n'), ((1677, 1693), 'tensorflow.zeros_like', 'tf.zeros_like', (['z'], {}), '(z)\n', (1690, 1693), True, 'import tensorflow as tf\n'), ((1778, 1811), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['y[:, :, :, :1]'], {}), '(y[:, :, :, :1])\n', (1795, 1811), True, 'import tensorflow as tf\n'), ((1832, 1853), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['x_'], {}), '(x_)\n', (1849, 1853), True, 'import tensorflow as tf\n'), ((1972, 2018), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['h[:, :, :, :shape[-1] // 2]'], {}), '(h[:, :, :, :shape[-1] // 2])\n', (1989, 2018), True, 'import tensorflow as tf\n'), ((2037, 2057), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['z'], {}), '(z)\n', (2054, 2057), True, 'import tensorflow as tf\n'), ((3812, 3839), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['layers'], {}), '(layers)\n', (3831, 3839), True, 'import tensorflow as tf\n'), ((4581, 4608), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['layers'], {}), '(layers)\n', (4600, 4608), True, 'import tensorflow as tf\n'), ((6136, 6166), 'numpy.reshape', 'np.reshape', (['x', '[-1, 28, 28, 1]'], {}), '(x, [-1, 28, 28, 1])\n', (6146, 6166), True, 'import numpy as np\n'), ((6248, 6323), 'numpy.pad', 'np.pad', (['im', '[(0, 0), (2, 2), (2, 2), (0, 0)]', '"""constant"""'], {'constant_values': '(0)'}), "(im, [(0, 0), (2, 2), (2, 2), (0, 0)], 'constant', constant_values=0)\n", (6254, 6323), True, 'import numpy as np\n'), ((461, 477), 'tensorflow.shape', 'tf.shape', (['z_mean'], {}), '(z_mean)\n', (469, 477), True, 'import tensorflow as tf\n'), ((981, 1005), 'tensorflow.cast', 'tf.cast', (['dim', 'tf.float32'], {}), '(dim, tf.float32)\n', (988, 1005), True, 'import tensorflow as tf\n'), ((1211, 1235), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x_kernel'], {}), '(x_kernel)\n', (1225, 1235), True, 'import tensorflow as tf\n'), ((1238, 1262), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['y_kernel'], {}), '(y_kernel)\n', (1252, 1262), True, 'import 
tensorflow as tf\n'), ((1269, 1294), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['xy_kernel'], {}), '(xy_kernel)\n', (1283, 1294), True, 'import tensorflow as tf\n'), ((2954, 3023), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['self.width', '(4)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(self.width, 4, strides=(2, 2), padding='same')\n", (2976, 3023), True, 'import tensorflow as tf\n'), ((3178, 3231), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['tf.keras.activations.selu'], {}), '(tf.keras.activations.selu)\n', (3204, 3231), True, 'import tensorflow as tf\n'), ((3617, 3693), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(self.n_hidden * 2)', '(1)'], {'strides': '(1, 1)', 'padding': '"""same"""'}), "(self.n_hidden * 2, 1, strides=(1, 1), padding='same')\n", (3639, 3693), True, 'import tensorflow as tf\n'), ((3901, 3979), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['self.width', '(4)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(self.width, 4, strides=(2, 2), padding='same')\n", (3932, 3979), True, 'import tensorflow as tf\n'), ((4171, 4224), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['tf.keras.activations.selu'], {}), '(tf.keras.activations.selu)\n', (4197, 4224), True, 'import tensorflow as tf\n'), ((4471, 4562), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(self.n_channels * 2)', '(1)'], {'strides': '(1, 1)', 'padding': '"""same"""'}), "(self.n_channels * 2, 1, strides=(1, 1),\n padding='same')\n", (4502, 4562), True, 'import tensorflow as tf\n'), ((4819, 4843), 'tensorflow.name_scope', 'tf.name_scope', (['"""infovae"""'], {}), "('infovae')\n", (4832, 4843), True, 'import tensorflow as tf\n'), ((5255, 5276), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (5268, 5276), True, 'import tensorflow as tf\n'), ((5780, 5808), 'tensorflow.gradients', 'tf.gradients', (['self.h', 'self.x'], {}), '(self.h, self.x)\n', (5792, 5808), True, 'import tensorflow as tf\n'), ((553, 572), 'tensorflow.square', 'tf.square', (['z_stddev'], {}), '(z_stddev)\n', (562, 572), True, 'import tensorflow as tf\n'), ((1533, 1551), 'tensorflow.constant', 'tf.constant', (['np.pi'], {}), '(np.pi)\n', (1544, 1551), True, 'import tensorflow as tf\n'), ((3295, 3364), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['self.width', '(4)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(self.width, 4, strides=(2, 2), padding='same')\n", (3317, 3364), True, 'import tensorflow as tf\n'), ((3540, 3593), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['tf.keras.activations.selu'], {}), '(tf.keras.activations.selu)\n', (3566, 3593), True, 'import tensorflow as tf\n'), ((4288, 4366), 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['self.width', '(4)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(self.width, 4, strides=(2, 2), padding='same')\n", (4319, 4366), True, 'import tensorflow as tf\n'), ((4394, 4447), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['tf.keras.activations.selu'], {}), '(tf.keras.activations.selu)\n', (4420, 4447), True, 'import tensorflow as tf\n'), ((5481, 5506), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['self.z'], {}), '(self.z)\n', (5498, 5506), True, 'import tensorflow as tf\n'), ((5932, 5947), 'tensorflow.square', 'tf.square', (['dhdx'], {}), '(dhdx)\n', (5941, 5947), True, 'import tensorflow 
as tf\n'), ((6180, 6192), 'numpy.round', 'np.round', (['im'], {}), '(im)\n', (6188, 6192), True, 'import numpy as np\n'), ((941, 969), 'tensorflow.square', 'tf.square', (['(tiled_x - tiled_y)'], {}), '(tiled_x - tiled_y)\n', (950, 969), True, 'import tensorflow as tf\n'), ((5359, 5379), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['y'], {}), '(y)\n', (5376, 5379), True, 'import tensorflow as tf\n'), ((5416, 5441), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['self.x'], {}), '(self.x)\n', (5433, 5441), True, 'import tensorflow as tf\n'), ((5583, 5599), 'tensorflow.shape', 'tf.shape', (['self.z'], {}), '(self.z)\n', (5591, 5599), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python3
import argparse
def parse_args():
p = argparse.ArgumentParser()
p.add_argument('path', type=str)
p.add_argument('-m', '--minpow', type=int, default=3)
p.add_argument('-M', '--maxpow', type=int, default=7)
p.add_argument('-s', '--step', type=int, default=2)
p.add_argument('-t', '--trials', type=int, default=10)
p.add_argument('--speed_funcs', type=str)
return p.parse_args()
# We do this ahead of time so that if we end up only printing the
# usage message we don't bother with the other (e.g. MPI-related)
# setup below here
if __name__ == '__main__':
args = parse_args()
import sys
if '../../build/Release' not in sys.path:
sys.path.insert(0, '../../build/Release')
import pyolim as olim
import h5py
import mpi4py.MPI
import numpy as np
import os.path
from common3d import compute_soln, get_exact_soln, get_marcher_name, marchers, \
time_marcher
from itertools import product
from speedfuncs3d import get_speed_func_name, get_speed_func_by_name, \
get_soln_func, speed_funcs
comm = mpi4py.MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
def rms(x):
y = x.flatten()
n = y.size
assert(n > 0)
return np.sqrt(y.dot(y)/n)
def linf_error(x):
return np.linalg.norm(x.flatten(), np.inf)
def get_ns(args):
minpow = args.minpow
maxpow = args.maxpow
steps = args.step
ns = np.logspace(minpow, maxpow, steps*(maxpow - minpow) + 1, base=2)
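    # Round each size to the nearest even value and add 1 so every grid size is odd
    # (presumably so a grid node lands exactly at the domain center).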
return (2*np.round(ns/2)).astype(int) + 1
def get_dataset_name(Marcher, s):
mname = get_marcher_name(Marcher)
sname = get_speed_func_name(s)
return '%s/%s' % (mname.replace(' ', '_'), sname)
def create_datasets(f, M_by_s, ns):
for Marcher, s in M_by_s:
name = get_dataset_name(Marcher, s)
        f.create_dataset(name + '/n', (len(ns),), dtype=np.int64)
        for n in ns:
            shape = (n, n, n)
            f.create_dataset(name + '/u' + str(n), shape, dtype=np.float64)
            f.create_dataset(name + '/U' + str(n), shape, dtype=np.float64)
        f.create_dataset(name + '/rms', (len(ns),), dtype=np.float64)
        f.create_dataset(name + '/max', (len(ns),), dtype=np.float64)
        f.create_dataset(name + '/t', (len(ns),), dtype=np.float64)
def populate_datasets(Marcher, s, ns, t):
name = get_dataset_name(Marcher, s)
print(name)
f[name + '/n'][:] = ns
print('- computing exact solutions')
us = [get_exact_soln(get_soln_func(s), n) for n in ns]
for n, u in zip(ns, us):
f[name + '/u' + str(n)][:, :, :] = u
print('- computing numerical solutions')
Us = [compute_soln(Marcher, s, n) for n in ns]
for n, U in zip(ns, Us):
f[name + '/U' + str(n)][:, :, :] = U
print('- evaluating errors')
f[name + '/rms'][:] = [rms(u - U) for u, U in zip(us, Us)]
f[name + '/max'][:] = [linf_error(u - U) for u, U in zip(us, Us)]
print('- collecting CPU times')
f[name + '/t'][:] = [time_marcher(Marcher, s, n, ntrials=t) for n in ns]
if __name__ == '__main__':
with h5py.File(args.path, 'w', driver='mpio', comm=comm) as f:
if args.speed_funcs is not None:
speed_funcs_ = [
get_speed_func_by_name(name) for name in
args.speed_funcs.split(',')]
else:
speed_funcs_ = speed_funcs()
ns = get_ns(args)
if rank == 0:
print('Test problem sizes: ' + ', '.join(map(str, ns)))
if rank == 0:
print('Creating datasets')
create_datasets(f, product(marchers, speed_funcs_), ns)
for i, (Marcher, s) in enumerate(product(marchers, speed_funcs_)):
if i % size != rank:
continue
populate_datasets(Marcher, s, ns, args.trials)
|
[
"speedfuncs3d.speed_funcs",
"common3d.time_marcher",
"h5py.File",
"argparse.ArgumentParser",
"common3d.get_marcher_name",
"numpy.logspace",
"sys.path.insert",
"speedfuncs3d.get_soln_func",
"itertools.product",
"numpy.round",
"common3d.compute_soln",
"speedfuncs3d.get_speed_func_name",
"speedfuncs3d.get_speed_func_by_name"
] |
[((67, 92), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (90, 92), False, 'import argparse\n'), ((694, 735), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../build/Release"""'], {}), "(0, '../../build/Release')\n", (709, 735), False, 'import sys\n'), ((1395, 1461), 'numpy.logspace', 'np.logspace', (['minpow', 'maxpow', '(steps * (maxpow - minpow) + 1)'], {'base': '(2)'}), '(minpow, maxpow, steps * (maxpow - minpow) + 1, base=2)\n', (1406, 1461), True, 'import numpy as np\n'), ((1553, 1578), 'common3d.get_marcher_name', 'get_marcher_name', (['Marcher'], {}), '(Marcher)\n', (1569, 1578), False, 'from common3d import compute_soln, get_exact_soln, get_marcher_name, marchers, time_marcher\n'), ((1591, 1613), 'speedfuncs3d.get_speed_func_name', 'get_speed_func_name', (['s'], {}), '(s)\n', (1610, 1613), False, 'from speedfuncs3d import get_speed_func_name, get_speed_func_by_name, get_soln_func, speed_funcs\n'), ((2602, 2629), 'common3d.compute_soln', 'compute_soln', (['Marcher', 's', 'n'], {}), '(Marcher, s, n)\n', (2614, 2629), False, 'from common3d import compute_soln, get_exact_soln, get_marcher_name, marchers, time_marcher\n'), ((2946, 2984), 'common3d.time_marcher', 'time_marcher', (['Marcher', 's', 'n'], {'ntrials': 't'}), '(Marcher, s, n, ntrials=t)\n', (2958, 2984), False, 'from common3d import compute_soln, get_exact_soln, get_marcher_name, marchers, time_marcher\n'), ((3036, 3087), 'h5py.File', 'h5py.File', (['args.path', '"""w"""'], {'driver': '"""mpio"""', 'comm': 'comm'}), "(args.path, 'w', driver='mpio', comm=comm)\n", (3045, 3087), False, 'import h5py\n'), ((2438, 2454), 'speedfuncs3d.get_soln_func', 'get_soln_func', (['s'], {}), '(s)\n', (2451, 2454), False, 'from speedfuncs3d import get_speed_func_name, get_speed_func_by_name, get_soln_func, speed_funcs\n'), ((3308, 3321), 'speedfuncs3d.speed_funcs', 'speed_funcs', ([], {}), '()\n', (3319, 3321), False, 'from speedfuncs3d import get_speed_func_name, get_speed_func_by_name, get_soln_func, speed_funcs\n'), ((3528, 3559), 'itertools.product', 'product', (['marchers', 'speed_funcs_'], {}), '(marchers, speed_funcs_)\n', (3535, 3559), False, 'from itertools import product\n'), ((3607, 3638), 'itertools.product', 'product', (['marchers', 'speed_funcs_'], {}), '(marchers, speed_funcs_)\n', (3614, 3638), False, 'from itertools import product\n'), ((3181, 3209), 'speedfuncs3d.get_speed_func_by_name', 'get_speed_func_by_name', (['name'], {}), '(name)\n', (3203, 3209), False, 'from speedfuncs3d import get_speed_func_name, get_speed_func_by_name, get_soln_func, speed_funcs\n'), ((1474, 1490), 'numpy.round', 'np.round', (['(ns / 2)'], {}), '(ns / 2)\n', (1482, 1490), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 18:03:59 2016
@author: jones_000
"""
import copy as cp
import numpy as np
import math
import Solver
import Physics
import Body
import vector
import matplotlib.pyplot as plt
class Simulation(object):
'''Parent Simulation class
Attributes
----------
stop_condition : callable
sets the stop condition for simulation
physics : Physics
the physics being simulated with a solver
body : array, GravBody
the array of bodies with position, velocity, and mass
'''
def __init__(self,stop_condition=None,physics=None,body=None):
'''Make body a list'''
if type(body) == list:
self.body = body
else:
self.body = [body]
self.physics = physics
self.stop_condition = stop_condition
def get_results(self):
'''This advances the sim and returns results'''
body = self.body
time = 0
self.bodies = [cp.deepcopy(self.body)]
self.t = [0]
while self.stop_condition(self.bodies) == True:
body, time = self.physics.advance(body,time)
self.bodies.append(cp.deepcopy(body))
self.t.append(time)
return self.t, self.bodies
class OrbitSim(Simulation):
'''Drives the Central Grav sim for orbits
Attributes
----------
stop_condition : callable
sets the stop condition for simulation
physics : Physics
the physics being simulated with a solver
body : GravBody
the body with position, velocity, and mass
'''
def __init__(self,stop_condition=None,physics=None,body=None,apnd=True):
Simulation.__init__(self,stop_condition,physics,body)
self.bodies = [cp.deepcopy(self.body)]
self.t = [0]
self.apnd = apnd
def get_results(self):
'''Returns time and bodies lists'''
return self.t, self.bodies
def advance(self,time=None,step=None):
'''Advances sim to a certain time or step
Parameters
----------
time : float
the target time for the sim
step : float
the number of steps to run
'''
if time != None:
dt = self.physics.solver.stepsize
time = time - dt
self.run(self.time_stop(time))
self.physics.solver.stepsize = time + dt - self.t[-1]
self.run(self.time_stop(time+dt))
self.physics.solver.stepsize = dt
if step != None:
self.run(self.step_stop(step))
if time == None and step == None:
self.run(self.stop_condition)
def step_stop(self,step):
'''Reference to stop function to end at a certain step'''
def stop(time,bodies):
            steps = math.floor(time/self.physics.solver.stepsize)
if steps < step:
return True
else:
return False
return stop
def time_stop(self,goal):
'''Reference to a stop function to end at a certain time'''
def stop(time,bodies):
if time < goal:
return True
else:
return False
return stop
def run(self,stop_condition):
'''Internal run function that advances bodies
Parameters
----------
stop_condition : callable
the stop function to end the sim
'''
time = self.t[-1]
while stop_condition(time,self.bodies) == True:
self.body, time = self.physics.advance(self.body,time)
if self.apnd:
self.bodies.append(cp.deepcopy(self.body))
self.t.append(time)
if not self.apnd:
self.bodies.append(cp.deepcopy(self.body))
self.t.append(cp.deepcopy(time))
class BinarySim(OrbitSim):
'''Takes in Elliptical Inputs and produces a Binary Sim
Attributes
----------
M1 : float
mass of the first body
M2 : float
mass of the second body
a1 : float
the semi-major axis of the first body's orbit
e : float
the orbits' eccentricity
'''
def __init__(self,M1=None,M2=None,a1=1,e=0,apnd=True):
'''Build GravBodies'''
self.G = 4*math.pi**2.
r1p = a1-e*a1
r2p = -(M1/M2)*r1p
v1p = math.sqrt(((self.G*M2**3.)/(a1*(M1+M2)**2.))*((1.+e)/(1.-e)))
v2p = -(M1/M2)*v1p
r1 = vector.Vector(r1p,0.,0.)
r2 = vector.Vector(r2p,0.,0.)
v1 = vector.Vector(0.,v1p,0.)
v2 = vector.Vector(0.,v2p,0.)
body1 = Body.GravBody(M1,r1,v1)
body2 = Body.GravBody(M2,r2,v2)
'''Set up Sim'''
self.body = [body1,body2]
solver = Solver.RK2(0.01)
self.physics = Physics.NBody(solver,self.G)
self.bodies = [cp.deepcopy(self.body)]
self.t = [0]
self.apnd = apnd
class ExoSim(BinarySim):
    '''Runs a sim for an exoplanet search
Attributes
----------
Ms : float
mass of the star
Mp : float
        mass of the planet
ap : float
the semi-major axis of the planet's orbit
e : float
the orbits' eccentricity
Rs : float
the radius of the star in Solar Radii
Rp : float
the radius of the planet in Solar Radii
omega : float
the angle of periastron
i : float
the angle of inclination
'''
def __init__(self,Ms=None,Mp=None,ap=None,e=0,Rs=None,Rp=None,omega=None,i=None,apnd=True):
'''Save Values'''
self.apnd = apnd
self.Rs = Rs
self.Rp = Rp
self.G = 4*math.pi**2.
self.period = np.sqrt(((ap**3.)*((Ms+Mp)**2.))/Ms**3.)
'''Set up Vectors'''
rpp = ap-e*ap
rsp = -(Mp/Ms)*rpp
vpp = math.sqrt(((self.G*Ms**3.)/(ap*(Ms+Mp)**2.))*((1.+e)/(1.-e)))
vsp = -(Mp/Ms)*vpp
'''Rotate Vectors into Viewer frame'''
rs = vector.Vector(rsp,0.,0.)
rs.rot_z(omega)
rs.rot_x(i)
rp = vector.Vector(rpp,0.,0.)
rp.rot_z(omega)
rp.rot_x(i)
vs = vector.Vector(0.,vsp,0.)
vs.rot_z(omega)
vs.rot_x(i)
vp = vector.Vector(0.,vpp,0.)
vp.rot_z(omega)
vp.rot_x(i)
'''Set Up Sim'''
star = Body.GravBody(Ms,rs,vs)
planet = Body.GravBody(Mp,rp,vp)
self.body = [star,planet]
solver = Solver.RK2(0.01)
self.physics = Physics.NBody(solver,self.G)
self.bodies = [cp.deepcopy(self.body)]
self.t = [0]
def advance(self,time=None):
'''Advances Sim to a certain time or for one orbital period
Parameters
----------
time : float
the target time for the simulation, defaults to one orbital period
'''
if time == None:
time = self.period
dt = self.physics.solver.stepsize
time = time - dt
self.run(self.time_stop(time))
self.physics.solver.stepsize = time + dt - self.t[-1]
self.run(self.time_stop(time+dt))
self.physics.solver.stepsize = dt
def light_curve(self,time,bodies):
'''Creates and plots an exoplanet transit light curve for the orbit
        Parameters
        ----------
        time : list, float
            a list of the independent variable, time
        bodies : list, GravBody
            a list of the GravBody objects at each time in the time list
Returns
-------
a graph of the light curve
'''
r_list = np.array([b[0].position - b[1].position for b in bodies])
p = np.array([r.cart for r in r_list])
d = np.sqrt((p[:,0])**2. + (p[:,1])**2.)
x = (self.Rp**2. - self.Rs**2. + d**2.)/(2.*d)
h = np.sqrt(self.Rp**2. - x**2.)
theta = np.arccos(x/self.Rp)
psi = np.arccos((d-x)/self.Rs)
'''Areas of Arcs and Triangles'''
a1 = 0.5*x*h
ap = 0.5*theta*(self.Rp**2.)
A1 = ap - a1
a2 = 0.5*(d-x)*h
As = 0.5*psi*(self.Rs**2.)
A2 = As -a2
A = 2*(A1 + A2)
'''Fix Failures'''
A[d>=(self.Rp+self.Rs)] = 0.
A[d<=(self.Rs-self.Rp)] = np.pi*(self.Rp**2.)
A[p[:,2]<=0] = 0
I = ((np.pi*self.Rs**2.) - A)/(np.pi*self.Rs**2.)
plt.figure()
plt.plot(time,I,'.')
plt.title('Exo Planet Light Curve')
plt.xlabel('Time [Years]')
plt.ylabel('Intensity')
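# --- Hedged usage sketch (not part of the original module) ---
# Parameter values are illustrative only; the stellar and planetary radii are
# exaggerated so the transit is visible at the default 0.01-year step size.
if __name__ == '__main__':
    sim = ExoSim(Ms=1.0, Mp=1e-3, ap=1.0, e=0.0,
                 Rs=0.1, Rp=0.02, omega=0.0, i=math.pi/2.0, apnd=True)
    sim.advance()                    # defaults to one orbital period
    t, bodies = sim.get_results()
    sim.light_curve(t, bodies)
    plt.show()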
|
[
"matplotlib.pyplot.title",
"Solver.RK2",
"copy.deepcopy",
"math.sqrt",
"matplotlib.pyplot.plot",
"math.floor",
"Physics.NBody",
"matplotlib.pyplot.figure",
"Body.GravBody",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"vector.Vector",
"numpy.arccos",
"numpy.sqrt"
] |
[((4569, 4654), 'math.sqrt', 'math.sqrt', (['(self.G * M2 ** 3.0 / (a1 * (M1 + M2) ** 2.0) * ((1.0 + e) / (1.0 - e)))'], {}), '(self.G * M2 ** 3.0 / (a1 * (M1 + M2) ** 2.0) * ((1.0 + e) / (1.0 -\n e)))\n', (4578, 4654), False, 'import math\n'), ((4671, 4699), 'vector.Vector', 'vector.Vector', (['r1p', '(0.0)', '(0.0)'], {}), '(r1p, 0.0, 0.0)\n', (4684, 4699), False, 'import vector\n'), ((4709, 4737), 'vector.Vector', 'vector.Vector', (['r2p', '(0.0)', '(0.0)'], {}), '(r2p, 0.0, 0.0)\n', (4722, 4737), False, 'import vector\n'), ((4747, 4775), 'vector.Vector', 'vector.Vector', (['(0.0)', 'v1p', '(0.0)'], {}), '(0.0, v1p, 0.0)\n', (4760, 4775), False, 'import vector\n'), ((4785, 4813), 'vector.Vector', 'vector.Vector', (['(0.0)', 'v2p', '(0.0)'], {}), '(0.0, v2p, 0.0)\n', (4798, 4813), False, 'import vector\n'), ((4826, 4851), 'Body.GravBody', 'Body.GravBody', (['M1', 'r1', 'v1'], {}), '(M1, r1, v1)\n', (4839, 4851), False, 'import Body\n'), ((4866, 4891), 'Body.GravBody', 'Body.GravBody', (['M2', 'r2', 'v2'], {}), '(M2, r2, v2)\n', (4879, 4891), False, 'import Body\n'), ((4975, 4991), 'Solver.RK2', 'Solver.RK2', (['(0.01)'], {}), '(0.01)\n', (4985, 4991), False, 'import Solver\n'), ((5015, 5044), 'Physics.NBody', 'Physics.NBody', (['solver', 'self.G'], {}), '(solver, self.G)\n', (5028, 5044), False, 'import Physics\n'), ((5985, 6034), 'numpy.sqrt', 'np.sqrt', (['(ap ** 3.0 * (Ms + Mp) ** 2.0 / Ms ** 3.0)'], {}), '(ap ** 3.0 * (Ms + Mp) ** 2.0 / Ms ** 3.0)\n', (5992, 6034), True, 'import numpy as np\n'), ((6127, 6212), 'math.sqrt', 'math.sqrt', (['(self.G * Ms ** 3.0 / (ap * (Ms + Mp) ** 2.0) * ((1.0 + e) / (1.0 - e)))'], {}), '(self.G * Ms ** 3.0 / (ap * (Ms + Mp) ** 2.0) * ((1.0 + e) / (1.0 -\n e)))\n', (6136, 6212), False, 'import math\n'), ((6285, 6313), 'vector.Vector', 'vector.Vector', (['rsp', '(0.0)', '(0.0)'], {}), '(rsp, 0.0, 0.0)\n', (6298, 6313), False, 'import vector\n'), ((6367, 6395), 'vector.Vector', 'vector.Vector', (['rpp', '(0.0)', '(0.0)'], {}), '(rpp, 0.0, 0.0)\n', (6380, 6395), False, 'import vector\n'), ((6449, 6477), 'vector.Vector', 'vector.Vector', (['(0.0)', 'vsp', '(0.0)'], {}), '(0.0, vsp, 0.0)\n', (6462, 6477), False, 'import vector\n'), ((6531, 6559), 'vector.Vector', 'vector.Vector', (['(0.0)', 'vpp', '(0.0)'], {}), '(0.0, vpp, 0.0)\n', (6544, 6559), False, 'import vector\n'), ((6649, 6674), 'Body.GravBody', 'Body.GravBody', (['Ms', 'rs', 'vs'], {}), '(Ms, rs, vs)\n', (6662, 6674), False, 'import Body\n'), ((6690, 6715), 'Body.GravBody', 'Body.GravBody', (['Mp', 'rp', 'vp'], {}), '(Mp, rp, vp)\n', (6703, 6715), False, 'import Body\n'), ((6765, 6781), 'Solver.RK2', 'Solver.RK2', (['(0.01)'], {}), '(0.01)\n', (6775, 6781), False, 'import Solver\n'), ((6805, 6834), 'Physics.NBody', 'Physics.NBody', (['solver', 'self.G'], {}), '(solver, self.G)\n', (6818, 6834), False, 'import Physics\n'), ((7950, 8009), 'numpy.array', 'np.array', (['[(b[0].position - b[1].position) for b in bodies]'], {}), '([(b[0].position - b[1].position) for b in bodies])\n', (7958, 8009), True, 'import numpy as np\n'), ((8020, 8054), 'numpy.array', 'np.array', (['[r.cart for r in r_list]'], {}), '([r.cart for r in r_list])\n', (8028, 8054), True, 'import numpy as np\n'), ((8067, 8107), 'numpy.sqrt', 'np.sqrt', (['(p[:, 0] ** 2.0 + p[:, 1] ** 2.0)'], {}), '(p[:, 0] ** 2.0 + p[:, 1] ** 2.0)\n', (8074, 8107), True, 'import numpy as np\n'), ((8171, 8205), 'numpy.sqrt', 'np.sqrt', (['(self.Rp ** 2.0 - x ** 2.0)'], {}), '(self.Rp ** 2.0 - x ** 2.0)\n', (8178, 8205), True, 'import numpy as np\n'), 
((8216, 8238), 'numpy.arccos', 'np.arccos', (['(x / self.Rp)'], {}), '(x / self.Rp)\n', (8225, 8238), True, 'import numpy as np\n'), ((8251, 8279), 'numpy.arccos', 'np.arccos', (['((d - x) / self.Rs)'], {}), '((d - x) / self.Rs)\n', (8260, 8279), True, 'import numpy as np\n'), ((8757, 8769), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8767, 8769), True, 'import matplotlib.pyplot as plt\n'), ((8778, 8800), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'I', '"""."""'], {}), "(time, I, '.')\n", (8786, 8800), True, 'import matplotlib.pyplot as plt\n'), ((8807, 8842), 'matplotlib.pyplot.title', 'plt.title', (['"""Exo Planet Light Curve"""'], {}), "('Exo Planet Light Curve')\n", (8816, 8842), True, 'import matplotlib.pyplot as plt\n'), ((8851, 8877), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [Years]"""'], {}), "('Time [Years]')\n", (8861, 8877), True, 'import matplotlib.pyplot as plt\n'), ((8886, 8909), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {}), "('Intensity')\n", (8896, 8909), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1053), 'copy.deepcopy', 'cp.deepcopy', (['self.body'], {}), '(self.body)\n', (1042, 1053), True, 'import copy as cp\n'), ((1846, 1868), 'copy.deepcopy', 'cp.deepcopy', (['self.body'], {}), '(self.body)\n', (1857, 1868), True, 'import copy as cp\n'), ((2939, 2987), 'math.floor', 'math.floor', (['(time / self.physics.solver.step_size)'], {}), '(time / self.physics.solver.step_size)\n', (2949, 2987), False, 'import math\n'), ((5067, 5089), 'copy.deepcopy', 'cp.deepcopy', (['self.body'], {}), '(self.body)\n', (5078, 5089), True, 'import copy as cp\n'), ((6857, 6879), 'copy.deepcopy', 'cp.deepcopy', (['self.body'], {}), '(self.body)\n', (6868, 6879), True, 'import copy as cp\n'), ((1220, 1237), 'copy.deepcopy', 'cp.deepcopy', (['body'], {}), '(body)\n', (1231, 1237), True, 'import copy as cp\n'), ((3939, 3961), 'copy.deepcopy', 'cp.deepcopy', (['self.body'], {}), '(self.body)\n', (3950, 3961), True, 'import copy as cp\n'), ((3989, 4006), 'copy.deepcopy', 'cp.deepcopy', (['time'], {}), '(time)\n', (4000, 4006), True, 'import copy as cp\n'), ((3822, 3844), 'copy.deepcopy', 'cp.deepcopy', (['self.body'], {}), '(self.body)\n', (3833, 3844), True, 'import copy as cp\n')]
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import os
import time
import argparse
import numpy as np
from PIL import Image
import cv2
from data.choose_config import cfg
cfg = cfg.cfg
from utils.augmentations import to_chw_bgr
from importlib import import_module
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='face detection demo')
parser.add_argument('--save_dir', type=str, default='results/',
help='Directory for detect result')
parser.add_argument('--model', type=str,
default='weights/rpool_face_c.pth', help='trained model')
parser.add_argument('--thresh', default=0.17, type=float,
help='Final confidence threshold')
parser.add_argument('--multigpu',
default=False, type=str2bool,
help='Specify whether model was trained with multigpu')
parser.add_argument('--model_arch',
default='RPool_Face_C', type=str,
choices=['RPool_Face_C', 'RPool_Face_Quant', 'RPool_Face_QVGA_monochrome', 'RPool_Face_M4'],
help='choose architecture among rpool variants')
parser.add_argument('--image_folder', default=None, type=str, help='folder containing images')
parser.add_argument('--save_traces',
default=False, type=str2bool,
help='Specify whether to save input output traces')
args = parser.parse_args()
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
def detect(net, img_path, thresh, save_traces):
img = Image.open(img_path)
img = img.convert('RGB')
img = np.array(img)
height, width, _ = img.shape
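    # Shrink/stretch factor chosen so the resized image has roughly
    # 320x240 (QVGA mono) or 640x480 pixels.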
if os.environ['IS_QVGA_MONO'] == '1':
max_im_shrink = np.sqrt(
320 * 240 / (img.shape[0] * img.shape[1]))
else:
max_im_shrink = np.sqrt(
640 * 480 / (img.shape[0] * img.shape[1]))
if save_traces==True and os.environ['IS_QVGA_MONO'] == '1':
image = cv2.resize(img, (320, 240))
elif save_traces==True:
image = cv2.resize(img, (640, 480))
else:
image = cv2.resize(img, None, None, fx=max_im_shrink,
fy=max_im_shrink, interpolation=cv2.INTER_LINEAR)
x = to_chw_bgr(image)
x = x.astype('float32')
x -= cfg.img_mean
x = x[[2, 1, 0], :, :]
if cfg.IS_MONOCHROME == True:
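        # Standard BT.601 luma weights: collapse the RGB channels to a single monochrome plane.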
x = 0.299 * x[0] + 0.587 * x[1] + 0.114 * x[2]
x = torch.from_numpy(x).unsqueeze(0).unsqueeze(0)
else:
x = torch.from_numpy(x).unsqueeze(0)
if use_cuda:
x = x.cuda()
t1 = time.time()
y, loc, conf = net(x)
detections = y.data
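    # Detections come out in normalized [0, 1] coordinates; build a scale tensor
    # to map them back to the original image size.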
scale = torch.Tensor([img.shape[1], img.shape[0],
img.shape[1], img.shape[0]])
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
for i in range(detections.size(1)):
j = 0
while detections[0, i, j, 0] >= thresh:
score = detections[0, i, j, 0]
pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
left_up, right_bottom = (pt[0], pt[1]), (pt[2], pt[3])
j += 1
cv2.rectangle(img, left_up, right_bottom, (0, 0, 255), 2)
conf_score = "{:.3f}".format(score)
point = (int(left_up[0]), int(left_up[1] - 5))
cv2.putText(img, conf_score, point, cv2.FONT_HERSHEY_COMPLEX,
0.6, (0, 255, 0), 1)
t2 = time.time()
print('detect:{} timer:{}'.format(img_path, t2 - t1))
cv2.imwrite(os.path.join(args.save_dir, os.path.basename(img_path)), img)
if save_traces == True:
return x, loc, conf
if __name__ == '__main__':
module = import_module('models.' + args.model_arch)
net = module.build_s3fd('test', cfg.NUM_CLASSES)
if args.multigpu == True:
net = torch.nn.DataParallel(net)
checkpoint_dict = torch.load(args.model)
model_dict = net.state_dict()
model_dict.update(checkpoint_dict)
net.load_state_dict(model_dict)
net.eval()
if use_cuda:
net.cuda()
        cudnn.benchmark = True
img_path = args.image_folder
img_list = [os.path.join(img_path, x)
for x in os.listdir(img_path)]
x = []
loc = []
conf = []
for path in img_list:
if args.save_traces == True:
x_temp, loc_temp, conf_temp = detect(net, path, args.thresh, args.save_traces)
x.append(x_temp)
loc.append(loc_temp)
conf.append(conf_temp)
else:
detect(net, path, args.thresh, args.save_traces)
if args.save_traces == True:
np.save('trace_inputs.npy', torch.cat(x).cpu().detach().numpy())
np.save('trace_outputs.npy', torch.cat([torch.cat(conf), torch.cat(loc)], dim=1).cpu().detach().numpy())
|
[
"utils.augmentations.to_chw_bgr",
"argparse.ArgumentParser",
"torch.set_default_tensor_type",
"torch.cat",
"cv2.rectangle",
"os.path.join",
"torch.load",
"os.path.exists",
"torch.Tensor",
"cv2.resize",
"importlib.import_module",
"os.path.basename",
"torch.cuda.is_available",
"os.listdir",
"torch.from_numpy",
"cv2.putText",
"os.makedirs",
"PIL.Image.open",
"time.time",
"cv2.imread",
"numpy.array",
"torch.nn.DataParallel",
"numpy.sqrt"
] |
[((543, 601), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""face detection demo"""'}), "(description='face detection demo')\n", (566, 601), False, 'import argparse\n'), ((1751, 1776), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1774, 1776), False, 'import torch\n'), ((1677, 1706), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (1691, 1706), False, 'import os\n'), ((1712, 1738), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (1723, 1738), False, 'import os\n'), ((1795, 1850), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (1824, 1850), False, 'import torch\n'), ((1861, 1911), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.FloatTensor"""'], {}), "('torch.FloatTensor')\n", (1890, 1911), False, 'import torch\n'), ((1972, 1992), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1982, 1992), False, 'from PIL import Image\n'), ((2032, 2045), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2040, 2045), True, 'import numpy as np\n'), ((2642, 2659), 'utils.augmentations.to_chw_bgr', 'to_chw_bgr', (['image'], {}), '(image)\n', (2652, 2659), False, 'from utils.augmentations import to_chw_bgr\n'), ((2988, 2999), 'time.time', 'time.time', ([], {}), '()\n', (2997, 2999), False, 'import time\n'), ((3062, 3132), 'torch.Tensor', 'torch.Tensor', (['[img.shape[1], img.shape[0], img.shape[1], img.shape[0]]'], {}), '([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])\n', (3074, 3132), False, 'import torch\n'), ((3170, 3208), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_COLOR'], {}), '(img_path, cv2.IMREAD_COLOR)\n', (3180, 3208), False, 'import cv2\n'), ((3811, 3822), 'time.time', 'time.time', ([], {}), '()\n', (3820, 3822), False, 'import time\n'), ((4060, 4102), 'importlib.import_module', 'import_module', (["('models.' + args.model_arch)"], {}), "('models.' 
+ args.model_arch)\n", (4073, 4102), False, 'from importlib import import_module\n'), ((4251, 4273), 'torch.load', 'torch.load', (['args.model'], {}), '(args.model)\n', (4261, 4273), False, 'import torch\n'), ((2146, 2196), 'numpy.sqrt', 'np.sqrt', (['(320 * 240 / (img.shape[0] * img.shape[1]))'], {}), '(320 * 240 / (img.shape[0] * img.shape[1]))\n', (2153, 2196), True, 'import numpy as np\n'), ((2244, 2294), 'numpy.sqrt', 'np.sqrt', (['(640 * 480 / (img.shape[0] * img.shape[1]))'], {}), '(640 * 480 / (img.shape[0] * img.shape[1]))\n', (2251, 2294), True, 'import numpy as np\n'), ((2389, 2416), 'cv2.resize', 'cv2.resize', (['img', '(320, 240)'], {}), '(img, (320, 240))\n', (2399, 2416), False, 'import cv2\n'), ((4201, 4227), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (4222, 4227), False, 'import torch\n'), ((4521, 4546), 'os.path.join', 'os.path.join', (['img_path', 'x'], {}), '(img_path, x)\n', (4533, 4546), False, 'import os\n'), ((2461, 2488), 'cv2.resize', 'cv2.resize', (['img', '(640, 480)'], {}), '(img, (640, 480))\n', (2471, 2488), False, 'import cv2\n'), ((2515, 2614), 'cv2.resize', 'cv2.resize', (['img', 'None', 'None'], {'fx': 'max_im_shrink', 'fy': 'max_im_shrink', 'interpolation': 'cv2.INTER_LINEAR'}), '(img, None, None, fx=max_im_shrink, fy=max_im_shrink,\n interpolation=cv2.INTER_LINEAR)\n', (2525, 2614), False, 'import cv2\n'), ((3518, 3575), 'cv2.rectangle', 'cv2.rectangle', (['img', 'left_up', 'right_bottom', '(0, 0, 255)', '(2)'], {}), '(img, left_up, right_bottom, (0, 0, 255), 2)\n', (3531, 3575), False, 'import cv2\n'), ((3695, 3781), 'cv2.putText', 'cv2.putText', (['img', 'conf_score', 'point', 'cv2.FONT_HERSHEY_COMPLEX', '(0.6)', '(0, 255, 0)', '(1)'], {}), '(img, conf_score, point, cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255,\n 0), 1)\n', (3706, 3781), False, 'import cv2\n'), ((3926, 3952), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (3942, 3952), False, 'import os\n'), ((4572, 4592), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (4582, 4592), False, 'import os\n'), ((2908, 2927), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2924, 2927), False, 'import torch\n'), ((2840, 2859), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2856, 2859), False, 'import torch\n'), ((5028, 5040), 'torch.cat', 'torch.cat', (['x'], {}), '(x)\n', (5037, 5040), False, 'import torch\n'), ((5113, 5128), 'torch.cat', 'torch.cat', (['conf'], {}), '(conf)\n', (5122, 5128), False, 'import torch\n'), ((5130, 5144), 'torch.cat', 'torch.cat', (['loc'], {}), '(loc)\n', (5139, 5144), False, 'import torch\n')]
|
from glob import glob
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
"""
This is a reproduction of Fernando's 2011 normalized commit rate plot, which
gives a rough picture of each project's bus factor.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--outname", "-o")
args = parser.parse_args()
outname = args.outname
filenames = glob("data/raw_data/*/commits.tsv")
filenames.sort()
fig, ax = plt.subplots()
for i, filename in enumerate(filenames):
# Parse project name
project = filename.split("/")[2].split("_")[0]
commits = pd.read_csv(filename, sep="\t", keep_default_na=False)
    # Use .loc so the assignment modifies the frame itself, not a temporary copy.
    commits.loc[commits["author_name"].isnull(), "author_name"] = ""
_, ticket_counts = np.unique(commits["author_name"], return_counts=True)
ticket_counts.sort()
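    # Reverse to descending order and normalize by the top contributor so
    # projects of different sizes are comparable.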
ticket_counts = ticket_counts[::-1] / ticket_counts.max()
ax.plot(ticket_counts[:15] * 100,
label=project,
marker=".", color="C%d" % i,
linewidth=2)
ax.set_xlim(0, 20)
ax.legend()
ax.set_title("Normalized commit rates", fontweight="bold",
fontsize="large")
ax.set_xticks(np.arange(0, 21, 5))
ax.set_yticks([0, 50, 100])
[ax.axhline(i, color="0", alpha=0.3, linewidth=1, zorder=-1)
for i in (0, 50, 100)]
ax.set_ylim(-1, 105)
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.grid(which='major', axis='y', linewidth=0.75, linestyle='-',
color='0.5')
ax.set_xticklabels(["%d" % i for i in np.arange(0, 20, 5)], fontsize="medium",
fontweight="bold", color="0.5")
ax.set_yticklabels(["%d%%" % i for i in (0, 50, 100)], fontsize="medium",
fontweight="bold", color="0.5")
ax.set_xlabel("Contributors", fontsize="medium", fontweight="bold",
color="0.5")
if outname is not None:
try:
os.makedirs(os.path.dirname(outname))
except OSError:
pass
fig.savefig(outname)
|
[
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.dirname",
"numpy.arange",
"glob.glob",
"matplotlib.pyplot.subplots",
"numpy.unique"
] |
[((243, 268), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (266, 268), False, 'import argparse\n'), ((372, 407), 'glob.glob', 'glob', (['"""data/raw_data/*/commits.tsv"""'], {}), "('data/raw_data/*/commits.tsv')\n", (376, 407), False, 'from glob import glob\n'), ((436, 450), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (448, 450), True, 'import matplotlib.pyplot as plt\n'), ((584, 638), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""', 'keep_default_na': '(False)'}), "(filename, sep='\\t', keep_default_na=False)\n", (595, 638), True, 'import pandas as pd\n'), ((727, 780), 'numpy.unique', 'np.unique', (["commits['author_name']"], {'return_counts': '(True)'}), "(commits['author_name'], return_counts=True)\n", (736, 780), True, 'import numpy as np\n'), ((1136, 1155), 'numpy.arange', 'np.arange', (['(0)', '(21)', '(5)'], {}), '(0, 21, 5)\n', (1145, 1155), True, 'import numpy as np\n'), ((1634, 1653), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(5)'], {}), '(0, 20, 5)\n', (1643, 1653), True, 'import numpy as np\n'), ((2000, 2024), 'os.path.dirname', 'os.path.dirname', (['outname'], {}), '(outname)\n', (2015, 2024), False, 'import os\n')]
|
import numpy
import pytest
from grunnur import dtypes
from grunnur.modules import render_with_modules
def test_normalize_type():
dtype = dtypes.normalize_type(numpy.int32)
assert dtype == numpy.int32
assert type(dtype) == numpy.dtype
def test_ctype_builtin():
assert dtypes.ctype(numpy.int32) == 'int'
def test_is_complex():
assert dtypes.is_complex(numpy.complex64)
assert dtypes.is_complex(numpy.complex128)
assert not dtypes.is_complex(numpy.float64)
def test_is_double():
assert dtypes.is_double(numpy.float64)
assert dtypes.is_double(numpy.complex128)
assert not dtypes.is_double(numpy.complex64)
def test_is_integer():
assert dtypes.is_integer(numpy.int32)
assert not dtypes.is_integer(numpy.float32)
def test_is_real():
assert dtypes.is_real(numpy.float32)
assert not dtypes.is_real(numpy.complex64)
assert not dtypes.is_real(numpy.int32)
def test_promote_type():
assert dtypes._promote_type(numpy.int8) == numpy.int32
assert dtypes._promote_type(numpy.uint8) == numpy.uint32
assert dtypes._promote_type(numpy.float16) == numpy.float32
assert dtypes._promote_type(numpy.int32) == numpy.int32
def test_result_type():
assert dtypes.result_type(numpy.int32, numpy.float32) == numpy.float64
def test_min_scalar_type():
assert dtypes.min_scalar_type(1) == numpy.uint32
assert dtypes.min_scalar_type(-1) == numpy.int32
assert dtypes.min_scalar_type(1.) == numpy.float32
assert dtypes.min_scalar_type(2**31-1, force_signed=True) == numpy.int32
# 2**31 will not fit into int32 type
assert dtypes.min_scalar_type(2**31, force_signed=True) == numpy.int64
def test_detect_type():
assert dtypes.detect_type(numpy.int8(-1)) == numpy.int32
assert dtypes.detect_type(numpy.int64(-1)) == numpy.int64
assert dtypes.detect_type(-1) == numpy.int32
assert dtypes.detect_type(-1.) == numpy.float32
def test_complex_for():
assert dtypes.complex_for(numpy.float32) == numpy.complex64
assert dtypes.complex_for(numpy.float64) == numpy.complex128
with pytest.raises(ValueError):
assert dtypes.complex_for(numpy.complex64)
with pytest.raises(ValueError):
assert dtypes.complex_for(numpy.int32)
def test_real_for():
assert dtypes.real_for(numpy.complex64) == numpy.float32
assert dtypes.real_for(numpy.complex128) == numpy.float64
with pytest.raises(ValueError):
assert dtypes.real_for(numpy.float32)
with pytest.raises(ValueError):
assert dtypes.real_for(numpy.int32)
def test_complex_ctr():
assert dtypes.complex_ctr(numpy.complex64) == "COMPLEX_CTR(float2)"
def test_cast():
cast = dtypes.cast(numpy.uint64)
for val in [cast(1), cast(numpy.int32(1)), cast(numpy.uint64(1))]:
assert val.dtype == numpy.uint64 and val == 1
def test_c_constant():
# scalar values
assert dtypes.c_constant(1) == "1"
assert dtypes.c_constant(numpy.uint64(1)) == "1UL"
assert dtypes.c_constant(numpy.int64(-1)) == "-1L"
assert dtypes.c_constant(numpy.float64(1.)) == "1.0"
assert dtypes.c_constant(numpy.float32(1.)) == "1.0f"
assert dtypes.c_constant(numpy.complex64(1 + 2j)) == "COMPLEX_CTR(float2)(1.0f, 2.0f)"
assert dtypes.c_constant(numpy.complex128(1 + 2j)) == "COMPLEX_CTR(double2)(1.0, 2.0)"
# array
assert dtypes.c_constant(numpy.array([1, 2, 3], numpy.float32)) == "{1.0f, 2.0f, 3.0f}"
# struct type
dtype = numpy.dtype([('val1', numpy.int32), ('val2', numpy.float32)])
val = numpy.empty((), dtype)
val['val1'] = 1
val['val2'] = 2
assert dtypes.c_constant(val) == "{1, 2.0f}"
# custom dtype
assert dtypes.c_constant(1, numpy.float32) == "1.0f"
def test__align_simple():
dtype = numpy.dtype('int32')
res = dtypes._align(dtype)
ref = dtypes.WrappedType(dtype, dtype.itemsize)
assert res == ref
def test__align_array():
dtype = numpy.dtype('int32')
dtype_arr = numpy.dtype((dtype, 3))
res = dtypes._align(dtype_arr)
ref = dtypes.WrappedType(dtype_arr, dtype.itemsize)
assert res == ref
def test__align_non_aligned_struct():
dtype = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32]))
res = dtypes._align(dtype)
dtype_aligned = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 2, 4],
itemsize=8,
aligned=True))
wt_x = dtypes.WrappedType(numpy.dtype('int8'), 1)
wt_y = dtypes.WrappedType(numpy.dtype('int16'), 2)
wt_z = dtypes.WrappedType(numpy.dtype('int32'), 4)
ref = dtypes.WrappedType(
dtype_aligned, 4, explicit_alignment=None, wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),
field_alignments=dict(x=None, y=None, z=None))
assert res == ref
def test__align_aligned_struct():
dtype_aligned = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 2, 4],
itemsize=8,
aligned=True))
res = dtypes._align(dtype_aligned)
wt_x = dtypes.WrappedType(numpy.dtype('int8'), 1)
wt_y = dtypes.WrappedType(numpy.dtype('int16'), 2)
wt_z = dtypes.WrappedType(numpy.dtype('int32'), 4)
ref = dtypes.WrappedType(
dtype_aligned, 4, explicit_alignment=None, wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),
field_alignments=dict(x=None, y=None, z=None))
assert res == ref
def test__align_aligned_struct_custom_itemsize():
dtype_aligned = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 2, 4],
itemsize=16,
aligned=True))
res = dtypes._align(dtype_aligned)
wt_x = dtypes.WrappedType(numpy.dtype('int8'), 1)
wt_y = dtypes.WrappedType(numpy.dtype('int16'), 2)
wt_z = dtypes.WrappedType(numpy.dtype('int32'), 4)
ref = dtypes.WrappedType(
dtype_aligned, 16, explicit_alignment=16, wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),
field_alignments=dict(x=None, y=None, z=None))
assert res == ref
def test__align_custom_field_offsets():
dtype = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 4, 16],
itemsize=32))
dtype_aligned = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 4, 16],
itemsize=32,
aligned=True))
res = dtypes._align(dtype_aligned)
wt_x = dtypes.WrappedType(numpy.dtype('int8'), 1)
wt_y = dtypes.WrappedType(numpy.dtype('int16'), 2)
wt_z = dtypes.WrappedType(numpy.dtype('int32'), 4)
ref = dtypes.WrappedType(
dtype_aligned, 16, explicit_alignment=None, wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),
field_alignments=dict(x=None, y=4, z=16))
assert res == ref
def test__align_aligned_struct_invalid_itemsize():
dtype_aligned = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 2, 4],
itemsize=20, # not a power of 2, an error should be raised
aligned=True))
with pytest.raises(ValueError):
dtypes._align(dtype_aligned)
def test_align_nested():
dtype_nested = numpy.dtype(dict(
names=['val1', 'pad'],
formats=[numpy.int8, numpy.int8]))
dtype = numpy.dtype(dict(
names=['pad', 'struct_arr', 'regular_arr'],
formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))]))
dtype_ref = numpy.dtype(dict(
names=['pad','struct_arr','regular_arr'],
formats=[numpy.int32, (dtype_nested, (2,)), (numpy.int16, (3,))],
offsets=[0,4,8],
itemsize=16))
dtype_aligned = dtypes.align(dtype)
assert dtype_aligned.isalignedstruct
assert dtype_aligned == dtype_ref
def test_align_preserve_nested_aligned():
dtype_int3 = numpy.dtype(dict(names=['x'], formats=[(numpy.int32, 3)], itemsize=16, aligned=True))
dtype = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int32, dtype_int3, numpy.int32]))
dtype_ref = numpy.dtype(dict(
names=['x','y','z'],
formats=[numpy.int32, dtype_int3, numpy.int32],
offsets=[0,16,32],
itemsize=48,
aligned=True))
dtype_aligned = dtypes.align(dtype)
assert dtype_aligned.isalignedstruct
assert dtype_aligned == dtype_ref
def test_lcm():
assert dtypes._lcm(10) == 10
assert dtypes._lcm(15, 20) == 60
assert dtypes._lcm(16, 32, 24) == 96
def test_find_minimum_alignment():
# simple case: base alignment is enough because 12 is the next multiple of 4 after 9
assert dtypes._find_minimum_alignment(12, 4, 9) == 4
# the next multiple of 4 is 12, but we want offset 16 - this means we need to set
# the alignment equal to 8, because 16 is the next multiple of 8 after 9.
assert dtypes._find_minimum_alignment(16, 4, 9) == 8
# incorrect offset (not a multiple of the base alignment)
with pytest.raises(ValueError):
dtypes._find_minimum_alignment(13, 4, 9)
# offset too large and not a power of 2 - cannot achieve that with alignment only,
# will need explicit padding
with pytest.raises(ValueError):
dtypes._find_minimum_alignment(24, 4, 9)
def test_wrapped_type_repr():
dtype_aligned = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 4, 16],
itemsize=32,
aligned=True))
wt_x = dtypes.WrappedType(numpy.dtype('int8'), 1)
wt_y = dtypes.WrappedType(numpy.dtype('int16'), 2)
wt_z = dtypes.WrappedType(numpy.dtype('int32'), 4)
wt = dtypes.WrappedType(
dtype_aligned, 16, explicit_alignment=None, wrapped_fields=dict(x=wt_x, y=wt_y, z=wt_z),
field_alignments=dict(x=None, y=4, z=16))
assert eval(
repr(wt),
dict(
numpy=numpy, WrappedType=dtypes.WrappedType,
int8=numpy.int8, int16=numpy.int16, int32=numpy.int32)) == wt
def test_ctype_for_struct():
dtype = dtypes.align(numpy.dtype([('val1', numpy.int32), ('val2', numpy.float32)]))
ctype = dtypes.ctype(dtype)
src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
assert src == (
'typedef struct _mod__module_0__ {\n'
' int val1;\n'
' float val2;\n'
'} _mod__module_0_;\n\n\n'
'_mod__module_0_')
def test_ctype_struct_nested():
dtype_nested = numpy.dtype(dict(
names=['val1', 'pad'],
formats=[numpy.int8, numpy.int8]))
dtype = numpy.dtype(dict(
names=['pad', 'struct_arr', 'regular_arr'],
formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))]))
dtype = dtypes.align(dtype)
ctype = dtypes.ctype(dtype)
src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
assert src == (
'typedef struct _mod__module_1__ {\n'
' char val1;\n'
' char pad;\n'
'} _mod__module_1_;\n\n\n'
'typedef struct _mod__module_0__ {\n'
' int pad;\n'
' _mod__module_1_ struct_arr[2];\n'
' short regular_arr[3];\n'
'} _mod__module_0_;\n\n\n'
'_mod__module_0_')
def test_ctype_to_ctype_struct():
# Checks that ctype() on an unknown type calls ctype_struct()
dtype = dtypes.align(numpy.dtype([('val1', numpy.int32), ('val2', numpy.float32)]))
ctype = dtypes.ctype(dtype)
src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
assert src == (
'typedef struct _mod__module_0__ {\n'
' int val1;\n'
' float val2;\n'
'} _mod__module_0_;\n\n\n'
'_mod__module_0_')
def test_ctype_struct():
dtype = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 4, 16],
itemsize=64,
aligned=True))
ctype = dtypes.ctype_struct(dtype)
src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
assert src == (
'typedef struct _mod__module_0__ {\n'
' char x;\n'
' short ALIGN(4) y;\n'
' int ALIGN(16) z;\n'
'} ALIGN(64) _mod__module_0_;\n\n\n'
'_mod__module_0_')
def test_ctype_struct_ignore_alignment():
dtype = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32],
offsets=[0, 4, 16],
itemsize=64,
aligned=True))
ctype = dtypes.ctype_struct(dtype, ignore_alignment=True)
src = render_with_modules("${ctype}", render_globals=dict(ctype=ctype)).strip()
assert src == (
'typedef struct _mod__module_0__ {\n'
' char x;\n'
' short y;\n'
' int z;\n'
'} _mod__module_0_;\n\n\n'
'_mod__module_0_')
def test_ctype_struct_checks_alignment():
dtype = numpy.dtype(dict(
names=['x', 'y', 'z'],
formats=[numpy.int8, numpy.int16, numpy.int32]))
with pytest.raises(ValueError):
dtypes.ctype_struct(dtype)
def test_ctype_struct_for_non_struct():
dtype = numpy.dtype((numpy.int32, 3))
with pytest.raises(ValueError):
dtypes.ctype_struct(dtype)
# ctype_struct() is not applicable for simple types
with pytest.raises(ValueError):
dtypes.ctype_struct(numpy.int32)
def test_flatten_dtype():
dtype_nested = numpy.dtype(dict(
names=['val1', 'pad'],
formats=[numpy.int8, numpy.int8]))
dtype = numpy.dtype(dict(
names=['pad', 'struct_arr', 'regular_arr'],
formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))]))
res = dtypes.flatten_dtype(dtype)
ref = [
(['pad'], numpy.dtype('int32')),
(['struct_arr', 0, 'val1'], numpy.dtype('int8')),
(['struct_arr', 0, 'pad'], numpy.dtype('int8')),
(['struct_arr', 1, 'val1'], numpy.dtype('int8')),
(['struct_arr', 1, 'pad'], numpy.dtype('int8')),
(['regular_arr', 0], numpy.dtype('int16')),
(['regular_arr', 1], numpy.dtype('int16')),
(['regular_arr', 2], numpy.dtype('int16'))]
assert dtypes.flatten_dtype(dtype) == ref
def test_c_path():
assert dtypes.c_path(['struct_arr', 0, 'val1']) == 'struct_arr[0].val1'
def test_extract_field():
dtype_nested = numpy.dtype(dict(
names=['val1', 'pad'],
formats=[numpy.int8, numpy.int8]))
dtype = numpy.dtype(dict(
names=['pad', 'struct_arr', 'regular_arr'],
formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))]))
a = numpy.empty(16, dtype)
a['struct_arr']['val1'][:,1] = numpy.arange(16)
assert (dtypes.extract_field(a, ['struct_arr', 1, 'val1']) == numpy.arange(16)).all()
b = numpy.empty(16, dtype_nested)
b['val1'] = numpy.arange(16)
assert (dtypes.extract_field(b, ['val1']) == numpy.arange(16)).all()
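# An illustrative sketch (not part of the original test suite): it combines the
# helpers exercised above (dtypes.align, dtypes.flatten_dtype and dtypes.c_path)
# on the same nested dtype used in the tests, assuming `grunnur` and `numpy` are
# installed. Run this file directly (outside pytest) to print the field layout.
if __name__ == '__main__':
    import numpy
    from grunnur import dtypes

    dtype_nested = numpy.dtype(dict(
        names=['val1', 'pad'],
        formats=[numpy.int8, numpy.int8]))
    dtype = numpy.dtype(dict(
        names=['pad', 'struct_arr', 'regular_arr'],
        formats=[numpy.int32, numpy.dtype((dtype_nested, 2)), numpy.dtype((numpy.int16, 3))]))
    aligned = dtypes.align(dtype)
    print('aligned struct:', aligned.isalignedstruct, 'itemsize:', aligned.itemsize)
    for path, field_dtype in dtypes.flatten_dtype(dtype):
        print(dtypes.c_path(path), '->', field_dtype)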
|
[
"numpy.uint64",
"grunnur.dtypes.is_double",
"numpy.empty",
"grunnur.dtypes.is_complex",
"grunnur.dtypes.detect_type",
"grunnur.dtypes._align",
"grunnur.dtypes._find_minimum_alignment",
"numpy.arange",
"grunnur.dtypes.align",
"numpy.float64",
"numpy.complex64",
"numpy.int8",
"grunnur.dtypes.c_constant",
"grunnur.dtypes.flatten_dtype",
"pytest.raises",
"grunnur.dtypes.normalize_type",
"grunnur.dtypes.is_real",
"numpy.int64",
"numpy.int32",
"grunnur.dtypes._promote_type",
"grunnur.dtypes.WrappedType",
"numpy.complex128",
"grunnur.dtypes.complex_for",
"grunnur.dtypes.cast",
"grunnur.dtypes.extract_field",
"grunnur.dtypes.ctype_struct",
"grunnur.dtypes.min_scalar_type",
"grunnur.dtypes.ctype",
"grunnur.dtypes._lcm",
"grunnur.dtypes.c_path",
"numpy.dtype",
"grunnur.dtypes.complex_ctr",
"numpy.float32",
"numpy.array",
"grunnur.dtypes.real_for",
"grunnur.dtypes.is_integer",
"grunnur.dtypes.result_type"
] |
# AUTOGENERATED! DO NOT EDIT! File to edit: dev/52_USB_camera.ipynb (unless otherwise specified).
__all__ = ['Camera']
# Cell
from FLIRCam.core import *
# Cell
# Standard imports:
from pathlib import Path
import logging
from logging.handlers import RotatingFileHandler
from time import sleep, time as timestamp
from datetime import datetime
from threading import Thread, Event
from struct import pack as pack_data
# External imports:
import numpy as np
# Cell
import PySpin
class Camera():
"""Control acquisition and receive images from a camera.
To initialise a Camera a *model* (determines hardware interface) and *identity* (identifying the specific device)
must be given. If both are given to the constructor the Camera will be initialised immediately (unless
auto_init=False is passed). Manually initialise with a call to Camera.initialize(); release hardware with a call to
Camera.deinitialize().
    After the Camera is initialised, acquisition properties (e.g. exposure_time and frame_rate) may be set and images
received. The Camera also supports event-driven acquisition, see Camera.add_event_callback(), where new images are
automatically passed on to the desired functions.
Args:
model (str, optional): The model used to determine the correct hardware API. Supported: 'ptgrey' for
PointGrey/FLIR Machine Vision cameras (using Spinnaker and PySpin).
identity (str, optional): String identifying the device. For model *ptgrey* this is 'serial number' *as a
string*.
name (str, optional): Name for the device.
auto_init (bool, optional): If both model and identity are given when creating the Camera and auto_init
is True (the default), Camera.initialize() will be called after creation.
debug_folder (pathlib.Path, optional): The folder for debug logging. If None (the default)
the folder *pypogs*/debug will be used/created.
Example:
::
# Create instance and set parameters (will auto initialise)
cam = pypogs.Camera(model='ptgrey', identity='18285284', name='CoarseCam')
cam.gain = 0 #decibel
cam.exposure_time = 100 #milliseconds
cam.frame_rate_auto = True
# Start acquisition
cam.start()
# Wait for a while
time.sleep(2)
# Read the latest image
img = cam.get_latest_image()
# Stop the acquisition
cam.stop()
# Release the hardware
cam.deinitialize()
"""
_supported_models = ('ptgrey',)
def __init__(self, model=None, identity=None, name=None, auto_init=True, debug_folder=None):
"""Create Camera instance. See class documentation."""
# Logger setup
self._debug_folder = None
if debug_folder is None:
try:
self.debug_folder = Path(__file__).parent / 'debug'
except:
self.debug_folder = Path()/'debug'
else:
self.debug_folder = debug_folder
self.log = logging.getLogger(f'{name}')
if not self.log.hasHandlers():
# Add new handlers to the logger if there are none
self.log.setLevel(logging.DEBUG)
# Console handler at INFO level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# File handler at DEBUG level
# fh = logging.FileHandler(self.debug_folder / 'log.txt')
fh = RotatingFileHandler(self.debug_folder / 'camera.log', maxBytes=1*1024*1024,
backupCount=2)
fh.setLevel(logging.DEBUG)
# Format and add
# log_formatter = logging.Formatter('%(asctime)s:%(name)s-%(levelname)s: %(message)s')
# log_formatter = logging.Formatter('%(asctime)s CAM-%(levelname)s: %(message)s')
log_formatter = logging.Formatter('%(asctime)s %(name)s-%(levelname)s-%(threadName)s'+
'-%(funcName)s-(%(lineno)d) %(message)s')
fh.setFormatter(log_formatter)
ch.setFormatter(log_formatter)
self.log.addHandler(fh)
self.log.addHandler(ch)
self.log.info('New console and file logging handlers added.')
# Start of constructor
self.log.debug('Camera Init: Model:'+str(model)+' ID:'+str(identity) \
+' Name:'+str(name) +' AutoInit:'+str(auto_init))
self._model = None
self._identity = None
self._name = 'UnnamedCamera'
self._plate_scale = 1.0
self._rotation = 0.0
self._flipX = False
self._flipY = False
self._rot90 = 0 #Number of times to rotate by 90 deg, done after flips
#Only used for ptgrey
self._ptgrey_camera = None
self._ptgrey_camlist = None
self._ptgrey_system = None
#Callbacks on image event
self._call_on_image = set()
self._got_image_event = Event()
self._image_data = None
self._image_frameID = None
self._image_timestamp = None
self._imgs_since_start = 0
self.log.debug('Calling self on constructor input')
if model is not None:
self.model = model
if identity is not None:
self.identity = identity
if name is not None:
self.name = name
if auto_init and not None in (model, identity):
self.log.debug('Trying to auto-initialise')
self.initialize()
self.log.debug('Registering destructor')
# TODO: Should we register deinitialisor instead? (probably yes...)
import atexit, weakref
atexit.register(weakref.ref(self.__del__))
self.log.info('Camera instance created with name: ' + self.name + '.')
def __del__(self):
"""Destructor. Releases hardware."""
if self.is_init:
self.deinitialize()
def getprops(self, prop_list):
""" Get FLIR Camera properties, listed in the prop_list"""
assert self.is_init, 'Camera must be initialised'
prop_dict = { i : None for i in prop_list }
try:
nodemap = self._ptgrey_camera.GetNodeMap()
for i, p in enumerate(prop_list):
# val_list[i] = PySpin.CIntegerPtr(nodemap.GetNode(p)).GetValue()
try: # integer
prop_dict[p] = PySpin.CIntegerPtr(nodemap.GetNode(p)).GetValue()
except:
try: # Float
prop_dict[p] = PySpin.CFloatPtr(nodemap.GetNode(p)).GetValue()
except:
try: # enumeration
node = PySpin.CEnumerationPtr(nodemap.GetNode(p))
prop_dict[p] = node.GetCurrentEntry().GetDisplayName().lower()
except: # Bool
prop_dict[p] = PySpin.CBooleanPtr(nodemap.GetNode(p)).GetValue()
self.log.debug(f'Found Node "{str(p)}" = {prop_dict[p]}')
except PySpin.SpinnakerException:
self.log.warning(f'Failed to read node "{str(p)}"')
finally:
return prop_dict
def setprops(self, prop_dict, stop=True):
""" Set FLIR Camera properties, listed in the prop_dict"""
assert self.is_init, 'Camera must be initialised'
was_stopped = False
if self.is_running and stop:
self.log.debug('Camera is running, stop it and restart immediately after.')
self.stop()
was_stopped = True
assert self.is_init, 'Camera must be initialised'
type_list = [type(value) for key, value in self.getprops(prop_dict).items()]
self.log.debug(f'Type_list = {type_list}')
try:
nodemap = self._ptgrey_camera.GetNodeMap()
for (key, value), t in zip(prop_dict.items(), type_list):
if t == int : # integer
PySpin.CIntegerPtr(nodemap.GetNode(key)).SetValue(value)
elif t == float:
PySpin.CFloatPtr(nodemap.GetNode(key)).SetValue(value)
elif t == str:
node = PySpin.CEnumerationPtr(nodemap.GetNode(key))
node.SetIntValue(node.GetEntryByName(value).GetValue())
elif t == bool:
# node = PySpin.CBooleanPtr(nodemap.GetNode('AcquisitionFrameRateEnable'))
PySpin.CBooleanPtr(nodemap.GetNode(key)).SetValue(value)
elif t == type(None):
self.log.warning(f'No property type found for node: "{key}"')
# raise Exception(f'No property type found for node: "{key}"')
                    continue
else:
self.log.warning(f'Property type not implemented for node: "{key}"')
# raise Exception(f'Property type not implemented for node: "{key}"')
                    continue
self.log.debug(f'Set Node "{key}" = {value}')
except PySpin.SpinnakerException as e:
if 'LogicalErrorException' in e.message:
self.log.warning(f'Node: "{key}", LogicalErrorException')
elif 'OutOfRangeException' in e.message:
self.log.warning(f'Node: "{key}", value: "{value}" is out of range.')
elif 'AccessException' in e.message:
self.log.warning(f'Not allowed to change Node: "{key}" now - Try "stop=True".')
else:
self.log.warning(f'Failed to set node: "{key}"')
if was_stopped :
try:
self.start()
self.log.debug('Restarted')
except Exception:
self.log.debug('Failed to restart: ', exc_info=True)
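    # Illustrative usage of getprops()/setprops() above (not from the original
    # source). The node names are ones already used elsewhere in this class and
    # are assumed to exist on the connected camera:
    #
    #     cam.setprops({'ExposureAuto': 'Off', 'ExposureTime': 20000.0})
    #     current = cam.getprops(['ExposureTime', 'Gain', 'AcquisitionFrameRate'])
    #     print(current)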
def _ptgrey_release(self):
"""PRIVATE: Release Point Grey hardware resources."""
self.log.debug('PointGrey hardware release called')
if self._ptgrey_camera is not None:
self.log.debug('Deleting PtGrey camera object')
del(self._ptgrey_camera) #Preferred over =None according to PtGrey
self._ptgrey_camera = None
if self._ptgrey_camlist is not None:
self.log.debug('Clearing and deleting PtGrey camlist')
self._ptgrey_camlist.Clear()
del(self._ptgrey_camlist)
self._ptgrey_camlist = None
if self._ptgrey_system is not None:
self.log.debug('Has PtGrey system. Is in use? '+str(self._ptgrey_system.IsInUse()))
if not self._ptgrey_system.IsInUse():
self.log.debug('Not in use, releasing and deleting')
self._ptgrey_system.ReleaseInstance()
del(self._ptgrey_system)
self._ptgrey_system = None
self.log.debug('Hardware released')
@property
def debug_folder(self):
"""pathlib.Path: Get or set the path for debug logging. Will create folder if not existing."""
return self._debug_folder
@debug_folder.setter
def debug_folder(self, path):
# Do not do logging in here! This will be called before the logger is set up
path = Path(path) #Make sure pathlib.Path
if path.is_file():
path = path.parent
if not path.is_dir():
path.mkdir(parents=True)
self._debug_folder = path
@property
def name(self):
"""str: Get or set the name."""
return self._name
@name.setter
def name(self, name):
self.log.debug('Setting name to: '+str(name))
self._name = str(name)
self.log.debug('Name set to '+str(self.name))
@property
def model(self):
"""str: Get or set the device model.
Supported:
- 'ptgrey' for FLIR/Point Grey cameras (using Spinnaker/PySpin SDKs).
- This will determine which hardware API that is used.
- Must set before initialising the device and may not be changed for an initialised device.
"""
return self._model
@model.setter
def model(self, model):
self.log.debug('Setting model to: '+str(model))
        assert not self.is_init, 'Can not change already initialised device model'
model = str(model)
assert model.lower() in self._supported_models,\
'Model type not recognised, allowed: '+str(self._supported_models)
#TODO: Check that the APIs are available.
self._model = model
self.log.debug('Model set to '+str(self.model))
@property
def identity(self):
"""str: Get or set the device and/or input. Model must be defined first.
- For model *ptgrey* this is the serial number *as a string*
- Must set before initialising the device and may not be changed for an initialised device.
"""
return self._identity
@identity.setter
def identity(self, identity):
self.log.debug('Setting identity to: '+str(identity))
        assert not self.is_init, 'Can not change already initialised device'
assert self.model is not None, 'Must define model first'
identity = str(identity)
if not self._ptgrey_system:
self._ptgrey_system = PySpin.System.GetInstance() #Get singleton
self._ptgrey_camlist = self._ptgrey_system.GetCameras()
self.log.debug('Got cam list, size:'+str(self._ptgrey_camlist.GetSize()))
self._ptgrey_camera = self._ptgrey_camlist.GetBySerial(identity)
valid = self._ptgrey_camera.IsValid()
self.log.debug('Got object, valid: '+str(valid))
if valid:
self.log.debug('Already init: '+str(self._ptgrey_camera.IsInitialized()))
if not valid:
self.log.debug('Invalid camera object. Cleaning up')
del(self._ptgrey_camera)
self._ptgrey_camera = None
self._ptgrey_camlist.Clear()
raise AssertionError('The camera was not found')
elif self._ptgrey_camera.IsInitialized():
self.log.debug('Camera object already in use. Cleaning up')
del(self._ptgrey_camera)
self._ptgrey_camera = None
self._ptgrey_camlist.Clear()
raise RuntimeError('The camera is already in use')
else:
self.log.debug('Seems valid. Setting identity and cleaning up')
del(self._ptgrey_camera)
self._ptgrey_camera = None
self._identity = identity
self._ptgrey_camlist.Clear()
self.log.debug('Identity set to: '+str(self.identity))
@property
def is_init(self):
"""bool: True if the device is initialised (and therefore ready to start)."""
init = self._ptgrey_camera is not None and self._ptgrey_camera.IsInitialized()
return init
def initialize(self):
"""Initialise (make ready to start) the device. The model and identity must be defined."""
self.log.debug('Initialising')
assert not self.is_init, 'Already initialised'
assert not None in (self.model, self.identity), 'Must define model and identity before initialising'
if self._ptgrey_camera is not None:
raise RuntimeError('There is already a camera object here')
if not self._ptgrey_system: self._ptgrey_system = PySpin.System.GetInstance() #Get singleton
if self._ptgrey_camlist: #Clear old list and get fresh one
self._ptgrey_camlist.Clear()
del(self._ptgrey_camlist)
self._ptgrey_camlist = self._ptgrey_system.GetCameras()
self.log.debug('Getting pyspin object and initialising')
self._ptgrey_camera = self._ptgrey_camlist.GetBySerial(self.identity)
self._ptgrey_camera.Init()
# BASIC SETUP
# self.log.debug('Setting gamma off')
# nodemap = self._ptgrey_camera.GetNodeMap()
# PySpin.CBooleanPtr(nodemap.GetNode('GammaEnable')).SetValue(False)
self.log.debug('Setting acquisition mode to continuous')
self._ptgrey_camera.AcquisitionMode.SetIntValue(PySpin.AcquisitionMode_Continuous)
self.log.debug('Setting stream mode to newest only')
self._ptgrey_camera.TLStream.StreamBufferHandlingMode.SetIntValue(
PySpin.StreamBufferHandlingMode_NewestOnly)
self.log.info('Camera successfully initialised')
def deinitialize(self):
"""De-initialise the device and release hardware resources. Will stop the acquisition if it is running."""
self.log.debug('De-initialising')
assert self.is_init, 'Not initialised'
if self.is_running:
self.log.debug('Is running, stopping')
self.stop()
self.log.debug('Stopped')
self.log.debug('Found PtGrey camera, deinitialising')
self.unregister_event_handler()
try:
self._ptgrey_camera.DeInit()
del(self._ptgrey_camera)
self._ptgrey_camera = None
self.log.debug('Deinitialised PtGrey camera object and deleted')
except:
self.log.exception('Failed to close task')
self.log.debug('Trying to release PtGrey hardware resources')
self._ptgrey_release()
def register_event_handler(self):
"""Initialise images event handler mode."""
class PtGreyEventHandler(PySpin.ImageEvent):
"""Barebones event handler for ptgrey, just pass along the event to the Camera class."""
def __init__(self, parent):
assert parent.model.lower() == 'ptgrey', 'Trying to attach ptgrey event handler to non ptgrey model'
super().__init__()
self.parent = parent
def OnImageEvent(self, image:PySpin.Image):
"""Read out the image and a timestamp, reshape to array, pass to parent"""
# self.parent.log.debug('Image event! Unpack and release pointer')
self.parent._image_timestamp = datetime.utcnow()
try:
# img = image.GetData()
image_converted = image.Convert(PySpin.PixelFormat_RGB8)
image_converted = image_converted.GetNDArray()
# print('img', image_converted.shape)
# img = img.reshape((img_ptr.GetHeight(), img_ptr.GetWidth(), 3))
                    if self.parent._flipX:
                        image_converted = np.fliplr(image_converted)
                    if self.parent._flipY:
                        image_converted = np.flipud(image_converted)
                    if self.parent._rot90:
                        image_converted = np.rot90(image_converted, self.parent._rot90)
                    self.parent._image_data = image_converted
self.parent._image_frameID = image.GetFrameID()
except:
self.parent.log.warning('Failed to unpack image', exc_info=True)
self.parent._image_data = None
finally:
image.Release()
# self.parent._image_data = np.ones((100,100,3))
# self.parent._image_frameID = image.GetFrameID()
# image.Release()
self.parent._got_image_event.set()
if self.parent._imgs_since_start % 10 == 0:
self.parent.log.debug('Frames Received: ' + str(self.parent._imgs_since_start) \
+ ' Size:' + str(self.parent._image_data.shape) \
+ ' Type:' + str(self.parent._image_data.dtype))
for func in self.parent._call_on_image:
try:
self.parent.log.debug('Calling back to: ' + str(func))
func(self.parent._image_data, self.parent._image_frameID, self.parent._image_timestamp, self.parent.identity)
except:
self.parent.log.warning('Failed image callback', exc_info=True)
self.parent._imgs_since_start += 1
# self.parent.log.debug('Event handler finished.')
self._ptgrey_event_handler = PtGreyEventHandler(self)
self.log.debug('Created ptgrey image event handler')
self._ptgrey_camera.RegisterEvent( self._ptgrey_event_handler )
self.log.debug('Registered ptgrey image event handler')
def unregister_event_handler(self):
"""Unregister images event handler."""
try:
self._ptgrey_camera.UnregisterEvent(self._ptgrey_event_handler)
self.log.debug('Unregistered event handler')
except:
self.log.exception('Failed to unregister event handler')
@property
def available_properties(self):
"""tuple of str: Get all the available properties (settings) supported by this device."""
assert self.is_init, 'Camera must be initialised'
return ('flip_x', 'flip_y', 'rotate_90', 'plate_scale', 'rotation', 'binning', 'size_readout', 'frame_rate_auto',\
'frame_rate', 'gain_auto', 'gain', 'exposure_time_auto', 'exposure_time')
@property
def flip_x(self):
"""bool: Get or set if the image X-axis should be flipped. Default is False."""
self.log.debug('Get flip-X called')
assert self.is_init, 'Camera must be initialised'
self.log.debug('Using PtGrey camera. Will flip the received image array ourselves: ' +str(self._flipX))
return self._flipX
@flip_x.setter
def flip_x(self, flip):
self.log.debug('Set flip-X called with: '+str(flip))
assert self.is_init, 'Camera must be initialised'
flip = bool(flip)
self.log.debug('Using PtGrey camera. Will flip the received image array ourselves.')
self._flipX = flip
self.log.debug('_flipX set to: '+str(self._flipX))
@property
def flip_y(self):
"""bool: Get or set if the image Y-axis should be flipped. Default is False."""
self.log.debug('Get flip-Y called')
assert self.is_init, 'Camera must be initialised'
        self.log.debug('Using PtGrey camera. Will flip the received image array ourselves: ' +str(self._flipY))
return self._flipY
@flip_y.setter
def flip_y(self, flip):
self.log.debug('Set flip-Y called with: '+str(flip))
assert self.is_init, 'Camera must be initialised'
flip = bool(flip)
self.log.debug('Using PtGrey camera. Will flip the received image array ourselves.')
self._flipY = flip
self.log.debug('_flipY set to: '+str(self._flipY))
@property
def rotate_90(self):
"""int: Get or set how many times the image should be rotated by 90 degrees. Applied *after* flip_x and flip_y.
"""
assert self.is_init, 'Camera must be initialised'
return self._rot90
@rotate_90.setter
def rotate_90(self, k):
self.log.debug('Set rot90 called with: '+str(k))
assert self.is_init, 'Camera must be initialised'
k = int(k)
self.log.debug('Using PtGrey camera. Will rotate the received image array ourselves.')
self._rot90 = k
self.log.debug('rot90 set to: '+str(self._rot90))
@property
def plate_scale(self):
"""float: Get or set the plate scale of the Camera in arcsec per pixel.
This will not affect anything in this class but is used elsewhere. Set this to the physical pixel plate scale
*before* any binning. When getting the plate scale it will be scaled by the binning factor.
"""
return self._plate_scale * self.binning
@plate_scale.setter
def plate_scale(self, arcsec):
self.log.debug('Set plate scale called with: '+str(arcsec))
self._plate_scale = float(arcsec)
self.log.debug('Plate scale set to: '+str(self.plate_scale))
@property
def rotation(self):
"""float: Get or set the camera rotation relative to the horizon in degrees.
This does not affect the received images, but is used elsewhere. Use rotate_90 first to keep this rotation
small.
"""
return self._rotation
@rotation.setter
def rotation(self, rot):
self.log.debug('Set rotation called with: '+str(rot))
self._rotation = float(rot)
self.log.debug('Rotation set to: '+str(self.rotation))
@property
def frame_rate_auto(self):
"""bool: Get or set automatic frame rate. If True camera will run as fast as possible."""
self.log.debug('Get frame rate auto called')
val = self.getprops(['AcquisitionFrameRateEnable'])['AcquisitionFrameRateEnable']
return not val
@frame_rate_auto.setter
def frame_rate_auto(self, auto):
self.log.debug('Set frame rate called with: '+str(auto))
auto = bool(auto)
self.setprops({'AcquisitionFrameRateEnable': not auto})
@property
def frame_rate_limit(self):
"""tuple of float: Get the minimum and maximum frame rate in Hz supported."""
self.log.debug('Get frame rate limit called')
mn,mx = list(self.getprops(['FrameRateHz_Min', 'FrameRateHz_Max']).values())
return (mn,mx)
@property
def frame_rate(self):
"""float: Get or set the camera frame rate in Hz. Will set auto frame rate to False."""
self.log.debug('Get frame rate called')
return self.getprops(['AcquisitionFrameRate'])['AcquisitionFrameRate']
@frame_rate.setter
def frame_rate(self, frame_rate_hz):
self.log.debug('Set frame rate called with: '+str(frame_rate_hz))
self.frame_rate_auto = False
self.setprops({'AcquisitionFrameRate':frame_rate_hz})
@property
def gain_auto(self):
"""bool: Get or set automatic gain. If True the gain will be continuously updated."""
self.log.debug('Get gain auto called')
val = self.getprops(['GainAuto'])['GainAuto'].lower()
return True if val == 'continuous' else False
@gain_auto.setter
def gain_auto(self, auto):
self.log.debug('Set gain called with: '+str(auto))
auto = bool(auto)
self.setprops({'GainAuto': 'Continuous' if auto else 'Off'})
@property
def gain_limit(self):
"""tuple of float: Get the minimum and maximum gain in dB supported."""
self.log.debug('Get gain limit called')
mn,mx = list(self.getprops(['GainDB_Min', 'GainDB_Max']).values())
return (mn,mx)
@property
def gain(self):
"""Float: Get or set the camera gain in dB. Will set auto frame rate to False."""
self.log.debug('Get gain called')
return self.getprops(['Gain'])['Gain']
@gain.setter
def gain(self, gain_db):
self.log.debug('Set gain called with: '+str(gain_db))
self.gain_auto = False
self.setprops({'Gain':gain_db})
@property
def exposure_time_auto(self):
"""bool: Get or set automatic exposure time. If True the exposure time will be continuously updated."""
self.log.debug('Get exposure time auto called')
val = self.getprops(['ExposureAuto'])['ExposureAuto'].lower()
return True if val == 'continuous' else False
@exposure_time_auto.setter
def exposure_time_auto(self, auto):
self.log.debug('Set exposure time called with: '+str(auto))
auto = bool(auto)
self.setprops({'ExposureAuto': 'Continuous' if auto else 'Off'})
@property
def exposure_time_limit(self):
"""tuple of float: Get the minimum and maximum expsure time in ms supported."""
self.log.debug('Get gain limit called')
prop_list = list(self.getprops(['ExposureTime_FloatMin', 'ExposureTime_FloatMax']).values())
return (prop_list[0]/1000, prop_list[1]/1000)
@property
def exposure_time(self):
"""float: Get or set the camera expsure time in ms. Will set auto exposure time to False."""
self.log.debug('Get exposure time called')
return self.getprops(['ExposureTime'])['ExposureTime'] / 1000
@exposure_time.setter
def exposure_time(self, exposure_ms):
self.log.debug('Set exposure time called with: '+str(exposure_ms))
assert self.is_init, 'Camera must be initialised'
exposure_ms = float(exposure_ms)*1000
self.exposure_time_auto = False
self.setprops({'ExposureTime':exposure_ms})
@property
def binning(self):
"""int: Number of pixels to bin in each dimension (e.g. 2 gives 2x2 binning). Bins by summing.
Setting will stop and restart camera if running. Will scale size_readout to show the same sensor area.
"""
val_horiz, val_vert = self.getprops(['BinningHorizontal','BinningVertical']).values()
if val_horiz != val_vert:
            self.log.warning('Horizontal and vertical binning are not equal.')
return val_horiz
@binning.setter
def binning(self, binning):
self.log.debug('Set binning called with: '+str(binning))
binning = int(binning)
initial_size = self.size_readout
initial_bin = self.binning
self.log.debug('Initial sensor readout area and binning: '+str(initial_size)+' ,'+str(initial_bin))
self.setprops({'BinningHorizontal':binning, 'BinningVertical':binning})
new_bin = self.binning
bin_scaling = new_bin/initial_bin
new_size = [round(sz/bin_scaling) for sz in initial_size]
self.log.debug('New binning and new size to set: '+str(new_bin)+' ,'+str(new_size))
try:
self.size_readout = new_size
self.log.debug('Set new size to: ' + str(self.size_readout))
except:
self.log.warning('Failed to scale readout after binning change', exc_info=True)
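    # Worked example of the rescaling above (illustrative numbers, not from the
    # original source): with an initial readout of (1280, 960) at binning 1,
    # setting binning = 2 gives bin_scaling = 2, so size_readout is rescaled to
    # (640, 480) and the same sensor area stays in view.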
@property
def size_max(self):
"""tuple of int: Get the maximum allowed readout size (width, height) in pixels."""
val_w, val_h = self.getprops(['WidthMax','HeightMax']).values()
return (val_w, val_h)
@property
def size_readout(self):
"""tuple of int: Get or set the number of pixels read out (width, height). Will automatically center.
This applies after binning, i.e. this is the size the output image will be.
Setting will stop and restart camera if running.
"""
val_w, val_h = self.getprops(['Width','Height']).values()
return (val_w, val_h)
@size_readout.setter
def size_readout(self, size):
assert self.is_init, 'Camera must be initialised'
if isinstance(size, (int, float)): size = (size, size)
size = tuple([int(x) for x in size])
self.log.debug(f'Setting size_readout({size})')
maxWidth, maxHeight = self.size_max
new_offset = (round((maxWidth - size[0]) / 2), round((maxHeight - size[1]) / 2))
        self.log.debug('Necessary offset: ' + str(new_offset))
self.setprops({'OffsetX':new_offset[0], 'OffsetY':new_offset[1], 'Width':size[0], 'Height':size[1]})
def add_event_callback(self, method):
"""Add a method to be called when a new image shows up.
        The method should have the signature (image, frame_id, timestamp, identity, \*args, \*\*kwargs) where:
        - image (numpy.ndarray): The image data as a numpy array (HxWx3 RGB8 for the ptgrey model).
        - frame_id (int): The frame ID reported by the camera for this image.
        - timestamp (datetime.datetime): UTC timestamp when the image event occurred (i.e. when the capture
            finished).
        - identity (str): The identity of the Camera instance that produced the image.
        - \*args, \*\*kwargs should be allowed for forward compatibility.
The callback should *not* be used for computations, make sure the method returns as fast as possible.
Args:
            method: The method to be called, with signature (image, frame_id, timestamp, identity, \*args, \*\*kwargs).
"""
self.log.debug('Adding to callbacks: ' + str(method))
self._call_on_image.add(method)
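    # Example callback (illustrative, not from the original source). The event
    # handler above calls back with (image, frame_id, timestamp, identity):
    #
    #     def on_new_image(image, frame_id, timestamp, identity, *args, **kwargs):
    #         print(identity, 'frame', frame_id, 'at', timestamp, 'shape', image.shape)
    #
    #     cam.add_event_callback(on_new_image)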
def remove_event_callback(self, method):
"""Remove method from event callbacks."""
self.log.debug('Removing callbacks: ' + str(method))
try:
self._call_on_image.remove(method)
except:
self.log.warning('Could not remove callback', exc_info=True)
@property
def is_running(self):
"""bool: True if device is currently acquiring data."""
# self.log.debug('Checking if running')
if not self.is_init: return False
if self.model.lower() == 'ptgrey':
return self._ptgrey_camera is not None and self._ptgrey_camera.IsStreaming()
else:
self.log.warning('Forbidden model string defined.')
raise RuntimeError('An unknown (forbidden) model is defined: '+str(self.model))
def start(self):
""" Start the acquisition. Device must be initialised."""
assert self.is_init, 'Must initialise first'
if self.is_running:
self.log.info('Camera already running, name: '+self.name)
return
self.log.debug('Got start command')
self._imgs_since_start = 0
try:
self._ptgrey_camera.BeginAcquisition()
except PySpin.SpinnakerException as e:
self.log.debug('Could not start:', exc_info=True)
if 'already streaming' in e.message:
self.log.warning('The camera was already streaming...')
else:
raise RuntimeError('Failed to start camera acquisition') from e
self.log.info('Acquisition started, name: '+self.name)
def stop(self):
"""Stop the acquisition."""
if not self.is_running:
self.log.info('Camera was not running, name: '+self.name)
return
self.log.debug('Got stop command')
if self.model.lower() == 'ptgrey':
self.log.debug('Using PtGrey')
try:
self._ptgrey_camera.EndAcquisition()
except:
self.log.debug('Could not stop:', exc_info=True)
raise RuntimeError('Failed to stop camera acquisition')
else:
self.log.warning('Forbidden model string defined.')
raise RuntimeError('An unknown (forbidden) model is defined: '+str(self.model))
self._image_data = None
self._image_timestamp = None
self._got_image_event.clear()
self.log.info('Acquisition stopped, name: '+self.name)
def get_next_image(self, timeout=10):
"""Get the next image to be completed. Camera does not have to be running.
Args:
timeout (float): Maximum time (seconds) to wait for the image before raising TimeoutError.
Returns:
            numpy.ndarray: Image data array (HxWx3 RGB8 for the ptgrey model).
"""
# self.log.debug('Got next image request')
assert self.is_init, 'Camera must be initialised'
if not self.is_running:
self.log.debug('Camera was not running, start and grab the first image')
self._got_image_event.clear()
self.start()
if not self._got_image_event.wait(timeout):
raise TimeoutError('Getting image timed out')
img = self._image_data
self.stop()
else:
# self.log.debug('Camera running, grab the first image to show up')
self._got_image_event.clear()
if not self._got_image_event.wait(timeout):
raise TimeoutError('Getting image timed out')
img = self._image_data
return img
def get_new_image(self, timeout=10):
"""Get an image guaranteed to be started *after* calling this method. Camera does not have to be running.
Args:
timeout (float): Maximum time (seconds) to wait for the image before raising TimeoutError.
Returns:
numpy.ndarray: 2d array with image data.
"""
self.log.debug('Got next image request')
assert self.is_init, 'Camera must be initialised'
if not self.is_running:
self.log.debug('Camera was not running, start and grab the first image')
self._got_image_event.clear()
self.start()
if not self._got_image_event.wait(timeout):
raise TimeoutError('Getting image timed out')
img = self._image_data
self.stop()
else:
self.log.debug('Camera running, grab the second image to show up')
self._got_image_event.clear()
if not self._got_image_event.wait(timeout/2):
raise TimeoutError('Getting image timed out')
self._got_image_event.clear()
if not self._got_image_event.wait(timeout/2):
raise TimeoutError('Getting image timed out')
img = self._image_data
return img
def get_latest_image(self):
"""Get latest image in the cache immediately. Camera must be running.
Returns:
numpy.ndarray: 2d array with image data.
"""
self.log.debug('Got latest image request')
assert self.is_running, 'Camera must be running'
return self._image_data
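# Usage sketch (illustration only, not part of the original module). It shows the
# intended call pattern for the callback / image-grabbing API defined above. The
# class name `Camera` and its constructor arguments are assumptions made for
# illustration; substitute the actual class defined in this module.
#
#     cam = Camera(name='cam0', model='ptgrey')   # hypothetical constructor
#     cam.initialize()                            # hypothetical init call
#
#     def on_image(image, timestamp, *args, **kwargs):
#         # Keep this fast: hand the frame off, never do heavy work here.
#         print('Frame', image.shape, 'captured at', timestamp)
#
#     cam.add_event_callback(on_image)
#     cam.start()
#     frame = cam.get_new_image(timeout=10)  # guaranteed to have started after the call
#     cam.stop()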
|
[
"PySpin.System.GetInstance",
"logging.StreamHandler",
"numpy.flipud",
"logging.Formatter",
"datetime.datetime.utcnow",
"pathlib.Path",
"numpy.fliplr",
"threading.Event",
"numpy.rot90",
"weakref.ref",
"logging.handlers.RotatingFileHandler",
"logging.getLogger"
] |
[((3110, 3138), 'logging.getLogger', 'logging.getLogger', (['f"""{name}"""'], {}), "(f'{name}')\n", (3127, 3138), False, 'import logging\n'), ((5060, 5067), 'threading.Event', 'Event', ([], {}), '()\n', (5065, 5067), False, 'from threading import Thread, Event\n'), ((11271, 11281), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (11275, 11281), False, 'from pathlib import Path\n'), ((3347, 3370), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3368, 3370), False, 'import logging\n'), ((3538, 3637), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (["(self.debug_folder / 'camera.log')"], {'maxBytes': '(1 * 1024 * 1024)', 'backupCount': '(2)'}), "(self.debug_folder / 'camera.log', maxBytes=1 * 1024 * \n 1024, backupCount=2)\n", (3557, 3637), False, 'from logging.handlers import RotatingFileHandler\n'), ((3951, 4068), 'logging.Formatter', 'logging.Formatter', (["('%(asctime)s %(name)s-%(levelname)s-%(threadName)s' +\n '-%(funcName)s-(%(lineno)d) %(message)s')"], {}), "('%(asctime)s %(name)s-%(levelname)s-%(threadName)s' +\n '-%(funcName)s-(%(lineno)d) %(message)s')\n", (3968, 4068), False, 'import logging\n'), ((5779, 5804), 'weakref.ref', 'weakref.ref', (['self.__del__'], {}), '(self.__del__)\n', (5790, 5804), False, 'import atexit, weakref\n'), ((13341, 13368), 'PySpin.System.GetInstance', 'PySpin.System.GetInstance', ([], {}), '()\n', (13366, 13368), False, 'import PySpin\n'), ((15421, 15448), 'PySpin.System.GetInstance', 'PySpin.System.GetInstance', ([], {}), '()\n', (15446, 15448), False, 'import PySpin\n'), ((18122, 18139), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (18137, 18139), False, 'from datetime import datetime\n'), ((2929, 2943), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2933, 2943), False, 'from pathlib import Path\n'), ((3017, 3023), 'pathlib.Path', 'Path', ([], {}), '()\n', (3021, 3023), False, 'from pathlib import Path\n'), ((18566, 18592), 'numpy.fliplr', 'np.fliplr', (['image_converted'], {}), '(image_converted)\n', (18575, 18592), True, 'import numpy as np\n'), ((18666, 18692), 'numpy.flipud', 'np.flipud', (['image_converted'], {}), '(image_converted)\n', (18675, 18692), True, 'import numpy as np\n'), ((18766, 18811), 'numpy.rot90', 'np.rot90', (['image_converted', 'self.parent._rot90'], {}), '(image_converted, self.parent._rot90)\n', (18774, 18811), True, 'import numpy as np\n')]
|
import sys
from pathlib import Path
from argparse import ArgumentParser
import h5py
import pandas as pd
import numpy as np
from tqdm import tqdm
from export import export_read_file
def get_args():
parser = ArgumentParser(description="Parse sequencing_summary.txt files and .paf files to find split reads "
"in an Oxford Nanopore Dataset",
add_help=False)
general = parser.add_argument_group(title='General options')
general.add_argument("-h", "--help",
action="help",
help="Show this help and exit"
)
in_args = parser.add_argument_group(
title='Input sources'
)
in_args.add_argument("-s", "--summary",
required=True,
nargs='+',
help='Sequencing summary file(s) generated by albacore or guppy. Can be compressed '
'using gzip, bzip2, xz, or zip')
in_args.add_argument("--start-events",
help="start_events.csv file generated by event_finder.py",
default="",
required=True,
)
in_args.add_argument("--end-events",
help="end_events.csv file generated by event_finder.py",
default="",
required=True,
)
in_args.add_argument("--targets",
help="A text file of target read ids with one per line.",
default="",
required=True,
)
in_args.add_argument("--bulk-files",
help="ONT bulk FAST5 files.",
nargs='+',
default="",
)
in_args.add_argument("-o", "--output-name",
help="Name of the output folder, this will be generated if it does not exist",
required=True,
default=""
)
in_args.add_argument("--extra-classifications",
help="Any extra MinKNOW classifications to include.",
nargs='*',
default="",
)
return parser.parse_args()
def main():
args = get_args()
# debug(args)
# # sys.exit()
# Make folders
for j in ['starts', 'ends']:
Path('{i}/{j}/{k}'.format(i=args.output_name, j=j, k='fast5')).mkdir(parents=True, exist_ok=True)
# Open files
start_events = pd.read_csv(args.start_events, sep=',')
end_events = pd.read_csv(args.end_events, sep=',')
seq_sum_df = concat_files_to_df(file_list=args.summary, sep='\t')
# Create end_time Series in seq_sum_df
seq_sum_df['end_time'] = seq_sum_df['start_time'] + seq_sum_df['duration']
# Sort and Groupby to segregate runs and channels
seq_sum_df = seq_sum_df.sort_values(by=['run_id', 'channel', 'start_time'], ascending=True)
seq_sum_df_1 = seq_sum_df.copy()
gb = seq_sum_df.groupby(['run_id', 'channel'])
gb1 = seq_sum_df_1.groupby(['run_id', 'channel'])
# Get previous and next start times within groupby
seq_sum_df['next_start'] = gb['start_time'].shift(-1)
seq_sum_df_1['prev_start'] = gb1['start_time'].shift(1)
target_read_ids = []
with open(args.targets, 'r') as file:
for line in file:
target_read_ids.append(line.strip())
classifications = ['pore', 'inrange', 'good_single', 'unblocking']
if args.extra_classifications:
classifications.extend(args.extra_classifications)
# Get end_events for target_read_ids
end_events = end_events[end_events['read_id'].isin(target_read_ids)]
normal_ending_ids = end_events[end_events['time'].ge(0) &
end_events['label'].isin(classifications)]['read_id'].unique()
abnormally_ending_ids = end_events[~end_events['read_id'].isin(normal_ending_ids)]['read_id'].unique()
end_target_ss = seq_sum_df[seq_sum_df['read_id'].isin(abnormally_ending_ids)]
# Get start_events for target_read_ids
start_events = start_events[start_events['read_id'].isin(target_read_ids)]
normal_starting_ids = start_events[start_events['time'].le(0) &
start_events['label'].isin(classifications)]['read_id'].unique()
abnormally_starting_ids = start_events[~start_events['read_id'].isin(normal_starting_ids)]['read_id'].unique()
start_target_ss = seq_sum_df_1[seq_sum_df_1['read_id'].isin(abnormally_starting_ids)]
print('Collecting abnormally ending reads:')
end_read_info = write_files(end_target_ss, args.bulk_files, 'start_time',
'next_start', '{i}/ends/fast5/'.format(i=args.output_name))
end_read_info.to_csv('{}/ends_read_info.txt'.format(args.output_name), sep='\t', index=False, header=True)
end_read_info.to_csv('{}/ends_filenames.txt'.format(args.output_name), sep='\t', index=False, header=False,
columns=['filename'])
print('Collecting abnormally starting reads:')
start_read_info = write_files(start_target_ss, args.bulk_files, 'prev_start',
'end_time', '{i}/starts/fast5/'.format(i=args.output_name))
start_read_info.to_csv('{}/starts_read_info.txt'.format(args.output_name), sep='\t', index=False, header=True)
start_read_info.to_csv('{}/starts_filenames.txt'.format(args.output_name), sep='\t', index=False, header=False,
columns=['filename'])
return
def write_files(target_ss, bulkfiles, read_start_col, read_end_col, export_path, remove_pore=True):
"""Abstraction for export_read_file for collecting read info
Parameters
----------
target_ss : pd.DataFrame
        DataFrame of reads to generate read files for
bulkfiles: list
list of bulk FAST5 files
read_start_col : str
Column in the target_ss that start index is derived from
read_end_col : str
Column in the target_ss that end index is derived from
export_path : str
The folder where read files will be written
remove_pore : bool
Remove pore-like signal from trace (>1500)
Returns
-------
pd.DataFrame
DataFrame of read info about reads that have been written
"""
d = {
'read_id': [],
'channel': [],
'start_index': [],
'end_index': [],
'bv_read_id': [],
'filename': [],
'bv_filename': []
}
files_written = 0
for bf in tqdm(bulkfiles):
f = h5py.File(bf, 'r')
run_id = f['UniqueGlobalKey']["tracking_id"].attrs["run_id"].decode('utf8')
sf = int(f["UniqueGlobalKey"]["context_tags"].attrs["sample_frequency"].decode('utf8'))
t = target_ss[target_ss['run_id'] == run_id]
t = t.dropna()
f.close()
file = h5py.File(bf, 'r')
for idx, row in tqdm(t.iterrows(), total=t.shape[0], desc=run_id):
si = int(np.floor(row[read_start_col] * sf))
ei = int(np.floor(row[read_end_col] * sf))
d['read_id'].append(row['read_id'])
d['channel'].append(row['channel'])
d['start_index'].append(si)
d['end_index'].append(ei)
d['bv_read_id'].append("{ch}-{start}-{end}".format(ch=row['channel'], start=si, end=ei))
d['filename'].append(row['filename'])
d['bv_filename'].append(export_read_file(row['channel'],
si,
ei,
file,
export_path,
remove_pore=remove_pore))
files_written += 1
print('{} reads written'.format(files_written))
return pd.DataFrame(d)
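# Worked example of the index computation inside write_files (illustration only):
# for a bulk file sampled at sf = 3012 Hz (a common MinION setting) and a read
# whose read_start_col value is 12.5 s, the start index is
# int(np.floor(12.5 * 3012)) = 37650, i.e. times from the sequencing summary are
# converted to raw-signal sample indices before slicing the bulk FAST5 trace.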
def concat_files_to_df(file_list, **kwargs):
"""Return a pandas.DataFrame from a list of files
"""
df_list = []
for f in file_list:
try:
df_list.append(pd.read_csv(filepath_or_buffer=f, **kwargs))
except pd.errors.ParserError as e:
print('{}\nThis is usually caused by an input file not being the expected format'.format(repr(e)))
sys.exit(1)
        except Exception as e:
            print('Unexpected error while reading {}: {}'.format(f, repr(e)))
            sys.exit(1)
return pd.concat(df_list, ignore_index=True)
def debug(args):
dirs = dir(args)
for attr in dirs:
if attr[0] != '_':
print('{a:<25} {b}'.format(a=attr, b=getattr(args, attr)))
if __name__ == '__main__':
main()
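# Example invocation (a sketch; the script name and every path below are
# hypothetical placeholders, not files shipped with this code):
#
#   python find_split_reads.py \
#       -s sequencing_summary.txt.gz \
#       --start-events start_events.csv \
#       --end-events end_events.csv \
#       --targets target_read_ids.txt \
#       --bulk-files run1_bulk.fast5 run2_bulk.fast5 \
#       -o split_read_output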
|
[
"pandas.DataFrame",
"export.export_read_file",
"tqdm.tqdm",
"h5py.File",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.floor",
"pandas.concat",
"sys.exit"
] |
[((214, 369), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Parse sequencing_summary.txt files and .paf files to find split reads in an Oxford Nanopore Dataset"""', 'add_help': '(False)'}), "(description=\n 'Parse sequencing_summary.txt files and .paf files to find split reads in an Oxford Nanopore Dataset'\n , add_help=False)\n", (228, 369), False, 'from argparse import ArgumentParser\n'), ((2693, 2732), 'pandas.read_csv', 'pd.read_csv', (['args.start_events'], {'sep': '""","""'}), "(args.start_events, sep=',')\n", (2704, 2732), True, 'import pandas as pd\n'), ((2750, 2787), 'pandas.read_csv', 'pd.read_csv', (['args.end_events'], {'sep': '""","""'}), "(args.end_events, sep=',')\n", (2761, 2787), True, 'import pandas as pd\n'), ((6723, 6738), 'tqdm.tqdm', 'tqdm', (['bulkfiles'], {}), '(bulkfiles)\n', (6727, 6738), False, 'from tqdm import tqdm\n'), ((8072, 8087), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (8084, 8087), True, 'import pandas as pd\n'), ((8567, 8604), 'pandas.concat', 'pd.concat', (['df_list'], {'ignore_index': '(True)'}), '(df_list, ignore_index=True)\n', (8576, 8604), True, 'import pandas as pd\n'), ((6752, 6770), 'h5py.File', 'h5py.File', (['bf', '"""r"""'], {}), "(bf, 'r')\n", (6761, 6770), False, 'import h5py\n'), ((7060, 7078), 'h5py.File', 'h5py.File', (['bf', '"""r"""'], {}), "(bf, 'r')\n", (7069, 7078), False, 'import h5py\n'), ((7175, 7209), 'numpy.floor', 'np.floor', (['(row[read_start_col] * sf)'], {}), '(row[read_start_col] * sf)\n', (7183, 7209), True, 'import numpy as np\n'), ((7232, 7264), 'numpy.floor', 'np.floor', (['(row[read_end_col] * sf)'], {}), '(row[read_end_col] * sf)\n', (7240, 7264), True, 'import numpy as np\n'), ((7627, 7716), 'export.export_read_file', 'export_read_file', (["row['channel']", 'si', 'ei', 'file', 'export_path'], {'remove_pore': 'remove_pore'}), "(row['channel'], si, ei, file, export_path, remove_pore=\n remove_pore)\n", (7643, 7716), False, 'from export import export_read_file\n'), ((8278, 8321), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'f'}), '(filepath_or_buffer=f, **kwargs)\n', (8289, 8321), True, 'import pandas as pd\n'), ((8489, 8500), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8497, 8500), False, 'import sys\n'), ((8544, 8555), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8552, 8555), False, 'import sys\n')]
|
from autumn.projects.covid_19.vaccine_optimisation.vaccine_opti import (
get_decision_vars_names,
initialise_opti_object,
)
import numpy as np
import yaml
COUNTRY = "malaysia" # should use "malaysia" or "philippines"
def run_sample_code():
# Initialisation of the optimisation object. This needs to be run once before optimising.
opti_object = initialise_opti_object(COUNTRY)
# Create decision variables for random allocations and random relaxation
decision_vars = []
for phase_number in range(2):
sample = list(np.random.uniform(low=0.0, high=1.0, size=(8,)))
_sum = sum(sample)
decision_vars += [s / _sum for s in sample]
decision_vars.append(np.random.uniform(low=0.0, high=1.0))
# create_scenario_yml_file(COUNTRY, decision_vars, sc_index=6)
# Evaluate objective function
[total_deaths, max_hospital, relaxation] = opti_object.evaluate_objective(decision_vars)
# Print decision vars and outputs
print(get_decision_vars_names())
print(f"Decision variables: {decision_vars}")
print(f"N deaths: {total_deaths} / Max hospital: {max_hospital} / Relaxation: {relaxation}")
def dump_decision_vars_sample(n_samples):
decision_vars_sample = []
for i in range(n_samples):
decision_vars = []
for phase_number in range(2):
sample = list(np.random.uniform(low=0.0, high=1.0, size=(8,)))
_sum = sum(sample)
decision_vars += [s / _sum for s in sample]
decision_vars.append(float(np.random.uniform(low=0.0, high=1.0)))
decision_vars = [float(v) for v in decision_vars]
decision_vars_sample.append(decision_vars)
file_path = "comparison_test/vars_sample.yml"
with open(file_path, "w") as f:
yaml.dump(decision_vars_sample, f)
def evaluate_sample_decision_vars(user="Guillaume"):
file_path = "comparison_test/vars_sample.yml"
with open(file_path) as file:
vars_samples = yaml.load(file)
opti_object = initialise_opti_object(COUNTRY)
dumped_dict = {"deaths": [], "hosp": []}
for decision_vars in vars_samples:
[total_deaths, max_hospital, _] = opti_object.evaluate_objective(decision_vars)
dumped_dict["deaths"].append(float(total_deaths))
dumped_dict["hosp"].append(float(max_hospital))
file_path = f"comparison_test/obj_values_{user}.yml"
with open(file_path, "w") as f:
yaml.dump(dumped_dict, f)
def compare_outputs():
outputs = {}
for name in ["Romain", "Guillaume"]:
file_path = f"comparison_test/obj_values_{name}.yml"
with open(file_path) as file:
outputs[name] = yaml.load(file)
for obj in ["deaths", "hosp"]:
perc_diff = [
int(
100
* (outputs["Guillaume"][obj][i] - outputs["Romain"][obj][i])
/ outputs["Romain"][obj][i]
)
for i in range(len(outputs["Romain"][obj]))
]
average_perc_diff = sum(perc_diff) / len(perc_diff)
print(f"Comparison for {obj}:")
print("Percentage difference (ref. Romain):")
print(perc_diff)
print(f"Average perc diff: {average_perc_diff}%")
for name in ["Romain", "Guillaume"]:
x = outputs[name][obj]
ordered_output = sorted(x)
ranks = [ordered_output.index(v) for v in x]
print(f"Ranks {name}:")
print(ranks)
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print()
# evaluate_sample_decision_vars("Guillaume")
# compare_outputs()
# This can be run using:
# python -m apps runsamplevaccopti
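# Shape of the decision-variable vector built in the functions above (derived
# from the sampling code, for reference): two phases, each contributing 8
# allocation fractions normalised to sum to 1, followed by one relaxation
# scalar in [0, 1], i.e. 2*8 + 1 = 17 numbers in total. For example, a uniform
# allocation with full relaxation would be [0.125]*8 + [0.125]*8 + [1.0].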
|
[
"numpy.random.uniform",
"yaml.load",
"autumn.projects.covid_19.vaccine_optimisation.vaccine_opti.initialise_opti_object",
"autumn.projects.covid_19.vaccine_optimisation.vaccine_opti.get_decision_vars_names",
"yaml.dump"
] |
[((365, 396), 'autumn.projects.covid_19.vaccine_optimisation.vaccine_opti.initialise_opti_object', 'initialise_opti_object', (['COUNTRY'], {}), '(COUNTRY)\n', (387, 396), False, 'from autumn.projects.covid_19.vaccine_optimisation.vaccine_opti import get_decision_vars_names, initialise_opti_object\n'), ((2009, 2040), 'autumn.projects.covid_19.vaccine_optimisation.vaccine_opti.initialise_opti_object', 'initialise_opti_object', (['COUNTRY'], {}), '(COUNTRY)\n', (2031, 2040), False, 'from autumn.projects.covid_19.vaccine_optimisation.vaccine_opti import get_decision_vars_names, initialise_opti_object\n'), ((707, 743), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)'}), '(low=0.0, high=1.0)\n', (724, 743), True, 'import numpy as np\n'), ((990, 1015), 'autumn.projects.covid_19.vaccine_optimisation.vaccine_opti.get_decision_vars_names', 'get_decision_vars_names', ([], {}), '()\n', (1013, 1015), False, 'from autumn.projects.covid_19.vaccine_optimisation.vaccine_opti import get_decision_vars_names, initialise_opti_object\n'), ((1776, 1810), 'yaml.dump', 'yaml.dump', (['decision_vars_sample', 'f'], {}), '(decision_vars_sample, f)\n', (1785, 1810), False, 'import yaml\n'), ((1974, 1989), 'yaml.load', 'yaml.load', (['file'], {}), '(file)\n', (1983, 1989), False, 'import yaml\n'), ((2429, 2454), 'yaml.dump', 'yaml.dump', (['dumped_dict', 'f'], {}), '(dumped_dict, f)\n', (2438, 2454), False, 'import yaml\n'), ((554, 601), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': '(8,)'}), '(low=0.0, high=1.0, size=(8,))\n', (571, 601), True, 'import numpy as np\n'), ((2666, 2681), 'yaml.load', 'yaml.load', (['file'], {}), '(file)\n', (2675, 2681), False, 'import yaml\n'), ((1360, 1407), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': '(8,)'}), '(low=0.0, high=1.0, size=(8,))\n', (1377, 1407), True, 'import numpy as np\n'), ((1531, 1567), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)'}), '(low=0.0, high=1.0)\n', (1548, 1567), True, 'import numpy as np\n')]
|
import numpy as np
import os
from astropy.time import Time
from pandas import DataFrame
from orbitize.kepler import calc_orbit
from orbitize import read_input, system, sampler
def test_secondary_rv_lnlike_calc():
"""
Generates fake secondary RV data and asserts that
the log(likelihood) of the true parameters is what we expect.
    Also tests that the primary and secondary RV orbits are related by
    the mass ratio -m1/m0
"""
# define an orbit & generate secondary RVs
a = 10
e = 0
i = np.pi / 4
omega = 0
Omega = 0
tau = 0.3
m0 = 1
m1 = 0.1
plx = 10
orbitize_params_list = np.array([a, e, i, omega, Omega, tau, plx, m1, m0])
epochs = Time(np.linspace(2005, 2025, int(1e3)), format='decimalyear').mjd
_, _, rv_p = calc_orbit(epochs, a, e, i, omega, Omega, tau, plx, m0+m1, mass_for_Kamp=m0)
data_file = DataFrame(columns=['epoch', 'object','rv', 'rv_err'])
data_file.epoch = epochs
data_file.object = np.ones(len(epochs), dtype=int)
data_file.rv = rv_p
data_file.rv_err = np.ones(len(epochs)) * 0.01
data_file.to_csv('tmp.csv', index=False)
# set up a fit using the simulated data
data_table = read_input.read_file('tmp.csv')
mySys = system.System(1, data_table, m0, plx, mass_err=0.1, plx_err=0.1, fit_secondary_mass=True)
mySamp = sampler.MCMC(mySys)
computed_lnlike = mySamp._logl(orbitize_params_list)
# residuals should be 0
assert computed_lnlike == np.sum(-np.log(np.sqrt(2 * np.pi * data_file.rv_err.values**2)))
# clean up
os.system('rm tmp.csv')
# assert that the secondary orbit is the primary orbit scaled
_, _, rv = mySys.compute_all_orbits(orbitize_params_list)
rv0 = rv[:,0]
rv1 = rv[:,1]
assert np.all(rv0 == -m1 / m0 * rv1)
if __name__ == '__main__':
test_secondary_rv_lnlike_calc()
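# Why the final assertion in the test holds (a short derivation; it assumes
# rv[:, 0] is the primary's radial velocity and rv[:, 1] the secondary's):
# in the barycentric frame the momenta cancel, m0*v0 + m1*v1 = 0, so
# v0 = -(m1/m0)*v1, which is exactly the scaling checked by
# np.all(rv0 == -m1 / m0 * rv1).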
|
[
"pandas.DataFrame",
"orbitize.kepler.calc_orbit",
"orbitize.read_input.read_file",
"os.system",
"orbitize.system.System",
"numpy.array",
"orbitize.sampler.MCMC",
"numpy.all",
"numpy.sqrt"
] |
[((628, 679), 'numpy.array', 'np.array', (['[a, e, i, omega, Omega, tau, plx, m1, m0]'], {}), '([a, e, i, omega, Omega, tau, plx, m1, m0])\n', (636, 679), True, 'import numpy as np\n'), ((778, 856), 'orbitize.kepler.calc_orbit', 'calc_orbit', (['epochs', 'a', 'e', 'i', 'omega', 'Omega', 'tau', 'plx', '(m0 + m1)'], {'mass_for_Kamp': 'm0'}), '(epochs, a, e, i, omega, Omega, tau, plx, m0 + m1, mass_for_Kamp=m0)\n', (788, 856), False, 'from orbitize.kepler import calc_orbit\n'), ((872, 926), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "['epoch', 'object', 'rv', 'rv_err']"}), "(columns=['epoch', 'object', 'rv', 'rv_err'])\n", (881, 926), False, 'from pandas import DataFrame\n'), ((1193, 1224), 'orbitize.read_input.read_file', 'read_input.read_file', (['"""tmp.csv"""'], {}), "('tmp.csv')\n", (1213, 1224), False, 'from orbitize import read_input, system, sampler\n'), ((1237, 1330), 'orbitize.system.System', 'system.System', (['(1)', 'data_table', 'm0', 'plx'], {'mass_err': '(0.1)', 'plx_err': '(0.1)', 'fit_secondary_mass': '(True)'}), '(1, data_table, m0, plx, mass_err=0.1, plx_err=0.1,\n fit_secondary_mass=True)\n', (1250, 1330), False, 'from orbitize import read_input, system, sampler\n'), ((1340, 1359), 'orbitize.sampler.MCMC', 'sampler.MCMC', (['mySys'], {}), '(mySys)\n', (1352, 1359), False, 'from orbitize import read_input, system, sampler\n'), ((1561, 1584), 'os.system', 'os.system', (['"""rm tmp.csv"""'], {}), "('rm tmp.csv')\n", (1570, 1584), False, 'import os\n'), ((1762, 1791), 'numpy.all', 'np.all', (['(rv0 == -m1 / m0 * rv1)'], {}), '(rv0 == -m1 / m0 * rv1)\n', (1768, 1791), True, 'import numpy as np\n'), ((1491, 1540), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * data_file.rv_err.values ** 2)'], {}), '(2 * np.pi * data_file.rv_err.values ** 2)\n', (1498, 1540), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
def get_infos2Laplace_1D(input_dim=1, out_dim=1, intervalL=0.0, intervalR=1.0, equa_name=None):
# -uxx = f
if equa_name == 'PDE1':
# u=sin(pi*x), f=-pi*pi*sin(pi*x)
fside = lambda x: -(np.pi)*(np.pi)*tf.sin(np.pi*x)
utrue = lambda x: tf.sin(np.pi*x)
uleft = lambda x: tf.sin(np.pi*intervalL)
uright = lambda x: tf.sin(np.pi*intervalR)
return fside, utrue, uleft, uright
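# Worked check for the 1D 'PDE1' pair above (an illustration, not part of the
# original module): with u(x) = sin(pi*x), u''(x) = -pi**2*sin(pi*x), so
# -u''(x) = +pi**2*sin(pi*x). The fside lambda returns -pi**2*sin(pi*x), i.e.
# u'' itself; whether that matches the "-uxx = f" comment depends on the sign
# convention expected by the solver that consumes these callables.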
# PDE information: boundary conditions, initial conditions, true solution, right-hand-side function
def get_infos2Laplace_2D(input_dim=1, out_dim=1, left_bottom=0.0, right_top=1.0, equa_name=None):
if equa_name == 'PDE1':
# u=exp(-x)(x_y^3), f = -exp(-x)(x-2+y^3+6y)
f_side = lambda x, y: -(tf.exp(-1.0*x)) * (x - 2 + tf.pow(y, 3) + 6 * y)
u_true = lambda x, y: (tf.exp(-1.0*x))*(x + tf.pow(y, 3))
ux_left = lambda x, y: tf.exp(-left_bottom) * (tf.pow(y, 3) + 1.0 * left_bottom)
ux_right = lambda x, y: tf.exp(-right_top) * (tf.pow(y, 3) + 1.0 * right_top)
uy_bottom = lambda x, y: tf.exp(-x) * (tf.pow(left_bottom, 3) + x)
uy_top = lambda x, y: tf.exp(-x) * (tf.pow(right_top, 3) + x)
return f_side, u_true, ux_left, ux_right, uy_bottom, uy_top
elif equa_name == 'PDE2':
f_side = lambda x, y: (-1.0)*tf.sin(np.pi*x) * (2 - np.square(np.pi)*tf.square(y))
u_true = lambda x, y: tf.square(y)*tf.sin(np.pi*x)
ux_left = lambda x, y: tf.square(y) * tf.sin(np.pi * left_bottom)
ux_right = lambda x, y: tf.square(y) * tf.sin(np.pi * right_top)
uy_bottom = lambda x, y: tf.square(left_bottom) * tf.sin(np.pi * x)
uy_top = lambda x, y: tf.square(right_top) * tf.sin(np.pi * x)
return f_side, u_true, ux_left, ux_right, uy_bottom, uy_top
elif equa_name == 'PDE3':
# u=exp(x+y), f = -2*exp(x+y)
f_side = lambda x, y: -2.0*(tf.exp(x)*tf.exp(y))
u_true = lambda x, y: tf.exp(x)*tf.exp(y)
ux_left = lambda x, y: tf.multiply(tf.exp(y), tf.exp(left_bottom))
ux_right = lambda x, y: tf.multiply(tf.exp(y), tf.exp(right_top))
uy_bottom = lambda x, y: tf.multiply(tf.exp(x), tf.exp(left_bottom))
uy_top = lambda x, y: tf.multiply(tf.exp(x), tf.exp(right_top))
return f_side, u_true, ux_left, ux_right, uy_bottom, uy_top
elif equa_name == 'PDE4':
# u=(1/4)*(x^2+y^2), f = -1
f_side = lambda x, y: -1.0*tf.ones_like(x)
u_true = lambda x, y: 0.25*(tf.pow(x, 2)+tf.pow(y, 2))
ux_left = lambda x, y: 0.25 * tf.pow(y, 2) + 0.25 * tf.pow(left_bottom, 2)
ux_right = lambda x, y: 0.25 * tf.pow(y, 2) + 0.25 * tf.pow(right_top, 2)
uy_bottom = lambda x, y: 0.25 * tf.pow(x, 2) + 0.25 * tf.pow(left_bottom, 2)
uy_top = lambda x, y: 0.25 * tf.pow(x, 2) + 0.25 * tf.pow(right_top, 2)
return f_side, u_true, ux_left, ux_right, uy_bottom, uy_top
elif equa_name == 'PDE5':
# u=(1/4)*(x^2+y^2)+x+y, f = -1
f_side = lambda x, y: -1.0*tf.ones_like(x)
u_true = lambda x, y: 0.25*(tf.pow(x, 2)+tf.pow(y, 2)) + x + y
ux_left = lambda x, y: 0.25 * tf.pow(y, 2) + 0.25 * tf.pow(left_bottom, 2) + left_bottom + y
ux_right = lambda x, y: 0.25 * tf.pow(y, 2) + 0.25 * tf.pow(right_top, 2) + right_top + y
uy_bottom = lambda x, y: 0.25 * tf.pow(x, 2) + tf.pow(left_bottom, 2) + left_bottom + x
uy_top = lambda x, y: 0.25 * tf.pow(x, 2) + 0.25 * tf.pow(right_top, 2) + right_top + x
return f_side, u_true, ux_left, ux_right, uy_bottom, uy_top
elif equa_name == 'PDE6':
# u=(1/2)*(x^2)*(y^2), f = -(x^2+y^2)
f_side = lambda x, y: -1.0*(tf.pow(x, 2)+tf.pow(y, 2))
u_true = lambda x, y: 0.5 * (tf.pow(x, 2) * tf.pow(y, 2))
ux_left = lambda x, y: 0.5 * (tf.pow(left_bottom, 2) * tf.pow(y, 2))
ux_right = lambda x, y: 0.5 * (tf.pow(right_top, 2) * tf.pow(y, 2))
uy_bottom = lambda x, y: 0.5 * (tf.pow(x, 2) * tf.pow(left_bottom, 2))
uy_top = lambda x, y: 0.5 * (tf.pow(x, 2) * tf.pow(right_top, 2))
return f_side, u_true, ux_left, ux_right, uy_bottom, uy_top
elif equa_name == 'PDE7':
# u=(1/2)*(x^2)*(y^2)+x+y, f = -(x^2+y^2)
f_side = lambda x, y: -1.0*(tf.pow(x, 2)+tf.pow(y, 2))
u_true = lambda x, y: 0.5*(tf.pow(x, 2)*tf.pow(y, 2)) + x*tf.ones_like(x) + y*tf.ones_like(y)
ux_left = lambda x, y: 0.5 * tf.multiply(tf.pow(left_bottom, 2), tf.pow(y, 2)) + left_bottom + y
ux_right = lambda x, y: 0.5 * tf.multiply(tf.pow(right_top, 2), tf.pow(y, 2)) + right_top + y
uy_bottom = lambda x, y: 0.5 * tf.multiply(tf.pow(x, 2), tf.pow(left_bottom, 2)) + x + left_bottom
uy_top = lambda x, y: 0.5 * tf.multiply(tf.pow(x, 2), tf.pow(right_top, 2)) + x + right_top
return f_side, u_true, ux_left, ux_right, uy_bottom, uy_top
# PDE information: boundary conditions, initial conditions, true solution, right-hand-side function
def get_infos2Laplace_3D(input_dim=1, out_dim=1, intervalL=0.0, intervalR=1.0, equa_name=None):
if equa_name == 'PDE1':
# -Laplace U = f
# u=sin(pi*x)*sin(pi*y)*sin(pi*z), f=-pi*pi*sin(pi*x)*sin(pi*y)*sin(pi*z)
fside = lambda x, y, z: -(np.pi)*(np.pi)*tf.sin(np.pi*x)
utrue = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)
u_00 = lambda x, y, z: tf.sin(np.pi*intervalL)*tf.sin(np.pi*y)*tf.sin(np.pi*z)
u_01 = lambda x, y, z: tf.sin(np.pi*intervalR)*tf.sin(np.pi*y)*tf.sin(np.pi*z)
u_10 = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*intervalL)*tf.sin(np.pi*z)
u_11 = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*intervalR)*tf.sin(np.pi*z)
u_20 = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalL)
u_21 = lambda x, y, z: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*intervalR)
return fside, utrue, u_00, u_01, u_10, u_11, u_20, u_21
# PDE information: boundary conditions, initial conditions, true solution, right-hand-side function
def get_infos2Laplace_5D(input_dim=1, out_dim=1, intervalL=0.0, intervalR=1.0, equa_name=None):
if equa_name == 'PDE1':
# u=sin(pi*x), f=-pi*pi*sin(pi*x)
fside = lambda x, y, z, s, t: -(np.pi)*(np.pi)*tf.sin(np.pi*x)
utrue = lambda x, y, z, s, t: tf.sin(np.pi*x)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)*tf.sin(np.pi*t)
u_00 = lambda x, y, z, s, t: tf.sin(np.pi*intervalL)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)*tf.sin(np.pi*t)
u_01 = lambda x, y, z, s, t: tf.sin(np.pi*intervalR)*tf.sin(np.pi*y)*tf.sin(np.pi*z)*tf.sin(np.pi*s)*tf.sin(np.pi*t)
u_10 = lambda x, y, z, s, t: tf.sin(np.pi * x) * tf.sin(np.pi * intervalL) * tf.sin(np.pi * z) * tf.sin(np.pi * s) * tf.sin(np.pi * t)
u_11 = lambda x, y, z, s, t: tf.sin(np.pi * x) * tf.sin(np.pi * intervalR) * tf.sin(np.pi * z) * tf.sin(np.pi * s) * tf.sin(np.pi * t)
u_20 = lambda x, y, z, s, t: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalL) * tf.sin(np.pi * s) * tf.sin(np.pi * t)
u_21 = lambda x, y, z, s, t: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * intervalR) * tf.sin(np.pi * s) * tf.sin(np.pi * t)
u_30 = lambda x, y, z, s, t: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) * tf.sin(np.pi * intervalL) * tf.sin(np.pi * t)
u_31 = lambda x, y, z, s, t: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) * tf.sin(np.pi * intervalR) * tf.sin(np.pi * t)
u_40 = lambda x, y, z, s, t: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) * tf.sin(np.pi * s) * tf.sin(np.pi * intervalL)
u_41 = lambda x, y, z, s, t: tf.sin(np.pi * x) * tf.sin(np.pi * y) * tf.sin(np.pi * z) * tf.sin(np.pi * s) * tf.sin(np.pi * intervalR)
return fside, utrue, u_00, u_01, u_10, u_11, u_20, u_21, u_30, u_31, u_40, u_41
|
[
"tensorflow.sin",
"numpy.square",
"tensorflow.pow",
"tensorflow.ones_like",
"tensorflow.exp",
"tensorflow.square"
] |
[((320, 337), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (326, 337), True, 'import tensorflow as tf\n'), ((363, 388), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (369, 388), True, 'import tensorflow as tf\n'), ((415, 440), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (421, 440), True, 'import tensorflow as tf\n'), ((277, 294), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (283, 294), True, 'import tensorflow as tf\n'), ((814, 830), 'tensorflow.exp', 'tf.exp', (['(-1.0 * x)'], {}), '(-1.0 * x)\n', (820, 830), True, 'import tensorflow as tf\n'), ((883, 903), 'tensorflow.exp', 'tf.exp', (['(-left_bottom)'], {}), '(-left_bottom)\n', (889, 903), True, 'import tensorflow as tf\n'), ((974, 992), 'tensorflow.exp', 'tf.exp', (['(-right_top)'], {}), '(-right_top)\n', (980, 992), True, 'import tensorflow as tf\n'), ((1062, 1072), 'tensorflow.exp', 'tf.exp', (['(-x)'], {}), '(-x)\n', (1068, 1072), True, 'import tensorflow as tf\n'), ((1135, 1145), 'tensorflow.exp', 'tf.exp', (['(-x)'], {}), '(-x)\n', (1141, 1145), True, 'import tensorflow as tf\n'), ((5253, 5270), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (5259, 5270), True, 'import tensorflow as tf\n'), ((5334, 5351), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (5340, 5351), True, 'import tensorflow as tf\n'), ((5422, 5439), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (5428, 5439), True, 'import tensorflow as tf\n'), ((5510, 5527), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (5516, 5527), True, 'import tensorflow as tf\n'), ((5598, 5615), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (5604, 5615), True, 'import tensorflow as tf\n'), ((5686, 5703), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (5692, 5703), True, 'import tensorflow as tf\n'), ((5766, 5791), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (5772, 5791), True, 'import tensorflow as tf\n'), ((5854, 5879), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (5860, 5879), True, 'import tensorflow as tf\n'), ((6201, 6218), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (6207, 6218), True, 'import tensorflow as tf\n'), ((6320, 6337), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (6326, 6337), True, 'import tensorflow as tf\n'), ((6446, 6463), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (6452, 6463), True, 'import tensorflow as tf\n'), ((6572, 6589), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (6578, 6589), True, 'import tensorflow as tf\n'), ((6714, 6731), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (6720, 6731), True, 'import tensorflow as tf\n'), ((6858, 6875), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (6864, 6875), True, 'import tensorflow as tf\n'), ((7002, 7019), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (7008, 7019), True, 'import tensorflow as tf\n'), ((7146, 7163), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (7152, 7163), True, 'import tensorflow as tf\n'), ((7290, 7307), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * t)\n', (7296, 7307), True, 'import tensorflow as tf\n'), ((7434, 7451), 'tensorflow.sin', 'tf.sin', (['(np.pi * t)'], {}), '(np.pi * 
t)\n', (7440, 7451), True, 'import tensorflow as tf\n'), ((7570, 7595), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (7576, 7595), True, 'import tensorflow as tf\n'), ((7714, 7739), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (7720, 7739), True, 'import tensorflow as tf\n'), ((731, 747), 'tensorflow.exp', 'tf.exp', (['(-1.0 * x)'], {}), '(-1.0 * x)\n', (737, 747), True, 'import tensorflow as tf\n'), ((835, 847), 'tensorflow.pow', 'tf.pow', (['y', '(3)'], {}), '(y, 3)\n', (841, 847), True, 'import tensorflow as tf\n'), ((907, 919), 'tensorflow.pow', 'tf.pow', (['y', '(3)'], {}), '(y, 3)\n', (913, 919), True, 'import tensorflow as tf\n'), ((996, 1008), 'tensorflow.pow', 'tf.pow', (['y', '(3)'], {}), '(y, 3)\n', (1002, 1008), True, 'import tensorflow as tf\n'), ((1076, 1098), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(3)'], {}), '(left_bottom, 3)\n', (1082, 1098), True, 'import tensorflow as tf\n'), ((1149, 1169), 'tensorflow.pow', 'tf.pow', (['right_top', '(3)'], {}), '(right_top, 3)\n', (1155, 1169), True, 'import tensorflow as tf\n'), ((1400, 1412), 'tensorflow.square', 'tf.square', (['y'], {}), '(y)\n', (1409, 1412), True, 'import tensorflow as tf\n'), ((1413, 1430), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1419, 1430), True, 'import tensorflow as tf\n'), ((1463, 1475), 'tensorflow.square', 'tf.square', (['y'], {}), '(y)\n', (1472, 1475), True, 'import tensorflow as tf\n'), ((1478, 1505), 'tensorflow.sin', 'tf.sin', (['(np.pi * left_bottom)'], {}), '(np.pi * left_bottom)\n', (1484, 1505), True, 'import tensorflow as tf\n'), ((1539, 1551), 'tensorflow.square', 'tf.square', (['y'], {}), '(y)\n', (1548, 1551), True, 'import tensorflow as tf\n'), ((1554, 1579), 'tensorflow.sin', 'tf.sin', (['(np.pi * right_top)'], {}), '(np.pi * right_top)\n', (1560, 1579), True, 'import tensorflow as tf\n'), ((1614, 1636), 'tensorflow.square', 'tf.square', (['left_bottom'], {}), '(left_bottom)\n', (1623, 1636), True, 'import tensorflow as tf\n'), ((1639, 1656), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1645, 1656), True, 'import tensorflow as tf\n'), ((1688, 1708), 'tensorflow.square', 'tf.square', (['right_top'], {}), '(right_top)\n', (1697, 1708), True, 'import tensorflow as tf\n'), ((1711, 1728), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1717, 1728), True, 'import tensorflow as tf\n'), ((5302, 5319), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (5308, 5319), True, 'import tensorflow as tf\n'), ((5318, 5335), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (5324, 5335), True, 'import tensorflow as tf\n'), ((5382, 5407), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (5388, 5407), True, 'import tensorflow as tf\n'), ((5406, 5423), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (5412, 5423), True, 'import tensorflow as tf\n'), ((5470, 5495), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (5476, 5495), True, 'import tensorflow as tf\n'), ((5494, 5511), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (5500, 5511), True, 'import tensorflow as tf\n'), ((5558, 5575), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (5564, 5575), True, 'import tensorflow as tf\n'), ((5574, 5599), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (5580, 5599), 
True, 'import tensorflow as tf\n'), ((5646, 5663), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (5652, 5663), True, 'import tensorflow as tf\n'), ((5662, 5687), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (5668, 5687), True, 'import tensorflow as tf\n'), ((5734, 5751), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (5740, 5751), True, 'import tensorflow as tf\n'), ((5750, 5767), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (5756, 5767), True, 'import tensorflow as tf\n'), ((5822, 5839), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (5828, 5839), True, 'import tensorflow as tf\n'), ((5838, 5855), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (5844, 5855), True, 'import tensorflow as tf\n'), ((6304, 6321), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (6310, 6321), True, 'import tensorflow as tf\n'), ((6430, 6447), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (6436, 6447), True, 'import tensorflow as tf\n'), ((6556, 6573), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (6562, 6573), True, 'import tensorflow as tf\n'), ((6694, 6711), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (6700, 6711), True, 'import tensorflow as tf\n'), ((6838, 6855), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (6844, 6855), True, 'import tensorflow as tf\n'), ((6982, 6999), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (6988, 6999), True, 'import tensorflow as tf\n'), ((7126, 7143), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (7132, 7143), True, 'import tensorflow as tf\n'), ((7262, 7287), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (7268, 7287), True, 'import tensorflow as tf\n'), ((7406, 7431), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (7412, 7431), True, 'import tensorflow as tf\n'), ((7550, 7567), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (7556, 7567), True, 'import tensorflow as tf\n'), ((7694, 7711), 'tensorflow.sin', 'tf.sin', (['(np.pi * s)'], {}), '(np.pi * s)\n', (7700, 7711), True, 'import tensorflow as tf\n'), ((758, 770), 'tensorflow.pow', 'tf.pow', (['y', '(3)'], {}), '(y, 3)\n', (764, 770), True, 'import tensorflow as tf\n'), ((1313, 1330), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1319, 1330), True, 'import tensorflow as tf\n'), ((1957, 1966), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (1963, 1966), True, 'import tensorflow as tf\n'), ((1967, 1976), 'tensorflow.exp', 'tf.exp', (['y'], {}), '(y)\n', (1973, 1976), True, 'import tensorflow as tf\n'), ((2021, 2030), 'tensorflow.exp', 'tf.exp', (['y'], {}), '(y)\n', (2027, 2030), True, 'import tensorflow as tf\n'), ((2032, 2051), 'tensorflow.exp', 'tf.exp', (['left_bottom'], {}), '(left_bottom)\n', (2038, 2051), True, 'import tensorflow as tf\n'), ((2098, 2107), 'tensorflow.exp', 'tf.exp', (['y'], {}), '(y)\n', (2104, 2107), True, 'import tensorflow as tf\n'), ((2109, 2126), 'tensorflow.exp', 'tf.exp', (['right_top'], {}), '(right_top)\n', (2115, 2126), True, 'import tensorflow as tf\n'), ((2174, 2183), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (2180, 2183), True, 'import tensorflow as tf\n'), ((2185, 2204), 'tensorflow.exp', 'tf.exp', (['left_bottom'], {}), '(left_bottom)\n', (2191, 2204), True, 'import 
tensorflow as tf\n'), ((2249, 2258), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (2255, 2258), True, 'import tensorflow as tf\n'), ((2260, 2277), 'tensorflow.exp', 'tf.exp', (['right_top'], {}), '(right_top)\n', (2266, 2277), True, 'import tensorflow as tf\n'), ((6288, 6305), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (6294, 6305), True, 'import tensorflow as tf\n'), ((6414, 6431), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (6420, 6431), True, 'import tensorflow as tf\n'), ((6540, 6557), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (6546, 6557), True, 'import tensorflow as tf\n'), ((6674, 6691), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (6680, 6691), True, 'import tensorflow as tf\n'), ((6818, 6835), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (6824, 6835), True, 'import tensorflow as tf\n'), ((6954, 6979), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (6960, 6979), True, 'import tensorflow as tf\n'), ((7098, 7123), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (7104, 7123), True, 'import tensorflow as tf\n'), ((7242, 7259), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (7248, 7259), True, 'import tensorflow as tf\n'), ((7386, 7403), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (7392, 7403), True, 'import tensorflow as tf\n'), ((7530, 7547), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (7536, 7547), True, 'import tensorflow as tf\n'), ((7674, 7691), 'tensorflow.sin', 'tf.sin', (['(np.pi * z)'], {}), '(np.pi * z)\n', (7680, 7691), True, 'import tensorflow as tf\n'), ((1336, 1352), 'numpy.square', 'np.square', (['np.pi'], {}), '(np.pi)\n', (1345, 1352), True, 'import numpy as np\n'), ((1353, 1365), 'tensorflow.square', 'tf.square', (['y'], {}), '(y)\n', (1362, 1365), True, 'import tensorflow as tf\n'), ((1905, 1914), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (1911, 1914), True, 'import tensorflow as tf\n'), ((1915, 1924), 'tensorflow.exp', 'tf.exp', (['y'], {}), '(y)\n', (1921, 1924), True, 'import tensorflow as tf\n'), ((2452, 2467), 'tensorflow.ones_like', 'tf.ones_like', (['x'], {}), '(x)\n', (2464, 2467), True, 'import tensorflow as tf\n'), ((6256, 6273), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (6262, 6273), True, 'import tensorflow as tf\n'), ((6272, 6289), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (6278, 6289), True, 'import tensorflow as tf\n'), ((6374, 6399), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (6380, 6399), True, 'import tensorflow as tf\n'), ((6398, 6415), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (6404, 6415), True, 'import tensorflow as tf\n'), ((6500, 6525), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (6506, 6525), True, 'import tensorflow as tf\n'), ((6524, 6541), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (6530, 6541), True, 'import tensorflow as tf\n'), ((6626, 6643), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (6632, 6643), True, 'import tensorflow as tf\n'), ((6646, 6671), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalL)'], {}), '(np.pi * intervalL)\n', (6652, 6671), True, 'import tensorflow as tf\n'), ((6770, 6787), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', 
(6776, 6787), True, 'import tensorflow as tf\n'), ((6790, 6815), 'tensorflow.sin', 'tf.sin', (['(np.pi * intervalR)'], {}), '(np.pi * intervalR)\n', (6796, 6815), True, 'import tensorflow as tf\n'), ((6914, 6931), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (6920, 6931), True, 'import tensorflow as tf\n'), ((6934, 6951), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (6940, 6951), True, 'import tensorflow as tf\n'), ((7058, 7075), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (7064, 7075), True, 'import tensorflow as tf\n'), ((7078, 7095), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (7084, 7095), True, 'import tensorflow as tf\n'), ((7202, 7219), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (7208, 7219), True, 'import tensorflow as tf\n'), ((7222, 7239), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (7228, 7239), True, 'import tensorflow as tf\n'), ((7346, 7363), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (7352, 7363), True, 'import tensorflow as tf\n'), ((7366, 7383), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (7372, 7383), True, 'import tensorflow as tf\n'), ((7490, 7507), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (7496, 7507), True, 'import tensorflow as tf\n'), ((7510, 7527), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (7516, 7527), True, 'import tensorflow as tf\n'), ((7634, 7651), 'tensorflow.sin', 'tf.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (7640, 7651), True, 'import tensorflow as tf\n'), ((7654, 7671), 'tensorflow.sin', 'tf.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (7660, 7671), True, 'import tensorflow as tf\n'), ((2505, 2517), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (2511, 2517), True, 'import tensorflow as tf\n'), ((2518, 2530), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (2524, 2530), True, 'import tensorflow as tf\n'), ((2571, 2583), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (2577, 2583), True, 'import tensorflow as tf\n'), ((2593, 2615), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(2)'], {}), '(left_bottom, 2)\n', (2599, 2615), True, 'import tensorflow as tf\n'), ((2656, 2668), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (2662, 2668), True, 'import tensorflow as tf\n'), ((2678, 2698), 'tensorflow.pow', 'tf.pow', (['right_top', '(2)'], {}), '(right_top, 2)\n', (2684, 2698), True, 'import tensorflow as tf\n'), ((2740, 2752), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (2746, 2752), True, 'import tensorflow as tf\n'), ((2762, 2784), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(2)'], {}), '(left_bottom, 2)\n', (2768, 2784), True, 'import tensorflow as tf\n'), ((2823, 2835), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (2829, 2835), True, 'import tensorflow as tf\n'), ((2845, 2865), 'tensorflow.pow', 'tf.pow', (['right_top', '(2)'], {}), '(right_top, 2)\n', (2851, 2865), True, 'import tensorflow as tf\n'), ((3043, 3058), 'tensorflow.ones_like', 'tf.ones_like', (['x'], {}), '(x)\n', (3055, 3058), True, 'import tensorflow as tf\n'), ((3392, 3414), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(2)'], {}), '(left_bottom, 2)\n', (3398, 3414), True, 'import tensorflow as tf\n'), ((3714, 3726), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (3720, 3726), True, 'import tensorflow as tf\n'), ((3727, 3739), 'tensorflow.pow', 
'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (3733, 3739), True, 'import tensorflow as tf\n'), ((3781, 3793), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (3787, 3793), True, 'import tensorflow as tf\n'), ((3796, 3808), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (3802, 3808), True, 'import tensorflow as tf\n'), ((3851, 3873), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(2)'], {}), '(left_bottom, 2)\n', (3857, 3873), True, 'import tensorflow as tf\n'), ((3876, 3888), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (3882, 3888), True, 'import tensorflow as tf\n'), ((3930, 3950), 'tensorflow.pow', 'tf.pow', (['right_top', '(2)'], {}), '(right_top, 2)\n', (3936, 3950), True, 'import tensorflow as tf\n'), ((3953, 3965), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (3959, 3965), True, 'import tensorflow as tf\n'), ((4008, 4020), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (4014, 4020), True, 'import tensorflow as tf\n'), ((4023, 4045), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(2)'], {}), '(left_bottom, 2)\n', (4029, 4045), True, 'import tensorflow as tf\n'), ((4085, 4097), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (4091, 4097), True, 'import tensorflow as tf\n'), ((4100, 4120), 'tensorflow.pow', 'tf.pow', (['right_top', '(2)'], {}), '(right_top, 2)\n', (4106, 4120), True, 'import tensorflow as tf\n'), ((3098, 3110), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (3104, 3110), True, 'import tensorflow as tf\n'), ((3111, 3123), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (3117, 3123), True, 'import tensorflow as tf\n'), ((3174, 3186), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (3180, 3186), True, 'import tensorflow as tf\n'), ((3196, 3218), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(2)'], {}), '(left_bottom, 2)\n', (3202, 3218), True, 'import tensorflow as tf\n'), ((3277, 3289), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (3283, 3289), True, 'import tensorflow as tf\n'), ((3299, 3319), 'tensorflow.pow', 'tf.pow', (['right_top', '(2)'], {}), '(right_top, 2)\n', (3305, 3319), True, 'import tensorflow as tf\n'), ((3377, 3389), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (3383, 3389), True, 'import tensorflow as tf\n'), ((3471, 3483), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (3477, 3483), True, 'import tensorflow as tf\n'), ((3493, 3513), 'tensorflow.pow', 'tf.pow', (['right_top', '(2)'], {}), '(right_top, 2)\n', (3499, 3513), True, 'import tensorflow as tf\n'), ((4310, 4322), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (4316, 4322), True, 'import tensorflow as tf\n'), ((4323, 4335), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (4329, 4335), True, 'import tensorflow as tf\n'), ((4426, 4441), 'tensorflow.ones_like', 'tf.ones_like', (['y'], {}), '(y)\n', (4438, 4441), True, 'import tensorflow as tf\n'), ((4406, 4421), 'tensorflow.ones_like', 'tf.ones_like', (['x'], {}), '(x)\n', (4418, 4421), True, 'import tensorflow as tf\n'), ((4375, 4387), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (4381, 4387), True, 'import tensorflow as tf\n'), ((4388, 4400), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (4394, 4400), True, 'import tensorflow as tf\n'), ((4494, 4516), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(2)'], {}), '(left_bottom, 2)\n', (4500, 4516), True, 'import tensorflow as tf\n'), ((4518, 4530), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), 
'(y, 2)\n', (4524, 4530), True, 'import tensorflow as tf\n'), ((4601, 4621), 'tensorflow.pow', 'tf.pow', (['right_top', '(2)'], {}), '(right_top, 2)\n', (4607, 4621), True, 'import tensorflow as tf\n'), ((4623, 4635), 'tensorflow.pow', 'tf.pow', (['y', '(2)'], {}), '(y, 2)\n', (4629, 4635), True, 'import tensorflow as tf\n'), ((4705, 4717), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (4711, 4717), True, 'import tensorflow as tf\n'), ((4719, 4741), 'tensorflow.pow', 'tf.pow', (['left_bottom', '(2)'], {}), '(left_bottom, 2)\n', (4725, 4741), True, 'import tensorflow as tf\n'), ((4810, 4822), 'tensorflow.pow', 'tf.pow', (['x', '(2)'], {}), '(x, 2)\n', (4816, 4822), True, 'import tensorflow as tf\n'), ((4824, 4844), 'tensorflow.pow', 'tf.pow', (['right_top', '(2)'], {}), '(right_top, 2)\n', (4830, 4844), True, 'import tensorflow as tf\n')]
|
import modelexp
from modelexp.experiments import Generic
from modelexp.models.Generic import Parabola
import numpy as np
import random
app = modelexp.App()
app.setExperiment(Generic)
modelRef = app.setModel(Parabola)
modelRef.defineDomain(np.linspace(-3, 3, 100))
modelRef.setParam('a', 1.3)
modelRef.setParam('x0', 0.3)
modelRef.setParam('c', -0.2)
modelRef.calcModel()
sig_y = 0.05*modelRef.y
randomized_y = []
for i in range(len(modelRef.y)):
randomized_y.append(random.gauss(modelRef.y[i], 0.05*modelRef.y[i]))
randomized_y = np.array(randomized_y)
with open('parabolaData.xye', 'w') as f:
for i in range(len(modelRef.y)):
f.write(f'{modelRef.x[i]}\t{randomized_y[i]}\t{sig_y[i]}\n')
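# The file written above is a plain three-column, tab-separated text file:
# x, the Gaussian-perturbed y (5% relative noise), and the 1-sigma error.
# The parameter names (a, x0, c) suggest a model of the form
# y = a*(x - x0)**2 + c, but the exact parameterisation is defined by
# modelexp's Parabola class, not by this script.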
|
[
"modelexp.App",
"random.gauss",
"numpy.array",
"numpy.linspace"
] |
[((142, 156), 'modelexp.App', 'modelexp.App', ([], {}), '()\n', (154, 156), False, 'import modelexp\n'), ((536, 558), 'numpy.array', 'np.array', (['randomized_y'], {}), '(randomized_y)\n', (544, 558), True, 'import numpy as np\n'), ((242, 265), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (253, 265), True, 'import numpy as np\n'), ((472, 521), 'random.gauss', 'random.gauss', (['modelRef.y[i]', '(0.05 * modelRef.y[i])'], {}), '(modelRef.y[i], 0.05 * modelRef.y[i])\n', (484, 521), False, 'import random\n')]
|
import tensorflow as tf
import numpy as np
import os
SCRIPT_PATH = os.path.abspath(__file__)
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
MODEL_PATH = os.path.join(SCRIPT_DIR, "model/model.h5")
MODEL = None
INPUT_SIZE = 7 * 12
OUTPUT_SIZE = 1
def _load_model():
"""
Load the TensorFlow model if it is not loaded in the current context
Azure functions often preserve their contexts between executions
https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python#global-variables
"""
global MODEL
if MODEL is None:
MODEL = tf.keras.models.load_model(MODEL_PATH)
def normalize(costs):
return np.log(costs + 1)
def denormalize(costs):
return np.exp(costs) - 1
def make_subsequences(data, subsequence_size):
"""
    Create subsequences of length subsequence_size from the array
Example
-------
>>> make_subsequences(np.array([1, 2, 3, 4]), 2)
array([
[1, 2],
[2, 3],
[3, 4],
])
"""
number_of_subsequences = data.shape[0] - subsequence_size + 1
return np.array([data[index:subsequence_size+index] for index in range(number_of_subsequences)])
def predict_costs(actual_costs):
_load_model()
normalized_costs = normalize(np.array(actual_costs))
subsequences = make_subsequences(normalized_costs, INPUT_SIZE)
predictions = MODEL.predict(subsequences, subsequences.shape[0]).flatten()
predictions = denormalize(predictions)
return predictions.tolist()
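# Note on the normalisation pair above: denormalize is the exact inverse of
# normalize, since exp(log(c + 1)) - 1 == c for any c > -1. A quick
# self-contained check (an illustration, independent of the Azure model file):
#
#     costs = np.array([0.0, 1.0, 10.0, 250.0])
#     assert np.allclose(denormalize(normalize(costs)), costs)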
|
[
"os.path.abspath",
"tensorflow.keras.models.load_model",
"numpy.log",
"os.path.dirname",
"numpy.array",
"numpy.exp",
"os.path.join"
] |
[((68, 93), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (83, 93), False, 'import os\n'), ((107, 135), 'os.path.dirname', 'os.path.dirname', (['SCRIPT_PATH'], {}), '(SCRIPT_PATH)\n', (122, 135), False, 'import os\n'), ((149, 191), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', '"""model/model.h5"""'], {}), "(SCRIPT_DIR, 'model/model.h5')\n", (161, 191), False, 'import os\n'), ((654, 671), 'numpy.log', 'np.log', (['(costs + 1)'], {}), '(costs + 1)\n', (660, 671), True, 'import numpy as np\n'), ((580, 618), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (606, 618), True, 'import tensorflow as tf\n'), ((709, 722), 'numpy.exp', 'np.exp', (['costs'], {}), '(costs)\n', (715, 722), True, 'import numpy as np\n'), ((1251, 1273), 'numpy.array', 'np.array', (['actual_costs'], {}), '(actual_costs)\n', (1259, 1273), True, 'import numpy as np\n')]
|
"""
ModelFit.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon May 12 14:01:29 MDT 2014
Description:
"""
import signal
import numpy as np
from ..util.PrintInfo import print_fit
from ..util.Pickling import write_pickle_file
from ..physics.Constants import nu_0_mhz
import gc, os, sys, copy, types, time, re
from .ModelFit import ModelFit, LogLikelihood, FitBase
from ..simulations import Global21cm as simG21
from ..analysis import Global21cm as anlGlobal21cm
from ..simulations import Global21cm as simGlobal21cm
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
def_kwargs = {'verbose': False, 'progress_bar': False}
class loglikelihood(LogLikelihood):
def __init__(self, xdata, ydata, error, turning_points):
"""
Computes log-likelihood at given step in MCMC chain.
        Parameters
        ----------
        xdata : np.ndarray or None
            Frequencies (MHz) at which the data are defined (None when
            fitting turning points).
        ydata : np.ndarray
            Measured values: brightness temperatures, or turning-point
            frequencies and temperatures if turning_points is set.
        error : np.ndarray
            1-sigma errors on ydata.
        turning_points : bool or list
            Turning points to include in the fit, e.g. list('BCD'), or False.
        """
LogLikelihood.__init__(self, xdata, ydata, error)
self.turning_points = turning_points
def __call__(self, sim):
"""
Compute log-likelihood for model generated via input parameters.
Returns
-------
Tuple: (log likelihood, blobs)
"""
# Compute the likelihood if we've made it this far
if self.turning_points:
tps = sim.turning_points
try:
nu = [nu_0_mhz / (1. + tps[tp][0]) \
for tp in self.turning_points]
T = [tps[tp][1] for tp in self.turning_points]
except KeyError:
return -np.inf
yarr = np.array(nu + T)
assert len(yarr) == len(self.ydata)
else:
yarr = np.interp(self.xdata, sim.history['nu'], sim.history['dTb'])
if np.any(np.isnan(yarr)):
return -np.inf
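        # (Added note, not in the original source: the line below is the standard
        # Gaussian log-likelihood, lnL = -0.5 * sum_i [(model_i - data_i)^2 / sigma_i^2
        # + ln(2*pi*sigma_i^2)]; self.const_term from the base class is added afterwards.)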
lnL = -0.5 * (np.sum((yarr - self.ydata)**2 \
/ self.error**2 + np.log(2. * np.pi * self.error**2)))
return lnL + self.const_term
class FitGlobal21cm(FitBase):
@property
def loglikelihood(self):
if not hasattr(self, '_loglikelihood'):
self._loglikelihood = loglikelihood(self.xdata, self.ydata,
self.error, self.turning_points)
return self._loglikelihood
@property
def turning_points(self):
if not hasattr(self, '_turning_points'):
self._turning_points = False
return self._turning_points
@turning_points.setter
def turning_points(self, value):
if type(value) == bool:
if value:
self._turning_points = list('BCD')
else:
self._turning_points = False
elif type(value) == tuple:
self._turning_points = list(value)
elif type(value) == list:
self._turning_points = value
elif isinstance(value, basestring):
if len(value) == 1:
self._turning_points = [value]
else:
self._turning_points = list(value)
@property
def frequencies(self):
if not hasattr(self, '_frequencies'):
raise AttributeError('Must supply frequencies by hand!')
return self._frequencies
@frequencies.setter
def frequencies(self, value):
self._frequencies = value
@property
def data(self):
if not hasattr(self, '_data'):
raise AttributeError('Must set data by hand!')
return self._data
@data.setter
def data(self, value):
"""
Set x and ydata at the same time, either by passing in
a simulation instance, a dictionary of parameters, or a
sequence of brightness temperatures corresponding to the
frequencies defined in self.frequencies (self.xdata).
"""
if type(value) == dict:
kwargs = value.copy()
kwargs.update(def_kwargs)
sim = simGlobal21cm(**kwargs)
sim.run()
self.sim = sim
elif isinstance(value, simGlobal21cm) or \
isinstance(value, anlGlobal21cm):
sim = self.sim = value
elif type(value) in [list, tuple]:
sim = None
else:
assert len(value) == len(self.frequencies)
assert not self.turning_points
self.xdata = self.frequencies
self.ydata = value
return
if self.turning_points is not None:
self.xdata = None
if sim is not None:
z = [sim.turning_points[tp][0] for tp in self.turning_points]
T = [sim.turning_points[tp][1] for tp in self.turning_points]
nu = nu_0_mhz / (1. + np.array(z))
self.ydata = np.array(list(nu) + T)
else:
assert len(value) == 2 * len(self.turning_points)
self.ydata = value
else:
self.xdata = self.frequencies
if hasattr(self, 'sim'):
nu = self.sim.history['nu']
dTb = self.sim.history['dTb']
self.ydata = np.interp(self.xdata, nu, dTb).copy() \
+ self.noise
@property
def noise(self):
if not hasattr(self, '_noise'):
self._noise = np.zeros_like(self.xdata)
return self._noise
@noise.setter
def noise(self, value):
self._noise = np.random.normal(0., value, size=len(self.frequencies))
@property
def error(self):
if not hasattr(self, '_error'):
raise AttributeError('Must set errors by hand!')
return self._error
@error.setter
def error(self, value):
if type(value) is dict:
nu = [value[tp][0] for tp in self.turning_points]
T = [value[tp][1] for tp in self.turning_points]
self._error = np.array(nu + T)
else:
if hasattr(self, '_data'):
assert len(value) == len(self.data), \
"Data and errors must have same shape!"
self._error = value
def _check_for_conflicts(self):
"""
Hacky at the moment. Preventative measure against is_log=True for
spectrum_logN. Could generalize.
"""
for i, element in enumerate(self.parameters):
if re.search('spectrum_logN', element):
if self.is_log[i]:
raise ValueError('spectrum_logN is already logarithmic!')
|
[
"numpy.zeros_like",
"numpy.log",
"numpy.isnan",
"numpy.array",
"numpy.interp",
"re.search"
] |
[((1966, 1982), 'numpy.array', 'np.array', (['(nu + T)'], {}), '(nu + T)\n', (1974, 1982), True, 'import numpy as np\n'), ((2066, 2126), 'numpy.interp', 'np.interp', (['self.xdata', "sim.history['nu']", "sim.history['dTb']"], {}), "(self.xdata, sim.history['nu'], sim.history['dTb'])\n", (2075, 2126), True, 'import numpy as np\n'), ((2146, 2160), 'numpy.isnan', 'np.isnan', (['yarr'], {}), '(yarr)\n', (2154, 2160), True, 'import numpy as np\n'), ((5892, 5917), 'numpy.zeros_like', 'np.zeros_like', (['self.xdata'], {}), '(self.xdata)\n', (5905, 5917), True, 'import numpy as np\n'), ((6524, 6540), 'numpy.array', 'np.array', (['(nu + T)'], {}), '(nu + T)\n', (6532, 6540), True, 'import numpy as np\n'), ((7026, 7061), 're.search', 're.search', (['"""spectrum_logN"""', 'element'], {}), "('spectrum_logN', element)\n", (7035, 7061), False, 'import gc, os, sys, copy, types, time, re\n'), ((2275, 2312), 'numpy.log', 'np.log', (['(2.0 * np.pi * self.error ** 2)'], {}), '(2.0 * np.pi * self.error ** 2)\n', (2281, 2312), True, 'import numpy as np\n'), ((5274, 5285), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (5282, 5285), True, 'import numpy as np\n'), ((5717, 5747), 'numpy.interp', 'np.interp', (['self.xdata', 'nu', 'dTb'], {}), '(self.xdata, nu, dTb)\n', (5726, 5747), True, 'import numpy as np\n')]
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
#! [auto_compilation]
import openvino.runtime as ov
compiled_model = ov.compile_model("model.xml")
#! [auto_compilation]
#! [properties_example]
core = ov.Core()
input_a = ov.opset8.parameter([8])
res = ov.opset8.absolute(input_a)
model = ov.Model(res, [input_a])
compiled = core.compile_model(model, "CPU")
print(model.inputs)
print(model.outputs)
print(compiled.inputs)
print(compiled.outputs)
#! [properties_example]
#! [tensor_basics]
data_float64 = np.ones(shape=(2,8))
tensor = ov.Tensor(data_float64)
assert tensor.element_type == ov.Type.f64
data_int32 = np.ones(shape=(2,8), dtype=np.int32)
tensor = ov.Tensor(data_int32)
assert tensor.element_type == ov.Type.i32
#! [tensor_basics]
#! [tensor_shared_mode]
data_to_share = np.ones(shape=(2,8))
shared_tensor = ov.Tensor(data_to_share, shared_memory=True)
# Editing of the numpy array affects Tensor's data
data_to_share[0][2] = 6.0
assert shared_tensor.data[0][2] == 6.0
# Editing of Tensor's data affects the numpy array
shared_tensor.data[0][2] = 0.6
assert data_to_share[0][2] == 0.6
#! [tensor_shared_mode]
infer_request = compiled.create_infer_request()
data = np.random.randint(-5, 3 + 1, size=(8))
#! [passing_numpy_array]
# Passing inputs data in form of a dictionary
infer_request.infer(inputs={0: data})
# Passing inputs data in form of a list
infer_request.infer(inputs=[data])
#! [passing_numpy_array]
#! [getting_results]
# Get output tensor
results = infer_request.get_output_tensor().data
# Get tensor with CompiledModel's output node
results = infer_request.get_tensor(compiled.outputs[0]).data
# Get all results with special helper property
results = list(infer_request.results.values())
#! [getting_results]
#! [sync_infer]
# Simple call to InferRequest
results = infer_request.infer(inputs={0: data})
# Extra feature: calling CompiledModel directly
results = compiled_model(inputs={0: data})
#! [sync_infer]
#! [asyncinferqueue]
core = ov.Core()
# Simple model that adds two inputs together
input_a = ov.opset8.parameter([8])
input_b = ov.opset8.parameter([8])
res = ov.opset8.add(input_a, input_b)
model = ov.Model(res, [input_a, input_b])
compiled = core.compile_model(model, "CPU")
# Number of InferRequests that AsyncInferQueue holds
jobs = 4
infer_queue = ov.AsyncInferQueue(compiled, jobs)
# Create data
data = [np.array([i] * 8, dtype=np.float32) for i in range(jobs)]
# Run all jobs
for i in range(len(data)):
infer_queue.start_async({0: data[i], 1: data[i]})
infer_queue.wait_all()
#! [asyncinferqueue]
#! [asyncinferqueue_access]
results = infer_queue[3].get_output_tensor().data
#! [asyncinferqueue_access]
#! [asyncinferqueue_set_callback]
data_done = [False for _ in range(jobs)]
def f(request, userdata):
print(f"Done! Result: {request.get_output_tensor().data}")
data_done[userdata] = True
infer_queue.set_callback(f)
for i in range(len(data)):
infer_queue.start_async({0: data[i], 1: data[i]}, userdata=i)
infer_queue.wait_all()
assert all(data_done)
#! [asyncinferqueue_set_callback]
unt8_data = np.ones([100])
#! [packing_data]
from openvino.helpers import pack_data
packed_buffer = pack_data(unt8_data, ov.Type.u4)
# Create tensor with shape in element types
t = ov.Tensor(packed_buffer, [1, 128], ov.Type.u4)
#! [packing_data]
#! [unpacking]
from openvino.helpers import unpack_data
unpacked_data = unpack_data(t.data, t.element_type, t.shape)
assert np.array_equal(unpacked_data , unt8_data)
#! [unpacking]
#! [releasing_gil]
import openvino.runtime as ov
import cv2 as cv
from threading import Thread
input_data = []
# Processing input data will be done in a separate thread
# while compilation of the model and creation of the infer request
# is going to be executed in the main thread.
def prepare_data(input, image_path):
image = cv.imread(image_path)
h, w = list(input.shape)[-2:]
image = cv.resize(image, (h, w))
image = image.transpose((2, 0, 1))
image = np.expand_dims(image, 0)
input_data.append(image)
core = ov.Core()
model = core.read_model("model.xml")
# Create thread with prepare_data function as target and start it
thread = Thread(target=prepare_data, args=[model.input(), "path/to/image"])
thread.start()
# The GIL will be released in compile_model.
# It allows a thread above to start the job,
# while main thread is running in the background.
compiled = core.compile_model(model, "GPU")
# After returning from compile_model, the main thread acquires the GIL
# and starts create_infer_request which releases it once again.
request = compiled.create_infer_request()
# Join the thread to make sure the input_data is ready
thread.join()
# running the inference
request.infer(input_data)
#! [releasing_gil]
|
[
"openvino.runtime.opset8.parameter",
"openvino.runtime.Core",
"numpy.array_equal",
"openvino.runtime.opset8.absolute",
"openvino.runtime.Model",
"openvino.runtime.opset8.add",
"numpy.ones",
"numpy.expand_dims",
"openvino.runtime.AsyncInferQueue",
"openvino.runtime.compile_model",
"cv2.imread",
"numpy.random.randint",
"numpy.array",
"openvino.helpers.pack_data",
"openvino.runtime.Tensor",
"openvino.helpers.unpack_data",
"cv2.resize"
] |
[((173, 202), 'openvino.runtime.compile_model', 'ov.compile_model', (['"""model.xml"""'], {}), "('model.xml')\n", (189, 202), True, 'import openvino.runtime as ov\n'), ((257, 266), 'openvino.runtime.Core', 'ov.Core', ([], {}), '()\n', (264, 266), True, 'import openvino.runtime as ov\n'), ((278, 302), 'openvino.runtime.opset8.parameter', 'ov.opset8.parameter', (['[8]'], {}), '([8])\n', (297, 302), True, 'import openvino.runtime as ov\n'), ((309, 336), 'openvino.runtime.opset8.absolute', 'ov.opset8.absolute', (['input_a'], {}), '(input_a)\n', (327, 336), True, 'import openvino.runtime as ov\n'), ((345, 369), 'openvino.runtime.Model', 'ov.Model', (['res', '[input_a]'], {}), '(res, [input_a])\n', (353, 369), True, 'import openvino.runtime as ov\n'), ((563, 584), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 8)'}), '(shape=(2, 8))\n', (570, 584), True, 'import numpy as np\n'), ((594, 617), 'openvino.runtime.Tensor', 'ov.Tensor', (['data_float64'], {}), '(data_float64)\n', (603, 617), True, 'import openvino.runtime as ov\n'), ((674, 711), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 8)', 'dtype': 'np.int32'}), '(shape=(2, 8), dtype=np.int32)\n', (681, 711), True, 'import numpy as np\n'), ((721, 742), 'openvino.runtime.Tensor', 'ov.Tensor', (['data_int32'], {}), '(data_int32)\n', (730, 742), True, 'import openvino.runtime as ov\n'), ((845, 866), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 8)'}), '(shape=(2, 8))\n', (852, 866), True, 'import numpy as np\n'), ((883, 927), 'openvino.runtime.Tensor', 'ov.Tensor', (['data_to_share'], {'shared_memory': '(True)'}), '(data_to_share, shared_memory=True)\n', (892, 927), True, 'import openvino.runtime as ov\n'), ((1242, 1278), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(3 + 1)'], {'size': '(8)'}), '(-5, 3 + 1, size=8)\n', (1259, 1278), True, 'import numpy as np\n'), ((2037, 2046), 'openvino.runtime.Core', 'ov.Core', ([], {}), '()\n', (2044, 2046), True, 'import openvino.runtime as ov\n'), ((2103, 2127), 'openvino.runtime.opset8.parameter', 'ov.opset8.parameter', (['[8]'], {}), '([8])\n', (2122, 2127), True, 'import openvino.runtime as ov\n'), ((2138, 2162), 'openvino.runtime.opset8.parameter', 'ov.opset8.parameter', (['[8]'], {}), '([8])\n', (2157, 2162), True, 'import openvino.runtime as ov\n'), ((2169, 2200), 'openvino.runtime.opset8.add', 'ov.opset8.add', (['input_a', 'input_b'], {}), '(input_a, input_b)\n', (2182, 2200), True, 'import openvino.runtime as ov\n'), ((2209, 2242), 'openvino.runtime.Model', 'ov.Model', (['res', '[input_a, input_b]'], {}), '(res, [input_a, input_b])\n', (2217, 2242), True, 'import openvino.runtime as ov\n'), ((2364, 2398), 'openvino.runtime.AsyncInferQueue', 'ov.AsyncInferQueue', (['compiled', 'jobs'], {}), '(compiled, jobs)\n', (2382, 2398), True, 'import openvino.runtime as ov\n'), ((3141, 3155), 'numpy.ones', 'np.ones', (['[100]'], {}), '([100])\n', (3148, 3155), True, 'import numpy as np\n'), ((3231, 3263), 'openvino.helpers.pack_data', 'pack_data', (['unt8_data', 'ov.Type.u4'], {}), '(unt8_data, ov.Type.u4)\n', (3240, 3263), False, 'from openvino.helpers import pack_data\n'), ((3312, 3358), 'openvino.runtime.Tensor', 'ov.Tensor', (['packed_buffer', '[1, 128]', 'ov.Type.u4'], {}), '(packed_buffer, [1, 128], ov.Type.u4)\n', (3321, 3358), True, 'import openvino.runtime as ov\n'), ((3451, 3495), 'openvino.helpers.unpack_data', 'unpack_data', (['t.data', 't.element_type', 't.shape'], {}), '(t.data, t.element_type, t.shape)\n', (3462, 3495), False, 'from openvino.helpers import unpack_data\n'), ((3503, 3543), 
'numpy.array_equal', 'np.array_equal', (['unpacked_data', 'unt8_data'], {}), '(unpacked_data, unt8_data)\n', (3517, 3543), True, 'import numpy as np\n'), ((4100, 4109), 'openvino.runtime.Core', 'ov.Core', ([], {}), '()\n', (4107, 4109), True, 'import openvino.runtime as ov\n'), ((2422, 2457), 'numpy.array', 'np.array', (['([i] * 8)'], {'dtype': 'np.float32'}), '([i] * 8, dtype=np.float32)\n', (2430, 2457), True, 'import numpy as np\n'), ((3894, 3915), 'cv2.imread', 'cv.imread', (['image_path'], {}), '(image_path)\n', (3903, 3915), True, 'import cv2 as cv\n'), ((3962, 3986), 'cv2.resize', 'cv.resize', (['image', '(h, w)'], {}), '(image, (h, w))\n', (3971, 3986), True, 'import cv2 as cv\n'), ((4038, 4062), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (4052, 4062), True, 'import numpy as np\n')]
|
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
#
# Nonparametric Permutation Test
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
from itertools import combinations
from math import factorial
try:
from nose.tools import nottest
except ImportError:
# Use a no-op decorator if nose is not available
def nottest(f):
return f
# decorator to prevent nose to consider
# this as a unit test due to "test" in the name
@nottest
def permutation_test(x, y, func='x_mean != y_mean', method='exact',
num_rounds=1000, seed=None):
"""
Nonparametric permutation test
Parameters
-------------
x : list or numpy array with shape (n_datapoints,)
A list or 1D numpy array of the first sample
(e.g., the treatment group).
y : list or numpy array with shape (n_datapoints,)
A list or 1D numpy array of the second sample
(e.g., the control group).
func : custom function or str (default: 'x_mean != y_mean')
function to compute the statistic for the permutation test.
- If 'x_mean != y_mean', uses
`func=lambda x, y: np.abs(np.mean(x) - np.mean(y)))`
for a two-sided test.
- If 'x_mean > y_mean', uses
`func=lambda x, y: np.mean(x) - np.mean(y))`
for a one-sided test.
- If 'x_mean < y_mean', uses
`func=lambda x, y: np.mean(y) - np.mean(x))`
for a one-sided test.
method : 'approximate' or 'exact' (default: 'exact')
If 'exact' (default), all possible permutations are considered.
If 'approximate' the number of drawn samples is
given by `num_rounds`.
Note that 'exact' is typically not feasible unless the dataset
size is relatively small.
num_rounds : int (default: 1000)
The number of permutation samples if `method='approximate'`.
seed : int or None (default: None)
The random seed for generating permutation samples if
`method='approximate'`.
Returns
----------
p-value under the null hypothesis
Examples
-----------
For usage examples, please see
http://rasbt.github.io/mlxtend/user_guide/evaluate/permutation_test/
"""
if method not in ('approximate', 'exact'):
raise AttributeError('method must be "approximate"'
' or "exact", got %s' % method)
if isinstance(func, str):
if func not in (
'x_mean != y_mean', 'x_mean > y_mean', 'x_mean < y_mean'):
raise AttributeError('Provide a custom function'
' lambda x,y: ... or a string'
' in ("x_mean != y_mean", '
'"x_mean > y_mean", "x_mean < y_mean")')
elif func == 'x_mean != y_mean':
def func(x, y):
return np.abs(np.mean(x) - np.mean(y))
elif func == 'x_mean > y_mean':
def func(x, y):
return np.mean(x) - np.mean(y)
else:
def func(x, y):
return np.mean(y) - np.mean(x)
rng = np.random.RandomState(seed)
m, n = len(x), len(y)
combined = np.hstack((x, y))
more_extreme = 0.
reference_stat = func(x, y)
    # Note that whether we compute the combinations or permutations
    # does not affect the results, since the number of permutations of the
    # n_A specific objects in A and the n_B specific objects in B is the
    # same for every combination in x_1, ... x_{n_A} and
    # x_{n_{A+1}}, ... x_{n_A + n_B}.
    # In other words, for any given number of combinations, we get
    # n_A! x n_B! times as many permutations; however, the computed
    # value of those permutations that are merely re-arranged combinations
    # does not change. Hence the result is the same, since we divide by the
    # number of combinations or permutations: the permutations simply have
    # "n_A! x n_B!" as a scaling factor in both the numerator and denominator,
    # and using combinations instead of permutations simply saves computational
    # time.
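    # (Illustrative example added here, not part of the original comment: with
    # m = n = 2 there are C(4, 2) = 6 combinations but 4! = 24 permutations;
    # each combination corresponds to 2! x 2! = 4 orderings that all yield the
    # same test statistic, so the estimated p-value is identical either way.)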
if method == 'exact':
for indices_x in combinations(range(m + n), m):
indices_y = [i for i in range(m + n) if i not in indices_x]
diff = func(combined[list(indices_x)], combined[indices_y])
if diff > reference_stat:
more_extreme += 1.
num_rounds = factorial(m + n) / (factorial(m)*factorial(n))
else:
for i in range(num_rounds):
rng.shuffle(combined)
if func(combined[:m], combined[m:]) > reference_stat:
more_extreme += 1.
return more_extreme / num_rounds
|
[
"numpy.mean",
"math.factorial",
"numpy.random.RandomState",
"numpy.hstack"
] |
[((3160, 3187), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3181, 3187), True, 'import numpy as np\n'), ((3230, 3247), 'numpy.hstack', 'np.hstack', (['(x, y)'], {}), '((x, y))\n', (3239, 3247), True, 'import numpy as np\n'), ((4453, 4469), 'math.factorial', 'factorial', (['(m + n)'], {}), '(m + n)\n', (4462, 4469), False, 'from math import factorial\n'), ((4473, 4485), 'math.factorial', 'factorial', (['m'], {}), '(m)\n', (4482, 4485), False, 'from math import factorial\n'), ((4486, 4498), 'math.factorial', 'factorial', (['n'], {}), '(n)\n', (4495, 4498), False, 'from math import factorial\n'), ((2918, 2928), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2925, 2928), True, 'import numpy as np\n'), ((2931, 2941), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (2938, 2941), True, 'import numpy as np\n'), ((3035, 3045), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3042, 3045), True, 'import numpy as np\n'), ((3048, 3058), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (3055, 3058), True, 'import numpy as np\n'), ((3125, 3135), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (3132, 3135), True, 'import numpy as np\n'), ((3138, 3148), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3145, 3148), True, 'import numpy as np\n')]
|
# OLD USAGE
# python align_faces.py --shape-predictor shape_predictor_68_face_landmarks.dat --image images/example_01.jpg
# import the necessary packages
from imutils.face_utils import FaceAligner
from PIL import Image
import numpy as np
# import argparse
import imutils
import dlib
import cv2
# construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("--shape-predictor", help="path to facial landmark predictor", default='shape_predictor_68_face_landmarks.dat')
# ap.add_argument("--input", help="path to input images", default='input_raw')
# ap.add_argument("--output", help="path to input images", default='input_aligned')
# args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor and the face aligner
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
fa = FaceAligner(predictor, desiredFaceWidth=256,
desiredLeftEye=(0.371, 0.480))
# Input: numpy array for image with RGB channels
# Output: (numpy array, face_found)
def align_face(img):
img = img[:, :, ::-1] # Convert from RGB to BGR format
img = imutils.resize(img, width=800)
# detect faces in the grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 2)
if len(rects) > 0:
# align the face using facial landmarks
align_img = fa.align(img, gray, rects[0])[:, :, ::-1]
align_img = np.array(Image.fromarray(align_img).convert('RGB'))
return align_img, True
else:
# No face found
return None, False
# Input: img_path
# Output: aligned_img if face_found, else None
def align(img_path):
img = Image.open(img_path)
img = img.convert('RGB') # if image is RGBA or Grayscale etc
img = np.array(img)
x, face_found = align_face(img)
return x
|
[
"cv2.cvtColor",
"PIL.Image.open",
"PIL.Image.fromarray",
"numpy.array",
"dlib.get_frontal_face_detector",
"imutils.resize",
"dlib.shape_predictor",
"imutils.face_utils.FaceAligner"
] |
[((836, 868), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (866, 868), False, 'import dlib\n'), ((881, 942), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landmarks.dat"""'], {}), "('shape_predictor_68_face_landmarks.dat')\n", (901, 942), False, 'import dlib\n'), ((948, 1022), 'imutils.face_utils.FaceAligner', 'FaceAligner', (['predictor'], {'desiredFaceWidth': '(256)', 'desiredLeftEye': '(0.371, 0.48)'}), '(predictor, desiredFaceWidth=256, desiredLeftEye=(0.371, 0.48))\n', (959, 1022), False, 'from imutils.face_utils import FaceAligner\n'), ((1219, 1249), 'imutils.resize', 'imutils.resize', (['img'], {'width': '(800)'}), '(img, width=800)\n', (1233, 1249), False, 'import imutils\n'), ((1304, 1341), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1316, 1341), False, 'import cv2\n'), ((1767, 1787), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1777, 1787), False, 'from PIL import Image\n'), ((1864, 1877), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1872, 1877), True, 'import numpy as np\n'), ((1535, 1561), 'PIL.Image.fromarray', 'Image.fromarray', (['align_img'], {}), '(align_img)\n', (1550, 1561), False, 'from PIL import Image\n')]
|
from styx_msgs.msg import TrafficLight
import cv2
import numpy as np
class TLClassifier(object):
def __init__(self):
pass
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
img_blur = cv2.medianBlur(image,3)
img_hsv = cv2.cvtColor(img_blur,cv2.COLOR_BGR2HSV)
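        # (Added note: in OpenCV's HSV representation hue spans 0-179 and red
        # wraps around the 0/180 boundary, so it is covered by two ranges below.)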
        red_lower_range = cv2.inRange(img_hsv, np.array([0, 100, 100],np.uint8), np.array([10, 255, 255],np.uint8))
        red_upper_range = cv2.inRange(img_hsv, np.array([160, 100, 100],np.uint8), np.array([179, 255, 255],np.uint8))
        yellow_range = cv2.inRange(img_hsv, np.array([28, 120, 120],np.uint8), np.array([47, 255, 255],np.uint8))
if cv2.countNonZero(red_lower_range) + cv2.countNonZero(red_upper_range) > 48 or cv2.countNonZero(yellow_range) > 48:
return TrafficLight.RED
else:
return TrafficLight.GREEN
# return TrafficLight.UNKNOWN
|
[
"cv2.cvtColor",
"cv2.countNonZero",
"numpy.array",
"cv2.medianBlur"
] |
[((453, 477), 'cv2.medianBlur', 'cv2.medianBlur', (['image', '(3)'], {}), '(image, 3)\n', (467, 477), False, 'import cv2\n'), ((495, 536), 'cv2.cvtColor', 'cv2.cvtColor', (['img_blur', 'cv2.COLOR_BGR2HSV'], {}), '(img_blur, cv2.COLOR_BGR2HSV)\n', (507, 536), False, 'import cv2\n'), ((586, 619), 'numpy.array', 'np.array', (['[0, 100, 100]', 'np.uint8'], {}), '([0, 100, 100], np.uint8)\n', (594, 619), True, 'import numpy as np\n'), ((620, 654), 'numpy.array', 'np.array', (['[10, 255, 255]', 'np.uint8'], {}), '([10, 255, 255], np.uint8)\n', (628, 654), True, 'import numpy as np\n'), ((704, 739), 'numpy.array', 'np.array', (['[160, 100, 100]', 'np.uint8'], {}), '([160, 100, 100], np.uint8)\n', (712, 739), True, 'import numpy as np\n'), ((740, 775), 'numpy.array', 'np.array', (['[179, 255, 255]', 'np.uint8'], {}), '([179, 255, 255], np.uint8)\n', (748, 775), True, 'import numpy as np\n'), ((822, 856), 'numpy.array', 'np.array', (['[28, 120, 120]', 'np.uint8'], {}), '([28, 120, 120], np.uint8)\n', (830, 856), True, 'import numpy as np\n'), ((857, 891), 'numpy.array', 'np.array', (['[47, 255, 255]', 'np.uint8'], {}), '([47, 255, 255], np.uint8)\n', (865, 891), True, 'import numpy as np\n'), ((982, 1012), 'cv2.countNonZero', 'cv2.countNonZero', (['yellow_range'], {}), '(yellow_range)\n', (998, 1012), False, 'import cv2\n'), ((904, 937), 'cv2.countNonZero', 'cv2.countNonZero', (['red_lower_range'], {}), '(red_lower_range)\n', (920, 937), False, 'import cv2\n'), ((940, 973), 'cv2.countNonZero', 'cv2.countNonZero', (['red_upper_range'], {}), '(red_upper_range)\n', (956, 973), False, 'import cv2\n')]
|
"""
Script to compute dci score of learned representation.
"""
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
from absl import flags, app
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from disentanglement_lib.evaluation.metrics import dci
from disentanglement_lib.visualize import visualize_scores
import os
FLAGS = flags.FLAGS
flags.DEFINE_string('c_path', '', 'File path for underlying factors c')
flags.DEFINE_string('assign_mat_path', 'data/hirid/assign_mats/hirid_assign_mat.npy', 'Path for assignment matrix')
flags.DEFINE_string('model_name', '', 'Name of model directory to get learned latent code')
flags.DEFINE_enum('data_type_dci', 'dsprites', ['hmnist', 'physionet', 'hirid', 'sprites', 'dsprites', 'smallnorb', 'cars3d', 'shapes3d'], 'Type of data and how to evaluate')
flags.DEFINE_list('score_factors', [], 'Underlying factors to consider in DCI score calculation')
flags.DEFINE_enum('rescaling', 'linear', ['linear', 'standard'], 'Rescaling of ground truth factors')
flags.DEFINE_bool('shuffle', False, 'Whether or not to shuffle evaluation data.')
flags.DEFINE_integer('dci_seed', 42, 'Random seed.')
flags.DEFINE_bool('visualize_score', False, 'Whether or not to visualize score')
flags.DEFINE_bool('save_score', False, 'Whether or not to save calculated score')
def load_z_c(c_path, z_path):
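    # (Added note: an .npz archive is indexed by key; if c_path points to a plain
    # .npy array instead, the string index fails and the loaded array is used as-is.)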
try:
c_full = np.load(c_path)['factors_test']
except IndexError:
c_full = np.load(c_path)
z = np.load(z_path)
c = c_full
return c, z
def main(argv, model_dir=None):
del argv # Unused
if model_dir is None:
out_dir = FLAGS.model_name
else:
out_dir = model_dir
z_path = '{}/z_mean.npy'.format(out_dir)
if FLAGS.c_path == '':
if FLAGS.data_type_dci != 'hirid':
c_path = os.path.join(F'/data/{FLAGS.data_type_dci}', F'factors_{FLAGS.data_type_dci}.npz')
else:
c_path = os.path.join(F'/data/{FLAGS.data_type_dci}', F'{FLAGS.data_type_dci}.npz')
else:
c_path = FLAGS.c_path
if FLAGS.data_type_dci == "physionet":
# Use imputed values as ground truth for physionet data
c, z = load_z_c('{}/imputed.npy'.format(out_dir), z_path)
c = np.transpose(c, (0,2,1))
elif FLAGS.data_type_dci == "hirid":
c = np.load(c_path)['x_test_miss']
c = np.transpose(c, (0, 2, 1))
c = c.astype(int)
z = np.load(z_path)
else:
c, z = load_z_c(c_path, z_path)
z_shape = z.shape
c_shape = c.shape
z_reshape = np.reshape(np.transpose(z, (0,2,1)),(z_shape[0]*z_shape[2],z_shape[1]))
c_reshape = np.reshape(np.transpose(c, (0,2,1)),(c_shape[0]*c_shape[2],c_shape[1]))
c_reshape = c_reshape[:z_reshape.shape[0], ...]
# Experimental physionet rescaling
if FLAGS.data_type_dci == 'physionet':
if FLAGS.rescaling == 'linear':
# linear rescaling
c_rescale = 10 * c_reshape
c_reshape = c_rescale.astype(int)
elif FLAGS.rescaling == 'standard':
# standardizing
scaler = StandardScaler()
c_rescale = scaler.fit_transform(c_reshape)
c_reshape = (10*c_rescale).astype(int)
else:
raise ValueError("Rescaling must be 'linear' or 'standard'")
# Include all factors in score calculation, if not specified otherwise
if not FLAGS.score_factors:
FLAGS.score_factors = np.arange(c_shape[1]).astype(str)
    # Check whether a ground truth factor never changes and remove it if that is the case
mask = np.ones(c_reshape.shape[1], dtype=bool)
for i in range(c_reshape.shape[1]):
c_change = np.sum(abs(np.diff(c_reshape[:8000,i])))
if (not c_change) or (F"{i}" not in FLAGS.score_factors):
mask[i] = False
c_reshape = c_reshape[:,mask]
print(F'C shape: {c_reshape.shape}')
print(F'Z shape: {z_reshape.shape}')
print(F'Shuffle: {FLAGS.shuffle}')
c_train, c_test, z_train, z_test = train_test_split(c_reshape, z_reshape, test_size=0.2, shuffle=FLAGS.shuffle, random_state=FLAGS.dci_seed)
if FLAGS.data_type_dci == "hirid":
n_train = 20000
n_test = 5000
else:
n_train = 8000
n_test = 2000
importance_matrix, i_train, i_test = dci.compute_importance_gbt(
z_train[:n_train, :].transpose(),
c_train[:n_train, :].transpose().astype(int),
z_test[:n_test, :].transpose(), c_test[:n_test, :].transpose().astype(int))
# Calculate scores
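    # (Added note: in the DCI framework of Eastwood & Williams (2018),
    # disentanglement measures whether each latent dimension captures at most one
    # factor, completeness whether each factor is captured by a single latent
    # dimension, and informativeness the predictive accuracy of the gradient
    # boosted trees used to build the importance matrix.)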
d = dci.disentanglement(importance_matrix)
c = dci.completeness(importance_matrix)
print(F'D: {d}')
print(F'C: {c}')
print(F'I: {i_test}')
if FLAGS.data_type_dci in ['hirid', 'physionet']:
miss_idxs = np.nonzero(np.invert(mask))[0]
for idx in miss_idxs:
importance_matrix = np.insert(importance_matrix,
idx,
0, axis=1)
assign_mat = np.load(FLAGS.assign_mat_path)
impt_mat_assign = np.matmul(importance_matrix, assign_mat)
impt_mat_assign_norm = np.nan_to_num(
impt_mat_assign / np.sum(impt_mat_assign, axis=0))
d_assign = dci.disentanglement(impt_mat_assign_norm)
c_assign = dci.completeness(impt_mat_assign_norm)
print(F'D assign: {d_assign}')
print(F'C assign: {c_assign}')
if FLAGS.save_score:
if FLAGS.data_type_dci in ['hirid', 'physionet']:
np.savez(F'{out_dir}/dci_assign_2_{FLAGS.dci_seed}', informativeness_train=i_train, informativeness_test=i_test,
disentanglement=d, completeness=c,
disentanglement_assign=d_assign, completeness_assign=c_assign)
else:
np.savez(F'{out_dir}/dci_{FLAGS.dci_seed}', informativeness_train=i_train, informativeness_test=i_test,
disentanglement=d, completeness=c)
# Visualization
if FLAGS.visualize_score:
if FLAGS.data_type_dci == 'hirid':
# Visualize
visualize_scores.heat_square(np.transpose(importance_matrix), out_dir,
F"dci_matrix_{FLAGS.dci_seed}",
"feature", "latent dim")
visualize_scores.heat_square(np.transpose(impt_mat_assign_norm), out_dir,
F"dci_matrix_assign_{FLAGS.dci_seed}",
"feature", "latent_dim")
# Save importance matrices
if FLAGS.save_score:
np.save(F"{out_dir}/impt_matrix_{FLAGS.dci_seed}", importance_matrix)
np.save(F"{out_dir}/impt_matrix_assign_{FLAGS.dci_seed}", impt_mat_assign_norm)
else:
# Visualize
visualize_scores.heat_square(importance_matrix, out_dir,
F"dci_matrix_{FLAGS.dci_seed}",
"x_axis", "y_axis")
# Save importance matrices
np.save(F"{out_dir}/impt_matrix_{FLAGS.dci_seed}", importance_matrix)
print("Evaluation finished")
if __name__ == '__main__':
app.run(main)
|
[
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.invert",
"sklearn.model_selection.train_test_split",
"numpy.ones",
"numpy.arange",
"absl.flags.DEFINE_list",
"os.path.join",
"warnings.simplefilter",
"disentanglement_lib.visualize.visualize_scores.heat_square",
"absl.flags.DEFINE_bool",
"numpy.transpose",
"numpy.insert",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_enum",
"numpy.save",
"numpy.savez",
"disentanglement_lib.evaluation.metrics.dci.completeness",
"absl.flags.DEFINE_string",
"absl.app.run",
"numpy.diff",
"numpy.matmul",
"disentanglement_lib.evaluation.metrics.dci.disentanglement"
] |
[((79, 141), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (100, 141), False, 'import warnings\n'), ((438, 509), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""c_path"""', '""""""', '"""File path for underlying factors c"""'], {}), "('c_path', '', 'File path for underlying factors c')\n", (457, 509), False, 'from absl import flags, app\n'), ((510, 634), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""assign_mat_path"""', '"""data/hirid/assign_mats/hirid_assign_mat.npy"""', '"""Path for assignment matrix"""'], {}), "('assign_mat_path',\n 'data/hirid/assign_mats/hirid_assign_mat.npy', 'Path for assignment matrix'\n )\n", (529, 634), False, 'from absl import flags, app\n'), ((626, 721), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model_name"""', '""""""', '"""Name of model directory to get learned latent code"""'], {}), "('model_name', '',\n 'Name of model directory to get learned latent code')\n", (645, 721), False, 'from absl import flags, app\n'), ((718, 900), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""data_type_dci"""', '"""dsprites"""', "['hmnist', 'physionet', 'hirid', 'sprites', 'dsprites', 'smallnorb',\n 'cars3d', 'shapes3d']", '"""Type of data and how to evaluate"""'], {}), "('data_type_dci', 'dsprites', ['hmnist', 'physionet',\n 'hirid', 'sprites', 'dsprites', 'smallnorb', 'cars3d', 'shapes3d'],\n 'Type of data and how to evaluate')\n", (735, 900), False, 'from absl import flags, app\n'), ((893, 994), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""score_factors"""', '[]', '"""Underlying factors to consider in DCI score calculation"""'], {}), "('score_factors', [],\n 'Underlying factors to consider in DCI score calculation')\n", (910, 994), False, 'from absl import flags, app\n'), ((991, 1096), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""rescaling"""', '"""linear"""', "['linear', 'standard']", '"""Rescaling of ground truth factors"""'], {}), "('rescaling', 'linear', ['linear', 'standard'],\n 'Rescaling of ground truth factors')\n", (1008, 1096), False, 'from absl import flags, app\n'), ((1093, 1178), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""shuffle"""', '(False)', '"""Whether or not to shuffle evaluation data."""'], {}), "('shuffle', False,\n 'Whether or not to shuffle evaluation data.')\n", (1110, 1178), False, 'from absl import flags, app\n'), ((1175, 1227), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""dci_seed"""', '(42)', '"""Random seed."""'], {}), "('dci_seed', 42, 'Random seed.')\n", (1195, 1227), False, 'from absl import flags, app\n'), ((1228, 1313), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""visualize_score"""', '(False)', '"""Whether or not to visualize score"""'], {}), "('visualize_score', False, 'Whether or not to visualize score'\n )\n", (1245, 1313), False, 'from absl import flags, app\n'), ((1309, 1394), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""save_score"""', '(False)', '"""Whether or not to save calculated score"""'], {}), "('save_score', False,\n 'Whether or not to save calculated score')\n", (1326, 1394), False, 'from absl import flags, app\n'), ((1544, 1559), 'numpy.load', 'np.load', (['z_path'], {}), '(z_path)\n', (1551, 1559), True, 'import numpy as np\n'), ((3634, 3673), 'numpy.ones', 'np.ones', (['c_reshape.shape[1]'], {'dtype': 'bool'}), '(c_reshape.shape[1], dtype=bool)\n', (3641, 3673), True, 'import numpy as np\n'), ((4063, 4172), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['c_reshape', 'z_reshape'], {'test_size': '(0.2)', 'shuffle': 'FLAGS.shuffle', 'random_state': 'FLAGS.dci_seed'}), '(c_reshape, z_reshape, test_size=0.2, shuffle=FLAGS.shuffle,\n random_state=FLAGS.dci_seed)\n', (4079, 4172), False, 'from sklearn.model_selection import train_test_split\n'), ((4591, 4629), 'disentanglement_lib.evaluation.metrics.dci.disentanglement', 'dci.disentanglement', (['importance_matrix'], {}), '(importance_matrix)\n', (4610, 4629), False, 'from disentanglement_lib.evaluation.metrics import dci\n'), ((4638, 4673), 'disentanglement_lib.evaluation.metrics.dci.completeness', 'dci.completeness', (['importance_matrix'], {}), '(importance_matrix)\n', (4654, 4673), False, 'from disentanglement_lib.evaluation.metrics import dci\n'), ((7256, 7269), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (7263, 7269), False, 'from absl import flags, app\n'), ((2304, 2330), 'numpy.transpose', 'np.transpose', (['c', '(0, 2, 1)'], {}), '(c, (0, 2, 1))\n', (2316, 2330), True, 'import numpy as np\n'), ((2629, 2655), 'numpy.transpose', 'np.transpose', (['z', '(0, 2, 1)'], {}), '(z, (0, 2, 1))\n', (2641, 2655), True, 'import numpy as np\n'), ((2717, 2743), 'numpy.transpose', 'np.transpose', (['c', '(0, 2, 1)'], {}), '(c, (0, 2, 1))\n', (2729, 2743), True, 'import numpy as np\n'), ((5060, 5090), 'numpy.load', 'np.load', (['FLAGS.assign_mat_path'], {}), '(FLAGS.assign_mat_path)\n', (5067, 5090), True, 'import numpy as np\n'), ((5117, 5157), 'numpy.matmul', 'np.matmul', (['importance_matrix', 'assign_mat'], {}), '(importance_matrix, assign_mat)\n', (5126, 5157), True, 'import numpy as np\n'), ((5286, 5327), 'disentanglement_lib.evaluation.metrics.dci.disentanglement', 'dci.disentanglement', (['impt_mat_assign_norm'], {}), '(impt_mat_assign_norm)\n', (5305, 5327), False, 'from disentanglement_lib.evaluation.metrics import dci\n'), ((5347, 5385), 'disentanglement_lib.evaluation.metrics.dci.completeness', 'dci.completeness', (['impt_mat_assign_norm'], {}), '(impt_mat_assign_norm)\n', (5363, 5385), False, 'from disentanglement_lib.evaluation.metrics import dci\n'), ((1448, 1463), 'numpy.load', 'np.load', (['c_path'], {}), '(c_path)\n', (1455, 1463), True, 'import numpy as np\n'), ((1520, 1535), 'numpy.load', 'np.load', (['c_path'], {}), '(c_path)\n', (1527, 1535), True, 'import numpy as np\n'), ((1885, 1971), 'os.path.join', 'os.path.join', (['f"""/data/{FLAGS.data_type_dci}"""', 'f"""factors_{FLAGS.data_type_dci}.npz"""'], {}), "(f'/data/{FLAGS.data_type_dci}',\n f'factors_{FLAGS.data_type_dci}.npz')\n", (1897, 1971), False, 'import os\n'), ((2003, 2077), 'os.path.join', 'os.path.join', (['f"""/data/{FLAGS.data_type_dci}"""', 'f"""{FLAGS.data_type_dci}.npz"""'], {}), "(f'/data/{FLAGS.data_type_dci}', f'{FLAGS.data_type_dci}.npz')\n", (2015, 2077), False, 'import os\n'), ((2425, 2451), 'numpy.transpose', 'np.transpose', (['c', '(0, 2, 1)'], {}), '(c, (0, 2, 1))\n', (2437, 2451), True, 'import numpy as np\n'), ((2490, 2505), 'numpy.load', 'np.load', (['z_path'], {}), '(z_path)\n', (2497, 2505), True, 'import numpy as np\n'), ((4910, 4954), 'numpy.insert', 'np.insert', (['importance_matrix', 'idx', '(0)'], {'axis': '(1)'}), '(importance_matrix, idx, 0, axis=1)\n', (4919, 4954), True, 'import numpy as np\n'), ((5560, 5779), 'numpy.savez', 'np.savez', (['f"""{out_dir}/dci_assign_2_{FLAGS.dci_seed}"""'], {'informativeness_train': 'i_train', 'informativeness_test': 'i_test', 'disentanglement': 'd', 'completeness': 'c', 
'disentanglement_assign': 'd_assign', 'completeness_assign': 'c_assign'}), "(f'{out_dir}/dci_assign_2_{FLAGS.dci_seed}', informativeness_train=\n i_train, informativeness_test=i_test, disentanglement=d, completeness=c,\n disentanglement_assign=d_assign, completeness_assign=c_assign)\n", (5568, 5779), True, 'import numpy as np\n'), ((5839, 5981), 'numpy.savez', 'np.savez', (['f"""{out_dir}/dci_{FLAGS.dci_seed}"""'], {'informativeness_train': 'i_train', 'informativeness_test': 'i_test', 'disentanglement': 'd', 'completeness': 'c'}), "(f'{out_dir}/dci_{FLAGS.dci_seed}', informativeness_train=i_train,\n informativeness_test=i_test, disentanglement=d, completeness=c)\n", (5847, 5981), True, 'import numpy as np\n'), ((6877, 6989), 'disentanglement_lib.visualize.visualize_scores.heat_square', 'visualize_scores.heat_square', (['importance_matrix', 'out_dir', 'f"""dci_matrix_{FLAGS.dci_seed}"""', '"""x_axis"""', '"""y_axis"""'], {}), "(importance_matrix, out_dir,\n f'dci_matrix_{FLAGS.dci_seed}', 'x_axis', 'y_axis')\n", (6905, 6989), False, 'from disentanglement_lib.visualize import visualize_scores\n'), ((7119, 7188), 'numpy.save', 'np.save', (['f"""{out_dir}/impt_matrix_{FLAGS.dci_seed}"""', 'importance_matrix'], {}), "(f'{out_dir}/impt_matrix_{FLAGS.dci_seed}', importance_matrix)\n", (7126, 7188), True, 'import numpy as np\n'), ((2382, 2397), 'numpy.load', 'np.load', (['c_path'], {}), '(c_path)\n', (2389, 2397), True, 'import numpy as np\n'), ((3162, 3178), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3176, 3178), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3512, 3533), 'numpy.arange', 'np.arange', (['c_shape[1]'], {}), '(c_shape[1])\n', (3521, 3533), True, 'import numpy as np\n'), ((3744, 3772), 'numpy.diff', 'np.diff', (['c_reshape[:8000, i]'], {}), '(c_reshape[:8000, i])\n', (3751, 3772), True, 'import numpy as np\n'), ((4828, 4843), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (4837, 4843), True, 'import numpy as np\n'), ((5234, 5265), 'numpy.sum', 'np.sum', (['impt_mat_assign'], {'axis': '(0)'}), '(impt_mat_assign, axis=0)\n', (5240, 5265), True, 'import numpy as np\n'), ((6158, 6189), 'numpy.transpose', 'np.transpose', (['importance_matrix'], {}), '(importance_matrix)\n', (6170, 6189), True, 'import numpy as np\n'), ((6380, 6414), 'numpy.transpose', 'np.transpose', (['impt_mat_assign_norm'], {}), '(impt_mat_assign_norm)\n', (6392, 6414), True, 'import numpy as np\n'), ((6660, 6729), 'numpy.save', 'np.save', (['f"""{out_dir}/impt_matrix_{FLAGS.dci_seed}"""', 'importance_matrix'], {}), "(f'{out_dir}/impt_matrix_{FLAGS.dci_seed}', importance_matrix)\n", (6667, 6729), True, 'import numpy as np\n'), ((6746, 6825), 'numpy.save', 'np.save', (['f"""{out_dir}/impt_matrix_assign_{FLAGS.dci_seed}"""', 'impt_mat_assign_norm'], {}), "(f'{out_dir}/impt_matrix_assign_{FLAGS.dci_seed}', impt_mat_assign_norm)\n", (6753, 6825), True, 'import numpy as np\n')]
|
import numpy as np
import math as math
import cv2
def get_ideal_low_pass_filter( shape, cutoff,width):
[h, w] = shape
mask_image = np.zeros((h, w))
for i in range(h):
for j in range(w):
distance = math.sqrt((i - (h / 2)) * (i - (h / 2)) + (j - (w / 2)) * (j - (w / 2)))
if distance >= cutoff-(width/2) and distance <= cutoff+(width/2):
mask_image[i][j] = 0
else:
mask_image[i][j] = 1
return mask_image
def get_ideal_high_pass_filter( shape, cutoff,width):
mask_image = 1 - get_ideal_low_pass_filter(shape, cutoff,width)
return mask_image
def get_butterworth_low_pass_filter( shape, cutoff,order,width):
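    # (Added note: despite the "low pass" name, the loop below implements the
    # Butterworth band-reject transfer function
    # H(D) = 1 / (1 + (D*W / (D^2 - D0^2))^(2*n)), where D is the distance from
    # the spectrum centre, D0 = cutoff, W = width, n = order; D == D0 maps to 0.)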
[h, w] = shape
mask_image = np.zeros((h, w))
for i in range(h):
for j in range(w):
distance = math.sqrt((i - (h / 2)) ** 2 + ((j - (w / 2)) ** 2))
if distance ** 2 - cutoff ** 2 == 0:
mask_image[i][j] = 0
else:
mask_image[i][j] = 1 / (1 + (((distance * width) / (distance ** 2 - cutoff ** 2)) ** (2 * order)))
return mask_image
def get_butterworth_high_pass_filter( shape, cutoff,order,width):
mask_image = 1 - get_butterworth_low_pass_filter(shape, cutoff,order,width)
return mask_image
def get_gaussian_low_pass_filter(shape, cutoff,width):
[h, w] = shape
mask_image = np.zeros((h, w))
for i in range(h):
for j in range(w):
distance = math.sqrt((i - (h / 2)) ** 2 + ((j - (w / 2)) ** 2))
if (distance == 0):
mask_image[i][j] = 0
else:
mask_image[i][j] = 1 - math.exp(-(((distance ** 2 - cutoff ** 2) / (distance * width)) ** 2))
return mask_image
def get_gaussian_high_pass_filter(shape, cutoff,width):
mask_image = 1 - get_gaussian_low_pass_filter(shape, cutoff,width)
return mask_image
def post_process_image(image):
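    # Linear min-max contrast stretch: map [image.min(), image.max()] onto
    # [new_min, new_max] = [0, 255] and return the result as uint8.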
c_min = np.min(image)
c_max = np.max(image)
new_min = 0
new_max = 255
stretch_image = np.zeros((np.shape(image)), dtype=np.uint8)
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
stretch_image[i][j] = (image[i][j] - c_min) * ((new_max - new_min) / (c_max - c_min)) + new_min
return stretch_image
def filtering_band_filter(image,cutoff,width,filtertype):
s = image.shape
fft_image = np.fft.fft2(image)
shift_image = np.fft.fftshift(fft_image)
dft_image = np.uint8(np.log(np.absolute(shift_image)) * 10)
if filtertype=='Ideal Low Pass' :
mask = get_ideal_low_pass_filter(s, cutoff,width)
elif filtertype=='Ideal High Pass' :
mask=get_ideal_high_pass_filter(s, cutoff,width)
elif filtertype=='Gaussain Low Pass':
mask=get_gaussian_low_pass_filter(s,cutoff,width)
elif filtertype=='Gaussian High Pass':
mask=get_gaussian_high_pass_filter(s,cutoff,width)
elif filtertype=='Butterworth Low Pass':
mask=get_butterworth_low_pass_filter(s,cutoff,width,order=2)
else:
mask=0
filter_image = shift_image * mask
filter_finalimg =np.uint8(np.log(np.absolute(filter_image))*10)
ishift_image = np.fft.ifftshift(filter_image)
ifft_image = np.fft.ifft2(ishift_image)
mag_image = np.absolute(ifft_image)
f = post_process_image(mag_image)
return [f,filter_finalimg]
def filtering_band_filter_order(image,cutoff,order,width,filtertype):
s = image.shape
fft_image = np.fft.fft2(image)
shift_image = np.fft.fftshift(fft_image)
dft_image = np.uint8(np.log(np.absolute(shift_image)) * 10)
if filtertype=='Butterworth Low Pass':
mask=get_butterworth_low_pass_filter(s,cutoff,order,width)
else:
mask=get_butterworth_high_pass_filter(s,cutoff,order,width)
filter_image = shift_image * (mask*200)
# filter_finalimg1= np.log(np.absolute(filter_image)) * 10
filter_finalimg = np.uint8(np.log(np.absolute(filter_image)) * 10)
# cv2.imshow("ButterLow",filter_finalimg)
#cv2.waitKey(0)
ishift_image = np.fft.ifftshift(filter_image)
ifft_image = np.fft.ifft2(ishift_image)
mag_image = np.absolute(ifft_image)
f = post_process_image(mag_image)
return [f,filter_finalimg]
|
[
"numpy.fft.ifftshift",
"numpy.absolute",
"math.exp",
"math.sqrt",
"numpy.zeros",
"numpy.shape",
"numpy.min",
"numpy.max",
"numpy.fft.fftshift",
"numpy.fft.fft2",
"numpy.fft.ifft2"
] |
[((143, 159), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (151, 159), True, 'import numpy as np\n'), ((747, 763), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (755, 763), True, 'import numpy as np\n'), ((1395, 1411), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (1403, 1411), True, 'import numpy as np\n'), ((1952, 1965), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (1958, 1965), True, 'import numpy as np\n'), ((1978, 1991), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1984, 1991), True, 'import numpy as np\n'), ((2402, 2420), 'numpy.fft.fft2', 'np.fft.fft2', (['image'], {}), '(image)\n', (2413, 2420), True, 'import numpy as np\n'), ((2439, 2465), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft_image'], {}), '(fft_image)\n', (2454, 2465), True, 'import numpy as np\n'), ((3192, 3222), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['filter_image'], {}), '(filter_image)\n', (3208, 3222), True, 'import numpy as np\n'), ((3240, 3266), 'numpy.fft.ifft2', 'np.fft.ifft2', (['ishift_image'], {}), '(ishift_image)\n', (3252, 3266), True, 'import numpy as np\n'), ((3283, 3306), 'numpy.absolute', 'np.absolute', (['ifft_image'], {}), '(ifft_image)\n', (3294, 3306), True, 'import numpy as np\n'), ((3484, 3502), 'numpy.fft.fft2', 'np.fft.fft2', (['image'], {}), '(image)\n', (3495, 3502), True, 'import numpy as np\n'), ((3521, 3547), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft_image'], {}), '(fft_image)\n', (3536, 3547), True, 'import numpy as np\n'), ((4064, 4094), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['filter_image'], {}), '(filter_image)\n', (4080, 4094), True, 'import numpy as np\n'), ((4112, 4138), 'numpy.fft.ifft2', 'np.fft.ifft2', (['ishift_image'], {}), '(ishift_image)\n', (4124, 4138), True, 'import numpy as np\n'), ((4155, 4178), 'numpy.absolute', 'np.absolute', (['ifft_image'], {}), '(ifft_image)\n', (4166, 4178), True, 'import numpy as np\n'), ((2056, 2071), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2064, 2071), True, 'import numpy as np\n'), ((233, 297), 'math.sqrt', 'math.sqrt', (['((i - h / 2) * (i - h / 2) + (j - w / 2) * (j - w / 2))'], {}), '((i - h / 2) * (i - h / 2) + (j - w / 2) * (j - w / 2))\n', (242, 297), True, 'import math as math\n'), ((837, 883), 'math.sqrt', 'math.sqrt', (['((i - h / 2) ** 2 + (j - w / 2) ** 2)'], {}), '((i - h / 2) ** 2 + (j - w / 2) ** 2)\n', (846, 883), True, 'import math as math\n'), ((1485, 1531), 'math.sqrt', 'math.sqrt', (['((i - h / 2) ** 2 + (j - w / 2) ** 2)'], {}), '((i - h / 2) ** 2 + (j - w / 2) ** 2)\n', (1494, 1531), True, 'import math as math\n'), ((2498, 2522), 'numpy.absolute', 'np.absolute', (['shift_image'], {}), '(shift_image)\n', (2509, 2522), True, 'import numpy as np\n'), ((3141, 3166), 'numpy.absolute', 'np.absolute', (['filter_image'], {}), '(filter_image)\n', (3152, 3166), True, 'import numpy as np\n'), ((3580, 3604), 'numpy.absolute', 'np.absolute', (['shift_image'], {}), '(shift_image)\n', (3591, 3604), True, 'import numpy as np\n'), ((3945, 3970), 'numpy.absolute', 'np.absolute', (['filter_image'], {}), '(filter_image)\n', (3956, 3970), True, 'import numpy as np\n'), ((1664, 1732), 'math.exp', 'math.exp', (['(-((distance ** 2 - cutoff ** 2) / (distance * width)) ** 2)'], {}), '(-((distance ** 2 - cutoff ** 2) / (distance * width)) ** 2)\n', (1672, 1732), True, 'import math as math\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by <NAME> at 2019-09-02
"""Step_simulate.py
:description : script
:param :
:returns:
:rtype:
"""
import os
import cobra
import matplotlib.pyplot as plt
import numpy as np
os.chdir('../../ComplementaryData/Step_03_Compare_Refine/')
print('----- loading model -----')
iHL622 = cobra.io.load_json_model('../../ModelFiles/iHL622.json')
# %% <biomass vs od >
print('----- change medium -----')
iHL622.objective = "BIOMASS"
experiment_group = ['A', 'B', 'C', 'D', 'E']
experiment_result = [1.38, 1.88, 1.92, 1.92, 1.90]
experiment_result_err = [0.66, 0.35, 0.69, 0.37, 0.47]
experiment_medium = {
'EX_glc__D_e': [-20, -20, -20, -20, -20, ],
'EX_glyc_e': [0.00, -5.00, -5.00, -10.00, -10.],
'EX_fru_e': [0.00, -1.00, -5.00, -1.00, -5.],
'EX_lac__L_e': [18.215, 21.334, 20.882, 17.881, 16.577],
'EX_ac_e': [17.058, 18.301, 18.285, 19.703, 19.643],
'EX_etoh_e': [5.135, 4.623, 4.312, 2.558, 2.230],
}
# for k in experiment_medium.keys(): # g/L --> mM
# temp = np.array(experiment_medium[k])*1000/iHL622.metabolites.get_by_id(k.replace('EX_','')).formula_weight
# experiment_medium[k] = temp
predict_result = []
for i in range(0, len(experiment_result)):
model = iHL622.copy()
for rea in experiment_medium.keys():
bound = experiment_medium[rea][i]
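        # COBRA exchange-reaction convention: negative fluxes are uptakes, so a
        # measured uptake rate becomes a lower bound (bound, 0) while a measured
        # secretion rate becomes an upper bound (0, bound).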
if bound <= 0:
model.reactions.get_by_id(rea).bounds = (bound, 0)
elif bound >= 0:
model.reactions.get_by_id(rea).bounds = (0, bound)
sol = model.optimize()
predict_result.append(round(sol.objective_value, 3))
print('Experiment Biomass:', experiment_result)
print('iHL622 Biomass:', predict_result)
# %% <vitmin B12 > NOTE: error
# experiment_medium = {
# 'BIOMASS': predict_result,
# 'EX_glc__D_e': [-20, -20, -20, -20, -20, ],
# 'EX_glyc_e': [0.00, -5.00, -5.00, -10.00, -10.],
# 'EX_fru_e': [0.00, -1.00, -5.00, -1.00, -5.], }
#
# predict_result_b12 = []
# for i in range(0, len(experiment_result)):
# model = iHL622.copy()
# rea = cobra.Reaction('EX_adeadocbl_c')
# model.add_reaction(rea)
# model.reactions.get_by_id('EX_adeadocbl_c').reaction = 'adeadocbl_c --> '
# model.objective = 'EX_adeadocbl_c'
# # model.reactions.get_by_id('EX_ade_e').bounds = (0,0)
# for rea in experiment_medium.keys():
# bound = experiment_medium[rea][i]
# if rea == 'BIOMASS':
# model.reactions.get_by_id(rea).bounds = (bound, bound)
#
# elif bound <= 0:
# model.reactions.get_by_id(rea).bounds = (bound, 0)
# elif bound >= 0:
# model.reactions.get_by_id(rea).bounds = (0, bound)
# predict_result_b12.append(
# round(model.optimize().objective_value * 1355.365, 3)) # Cobalamin: Molar mass: 1,355.365 g/mol
# print('iHL622 b12:', predict_result_b12)
# %% <draw>
import brewer2mpl
fig, ax = plt.subplots(figsize=(6, 4))
ax2 = ax.twinx()
bmap = brewer2mpl.get_map('Set2', 'qualitative', 7)
colors = bmap.mpl_colors
# plt.ylim((0.0, 1.0))
x = np.arange(0, 5)
width = 0.25 # the width of the bars
rects2 = ax.bar(x + width / 2, predict_result, width, label='Model Growth rate', color=colors[0]) # ,
rects1 = ax2.bar(x - width / 2, experiment_result, width, yerr=experiment_result_err, label='Experiment OD600',
color=colors[1]) #
rects1_ = ax2.bar(0, 0, label='Model Growth rate', color=colors[0], )
# Add some text for labels, title and custom x-axis tick labels, etc.
ax2.set_ylabel("OD600", fontsize=16)
ax.set_ylabel('Growth rate (mmol/gDW/h)', fontsize=16) # color = 'tab:blue'
# ax.tick_params(axis='y') # , labelcolor='tab:blue'
ax2.set_ylim((0, 3.2))
ax.set_ylim((0, 2.2))
ax.set_title('Growth rate simulation', fontsize=18)
labels = [''] + experiment_group
ax2.set_xticklabels(labels, fontsize=16)
ax2.legend(loc='best', fontsize=11)
# ax2.legend(loc='best', fontsize=14)
fig.tight_layout()
plt.show()
fig.savefig('Growth rate simulation case2_1.png')
|
[
"matplotlib.pyplot.show",
"numpy.arange",
"brewer2mpl.get_map",
"cobra.io.load_json_model",
"matplotlib.pyplot.subplots",
"os.chdir"
] |
[((234, 293), 'os.chdir', 'os.chdir', (['"""../../ComplementaryData/Step_03_Compare_Refine/"""'], {}), "('../../ComplementaryData/Step_03_Compare_Refine/')\n", (242, 293), False, 'import os\n'), ((338, 394), 'cobra.io.load_json_model', 'cobra.io.load_json_model', (['"""../../ModelFiles/iHL622.json"""'], {}), "('../../ModelFiles/iHL622.json')\n", (362, 394), False, 'import cobra\n'), ((2913, 2941), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (2925, 2941), True, 'import matplotlib.pyplot as plt\n'), ((2966, 3010), 'brewer2mpl.get_map', 'brewer2mpl.get_map', (['"""Set2"""', '"""qualitative"""', '(7)'], {}), "('Set2', 'qualitative', 7)\n", (2984, 3010), False, 'import brewer2mpl\n'), ((3064, 3079), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (3073, 3079), True, 'import numpy as np\n'), ((3946, 3956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3954, 3956), True, 'import matplotlib.pyplot as plt\n')]
|
import miniml
import numpy as np
# Adapted from:
# https://lucidar.me/en/neural-networks/curve-fitting-nonlinear-regression/
# init data
np.random.seed(3)
X = np.linspace(-10, 10, num=1000)
Y = 0.1*X*np.cos(X) + 0.1*np.random.normal(size=1000)
X = X.reshape((len(X), 1))
Y = Y.reshape((len(Y), 1))
# create model
model = miniml.Model()
model.dense(1, None, 'plain')
model.dense(64, 'relu', 'he')
model.dense(32, 'relu', 'he')
model.dense(1, None, 'plain')
# init params
rate = 0.01
epochs = 1000
# train model
optimizer = miniml.Adam(
cost = 'mse',
epochs = epochs,
init_seed = 48,
store = 10,
verbose = 200)
costs = optimizer.train(model, X, Y, rate)
# plot results
miniml.plot_costs(epochs, costs=costs)
miniml.plot_regression(model, X, Y)
|
[
"miniml.Model",
"numpy.random.seed",
"miniml.plot_costs",
"miniml.plot_regression",
"numpy.linspace",
"numpy.cos",
"numpy.random.normal",
"miniml.Adam"
] |
[((139, 156), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (153, 156), True, 'import numpy as np\n'), ((161, 191), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)'], {'num': '(1000)'}), '(-10, 10, num=1000)\n', (172, 191), True, 'import numpy as np\n'), ((325, 339), 'miniml.Model', 'miniml.Model', ([], {}), '()\n', (337, 339), False, 'import miniml\n'), ((528, 603), 'miniml.Adam', 'miniml.Adam', ([], {'cost': '"""mse"""', 'epochs': 'epochs', 'init_seed': '(48)', 'store': '(10)', 'verbose': '(200)'}), "(cost='mse', epochs=epochs, init_seed=48, store=10, verbose=200)\n", (539, 603), False, 'import miniml\n'), ((695, 733), 'miniml.plot_costs', 'miniml.plot_costs', (['epochs'], {'costs': 'costs'}), '(epochs, costs=costs)\n', (712, 733), False, 'import miniml\n'), ((734, 769), 'miniml.plot_regression', 'miniml.plot_regression', (['model', 'X', 'Y'], {}), '(model, X, Y)\n', (756, 769), False, 'import miniml\n'), ((202, 211), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (208, 211), True, 'import numpy as np\n'), ((218, 245), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000)'}), '(size=1000)\n', (234, 245), True, 'import numpy as np\n')]
|
from copy import deepcopy
from .helpers import set_n_jobs, replace_with_in_params
from sklearn.ensemble import (StackingRegressor, StackingClassifier,
VotingClassifier, VotingRegressor)
from joblib import Parallel, delayed
from sklearn.base import clone, is_classifier
from sklearn.utils import Bunch
from sklearn.model_selection import check_cv, cross_val_predict
import numpy as np
import pandas as pd
from .base import _fit_single_estimator, _get_est_fit_params
from ..main.CV import BPtCV
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.metaestimators import available_if, if_delegate_has_method
from sklearn.preprocessing import LabelEncoder
from .helpers import (get_mean_fis, get_concat_fis, get_concat_fis_len,
check_for_nested_loader, get_nested_final_estimator)
def _fit_all_estimators(self, X, y, sample_weight=None, mapping=None,
fit_index=None):
# Validate
names, all_estimators = self._validate_estimators()
# Fit all estimators
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_single_estimator)(clone(est), X, y, sample_weight,
mapping, fit_index)
for est in all_estimators if est != 'drop'
)
self.named_estimators_ = Bunch()
est_fitted_idx = 0
for name_est, org_est in zip(names, all_estimators):
if org_est != 'drop':
self.named_estimators_[name_est] = self.estimators_[
est_fitted_idx]
est_fitted_idx += 1
else:
self.named_estimators_[name_est] = 'drop'
return names, all_estimators
def voting_fit(self, X, y, sample_weight=None, mapping=None,
fit_index=None, **kwargs):
# Fit self.estimators_ on all data
self._fit_all_estimators(
X, y, sample_weight=sample_weight, mapping=mapping,
fit_index=fit_index)
return self
def _get_cv_inds(self, index):
# If BPtCV call get_cv
if isinstance(self.cv, BPtCV):
random_state = None
if hasattr(self, 'random_state'):
random_state = self.random_state
return self.cv.get_cv(fit_index=index,
random_state=random_state,
return_index=True)
# Otherwise treat as sklearn arg directly
return self.cv
def stacking_fit(self, X, y, sample_weight=None, mapping=None,
fit_index=None, **kwargs):
# Validate final estimator
self._validate_final_estimator()
# Fit self.estimators_ on all data
names, all_estimators = self._fit_all_estimators(
X, y, sample_weight=sample_weight, mapping=mapping,
fit_index=fit_index)
    # To train the meta-classifier using as much data as possible, we use
    # cross-validation to obtain the output of the stacked estimators.
    # Get cv inds, handling the BPtCV case
    cv_inds = self._get_cv_inds(fit_index)
    # To ensure that the data provided to each estimator are the same, we
    # need to set the random state of the cv, if there is one, and take a copy.
cv = check_cv(cv_inds, y=y, classifier=is_classifier(self))
if hasattr(cv, 'random_state') and cv.random_state is None:
cv.random_state = np.random.RandomState()
# Proc stack method
stack_method = [self.stack_method] * len(all_estimators)
self.stack_method_ = [
self._method_name(name, est, meth)
for name, est, meth in zip(names, all_estimators, stack_method)
]
# Base fit params for sample weight
sample_weight_params = ({"sample_weight": sample_weight}
if sample_weight is not None else None)
    # Get the fit params for each individual estimator
all_fit_params = [_get_est_fit_params(est, mapping=mapping,
fit_index=fit_index,
other_params=sample_weight_params)
for est in all_estimators]
# Catch rare error - TODO come up with fix
if X.shape[0] == X.shape[1]:
        raise RuntimeError('Same number of data points and '
                           'features can lead to error.')
# Make the cross validated internal predictions to train
# the final_estimator
predictions = Parallel(n_jobs=self.n_jobs)(
delayed(cross_val_predict)(clone(est), X, y, cv=deepcopy(cv),
method=meth, n_jobs=self.n_jobs,
fit_params=fit_params,
verbose=self.verbose)
for est, meth, fit_params in zip(all_estimators,
self.stack_method_,
all_fit_params) if est != 'drop'
)
    # Only estimators that are not None and not 'drop' will be used in
    # transform. Remove the corresponding entries from stack_method_ as well.
self.stack_method_ = [
meth for (meth, est) in zip(self.stack_method_, all_estimators)
if est != 'drop'
]
# @TODO make sure train data index is concatenated correctly
X_meta = self._concatenate_predictions(X, predictions)
_fit_single_estimator(self.final_estimator_, X_meta, y,
sample_weight=sample_weight,
mapping=None,
fit_index=fit_index)
return self
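# Worked illustration of the stacking step above (sizes assumed, not from the
# original code): with two base regressors and an (n_samples,) target,
# cross_val_predict yields two out-of-fold prediction vectors; they are
# stacked by _concatenate_predictions into an (n_samples, 2) meta-feature
# matrix X_meta, and final_estimator_ is fit on X_meta rather than on X.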
def ensemble_classifier_fit(self, X, y,
sample_weight=None, mapping=None,
fit_index=None, **kwargs):
check_classification_targets(y)
    # To make compatible with both Voting and Stacking ...
self._le = LabelEncoder().fit(y)
self.le_ = self._le
self.classes_ = self._le.classes_
transformed_y = self._le.transform(y)
return self.bpt_fit(X, transformed_y,
sample_weight=sample_weight,
mapping=mapping,
fit_index=fit_index,
**kwargs)
def _base_transform_feat_names(self, X_df, encoders=None, nested_model=False):
    '''This base function works under the assumption of calculating
    mean coefficients.'''
    # Check each sub-estimator for the
    # transform_feat_names method
all_feat_names = []
for est in self.estimators_:
if hasattr(est, 'transform_feat_names'):
feat_names = est.transform_feat_names(X_df, encoders=encoders,
nested_model=nested_model)
all_feat_names.append(feat_names)
# If None found
if len(all_feat_names) == 0:
return list(X_df)
# If some found, only return updated if all the same
# So check if all same as first
# if any not the same, return base
for fn in all_feat_names[1:]:
if fn != all_feat_names[0]:
return list(X_df)
# Otherwise, return first
return all_feat_names[0]
def _loader_transform_feat_names(self, X_df, encoders=None, nested_model=False):
# Check each estimator
all_feat_names = []
for est in self.estimators_:
if hasattr(est, 'transform_feat_names'):
feat_names = est.transform_feat_names(X_df, encoders=encoders,
nested_model=nested_model)
all_feat_names.append(feat_names)
# If none found
if len(all_feat_names) == 0:
return list(X_df)
# Get concat list
all_concat = list(np.concatenate(all_feat_names))
# If all unique, return concat
if len(set(all_concat)) == len(all_concat):
return all_concat
# Otherwise, append unique identifier
all_concat = []
for i, fn in enumerate(all_feat_names):
all_concat += [str(i) + '_' + str(name) for name in fn]
return all_concat
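# Worked illustration of the naming above (feature names assumed, not from the
# original code): if two estimators both report ['f0', 'f1'], the loader
# variant returns ['0_f0', '0_f1', '1_f0', '1_f1'] because the plain
# concatenation contains duplicates, while the base variant returns
# ['f0', 'f1'] only because the two lists agree.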
def _transform_feat_names(self, X_df, encoders=None, nested_model=False):
if self.has_nested_loader():
return self._loader_transform_feat_names(X_df, encoders=encoders, nested_model=nested_model)
else:
return self._base_transform_feat_names(X_df, encoders=encoders, nested_model=nested_model)
def _get_fis_lens(self):
    '''This method is used in the loader version of voting ensembles'''
# If already stored as attribute, use that
if hasattr(self, 'concat_est_lens_'):
return getattr(self, 'concat_est_lens_')
# Try coef
fi_len = get_concat_fis_len(self.estimators_, 'coef_')
if fi_len is not None:
return fi_len
# Then feature importances
fi_len = get_concat_fis_len(self.estimators_, 'feature_importances_')
if fi_len is not None:
return fi_len
    # TODO - could do a search in each base estimator to try to determine
    # the final number of features?
return None
def base_inverse_transform_fis(self, fis, avg_method):
# If not loader, return as is
if not self.has_nested_loader():
return fis
# Get underlying lengths
concat_fi_lens_ = self._get_fis_lens()
if concat_fi_lens_ is None:
return fis
# Go through and inverse transform each chunk
fi_chunks, ind = [], 0
for est, l in zip(self.estimators_, concat_fi_lens_):
# If any don't have it, return passed original
if not hasattr(est, 'inverse_transform_fis'):
return fis
# Append the inverse transformed chunk
fi_chunks.append(est.inverse_transform_fis(fis.iloc[ind:ind+l]))
ind += l
# Combine together in DataFrame
fi_df = pd.DataFrame(fi_chunks)
avg = avg_method(fi_df)
# Put back together in series, and return that
return pd.Series(avg, index=list(fi_df))
def voting_inverse_transform_fis(self, fis):
def mean_avg(fi_df):
return np.mean(np.array(fi_df), axis=0)
return self.base_inverse_transform_fis(fis, mean_avg)
def _get_estimator_fi_weights(estimator):
weights = None
if hasattr(estimator, 'coef_'):
weights = getattr(estimator, 'coef_')
if weights is None and hasattr(estimator, 'feature_importances_'):
weights = getattr(estimator, 'feature_importances_')
if weights is None:
return None
# Set to absolute
weights = np.abs(weights)
# Shape if not 1D is (1, n_features) or (n_classes, n_features)
# TODO handle multiclass
if len(np.shape(weights)) > 1:
weights = weights[0]
return weights
def stacking_inverse_transform_fis(self, fis):
def stacked_avg(fi_df):
        # The first assumption we make is that we are
        # only interested in absolute values
fis = np.abs(np.array(fi_df))
# Use coef / feat importance from estimator as weights
weights = _get_estimator_fi_weights(self.final_estimator_)
if weights is None:
return None
# Return weighted average
try:
return np.average(fis, axis=0, weights=weights)
except ZeroDivisionError:
return np.average(fis, axis=0)
return self.base_inverse_transform_fis(fis, stacked_avg)
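# Worked illustration of the weighting above (values assumed, not from the
# original code): if final_estimator_ exposes coef_ == [0.25, 0.75], the two
# base estimators' absolute importances are combined as a weighted mean,
# (0.25 * |fis_0| + 0.75 * |fis_1|) / (0.25 + 0.75), via np.average.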
def has_nested_loader(self):
# If not already set, set
if not hasattr(self, 'nested_loader_'):
setattr(self, 'nested_loader_',
check_for_nested_loader(self.estimators_))
return getattr(self, 'nested_loader_')
def ensemble_transform(self, X):
# If nested model case, return concatenation of transforms
if self.has_nested_loader():
# Init
Xts, self.concat_est_lens_ = [], []
for estimator in self.estimators_:
# Get transformed X, passing along nested model True
Xt = estimator.transform(X, nested_model=True)
# Keep track of transformed + length
Xts.append(Xt)
self.concat_est_lens_.append(Xt.shape[-1])
# Return concat along axis 1
return np.concatenate(Xts, axis=1)
# TODO - non nested loader case, but still nested model case
else:
raise RuntimeError('Not implemented.')
def _get_estimators_pred_chunks(self, X, method='predict'):
# Convert method to list if not
if not isinstance(method, list):
method = [method for _ in range(len(self.estimators_))]
# Go through each estimator, to make predictions
# on just the chunk of transformed input relevant for each.
pred_chunks, ind = [], 0
for estimator, l, m in zip(self.estimators_, self.concat_est_lens_, method):
# Get the corresponding final estimator
final_estimator = get_nested_final_estimator(estimator)
# Get predictions
pred_chunk = getattr(final_estimator, m)(X[:, ind:ind+l])
# Append predictions
pred_chunks.append(pred_chunk)
# Increment index
ind += l
return np.asarray(pred_chunks)
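# Worked illustration of the chunking above (shapes assumed, not from the
# original code): if two nested-loader estimators produced transformed blocks
# of width 3 and 2, then concat_est_lens_ == [3, 2] and the concatenated X has
# 5 columns; the first final estimator predicts from X[:, 0:3], the second
# from X[:, 3:5], and pred_chunks has shape (2, n_samples).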
def _stacked_classifier_predict(self, X, method, **predict_params):
check_is_fitted(self)
# Nested loader case
if self.has_nested_loader():
# Get predict probas from each
predict_probas = self._get_estimators_pred_chunks(X, method=self.stack_method_)
concat_preds = self._concatenate_predictions(X, predict_probas)
# Make preds with final estimator on concat preds
y_pred = getattr(self.final_estimator_, method)(concat_preds)
# If predict, cast to inverse transform
if method == 'predict':
y_pred = self._le.inverse_transform(y_pred)
return y_pred
# TODO finish other case for stacked classifier
raise RuntimeError('Not Implemented')
class BPtStackingRegressor(StackingRegressor):
_needs_mapping = True
_needs_fit_index = True
_fit_all_estimators = _fit_all_estimators
fit = stacking_fit
_get_cv_inds = _get_cv_inds
has_nested_loader = has_nested_loader
transform_feat_names = _transform_feat_names
_base_transform_feat_names = _base_transform_feat_names
_loader_transform_feat_names = _loader_transform_feat_names
_get_fis_lens = _get_fis_lens
inverse_transform_fis = stacking_inverse_transform_fis
base_inverse_transform_fis = base_inverse_transform_fis
_get_estimators_pred_chunks = _get_estimators_pred_chunks
ensemble_transform = ensemble_transform
@property
def feature_importances_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'feature_importances_')
# TODO - average according to stacked ...
@property
def coef_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'coef_')
# TODO - average according to stacked ...
def transform(self, X, nested_model=False):
# Not nested, base case transform
if not nested_model:
return super().transform(X)
return self.ensemble_transform(X)
def predict(self, X):
# Base case is when number of features stays the same as expected.
if X.shape[-1] == self.n_features_in_:
return super().predict(X)
check_is_fitted(self)
# Nested loader case
if self.has_nested_loader():
# If nested loader, then the expectation is that this
# predict is receiving the concat fully model nested transformed
# output from each of the self.estimators_
pred_chunks = self._get_estimators_pred_chunks(X, method='predict').T
# Return predictions from final estimator
return self.final_estimator_.predict(pred_chunks)
# TODO fill in other case?
raise RuntimeError('Not Implemented')
class BPtStackingClassifier(StackingClassifier):
_needs_mapping = True
_needs_fit_index = True
_fit_all_estimators = _fit_all_estimators
bpt_fit = stacking_fit
fit = ensemble_classifier_fit
_get_cv_inds = _get_cv_inds
has_nested_loader = has_nested_loader
transform_feat_names = _transform_feat_names
_base_transform_feat_names = _base_transform_feat_names
_loader_transform_feat_names = _loader_transform_feat_names
_get_fis_lens = _get_fis_lens
inverse_transform_fis = stacking_inverse_transform_fis
base_inverse_transform_fis = base_inverse_transform_fis
_get_estimators_pred_chunks = _get_estimators_pred_chunks
ensemble_transform = ensemble_transform
_stacked_classifier_predict = _stacked_classifier_predict
@property
def feature_importances_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'feature_importances_')
# TODO - average according to stacked ...
@property
def coef_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'coef_')
# TODO - average according to stacked ...
def transform(self, X, nested_model=False):
# Not nested, base case transform
if not nested_model:
return super().transform(X)
return self.ensemble_transform(X)
@if_delegate_has_method(delegate="final_estimator_")
def predict(self, X, **predict_params):
# Base case
if X.shape[-1] == self.n_features_in_:
return super().predict(X, **predict_params)
# Other case
return self._stacked_classifier_predict(X, method='predict', **predict_params)
@if_delegate_has_method(delegate="final_estimator_")
def predict_proba(self, X):
# Base case
if X.shape[-1] == self.n_features_in_:
return super().predict_proba(X)
# Other case
return self._stacked_classifier_predict(X, method='predict_proba')
@if_delegate_has_method(delegate="final_estimator_")
def decision_function(self, X):
# Base case
if X.shape[-1] == self.n_features_in_:
return super().decision_function(X)
# Other case
return self._stacked_classifier_predict(X, method='decision_function')
class BPtVotingRegressor(VotingRegressor):
# Set tags
_needs_mapping = True
_needs_fit_index = True
# Override / set methods
_fit_all_estimators = _fit_all_estimators
fit = voting_fit
has_nested_loader = has_nested_loader
transform_feat_names = _transform_feat_names
_base_transform_feat_names = _base_transform_feat_names
_loader_transform_feat_names = _loader_transform_feat_names
_get_fis_lens = _get_fis_lens
inverse_transform_fis = voting_inverse_transform_fis
base_inverse_transform_fis = base_inverse_transform_fis
ensemble_transform = ensemble_transform
_get_estimators_pred_chunks = _get_estimators_pred_chunks
@property
def feature_importances_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'feature_importances_')
return get_mean_fis(self.estimators_, 'feature_importances_')
@property
def coef_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'coef_')
return get_mean_fis(self.estimators_, 'coef_')
def predict(self, X):
# Make sure fitted
check_is_fitted(self)
# Base case is when number of features stays the same as expected.
if X.shape[-1] == self.n_features_in_:
return super().predict(X)
# Otherwise, two cases, nested loader or not
if self.has_nested_loader():
# If nested loader, then the expectation is that this
# predict is receiving the concat fully model nested transformed
# output from each of the self.estimators_
pred_chunks = self._get_estimators_pred_chunks(X, method='predict')
# The voting ensemble just uses the mean from each
mean_preds = np.mean(pred_chunks, axis=0)
return mean_preds
# TODO fill in other case?
raise RuntimeError('Not Implemented')
def transform(self, X, nested_model=False):
# Not nested, base case transform
if not nested_model:
return super().transform(X)
return self.ensemble_transform(X)
class BPtVotingClassifier(VotingClassifier):
_needs_mapping = True
_needs_fit_index = True
_fit_all_estimators = _fit_all_estimators
bpt_fit = voting_fit
fit = ensemble_classifier_fit
has_nested_loader = has_nested_loader
transform_feat_names = _transform_feat_names
_base_transform_feat_names = _base_transform_feat_names
_loader_transform_feat_names = _loader_transform_feat_names
_get_fis_lens = _get_fis_lens
inverse_transform_fis = voting_inverse_transform_fis
base_inverse_transform_fis = base_inverse_transform_fis
ensemble_transform = ensemble_transform
_get_estimators_pred_chunks = _get_estimators_pred_chunks
@property
def feature_importances_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'feature_importances_')
return get_mean_fis(self.estimators_, 'feature_importances_')
@property
def coef_(self):
if self.has_nested_loader():
return get_concat_fis(self.estimators_, 'coef_')
return get_mean_fis(self.estimators_, 'coef_')
def _check_voting(self):
if self.voting == "hard":
raise AttributeError(
f"predict_proba is not available when voting={repr(self.voting)}"
)
return True
def predict(self, X):
# Make sure fitted
check_is_fitted(self)
# Base case is when number of features stays the same as expected.
if X.shape[-1] == self.n_features_in_:
return super().predict(X)
# If loader based
if self.has_nested_loader():
# If nested loader, then the expectation is that this
# predict is receiving the concat fully model nested transformed
# output from each of the self.estimators_
# If soft voting, can use predict proba instead
if self.voting == "soft":
maj = np.argmax(self.predict_proba(X), axis=1)
# Hard voting, use base pred
else:
                # Get predictions with the special nested-loader path
                predictions = self._get_estimators_pred_chunks(X, method='predict')
                # Get majority vote
maj = np.apply_along_axis(
lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)),
axis=1,
arr=predictions,
)
# Use label encoder to inverse transform before returning
maj = self.le_.inverse_transform(maj)
return maj
# TODO fill in other case?
raise RuntimeError('Not Implemented')
def transform(self, X, nested_model=False):
# Not nested, base case transform
if not nested_model:
return super().transform(X)
return self.ensemble_transform(X)
@available_if(_check_voting)
def predict_proba(self, X):
check_is_fitted(self)
# Base case is when number of features stays the same as expected.
if X.shape[-1] == self.n_features_in_:
return super().predict_proba(X)
# Otherwise, two cases, nested loader or not
if self.has_nested_loader():
# Get predict probas from each
predict_probas = self._get_estimators_pred_chunks(X, method='predict_proba')
# Calculate average
avg = np.average(predict_probas, axis=0, weights=self._weights_not_none)
# And return
return avg
# TODO fill in other case?
raise RuntimeError('Not Implemented')
class EnsembleWrapper():
def __init__(self, model_params, ensemble_params,
_get_ensembler, n_jobs, random_state):
self.model_params = model_params
self.ensemble_params = ensemble_params
self._get_ensembler = _get_ensembler
self.n_jobs = n_jobs
self.random_state = random_state
def _update_params(self, p_name, to_add):
# Get existing
params = getattr(self, p_name)
# Fill in new
new_params = {}
for key in params:
new_params[to_add + '__' + key] = params[key]
# Update
setattr(self, p_name, new_params)
def _update_model_ensemble_params(self, to_add, model=True, ensemble=True):
if model:
self._update_params('model_params', to_add)
if ensemble:
self._update_params('ensemble_params', to_add)
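    # Worked illustration of the renaming above (values assumed, not from the
    # original code): starting from model_params == {'C': 1.0}, calling
    # _update_model_ensemble_params('Default Voting', ensemble=False) rewrites
    # the dict to {'Default Voting__C': 1.0}, matching sklearn's
    # '<step name>__<param>' nesting convention.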
def _basic_ensemble(self, models, name, ensemble=False):
if len(models) == 1:
return models
else:
basic_ensemble = self._get_ensembler(models)
self._update_model_ensemble_params(name, ensemble=ensemble)
return [(name, basic_ensemble)]
def get_updated_params(self):
self.model_params.update(self.ensemble_params)
return self.model_params
def wrap_ensemble(self, models, ensemble, ensemble_params,
final_estimator=None,
final_estimator_params=None):
        # If no ensemble is passed, return either the single model
        # or a voting wrapper
if ensemble is None or len(ensemble) == 0:
return self._basic_ensemble(models=models,
name='Default Voting',
ensemble=True)
# Otherwise special ensembles
else:
# If needs a single estimator, but multiple models passed,
# wrap in ensemble!
if ensemble_params.single_estimator:
se_ensemb_name = 'Single-Estimator Compatible Ensemble'
models = self._basic_ensemble(models,
se_ensemb_name,
ensemble=False)
# If no split and single estimator
if ensemble_params.single_estimator:
return self._wrap_single(models, ensemble,
ensemble_params.n_jobs_type)
            # The last case is no split/DES ensemble and also
            # not single-estimator based,
            # e.g., in the case of a stacking regressor.
else:
return self._wrap_multiple(models, ensemble,
final_estimator,
final_estimator_params,
ensemble_params.n_jobs_type,
ensemble_params.cv)
def _wrap_single(self, models, ensemble_info, n_jobs_type):
'''If passed single_estimator flag'''
# Unpack ensemble info
ensemble_name = ensemble_info[0]
ensemble_obj = ensemble_info[1][0]
ensemble_extra_params = ensemble_info[1][1]
        # Since single_estimator is assumed, models here is just a
        # list with one tuple:
        # [(model or ensemble name, model or ensemble)]
base_estimator = models[0][1]
# Set n jobs based on passed type
if n_jobs_type == 'ensemble':
model_n_jobs = 1
ensemble_n_jobs = self.n_jobs
else:
model_n_jobs = self.n_jobs
ensemble_n_jobs = 1
# Set model / base_estimator n_jobs
set_n_jobs(base_estimator, model_n_jobs)
# Make sure random_state is set (should be already)
if hasattr(base_estimator, 'random_state'):
setattr(base_estimator, 'random_state', self.random_state)
# Create the ensemble object
ensemble = ensemble_obj(base_estimator=base_estimator,
**ensemble_extra_params)
# Set ensemble n_jobs
set_n_jobs(ensemble, ensemble_n_jobs)
# Set random state
if hasattr(ensemble, 'random_state'):
setattr(ensemble, 'random_state', self.random_state)
# Wrap as object
new_ensemble = [(ensemble_name, ensemble)]
# Have to change model name to base_estimator
self.model_params =\
replace_with_in_params(self.model_params, models[0][0],
'base_estimator')
# Append ensemble name to all model params
self._update_model_ensemble_params(ensemble_name,
ensemble=False)
return new_ensemble
def _wrap_multiple(self, models, ensemble_info,
final_estimator, final_estimator_params,
n_jobs_type, cv):
'''In case of no split/DES ensemble, and not single estimator based.'''
# Unpack ensemble info
ensemble_name = ensemble_info[0]
ensemble_obj = ensemble_info[1][0]
ensemble_extra_params = ensemble_info[1][1]
        # Models here is just self.models, a list of tuples of
        # all models.
# So, ensemble_extra_params should contain the
# final estimator + other params
# Set model_n_jobs and ensemble n_jobs based on type
if n_jobs_type == 'ensemble':
model_n_jobs = 1
ensemble_n_jobs = self.n_jobs
else:
model_n_jobs = self.n_jobs
ensemble_n_jobs = 1
# Set the model jobs
set_n_jobs(models, model_n_jobs)
        # Make sure random state is propagated
for model in models:
if hasattr(model[1], 'random_state'):
setattr(model[1], 'random_state', self.random_state)
# Determine the parameters to init the ensemble
pass_params = ensemble_extra_params
pass_params['estimators'] = models
# Process final_estimator if passed
if final_estimator is not None:
# Replace name of final estimator w/ final_estimator in params
final_estimator_params =\
replace_with_in_params(params=final_estimator_params,
original=final_estimator[0][0],
replace='final_estimator')
# Add final estimator params to model_params - once name changed
# to avoid potential overlap.
self.model_params.update(final_estimator_params)
# Unpack actual model obj
final_estimator_obj = final_estimator[0][1]
# Set final estimator n_jobs to model n_jobs
set_n_jobs(final_estimator_obj, model_n_jobs)
# Redundant random state check
if hasattr(final_estimator_obj, 'random_state'):
setattr(final_estimator_obj, 'random_state', self.random_state)
# Add to pass params
pass_params['final_estimator'] = final_estimator_obj
# Check if cv passed
if cv is not None:
pass_params['cv'] = cv
# Init the ensemble object
ensemble = ensemble_obj(**pass_params)
# Set ensemble n_jobs
set_n_jobs(ensemble, ensemble_n_jobs)
# Set random state
if hasattr(ensemble, 'random_state'):
setattr(ensemble, 'random_state', self.random_state)
# Wrap as pipeline compatible object
new_ensemble = [(ensemble_name, ensemble)]
# Append ensemble name to all model params
self._update_model_ensemble_params(ensemble_name,
ensemble=False)
return new_ensemble
|
[
"numpy.abs",
"numpy.shape",
"numpy.mean",
"sklearn.base.clone",
"pandas.DataFrame",
"sklearn.utils.Bunch",
"numpy.random.RandomState",
"sklearn.preprocessing.LabelEncoder",
"sklearn.base.is_classifier",
"sklearn.utils.metaestimators.available_if",
"numpy.bincount",
"copy.deepcopy",
"numpy.average",
"numpy.asarray",
"sklearn.utils.metaestimators.if_delegate_has_method",
"numpy.concatenate",
"sklearn.utils.validation.check_is_fitted",
"numpy.array",
"joblib.Parallel",
"joblib.delayed",
"sklearn.utils.multiclass.check_classification_targets"
] |
[((1400, 1407), 'sklearn.utils.Bunch', 'Bunch', ([], {}), '()\n', (1405, 1407), False, 'from sklearn.utils import Bunch\n'), ((5663, 5694), 'sklearn.utils.multiclass.check_classification_targets', 'check_classification_targets', (['y'], {}), '(y)\n', (5691, 5694), False, 'from sklearn.utils.multiclass import check_classification_targets\n'), ((9608, 9631), 'pandas.DataFrame', 'pd.DataFrame', (['fi_chunks'], {}), '(fi_chunks)\n', (9620, 9631), True, 'import pandas as pd\n'), ((10299, 10314), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (10305, 10314), True, 'import numpy as np\n'), ((12871, 12894), 'numpy.asarray', 'np.asarray', (['pred_chunks'], {}), '(pred_chunks)\n', (12881, 12894), True, 'import numpy as np\n'), ((12970, 12991), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (12985, 12991), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((17162, 17213), 'sklearn.utils.metaestimators.if_delegate_has_method', 'if_delegate_has_method', ([], {'delegate': '"""final_estimator_"""'}), "(delegate='final_estimator_')\n", (17184, 17213), False, 'from sklearn.utils.metaestimators import available_if, if_delegate_has_method\n'), ((17497, 17548), 'sklearn.utils.metaestimators.if_delegate_has_method', 'if_delegate_has_method', ([], {'delegate': '"""final_estimator_"""'}), "(delegate='final_estimator_')\n", (17519, 17548), False, 'from sklearn.utils.metaestimators import available_if, if_delegate_has_method\n'), ((17796, 17847), 'sklearn.utils.metaestimators.if_delegate_has_method', 'if_delegate_has_method', ([], {'delegate': '"""final_estimator_"""'}), "(delegate='final_estimator_')\n", (17818, 17847), False, 'from sklearn.utils.metaestimators import available_if, if_delegate_has_method\n'), ((23233, 23260), 'sklearn.utils.metaestimators.available_if', 'available_if', (['_check_voting'], {}), '(_check_voting)\n', (23245, 23260), False, 'from sklearn.utils.metaestimators import available_if, if_delegate_has_method\n'), ((1152, 1180), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (1160, 1180), False, 'from joblib import Parallel, delayed\n'), ((3384, 3407), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (3405, 3407), True, 'import numpy as np\n'), ((4424, 4452), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (4432, 4452), False, 'from joblib import Parallel, delayed\n'), ((7573, 7603), 'numpy.concatenate', 'np.concatenate', (['all_feat_names'], {}), '(all_feat_names)\n', (7587, 7603), True, 'import numpy as np\n'), ((11953, 11980), 'numpy.concatenate', 'np.concatenate', (['Xts'], {'axis': '(1)'}), '(Xts, axis=1)\n', (11967, 11980), True, 'import numpy as np\n'), ((15155, 15176), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (15170, 15176), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((19308, 19329), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (19323, 19329), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((21714, 21735), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (21729, 21735), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((23302, 23323), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (23317, 23323), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((3273, 
3292), 'sklearn.base.is_classifier', 'is_classifier', (['self'], {}), '(self)\n', (3286, 3292), False, 'from sklearn.base import clone, is_classifier\n'), ((5770, 5784), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5782, 5784), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((9854, 9869), 'numpy.array', 'np.array', (['fi_df'], {}), '(fi_df)\n', (9862, 9869), True, 'import numpy as np\n'), ((10424, 10441), 'numpy.shape', 'np.shape', (['weights'], {}), '(weights)\n', (10432, 10441), True, 'import numpy as np\n'), ((10704, 10719), 'numpy.array', 'np.array', (['fi_df'], {}), '(fi_df)\n', (10712, 10719), True, 'import numpy as np\n'), ((10979, 11019), 'numpy.average', 'np.average', (['fis'], {'axis': '(0)', 'weights': 'weights'}), '(fis, axis=0, weights=weights)\n', (10989, 11019), True, 'import numpy as np\n'), ((19950, 19978), 'numpy.mean', 'np.mean', (['pred_chunks'], {'axis': '(0)'}), '(pred_chunks, axis=0)\n', (19957, 19978), True, 'import numpy as np\n'), ((23766, 23832), 'numpy.average', 'np.average', (['predict_probas'], {'axis': '(0)', 'weights': 'self._weights_not_none'}), '(predict_probas, axis=0, weights=self._weights_not_none)\n', (23776, 23832), True, 'import numpy as np\n'), ((1190, 1220), 'joblib.delayed', 'delayed', (['_fit_single_estimator'], {}), '(_fit_single_estimator)\n', (1197, 1220), False, 'from joblib import Parallel, delayed\n'), ((1221, 1231), 'sklearn.base.clone', 'clone', (['est'], {}), '(est)\n', (1226, 1231), False, 'from sklearn.base import clone, is_classifier\n'), ((4462, 4488), 'joblib.delayed', 'delayed', (['cross_val_predict'], {}), '(cross_val_predict)\n', (4469, 4488), False, 'from joblib import Parallel, delayed\n'), ((4489, 4499), 'sklearn.base.clone', 'clone', (['est'], {}), '(est)\n', (4494, 4499), False, 'from sklearn.base import clone, is_classifier\n'), ((11073, 11096), 'numpy.average', 'np.average', (['fis'], {'axis': '(0)'}), '(fis, axis=0)\n', (11083, 11096), True, 'import numpy as np\n'), ((4510, 4522), 'copy.deepcopy', 'deepcopy', (['cv'], {}), '(cv)\n', (4518, 4522), False, 'from copy import deepcopy\n'), ((22656, 22702), 'numpy.bincount', 'np.bincount', (['x'], {'weights': 'self._weights_not_none'}), '(x, weights=self._weights_not_none)\n', (22667, 22702), True, 'import numpy as np\n')]
|
'''
This script plots spectrograms for pre-ictal periods.
Then, it uses NMF to find subgraphs and expressions for pre-ictal periods.
Finally, it calculates states as the subgraph with maximal expression at each time point
and calculates the dissimilarity between states.
Inputs:
    selected_electrodes_elec-{electrodes}.mat
    bandpower_elec-{electrodes}_period-preictal.pkl
Outputs:
    remaining_sz_ids.npy
    nmf_expression_band-{band}_elec-{electrodes}_sz-{sz}.npy
    nmf_components_band-{band}_elec-{electrodes}_sz-{sz}.npy
    lead_sz_t_sec_band-{band}_elec-{electrodes}.npy
    lead_sz_sz_id_band-{band}_elec-{electrodes}.npy
'''
# %%
# %load_ext autoreload
# %autoreload 2
# Imports and environment setup
import numpy as np
import sys
import os
import pandas as pd
import json
from scipy.io import loadmat
import matplotlib.pyplot as plt
from tqdm import tqdm
from os.path import join as ospj
from scipy.stats import zscore
import time
from kneed import KneeLocator
sys.path.append('tools')
from plot_spectrogram import plot_spectrogram
from movmean import movmean
from pull_sz_starts import pull_sz_starts
from pull_patient_localization import pull_patient_localization
from mpl_toolkits.axes_grid1 import make_axes_locatable
from time2ind import time2ind
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from sklearn.decomposition import NMF
from sklearn.metrics.cluster import adjusted_rand_score
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
# Get paths from config file and metadata
with open("config.json") as f:
config = json.load(f)
repo_path = config['repositoryPath']
metadata_path = config['metadataPath']
palette = config['lightColors']
DTW_FLAG = config['flags']["DTW_FLAG"]
electrodes_opt = config['electrodes']
band_opt = config['bands']
data_path = ospj(repo_path, 'data')
figure_path = ospj(repo_path, 'figures')
metadata_fname = ospj(metadata_path, "DATA_MASTER.json")
with open(metadata_fname) as f:
metadata = json.load(f)['PATIENTS']
patient_cohort = pd.read_excel(ospj(data_path, "patient_cohort.xlsx"))
# flags
SAVE_PLOT = True
NMF_OPT_FLAG = True
SUBJ_LEVEL = False
FIXED_PREICTAL_SEC = 60 * config['preictal_window_min']
LEAD_SZ_WINDOW_SEC = (FIXED_PREICTAL_SEC + 60 * 15) # 15 min buffer
# %%
patient_localization_mat = loadmat(ospj(metadata_path, 'patient_localization_final.mat'))['patient_localization']
patients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(metadata_path, 'patient_localization_final.mat'))
for index, row in patient_cohort.iterrows():
if row['Ignore']:
continue
pt = row["Patient"]
print("Calculating pre-ictal NMF for {}".format(pt))
pt_data_path = ospj(data_path, pt)
pt_figure_path = ospj(figure_path, pt)
if not os.path.exists(pt_figure_path):
os.makedirs(pt_figure_path)
# pull and format electrode metadata
electrodes_mat = loadmat(ospj(pt_data_path, "selected_electrodes_elec-{}.mat".format(electrodes_opt)))
target_electrode_region_inds = electrodes_mat['targetElectrodesRegionInds'][0]
pt_index = patients.index(pt)
sz_starts = pull_sz_starts(pt, metadata)
# get bandpower from pre-ictal period and log transform
# bandpower_mat_data = loadmat(ospj(pt_data_path, "bandpower-windows-pre-sz-{}.mat".format(electrodes_opt)))
# bandpower_data = 10*np.log10(bandpower_mat_data['allFeats'])
# t_sec = np.squeeze(bandpower_mat_data['entireT']) / 1e6
# sz_id = np.squeeze(bandpower_mat_data['szID'])
df = pd.read_pickle(ospj(pt_data_path, "bandpower_elec-{}_period-preictal.pkl".format(electrodes_opt)))
if band_opt == "all":
bandpower_data = df.filter(regex=("^((?!broad).)*$"), axis=1)
bandpower_data = bandpower_data.drop(['Seizure id'], axis=1)
elif band_opt == "broad":
bandpower_data = df.filter(regex=("broad"), axis=1)
else:
print("Band configuration not given properly")
exit
sz_id = np.squeeze(df['Seizure id'])
t_sec = np.array(df.index / np.timedelta64(1, 's'))
n_sz = np.size(np.unique(sz_id))
# remove short inter-seizure intervals
lead_sz = np.diff(np.insert(sz_starts, 0, [0])) > (FIXED_PREICTAL_SEC + 60 * 15) # 15 min buffer
remaining_sz_ids = np.where(lead_sz)[0]
remove_sz_ids = np.where(~lead_sz)[0]
print("\tremoving seizures {}".format(remove_sz_ids))
print(type(sz_id))
for remv in remove_sz_ids:
t_sec = np.delete(t_sec, np.where(sz_id == remv))
bandpower_data.drop(bandpower_data.index[np.where(sz_id == remv)[0]], inplace=True)
sz_id.drop(sz_id.index[np.where(sz_id == remv)[0]], inplace=True)
np.save(ospj(pt_data_path, "remaining_sz_ids.npy"), remaining_sz_ids)
# Apply NMF to pre-ictal period to find components (H) and expression (W)
n_remaining_sz = np.size(remaining_sz_ids)
n_components = range(2, 20)
for sz_idx in tqdm(range(n_remaining_sz)):
i_sz = remaining_sz_ids[sz_idx]
data = bandpower_data[sz_id == i_sz]
# print("\trunning NMF")
start_time = time.time()
reconstruction_err = np.zeros(np.size(n_components))
for ind, i_components in enumerate(n_components):
# print("\t\tTesting NMF with {} components".format(i_components))
model = NMF(n_components=i_components, init='nndsvd', random_state=0, max_iter=1000)
W = model.fit_transform(data - np.min(data))
reconstruction_err[ind] = model.reconstruction_err_
end_time = time.time()
# print("\tNMF took {} seconds".format(end_time - start_time))
kneedle = KneeLocator(n_components, reconstruction_err, curve="convex", direction="decreasing")
n_opt_components = kneedle.knee
# print("\t{} components was found as optimal, rerunning for final iteration".format(n_opt_components))
model = NMF(n_components=n_opt_components, init='nndsvd', random_state=0, max_iter=1000)
W = model.fit_transform(data - np.min(data))
H = model.components_
np.save(ospj(pt_data_path, "nmf_expression_band-{}_elec-{}_sz-{}.npy".format(band_opt, electrodes_opt, i_sz)), W)
np.save(ospj(pt_data_path, "nmf_components_band-{}_elec-{}_sz-{}.npy".format(band_opt, electrodes_opt, i_sz)), H)
np.save(ospj(pt_data_path, "lead_sz_t_sec_band-{}_elec-{}.npy".format(band_opt, electrodes_opt)), t_sec)
np.save(ospj(pt_data_path, "lead_sz_sz_id_band-{}_elec-{}.npy".format(band_opt, electrodes_opt)), sz_id)
##############################################
# %%
# # States are defined as the max expressed component at each time point
# states = np.argmax(movmean(W[:, 1:].T, k=100).T, axis=-1) + 1
# # take the dissimilarity in states, optionally using fast dynamic time warping
# if DTW_FLAG:
# states_dissim_mat = np.zeros((n_remaining_sz, n_remaining_sz))
# for ind1, i in enumerate(remaining_sz_ids):
# for ind2, j in enumerate(remaining_sz_ids):
# distance, path = fastdtw(states[sz_id == i], states[sz_id == j], dist=euclidean)
# states_dissim_mat[ind1, ind2] = distance
# else:
# # find how long pre-ictal segments are for each sz and take shortest one
# pre_ictal_lengths = np.zeros(remaining_sz_ids.shape, dtype=int)
# for ind, i_sz in enumerate(remaining_sz_ids):
# pre_ictal_lengths[ind] = np.size(states[sz_id == i_sz])
# pre_ictal_length = np.min(pre_ictal_lengths)
# # matrix of adjusted rand score for similar state occurences
# states_dissim_mat = np.zeros((n_remaining_sz, n_remaining_sz))
# for ind1, i in enumerate(remaining_sz_ids):
# for ind2, j in enumerate(remaining_sz_ids):
# rand = adjusted_rand_score(states[sz_id == i][-pre_ictal_length:], states[sz_id == j][-pre_ictal_length:])
# states_dissim_mat[ind1, ind2] = 1 - rand
# np.save(ospj(pt_data_path, "states_dissim_mat_{}.npy".format(mode)), states_dissim_mat)
# np.save(ospj(pt_data_path, "remaining_sz_ids.npy"), remaining_sz_ids)
# # Plot the NMF subgraphs and expression
# if PLOT:
# for i in remaining_sz_ids:
# fig, ax = plt.subplots()
# t_arr_min = (t_sec[sz_id == i] - t_sec[sz_id == i][-1]) / 60
# ax.plot(t_arr_min, movmean(W[sz_id == i, 1:].T, k=100, mode='same').T)
# ax.set_xlabel("Time from seizure onset (min)")
# ax.set_ylabel("Subgraph coefficient")
# ax.set_title("Seizure {}".format(i))
# ax.legend(np.arange(n_components - 1) + 2, title="Component")
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "subgraph_expression_sz_{}_{}.svg".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "subgraph_expression_sz_{}_{}.png".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# ax = plot_spectrogram(H, start_time=0, end_time=n_components)
# ax.set_title("{}".format(pt))
# ax.set_xlabel("Component")
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "subgraphs_{}.svg".format(mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "subgraphs_{}.png".format(mode)), bbox_inches='tight', transparent='true')
# plt.close()
# if PLOT:
# n_electrodes = soz_electrodes.shape[0]
# # plot all states
# component_arr = np.reshape(H, (n_components, -1, n_electrodes))
# # component_z = np.zeros(component_arr.shape)
# # for i_comp in range(n_components):
# # component_z[i_comp, :, :] = zscore(component_arr[i_comp, :, :], axis=1)
# # sort to put non-soz first
# sort_soz_inds = np.argsort(soz_electrodes)
# n_soz = np.sum(soz_electrodes)
# n_non_soz = n_electrodes - n_soz
# for i_comp in range(n_components):
# fig, ax = plt.subplots()
# divider = make_axes_locatable(ax)
# cax = divider.append_axes('right', size='5%', pad=0.05)
# im = ax.imshow(component_arr[i_comp, :, sort_soz_inds].T)
# ax.axvline(n_non_soz - 0.5, c='r', lw=2)
# ax.set_title("Subgraph {}, {}".format(i_comp, pt))
# ax.set_yticks(np.arange(6))
# ax.set_yticklabels([r'$\delta$', r'$\theta$', r'$\alpha$', r'$\beta$', r'low-$\gamma$', r'high-$\gamma$'])
# ax.set_xticks(np.arange(n_electrodes))
# ax.set_xticks([n_non_soz / 2, n_non_soz + n_soz / 2])
# ax.set_xticklabels(["Non SOZ", "SOZ"])
# ax.set_xlabel("Electrodes")
# ax.set_ylabel("Frequency band")
# cbar = fig.colorbar(im, cax=cax, orientation='vertical')
# cbar.ax.set_ylabel('Power (dB)', rotation=90)
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "soz_subgraph_{}_heatmap_{}.svg".format(i_comp, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "soz_subgraph_{}_heatmap_{}.png".format(i_comp, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# # plot soz state expression for all seizures
# for i in remaining_sz_ids:
# fig, ax = plt.subplots()
# t_arr_min = (t_sec[sz_id == i] - t_sec[sz_id == i][-1]) / 60
# ax.plot(t_arr_min, movmean(W[sz_id == i,pt_soz_state].T, k=100).T)
# ax.set_xlabel("Time from seizure onset (min)")
# ax.set_ylabel("SOZ subgraph coefficient")
# ax.set_title("Seizure {}".format(i))
# if SAVE_PLOT:
# plt.savefig(ospj(pt_figure_path, "soz_expression_sz_{}_{}.svg".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.savefig(ospj(pt_figure_path, "soz_expression_sz_{}_{}.png".format(i, mode)), bbox_inches='tight', transparent='true')
# plt.close()
# break
# # %%
# min_pre_ictal_size = min([W[sz_id == i,pt_soz_state].shape[0] for i in remaining_sz_ids])
# pre_ictal_soz_state = np.zeros((np.size(remaining_sz_ids), min_pre_ictal_size))
# for ind, i_sz in enumerate(remaining_sz_ids):
# pre_ictal_soz_state[ind, :] = W[sz_id == i_sz,pt_soz_state][-min_pre_ictal_size:]
# # %%
# # %%
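# Hedged illustration of the (currently commented-out) state analysis above:
# each time window would be assigned the index of its maximally expressed
# subgraph, e.g. states = np.argmax(movmean(W[:, 1:].T, k=100).T, axis=-1) + 1,
# and, with DTW disabled, the dissimilarity between two pre-ictal periods is
# 1 - adjusted_rand_score of their state sequences over the shortest common
# pre-ictal length.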
|
[
"sys.path.append",
"kneed.KneeLocator",
"numpy.size",
"json.load",
"sklearn.decomposition.NMF",
"os.makedirs",
"warnings.filterwarnings",
"os.path.exists",
"time.time",
"numpy.insert",
"numpy.min",
"pull_sz_starts.pull_sz_starts",
"numpy.where",
"numpy.timedelta64",
"numpy.squeeze",
"os.path.join",
"numpy.unique"
] |
[((699, 723), 'sys.path.append', 'sys.path.append', (['"""tools"""'], {}), "('tools')\n", (714, 723), False, 'import sys\n'), ((1227, 1296), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'ConvergenceWarning'}), "(action='ignore', category=ConvergenceWarning)\n", (1250, 1296), False, 'import warnings\n'), ((1622, 1645), 'os.path.join', 'ospj', (['repo_path', '"""data"""'], {}), "(repo_path, 'data')\n", (1626, 1645), True, 'from os.path import join as ospj\n'), ((1660, 1686), 'os.path.join', 'ospj', (['repo_path', '"""figures"""'], {}), "(repo_path, 'figures')\n", (1664, 1686), True, 'from os.path import join as ospj\n'), ((1705, 1744), 'os.path.join', 'ospj', (['metadata_path', '"""DATA_MASTER.json"""'], {}), "(metadata_path, 'DATA_MASTER.json')\n", (1709, 1744), True, 'from os.path import join as ospj\n'), ((1384, 1396), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1393, 1396), False, 'import json\n'), ((1849, 1887), 'os.path.join', 'ospj', (['data_path', '"""patient_cohort.xlsx"""'], {}), "(data_path, 'patient_cohort.xlsx')\n", (1853, 1887), True, 'from os.path import join as ospj\n'), ((2288, 2341), 'os.path.join', 'ospj', (['metadata_path', '"""patient_localization_final.mat"""'], {}), "(metadata_path, 'patient_localization_final.mat')\n", (2292, 2341), True, 'from os.path import join as ospj\n'), ((2530, 2549), 'os.path.join', 'ospj', (['data_path', 'pt'], {}), '(data_path, pt)\n', (2534, 2549), True, 'from os.path import join as ospj\n'), ((2571, 2592), 'os.path.join', 'ospj', (['figure_path', 'pt'], {}), '(figure_path, pt)\n', (2575, 2592), True, 'from os.path import join as ospj\n'), ((2954, 2982), 'pull_sz_starts.pull_sz_starts', 'pull_sz_starts', (['pt', 'metadata'], {}), '(pt, metadata)\n', (2968, 2982), False, 'from pull_sz_starts import pull_sz_starts\n'), ((3795, 3823), 'numpy.squeeze', 'np.squeeze', (["df['Seizure id']"], {}), "(df['Seizure id'])\n", (3805, 3823), True, 'import numpy as np\n'), ((4663, 4688), 'numpy.size', 'np.size', (['remaining_sz_ids'], {}), '(remaining_sz_ids)\n', (4670, 4688), True, 'import numpy as np\n'), ((1792, 1804), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1801, 1804), False, 'import json\n'), ((2120, 2173), 'os.path.join', 'ospj', (['metadata_path', '"""patient_localization_final.mat"""'], {}), "(metadata_path, 'patient_localization_final.mat')\n", (2124, 2173), True, 'from os.path import join as ospj\n'), ((2604, 2634), 'os.path.exists', 'os.path.exists', (['pt_figure_path'], {}), '(pt_figure_path)\n', (2618, 2634), False, 'import os\n'), ((2644, 2671), 'os.makedirs', 'os.makedirs', (['pt_figure_path'], {}), '(pt_figure_path)\n', (2655, 2671), False, 'import os\n'), ((3899, 3915), 'numpy.unique', 'np.unique', (['sz_id'], {}), '(sz_id)\n', (3908, 3915), True, 'import numpy as np\n'), ((4089, 4106), 'numpy.where', 'np.where', (['lead_sz'], {}), '(lead_sz)\n', (4097, 4106), True, 'import numpy as np\n'), ((4130, 4148), 'numpy.where', 'np.where', (['(~lead_sz)'], {}), '(~lead_sz)\n', (4138, 4148), True, 'import numpy as np\n'), ((4501, 4543), 'os.path.join', 'ospj', (['pt_data_path', '"""remaining_sz_ids.npy"""'], {}), "(pt_data_path, 'remaining_sz_ids.npy')\n", (4505, 4543), True, 'from os.path import join as ospj\n'), ((4909, 4920), 'time.time', 'time.time', ([], {}), '()\n', (4918, 4920), False, 'import time\n'), ((5356, 5367), 'time.time', 'time.time', ([], {}), '()\n', (5365, 5367), False, 'import time\n'), ((5458, 5548), 'kneed.KneeLocator', 'KneeLocator', (['n_components', 
'reconstruction_err'], {'curve': '"""convex"""', 'direction': '"""decreasing"""'}), "(n_components, reconstruction_err, curve='convex', direction=\n 'decreasing')\n", (5469, 5548), False, 'from kneed import KneeLocator\n'), ((5714, 5799), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'n_opt_components', 'init': '"""nndsvd"""', 'random_state': '(0)', 'max_iter': '(1000)'}), "(n_components=n_opt_components, init='nndsvd', random_state=0, max_iter=1000\n )\n", (5717, 5799), False, 'from sklearn.decomposition import NMF\n'), ((3856, 3878), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (3870, 3878), True, 'import numpy as np\n'), ((3987, 4015), 'numpy.insert', 'np.insert', (['sz_starts', '(0)', '[0]'], {}), '(sz_starts, 0, [0])\n', (3996, 4015), True, 'import numpy as np\n'), ((4298, 4321), 'numpy.where', 'np.where', (['(sz_id == remv)'], {}), '(sz_id == remv)\n', (4306, 4321), True, 'import numpy as np\n'), ((4959, 4980), 'numpy.size', 'np.size', (['n_components'], {}), '(n_components)\n', (4966, 4980), True, 'import numpy as np\n'), ((5139, 5215), 'sklearn.decomposition.NMF', 'NMF', ([], {'n_components': 'i_components', 'init': '"""nndsvd"""', 'random_state': '(0)', 'max_iter': '(1000)'}), "(n_components=i_components, init='nndsvd', random_state=0, max_iter=1000)\n", (5142, 5215), False, 'from sklearn.decomposition import NMF\n'), ((5834, 5846), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (5840, 5846), True, 'import numpy as np\n'), ((4372, 4395), 'numpy.where', 'np.where', (['(sz_id == remv)'], {}), '(sz_id == remv)\n', (4380, 4395), True, 'import numpy as np\n'), ((4446, 4469), 'numpy.where', 'np.where', (['(sz_id == remv)'], {}), '(sz_id == remv)\n', (4454, 4469), True, 'import numpy as np\n'), ((5259, 5271), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (5265, 5271), True, 'import numpy as np\n')]
|
# This script compares reading from an array in a loop using the
# tables.Array.read method. In the first case, read is used without supplying
# an 'out' argument, which causes a new output buffer to be pre-allocated
# with each call. In the second case, the buffer is created once, and then
# reused.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import numpy as np
import tables
def create_file(array_size):
array = np.ones(array_size, dtype='i8')
with tables.open_file('test.h5', 'w') as fobj:
array = fobj.create_array('/', 'test', array)
        print('file created, size: {0} MB'.format(array.size_on_disk / 1e6))
def standard_read(array_size):
N = 10
with tables.open_file('test.h5', 'r') as fobj:
array = fobj.get_node('/', 'test')
start = time.time()
for i in range(N):
output = array.read(0, array_size, 1)
end = time.time()
assert(np.all(output == 1))
print('standard read \t {0:5.5f}'.format((end - start) / N))
def pre_allocated_read(array_size):
N = 10
with tables.open_file('test.h5', 'r') as fobj:
array = fobj.get_node('/', 'test')
start = time.time()
output = np.empty(array_size, 'i8')
for i in range(N):
array.read(0, array_size, 1, out=output)
end = time.time()
assert(np.all(output == 1))
print('pre-allocated read\t {0:5.5f}'.format((end - start) / N))
if __name__ == '__main__':
array_num_bytes = [int(x) for x in [1e5, 1e6, 1e7, 1e8]]
for array_bytes in array_num_bytes:
array_size = int(array_bytes // 8)
create_file(array_size)
standard_read(array_size)
pre_allocated_read(array_size)
print()
|
[
"numpy.empty",
"numpy.ones",
"time.time",
"tables.open_file",
"numpy.all"
] |
[((506, 537), 'numpy.ones', 'np.ones', (['array_size'], {'dtype': '"""i8"""'}), "(array_size, dtype='i8')\n", (513, 537), True, 'import numpy as np\n'), ((547, 579), 'tables.open_file', 'tables.open_file', (['"""test.h5"""', '"""w"""'], {}), "('test.h5', 'w')\n", (563, 579), False, 'import tables\n'), ((773, 805), 'tables.open_file', 'tables.open_file', (['"""test.h5"""', '"""r"""'], {}), "('test.h5', 'r')\n", (789, 805), False, 'import tables\n'), ((874, 885), 'time.time', 'time.time', ([], {}), '()\n', (883, 885), False, 'import time\n'), ((977, 988), 'time.time', 'time.time', ([], {}), '()\n', (986, 988), False, 'import time\n'), ((1004, 1023), 'numpy.all', 'np.all', (['(output == 1)'], {}), '(output == 1)\n', (1010, 1023), True, 'import numpy as np\n'), ((1154, 1186), 'tables.open_file', 'tables.open_file', (['"""test.h5"""', '"""r"""'], {}), "('test.h5', 'r')\n", (1170, 1186), False, 'import tables\n'), ((1255, 1266), 'time.time', 'time.time', ([], {}), '()\n', (1264, 1266), False, 'import time\n'), ((1284, 1310), 'numpy.empty', 'np.empty', (['array_size', '"""i8"""'], {}), "(array_size, 'i8')\n", (1292, 1310), True, 'import numpy as np\n'), ((1405, 1416), 'time.time', 'time.time', ([], {}), '()\n', (1414, 1416), False, 'import time\n'), ((1432, 1451), 'numpy.all', 'np.all', (['(output == 1)'], {}), '(output == 1)\n', (1438, 1451), True, 'import numpy as np\n')]
|
import numpy as np
def distance_from_region(label_mask, distance_mask=None, scale=1, ord=2):
"""Find the distance at every point in an image from a set of labeled points.
Parameters
==========
label_mask : ndarray
A mask designating the points to find the distance from. A True value
indicates that the pixel is in the region, a False value indicates it is not.
distance_mask : ndarray
        A mask indicating which regions to calculate the distance in
    scale : int
        Scale the calculated distance to another distance measure (e.g. to millimeters)
ord : int
Order of norm to use when calculating distance. See np.linalg.norm for more details
Returns
=======
distances : ndarray
A masked array of the same size as label_mask.
If distance_mask is passed in then the output array is masked by it.
"""
if distance_mask is None:
distance_mask = np.ones(label_mask.shape, dtype=bool)
assert label_mask.shape == distance_mask.shape
scale = np.array(scale)
output = np.zeros(label_mask.shape)
indxs = np.indices(label_mask.shape)
X = indxs[:, distance_mask].T
Y = indxs[:, label_mask].T
for x in X:
output[tuple(x)] = np.linalg.norm(scale*(x-Y), ord=ord, axis=1).min()
return np.ma.array(output, mask=np.logical_not(distance_mask))
def contours(distances, contours=10):
amin,amax = distances.min(), distances.max()
edges,step = np.linspace(amin, amax, contours, retstep=True)
mask = np.logical_not(np.ma.getmaskarray(distances))
return [np.ma.getdata(mask & (distances >= cntr) & (distances < (cntr+step))) for cntr in edges[:-1]], edges
def plot_by_contours(arr, contour_masks, contour_vals, ax=None):
if ax is None:
import pylab as pl
_,ax = pl.subplots()
x = contour_vals[:-1]
y = np.array([arr[mask].mean() for mask in contour_masks])
ax.set_xlabel('Distance from surface (mm)')
ax.set_ylabel('Mean R2* value')
return ax.plot(x, y, 'o--')[0], x, y
def plot_by_distance(arr, distances, ax=None):
assert arr.shape == distances.shape
if ax is None:
import pylab
_,ax = pylab.subplots()
mask = np.logical_not(np.ma.getmaskarray(distances))
x = distances[mask].ravel()
y = arr[mask].ravel()
return ax.plot(x,y,'o')
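# Minimal usage sketch (not part of the original module): distances from a
# single labeled pixel in a small 2-D mask.
if __name__ == '__main__':
    demo_mask = np.zeros((5, 5), dtype=bool)
    demo_mask[2, 2] = True
    demo_distances = distance_from_region(demo_mask)
    # The corner pixel (0, 0) lies sqrt(2**2 + 2**2) ~ 2.83 away from (2, 2).
    print(demo_distances[0, 0])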
|
[
"numpy.ma.getdata",
"numpy.ma.getmaskarray",
"numpy.zeros",
"numpy.ones",
"numpy.logical_not",
"numpy.indices",
"pylab.subplots",
"numpy.array",
"numpy.linalg.norm",
"numpy.linspace"
] |
[((1046, 1061), 'numpy.array', 'np.array', (['scale'], {}), '(scale)\n', (1054, 1061), True, 'import numpy as np\n'), ((1075, 1101), 'numpy.zeros', 'np.zeros', (['label_mask.shape'], {}), '(label_mask.shape)\n', (1083, 1101), True, 'import numpy as np\n'), ((1115, 1143), 'numpy.indices', 'np.indices', (['label_mask.shape'], {}), '(label_mask.shape)\n', (1125, 1143), True, 'import numpy as np\n'), ((1476, 1523), 'numpy.linspace', 'np.linspace', (['amin', 'amax', 'contours'], {'retstep': '(True)'}), '(amin, amax, contours, retstep=True)\n', (1487, 1523), True, 'import numpy as np\n'), ((945, 982), 'numpy.ones', 'np.ones', (['label_mask.shape'], {'dtype': 'bool'}), '(label_mask.shape, dtype=bool)\n', (952, 982), True, 'import numpy as np\n'), ((1550, 1579), 'numpy.ma.getmaskarray', 'np.ma.getmaskarray', (['distances'], {}), '(distances)\n', (1568, 1579), True, 'import numpy as np\n'), ((1822, 1835), 'pylab.subplots', 'pl.subplots', ([], {}), '()\n', (1833, 1835), True, 'import pylab as pl\n'), ((2195, 2211), 'pylab.subplots', 'pylab.subplots', ([], {}), '()\n', (2209, 2211), False, 'import pylab\n'), ((2239, 2268), 'numpy.ma.getmaskarray', 'np.ma.getmaskarray', (['distances'], {}), '(distances)\n', (2257, 2268), True, 'import numpy as np\n'), ((1339, 1368), 'numpy.logical_not', 'np.logical_not', (['distance_mask'], {}), '(distance_mask)\n', (1353, 1368), True, 'import numpy as np\n'), ((1593, 1662), 'numpy.ma.getdata', 'np.ma.getdata', (['(mask & (distances >= cntr) & (distances < cntr + step))'], {}), '(mask & (distances >= cntr) & (distances < cntr + step))\n', (1606, 1662), True, 'import numpy as np\n'), ((1252, 1300), 'numpy.linalg.norm', 'np.linalg.norm', (['(scale * (x - Y))'], {'ord': 'ord', 'axis': '(1)'}), '(scale * (x - Y), ord=ord, axis=1)\n', (1266, 1300), True, 'import numpy as np\n')]
|
from nose.plugins.attrib import attr
from numpy.testing.utils import assert_equal, assert_allclose, assert_raises
import numpy as np
from brian2.spatialneuron import *
from brian2.units import um, second
@attr('codegen-independent')
def test_basicshapes():
morpho = Soma(diameter=30*um)
morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
morpho.right['nextone'] = Cylinder(length=2*um, diameter=1*um, n=3)
# Check total number of compartments
assert_equal(len(morpho),26)
assert_equal(len(morpho.L.main),10)
# Check that end point is at distance 15 um from soma
assert_allclose(morpho.LL.distance[-1],15*um)
@attr('codegen-independent')
def test_subgroup():
morpho = Soma(diameter=30*um)
morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
# Getting a single compartment by index
assert_allclose(morpho.L[2].distance,3*um)
# Getting a single compartment by position
assert_allclose(morpho.LL[0*um].distance,11*um)
assert_allclose(morpho.LL[1*um].distance,11*um)
assert_allclose(morpho.LL[1.5*um].distance,12*um)
assert_allclose(morpho.LL[5*um].distance,15*um)
# Getting a segment
assert_allclose(morpho.L[3*um:5.1*um].distance, [3, 4, 5]*um)
# Indices cannot be obtained at this stage
assert_raises(AttributeError,lambda :morpho.L.indices[:])
# Compress the morphology and get absolute compartment indices
N = len(morpho)
morpho.compress(MorphologyData(N))
assert_equal(morpho.LL.indices[:], [11, 12, 13, 14, 15])
assert_equal(morpho.L.indices[3*um:5.1*um], [3, 4, 5])
assert_equal(morpho.L.indices[3*um:5.1*um],
morpho.L[3*um:5.1*um].indices[:])
assert_equal(morpho.L.indices[:5.1*um], [1, 2, 3, 4, 5])
assert_equal(morpho.L.indices[3*um:], [3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(morpho.L.indices[3.5*um], 4)
assert_equal(morpho.L.indices[3], 4)
assert_equal(morpho.L.indices[-1], 10)
assert_equal(morpho.L.indices[3:5], [4, 5])
assert_equal(morpho.L.indices[3:], [4, 5, 6, 7, 8, 9, 10])
assert_equal(morpho.L.indices[:5], [1, 2, 3, 4, 5])
# Main branch
assert_equal(len(morpho.L.main), 10)
# Non-existing branch
assert_raises(AttributeError, lambda: morpho.axon)
# Incorrect indexing
# wrong units or mixing units
assert_raises(TypeError, lambda: morpho.indices[3*second:5*second])
assert_raises(TypeError, lambda: morpho.indices[3.4:5.3])
assert_raises(TypeError, lambda: morpho.indices[3:5*um])
assert_raises(TypeError, lambda: morpho.indices[3*um:5])
# providing a step
assert_raises(TypeError, lambda: morpho.indices[3*um:5*um:2*um])
assert_raises(TypeError, lambda: morpho.indices[3:5:2])
# incorrect type
assert_raises(TypeError, lambda: morpho.indices[object()])
if __name__ == '__main__':
test_basicshapes()
test_subgroup()
|
[
"numpy.testing.utils.assert_equal",
"numpy.testing.utils.assert_allclose",
"numpy.testing.utils.assert_raises",
"nose.plugins.attrib.attr"
] |
[((207, 234), 'nose.plugins.attrib.attr', 'attr', (['"""codegen-independent"""'], {}), "('codegen-independent')\n", (211, 234), False, 'from nose.plugins.attrib import attr\n'), ((767, 794), 'nose.plugins.attrib.attr', 'attr', (['"""codegen-independent"""'], {}), "('codegen-independent')\n", (771, 794), False, 'from nose.plugins.attrib import attr\n'), ((719, 767), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL.distance[-1]', '(15 * um)'], {}), '(morpho.LL.distance[-1], 15 * um)\n', (734, 767), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1076, 1121), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.L[2].distance', '(3 * um)'], {}), '(morpho.L[2].distance, 3 * um)\n', (1091, 1121), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1170, 1222), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL[0 * um].distance', '(11 * um)'], {}), '(morpho.LL[0 * um].distance, 11 * um)\n', (1185, 1222), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1222, 1274), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL[1 * um].distance', '(11 * um)'], {}), '(morpho.LL[1 * um].distance, 11 * um)\n', (1237, 1274), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1274, 1328), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL[1.5 * um].distance', '(12 * um)'], {}), '(morpho.LL[1.5 * um].distance, 12 * um)\n', (1289, 1328), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1328, 1380), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.LL[5 * um].distance', '(15 * um)'], {}), '(morpho.LL[5 * um].distance, 15 * um)\n', (1343, 1380), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1404, 1471), 'numpy.testing.utils.assert_allclose', 'assert_allclose', (['morpho.L[3 * um:5.1 * um].distance', '([3, 4, 5] * um)'], {}), '(morpho.L[3 * um:5.1 * um].distance, [3, 4, 5] * um)\n', (1419, 1471), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1517, 1576), 'numpy.testing.utils.assert_raises', 'assert_raises', (['AttributeError', '(lambda : morpho.L.indices[:])'], {}), '(AttributeError, lambda : morpho.L.indices[:])\n', (1530, 1576), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1705, 1761), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.LL.indices[:]', '[11, 12, 13, 14, 15]'], {}), '(morpho.LL.indices[:], [11, 12, 13, 14, 15])\n', (1717, 1761), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1766, 1824), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3 * um:5.1 * um]', '[3, 4, 5]'], {}), '(morpho.L.indices[3 * um:5.1 * um], [3, 4, 5])\n', (1778, 1824), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1825, 1915), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3 * um:5.1 * um]', 'morpho.L[3 * um:5.1 * um].indices[:]'], {}), '(morpho.L.indices[3 * um:5.1 * um], morpho.L[3 * um:5.1 * um].\n indices[:])\n', (1837, 1915), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1924, 1982), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[:5.1 * um]', '[1, 2, 3, 4, 
5]'], {}), '(morpho.L.indices[:5.1 * um], [1, 2, 3, 4, 5])\n', (1936, 1982), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((1985, 2051), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3 * um:]', '[3, 4, 5, 6, 7, 8, 9, 10]'], {}), '(morpho.L.indices[3 * um:], [3, 4, 5, 6, 7, 8, 9, 10])\n', (1997, 2051), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2054, 2097), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3.5 * um]', '(4)'], {}), '(morpho.L.indices[3.5 * um], 4)\n', (2066, 2097), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2100, 2136), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3]', '(4)'], {}), '(morpho.L.indices[3], 4)\n', (2112, 2136), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2141, 2179), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[-1]', '(10)'], {}), '(morpho.L.indices[-1], 10)\n', (2153, 2179), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2184, 2227), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3:5]', '[4, 5]'], {}), '(morpho.L.indices[3:5], [4, 5])\n', (2196, 2227), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2232, 2290), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[3:]', '[4, 5, 6, 7, 8, 9, 10]'], {}), '(morpho.L.indices[3:], [4, 5, 6, 7, 8, 9, 10])\n', (2244, 2290), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2295, 2346), 'numpy.testing.utils.assert_equal', 'assert_equal', (['morpho.L.indices[:5]', '[1, 2, 3, 4, 5]'], {}), '(morpho.L.indices[:5], [1, 2, 3, 4, 5])\n', (2307, 2346), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2438, 2489), 'numpy.testing.utils.assert_raises', 'assert_raises', (['AttributeError', '(lambda : morpho.axon)'], {}), '(AttributeError, lambda : morpho.axon)\n', (2451, 2489), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2554, 2626), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3 * second:5 * second])'], {}), '(TypeError, lambda : morpho.indices[3 * second:5 * second])\n', (2567, 2626), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2626, 2684), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3.4:5.3])'], {}), '(TypeError, lambda : morpho.indices[3.4:5.3])\n', (2639, 2684), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2688, 2747), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3:5 * um])'], {}), '(TypeError, lambda : morpho.indices[3:5 * um])\n', (2701, 2747), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2749, 2808), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3 * um:5])'], {}), '(TypeError, lambda : morpho.indices[3 * um:5])\n', (2762, 2808), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2835, 2906), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3 * um:5 * 
um:2 * um])'], {}), '(TypeError, lambda : morpho.indices[3 * um:5 * um:2 * um])\n', (2848, 2906), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n'), ((2904, 2960), 'numpy.testing.utils.assert_raises', 'assert_raises', (['TypeError', '(lambda : morpho.indices[3:5:2])'], {}), '(TypeError, lambda : morpho.indices[3:5:2])\n', (2917, 2960), False, 'from numpy.testing.utils import assert_equal, assert_allclose, assert_raises\n')]
|
import os
import cv2
from matplotlib.pyplot import gray
import numpy as np
people = ['<NAME>', '<NAME>', '<NAME>', 'Madonna', '<NAME>']
DIR = r'/home/senai/tiago-projects/opencv-course/Resources/Faces/train'
haar_cascade = cv2.CascadeClassifier('/home/senai/tiago-projects/opencv-course/face_detection/haar_face.xml')
features = []
labels = []
def create_train():
for person in people:
path = os.path.join(DIR, person)
label = people.index(person)
for img in os.listdir(path):
img_path = os.path.join(path, img)
img_array = cv2.imread(img_path)
gray = cv2.cvtColor(img_array, cv2.COLOR_BGR2GRAY)
faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)
for(x,y,w,h) in faces_rect:
faces_roi = gray[y:y+h, x:x+w]
features.append(faces_roi)
labels.append(label)
create_train()
features = np.array(features)
labels = np.array(labels)
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
# Train the recognizer on the features list and the labels list
face_recognizer.train(features, labels)
face_recognizer.save('face_trained.yml')
np.save('features.npy', features)
np.save('labels.npy', labels)
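# NOTE (editorial sketch, not part of the original script): the trained model
# saved above can later be reloaded for inference, e.g. in a separate script;
# `gray_face_roi` below is a hypothetical cropped grayscale face image.
#   recognizer = cv2.face.LBPHFaceRecognizer_create()
#   recognizer.read('face_trained.yml')
#   label, confidence = recognizer.predict(gray_face_roi)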
|
[
"numpy.save",
"cv2.face.LBPHFaceRecognizer_create",
"cv2.cvtColor",
"cv2.imread",
"numpy.array",
"cv2.CascadeClassifier",
"os.path.join",
"os.listdir"
] |
[((224, 323), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""/home/senai/tiago-projects/opencv-course/face_detection/haar_face.xml"""'], {}), "(\n '/home/senai/tiago-projects/opencv-course/face_detection/haar_face.xml')\n", (245, 323), False, 'import cv2\n'), ((956, 974), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (964, 974), True, 'import numpy as np\n'), ((985, 1001), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (993, 1001), True, 'import numpy as np\n'), ((1021, 1057), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (1055, 1057), False, 'import cv2\n'), ((1205, 1238), 'numpy.save', 'np.save', (['"""features.npy"""', 'features'], {}), "('features.npy', features)\n", (1212, 1238), True, 'import numpy as np\n'), ((1239, 1268), 'numpy.save', 'np.save', (['"""labels.npy"""', 'labels'], {}), "('labels.npy', labels)\n", (1246, 1268), True, 'import numpy as np\n'), ((408, 433), 'os.path.join', 'os.path.join', (['DIR', 'person'], {}), '(DIR, person)\n', (420, 433), False, 'import os\n'), ((491, 507), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (501, 507), False, 'import os\n'), ((532, 555), 'os.path.join', 'os.path.join', (['path', 'img'], {}), '(path, img)\n', (544, 555), False, 'import os\n'), ((581, 601), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (591, 601), False, 'import cv2\n'), ((621, 664), 'cv2.cvtColor', 'cv2.cvtColor', (['img_array', 'cv2.COLOR_BGR2GRAY'], {}), '(img_array, cv2.COLOR_BGR2GRAY)\n', (633, 664), False, 'import cv2\n')]
|
'''
Specialized scientific functions for biogeophysical variables and L4C model
processes.
'''
import numpy as np
from functools import partial
from scipy.ndimage import generic_filter
from scipy.linalg import solve_banded
from scipy.sparse import dia_matrix
from pyl4c import suppress_warnings
from pyl4c.data.fixtures import HDF_PATHS, BPLUT
from pyl4c.utils import get_pft_array, subset
from pyl4c.stats import ols, ols_variance, linear_constraint
def arrhenius(
tsoil, beta0: float, beta1: float = 66.02, beta2: float = 227.13):
r'''
The Arrhenius equation for response of enzymes to (soil) temperature,
constrained to lie on the closed interval [0, 1].
$$
f(T_{SOIL}) = \mathrm{exp}\left[\beta_0\left( \frac{1}{\beta_1} -
\frac{1}{T_{SOIL} - \beta_2} \right) \right]
$$
Parameters
----------
tsoil : numpy.ndarray
Array of soil temperature in degrees K
beta0 : float
Coefficient for soil temperature (deg K)
beta1 : float
Coefficient for ... (deg K)
beta2 : float
Coefficient for ... (deg K)
Returns
-------
numpy.ndarray
Array of soil temperatures mapped through the Arrhenius function
'''
a = (1.0 / beta1)
b = np.divide(1.0, np.subtract(tsoil, beta2))
# This is the simple answer, but it takes on values >1
y0 = np.exp(np.multiply(beta0, np.subtract(a, b)))
# Constrain the output to the interval [0, 1]
return np.where(y0 > 1, 1, np.where(y0 < 0, 0, y0))
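# NOTE (editorial sketch): `_example_arrhenius()` below is an illustrative
# addition, not part of the original module; the beta0 value is made up.
def _example_arrhenius():
    # Evaluate the temperature response for a range of soil temperatures (K);
    # the output is constrained to the closed interval [0, 1]
    tsoil_demo = np.linspace(260.0, 310.0, 6)
    return arrhenius(tsoil_demo, beta0 = 300.0)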
def bias_correction_parameters(
series, npoly: int = 1, cutoff: float = 1, var_cutoff: float = None,
add_intercept: bool = True):
'''
Calculate the bias correction parameters for two overlapping time series,
nominally the Nature Run and L4C Operational products, of a given
variable using quantile mapping. For example, can correct the bias in
Nature Run (2000-2017) against the L4C Ops record (2015-Present) by
fitting bias correction parameters for the overlap period 2015-2017.
Model can be specified:
y = alpha + X beta_0 + X^2 beta_1 + ...
NOTE: Because Nature Run and L4C Ops compare very well in some locations,
a degree-1 polynomial (straight line) is fit first (regardless of npoly);
if this solution produces corrections that are <1 gC m^-2, the degree-1
solution is used. In some areas, there is a strong linear correspondence
between most measurements but a small number have a super-linear
relationship that is poorly fit by a degree-2 polynomial; in these cases
(where model variance of the degree-2 fit is > var_cutoff), the degree-1
solution is used. Forcing the line of best fit through the origin (with
intercept=False) is also not recommended.
Parameters
----------
series : numpy.ndarray
A (t x 2) NumPy array where t rows correspond to t time steps and
each column is a product; the first column is the reference product
or dependent variable in the linear bias correction.
npoly : int
Degree of the polynomial to use in bias correction (Default: 1)
cutoff : float
Cutoff for the degree-1 bias correction, in data units (e.g.,
1 g C m-2 day-1); defaults to 1.0, i.e., the residual after correction
must be greater than 1 g C m-2 day-1, which is the average impact of
L4SM versus model-only observations. If this cutoff is exceeded, the
degree-1 solution is returned.
var_cutoff : float or None
Cutoff in variance for higher-order solutions; if the residual model
variance exceeds this threshold for the degree-N solution, then return
the degree (N-1) solution (Default: None)
add_intercept : bool
        True to add the y-intercept term (Default: True)
Returns
-------
numpy.ndarray
A vector of length N + 1 where N is the degree of the polynomial
fit requested
'''
def xmat(x, npoly):
# Creates the design/ model matrix for a polynomial series
# Add a column for each power of the requested polynomial series
x = np.repeat(x.reshape((t, 1)), npoly, axis = 1)
for i in range(1, npoly):
# Calculate X^n for n up to N powers
            x[:,i] = np.power(x[:,0], i + 1)
return x
def fit(x, y, npoly):
# Fits the model using OLS
# If all of the Y values are NaN
if np.all(np.isnan(y)): return np.ones((npoly + 1,)) * np.nan
try:
return ols(xmat(x, npoly), y, add_intercept)
except np.linalg.linalg.LinAlgError:
return np.ones((npoly + 1,)) * np.nan
# Sort the input series from low -> high
t = series.shape[0]
y = np.sort(series[:,0])
x = np.sort(series[:,1])
# For some pixels, the time series has zero variance, and this can produce
# unstable OLS estimates (e.g., zero slope)
if np.var(y) == 0 or np.var(x) == 0:
# Return coefficients: (0, 1, 0, ..., 0)
return np.hstack(((0, 1), list(0 for i in range(1, npoly))))
if np.var(y) == 0 and np.var(x) == 0:
# Intercept (mean) is the only necessary predictor
return np.hstack(((1, 0), list(0 for i in range(1, npoly))))
fit1 = np.hstack(
(fit(x, y, npoly = 1), list(0 for i in range(1, npoly))))
if npoly == 1:
return fit1
# First, try a degree-1 polynomial (straight-line) fit; if the bias
# correction slope is such that the correction is < 1 gC/m^-2,
# which is similar to the average impact of L4SM vs. model-only
# observations, then use the degree-1 fit parameters
if x.mean() - (fit1[1] * x.mean()) < cutoff:
return fit1
# Second, starting with the simpler model, check if progressively more
# complicated models (up to a maximum of npoly) really do fit the data
# better; if not, or if the model variance is above a cutoff, use the
# next most-complicated model (last_model)
last_model = fit1 # Starting with the simplest model...
for p in range(2, npoly + 1):
model = fit(x, y, npoly = p)
# Calculates unbiased estimate of model variance
model_var = ols_variance(xmat(x, p), y, model, add_intercept)
# Without a cutoff for guidance, if the model variance of the degree-1
# fit is lower than that of the degree-2 fit...
if var_cutoff is None:
if model_var > ols_variance(
xmat(x, 1), y, last_model[0:p], add_intercept):
return last_model
else:
if model_var > var_cutoff:
return last_model
last_model = model
# Unless a simpler model was better, return coefficients for the requested
# polynomial degree
return model
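# NOTE (editorial sketch): `_example_bias_correction()` below is an
# illustrative addition, not part of the original module; the synthetic
# series stand in for overlapping Nature Run and L4C Ops records.
def _example_bias_correction():
    rng = np.random.default_rng(42)
    x_nr = rng.uniform(0, 10, size = 100)                  # e.g., Nature Run
    y_ops = 0.5 + 0.9 * x_nr + rng.normal(0, 0.1, 100)    # e.g., L4C Ops (reference)
    series = np.column_stack((y_ops, x_nr))            # reference product in column 0
    return bias_correction_parameters(series, npoly = 1)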
def climatology365(series, dates):
'''
Computes a 365-day climatology for different locations from a time series
of length T. Ignores leap days. The climatology could then be indexed
using ordinals generated by `ordinals365()`.
Parameters
----------
series : numpy.ndarray
T x ... array of data
dates : list or tuple
Sequence of datetime.datetime or datetime.date instances
Returns
-------
numpy.ndarray
'''
@suppress_warnings
def calc_climatology(x):
return np.array([
np.nanmean(x[ordinal == day,...], axis = 0)
for day in range(1, 366)
])
# Get first and last day of the year (DOY)
ordinal = np.array([
# Finally, subtract 1 from each day in a leap year after Leap Day
(doy - 1) if ((dates[i].year % 4 == 0) and doy >= 60) else doy
for i, doy in enumerate([
# Next, fill in 0 wherever Leap Day occurs
0 if (dates[i].year % 4 == 0 and doy == 60) else doy
for i, doy in enumerate([
# First, convert datetime.datetime to ordinal day-of-year (DOY)
int(dt.strftime('%j')) for dt in dates
])
])
])
return calc_climatology(series)
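# NOTE (editorial sketch): `_example_climatology365()` below is an
# illustrative addition, not part of the original module; the synthetic
# daily series and dates are made up.
def _example_climatology365():
    import datetime
    dates = [
        datetime.date(2001, 1, 1) + datetime.timedelta(days = d)
        for d in range(365 * 3)]                   # 3 non-leap years of daily data
    series = np.random.random((len(dates), 4))  # T time steps x 4 sites
    clim = climatology365(series, dates)          # (365 x 4) climatology
    doy = ordinals365(dates)                       # ordinals on [1, 365]
    return clim[np.array(doy) - 1]                # climatology value per time step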
def daynight_partition(arr_24hr, updown, reducer = 'mean'):
'''
Partitions a 24-hour time series array into daytime and nighttime values,
then calculates the mean in each group. Daytime is defined as when the sun
is above the horizon; nighttime is the complement.
Parameters
----------
arr_24hr : numpy.ndarray
A size (24 x ...) array; the first axis must have 24 elements
corresponding to the measurement in each hour
updown: numpy.ndarray
A size (2 x ...) array, compatible with arr_24hr, where the first axis
has the hour of sunrise and sunset, in that order, for each element
reducer : str
One of "mean" or "sum" indicating whether an average or a total of the
daytime/ nighttime values should be calculated; e.g., for "mean", the
hourly values from daytime hours are added up and divided by the
length of the day (in hours).
Returns
-------
numpy.ndarray
A size (2 x ...) array where the first axis enumerates the daytime and
nighttime mean values, respectively
'''
assert reducer in ('mean', 'sum'),\
'Argument "reducer" must be one of: "mean", "sum"'
# Prepare single-valued output array
arr_daytime = np.zeros(arr_24hr.shape[1:])
arr_nighttime = arr_daytime.copy()
daylight_hrs = arr_daytime.copy().astype(np.int16)
# Do sunrise and sunset define an interval? (Sunset > Sunrise)?
inside_interval = np.apply_along_axis(lambda x: x[1] > x[0], 0, updown)
# Or is the sun never up?
never_up = np.logical_and(updown[0,...] == -1, updown[1,...] == -1)
# Iteratively sum daytime VPD and temperature values
for hr in range(0, 24):
# Given only hour of sunrise/set on a 24-hour clock...
# if sun rises and sets on same day: SUNRISE <= HOUR <= SUNSET;
# if sun sets on next day: either SUNRISE <= HOUR or HOUR <= SUNSET;
sun_is_up = np.logical_or( # Either...
np.logical_and(inside_interval, # ...Rises and sets same day
np.logical_and(updown[0,...] <= hr, hr <= updown[1,...])),
np.logical_and(~inside_interval, # ...Sets on next day
np.logical_or(updown[0,...] <= hr, hr <= updown[1,...])))
# For simplicity, compute a 24-hour mean even if the sun never rises;
# there's no way to know what the "correct" daytime value is
mask = np.logical_or(never_up, sun_is_up)
np.add(np.where(
mask, arr_24hr[hr,...], 0), arr_daytime, out = arr_daytime)
np.add(np.where(
~mask, arr_24hr[hr,...], 0), arr_nighttime, out = arr_nighttime)
# Keep track of the denominator (hours) for calculating the mean;
# note that this over-estimates actual daylight hours by 1 hour
# but results in the correct denominator for the sums above
np.add(np.where(mask, 1, 0), daylight_hrs, out = daylight_hrs)
arr_24hr = None
# Calculate mean quantities
if reducer == 'mean':
arr_daytime = np.divide(arr_daytime, daylight_hrs)
arr_nighttime = np.divide(arr_nighttime, 24 - daylight_hrs)
# For sites where the sun is always above/ below the horizon, set missing
# nighttime values to zero
arr_nighttime[~np.isfinite(arr_nighttime)] = 0
return np.stack((arr_daytime, arr_nighttime))
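# NOTE (editorial sketch): `_example_daynight_partition()` below is an
# illustrative addition, not part of the original module; the sunrise/sunset
# hours and hourly values are made up.
def _example_daynight_partition():
    arr_24hr = np.arange(24, dtype = 'float64').reshape(24, 1)  # one site
    updown = np.array([[6], [18]])  # sunrise at 06:00, sunset at 18:00
    return daynight_partition(arr_24hr, updown, reducer = 'mean')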
def e_mult(params, tmin, vpd, smrz, ft):
'''
Calculate environmental constraint multiplier for gross primary
productivity (GPP), E_mult, based on current model parameters. The
expected parameter names are "LUE" for the maximum light-use
efficiency; "smrz0" and "smrz1" for the lower and upper bounds on root-
zone soil moisture; "vpd0" and "vpd1" for the lower and upper bounds on
vapor pressure deficity (VPD); "tmin0" and "tmin1" for the lower and
upper bounds on minimum temperature; and "ft0" for the multiplier during
frozen ground conditions.
Parameters
----------
params : dict
A dict-like data structure with named model parameters
tmin : numpy.ndarray
(T x N) vector of minimum air temperature (deg K), where T is the
number of time steps, N the number of sites
vpd : numpy.ndarray
(T x N) vector of vapor pressure deficit (Pa), where T is the number
of time steps, N the number of sites
smrz : numpy.ndarray
(T x N) vector of root-zone soil moisture wetness (%), where T is the
number of time steps, N the number of sites
ft : numpy.ndarray
(T x N) vector of the (binary) freeze-thaw status, where T is the
number of time steps, N the number of sites (Frozen = 0, Thawed = 1)
Returns
-------
numpy.ndarray
'''
# Calculate E_mult based on current parameters
f_tmin = linear_constraint(params['tmin0'], params['tmin1'])
f_vpd = linear_constraint(params['vpd0'], params['vpd1'], 'reversed')
f_smrz = linear_constraint(params['smrz0'], params['smrz1'])
f_ft = linear_constraint(params['ft0'], 1.0, 'binary')
return f_tmin(tmin) * f_vpd(vpd) * f_smrz(smrz) * f_ft(ft)
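# NOTE (editorial sketch): `_example_e_mult()` below is an illustrative
# addition, not part of the original module; the parameter values are made
# up, not calibrated BPLUT values.
def _example_e_mult():
    params = {
        'LUE': 1.7, 'tmin0': 260.0, 'tmin1': 280.0, 'vpd0': 500.0,
        'vpd1': 4000.0, 'smrz0': 25.0, 'smrz1': 75.0, 'ft0': 0.5
    }
    tmin = np.full((10, 3), 272.0)    # deg K
    vpd_pa = np.full((10, 3), 1500.0) # Pa
    smrz = np.full((10, 3), 60.0)     # % saturation (rescaled)
    ft = np.ones((10, 3))             # thawed everywhere
    return e_mult(params, tmin, vpd_pa, smrz, ft)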
def k_mult(params, tsoil, smsf):
'''
Calculate environmental constraint multiplier for soil heterotrophic
respiration (RH), K_mult, based on current model parameters. The expected
parameter names are "tsoil" for the Arrhenius function of soil temperature
and "smsf0" and "smsf1" for the lower and upper bounds of the ramp
function on surface soil moisture.
Parameters
----------
params : dict
A dict-like data structure with named model parameters
tsoil : numpy.ndarray
(T x N) vector of soil temperature (deg K), where T is the number of
time steps, N the number of sites
smsf : numpy.ndarray
(T x N) vector of surface soil wetness (%), where T is the number of
time steps, N the number of sites
Returns
-------
numpy.ndarray
'''
f_tsoil = partial(arrhenius, beta0 = params['tsoil'])
f_smsf = linear_constraint(params['smsf0'], params['smsf1'])
return f_tsoil(tsoil) * f_smsf(smsf)
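# NOTE (editorial sketch): `_example_k_mult()` below is an illustrative
# addition, not part of the original module; the parameter values are made
# up, not calibrated BPLUT values.
def _example_k_mult():
    params = {'tsoil': 300.0, 'smsf0': 10.0, 'smsf1': 75.0}
    tsoil = np.full((10, 3), 290.0)  # deg K
    smsf = np.full((10, 3), 50.0)    # % saturation
    return k_mult(params, tsoil, smsf)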
def litterfall_casa(lai, years, dt = 1/365):
'''
Calculates daily litterfall fraction after the CASA model (Randerson et
al. 1996). Computes the fraction of evergreen versus deciduous canopy and
allocates a constant daily fraction (out of the year) for evergreen canopy
but a varying daily fraction for deciduous, where the fraction varies with
"leaf loss," a function of leaf area index (LAI). Canopies are assumed to
be a mix of evergreen and deciduous, so the litterfall fraction is a sum
of these two approaches.
<NAME>., <NAME>, <NAME>., <NAME>., &
<NAME>. (1996). Substrate limitations for heterotrophs: Implications
for models that estimate the seasonal cycle of atmospheric CO2.
*Global Biogeochemical Cycles,* 10(4), 585–602.
The approach here is a bit different from Randerson et al. (1996) because
    we re-calculate the evergreen fraction each year; however, this is a
reasonable elaboration that, incidentally, accounts for potential changes
in the evergreen-vs-deciduous mix of the canopy. The result is an array
of daily litterfall fractions, i.e., the result multiplied by the annual
NPP sum (for a given site and year) obtains the daily litterfall.
Parameters
----------
lai : numpy.ndarray
The (T x N) leaf-area index (LAI) array, for T time steps and N sites
years : numpy.ndarray
A length-T 1D array indexing the years, e.g., [2001, 2001, 2001, ...];
used to identify which of T time steps belong to a year, so that
litterfall fractions sum to one over a year
dt : float
The fraction of a year that each time step represents, e.g., for daily
time steps, should be close to 1/365 (Default: 1/365)
Returns
-------
numpy.ndarray
The fraction of available inputs (e.g., annual NPP) that should be
allocated to litterfall at each time step
'''
def leaf_loss(lai):
# Leaf loss function from CASA, a triangular averaging function
# centered on the current date, where the right limb of the
# triangle is subtracted from the left limb (leading minus
# lagged LAI is equated to leaf loss)
ll = generic_filter(
lai, lambda x: (0.5 * x[0] + x[1]) - (x[3] + 0.5 * x[4]),
size = 5, mode = 'mirror')
return np.where(ll < 0, 0, ll) # Leaf loss cannot be < 0
# Get leaf loss at each site (column-wise)
ll = np.apply_along_axis(leaf_loss, 0, lai)
ll = np.where(np.isnan(ll), 0, ll) # Fill NaNs with zero leaf loss
unique_years = np.unique(years).tolist()
unique_years.sort()
for each_year in unique_years:
# For those dates in this year...
idx = years == each_year
# Calculate the evergreen fraction (ratio of min LAI to mean LAI over
# the course of a year)
efrac = np.apply_along_axis(
lambda x: np.nanmin(x) / np.nanmean(x), 0, lai[idx,:])
# Calculate sum of 1/AnnualNPP (Evergreen input) plus daily leaf loss
# fraction (Deciduous input); Evergreen canopies have constant daily
# inputs
ll[idx,:] = (efrac * dt) + (1 - efrac) * np.divide(
ll[idx,:], ll[idx,:].sum(axis = 0))
return ll
def mean_residence_time(
hdf, units = 'years', subset_id = None, nodata = -9999):
'''
Calculates the mean residence time (MRT) of soil organic carbon (SOC)
pools as the quotient of SOC stock size and heterotrophic respiration
    (RH). Chen et al. (2013, Global and Planetary Change) provide a formal
equation for mean residence time: (SOC/R_H).
Parameters
----------
hdf : h5py.File
The HDF5 file / h5py.File object
units : str
Either "years" (default) or "days"
subset_id : str
(Optional) Can provide keyword designating the desired subset area
nodata : float
(Optional) The NoData or Fill value (Default: -9999)
Returns
-------
tuple
Tuple of: subset array, xoff, yoff, i.e., (numpy.ndarray, Int, Int)
'''
assert units in ('days', 'years'), 'The units argument must be one of: "days" or "years"'
    soc_path = HDF_PATHS['SPL4CMDL']['4']['SOC']
    rh_path = HDF_PATHS['SPL4CMDL']['4']['RH']
if subset_id is not None:
# Get X- and Y-offsets while we're at it
soc, xoff, yoff = subset(
hdf, soc_path, None, None, subset_id = subset_id)
rh, _, _ = subset(
hdf, rh_path, None, None, subset_id = subset_id)
else:
xoff = yoff = 0
soc = hdf[soc_path][:]
rh = hdf[rh_path][:]
# Find those areas of NoData in either array
mask = np.logical_or(soc == nodata, rh == nodata)
mrt = np.divide(soc, rh)
if units == 'years':
# NOTE: No need to guard against NaNs/ NoData here because of mask
mrt = np.divide(mrt, 365.0)
np.place(mrt, mask, nodata) # Put NoData values back in
return (mrt, xoff, yoff)
def npp(
hdf, use_subgrid = False, subset_id = None, subset_bbox = None,
nodata = -9999):
'''
Calculates net primary productivity (NPP), based on the carbon use
efficiency (CUE) of each plant functional type (PFT). NPP is derived
as: `NPP = GPP * CUE`, where `CUE = NPP/GPP`.
Parameters
----------
hdf : h5py.File
The HDF5 file / h5py.File object
use_subgrid : bool
True to use the 1-km subgrid; requires iterating through the PFT means
subset_id : str
(Optional) Can provide keyword designating the desired subset area
subset_bbox : list or tuple
(Optional) Can provide a bounding box to define a desired subset area
nodata : float
The NoData value to mask (Default: -9999)
Returns
-------
numpy.ndarray
NPP values on an EASE-Grid 2.0 array
'''
grid = 'M01' if use_subgrid else 'M09'
cue_array = cue(get_pft_array(grid, subset_id, subset_bbox))
if not use_subgrid:
if subset_id is not None or subset_bbox is not None:
gpp, _, _ = subset(
hdf, 'GPP/gpp_mean', subset_id = subset_id,
subset_bbox = subset_bbox)
else:
gpp = hdf['GPP/gpp_mean'][:]
else:
raise NotImplementedError('No support for the 1-km subgrid')
gpp[gpp == nodata] = np.nan
return np.multiply(gpp, cue_array)
def ordinals365(dates):
'''
Returns a length-T sequence of ordinals on [1,365]. Can be used for
indexing a 365-day climatology; see `climatology365()`.
Parameters
----------
dates : list or tuple
Sequence of datetime.datetime or datetime.date instances
Returns
-------
list
'''
return [
t - 1 if (year % 4 == 0 and t >= 60) else t
for t, year in [(int(t.strftime('%j')), t.year) for t in dates]
]
def rescale_smrz(smrz0, smrz_min, smrz_max = 100):
'''
Rescales root-zone soil-moisture (SMRZ); original SMRZ is in percent
saturation units. NOTE: Although Jones et al. (2017) write "SMRZ_wp is
the plant wilting point moisture level determined by ancillary soil
texture data provided by L4SM..." in actuality it is just `smrz_min`.
Parameters
----------
smrz0 : numpy.ndarray
(T x N) array of original SMRZ data, in percent (%) saturation units
for N sites and T time steps
smrz_min : numpy.ndarray or float
Site-level long-term minimum SMRZ (percent saturation)
smrz_max : numpy.ndarray or float
Site-level long-term maximum SMRZ (percent saturation); can optionally
provide a fixed upper-limit on SMRZ; useful for calculating SMRZ100.
Returns
-------
numpy.ndarray
'''
if smrz_min.ndim == 1:
smrz_min = smrz_min[np.newaxis,:]
assert smrz0.ndim == 2,\
'Expected smrz0 to be a 2D array'
assert smrz0.shape[1] == smrz_min.shape[1],\
'smrz_min should have one value per site'
# Clip input SMRZ to the lower, upper bounds
smrz0 = np.where(smrz0 < smrz_min, smrz_min, smrz0)
smrz0 = np.where(smrz0 > smrz_max, smrz_max, smrz0)
smrz_norm = np.add(np.multiply(100, np.divide(
np.subtract(smrz0, smrz_min),
np.subtract(smrz_max, smrz_min))), 1)
# Log-transform normalized data and rescale to range between
    # 5.0 and 100 (% saturation)
return np.add(
np.multiply(95, np.divide(np.log(smrz_norm), np.log(101))), 5)
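# NOTE (editorial sketch): `_example_rescale_smrz()` below is an illustrative
# addition, not part of the original module; the moisture values are made up.
def _example_rescale_smrz():
    smrz0 = np.array([[20.0, 40.0], [60.0, 80.0]])  # T x N, % saturation
    smrz_min = np.array([15.0, 30.0])               # long-term site minima
    return rescale_smrz(smrz0, smrz_min)            # rescaled to [5, 100]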
def soc_analytical_spinup(litterfall, k_mult, fmet, fstr, decay_rates):
r'''
Using the solution to the differential equations governing change in the
soil organic carbon (SOC) pools, calculates the steady-state size of each
SOC pool.
The analytical steady-state value for the metabolic ("fast") pool is:
$$
C_{met} = \frac{f_{met} \sum NPP}{R_{opt} \sum K_{mult}}
$$
The analytical steady-state value for the structural ("medium") pool is:
$$
C_{str} = \frac{(1 - f_{met})\sum NPP}{R_{opt}\, k_{str} \sum K_{mult}}
$$
The analytical steady-state value for the recalcitrant ("slow") pool is:
$$
C_{rec} = \frac{f_{str}\, k_{str}\, C_{str}}{k_{rec}}
$$
Parameters
----------
litterfall : numpy.ndarray
Average daily litterfall
k_mult : numpy.ndarray
The K_mult climatology, i.e., a (365 x N x 81) array of the long-term
average K_mult value at each of N sites (with 81 1-km subgrid sites)
fmet : numpy.ndarray
The f_metabolic model parameter, as an (N x 81) array
fstr : numpy.ndarray
The f_structural model parameter, as an (N x 81) array
decay_rates : numpy.ndarray
The optimal decay rates for each SOC pool, as a (3 x N x 81) array
Returns
-------
tuple
A 3-element tuple, each element the steady-state values for that pool,
i.e., `(metabolic, structural, recalcitrant)`
'''
# NOTE: litterfall is average daily litterfall (see upstream where we
# divided by 365), so, to obtain annual sum, multiply by 365
c0 = np.divide(
fmet * (litterfall * 365),
decay_rates[0,...] * np.sum(k_mult, axis = 0))
c1 = np.divide(
(1 - fmet) * (litterfall * 365),
decay_rates[1,...] * np.sum(k_mult, axis = 0))
c2 = np.divide(
fstr * decay_rates[1,...] * c1,
decay_rates[2,...])
c0[np.isnan(c0)] = 0
c1[np.isnan(c1)] = 0
c2[np.isnan(c2)] = 0
return (c0, c1, c2)
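# NOTE (editorial sketch): `_example_soc_spinup()` below is an illustrative
# addition, not part of the original module; the array shapes follow the
# docstring above but the values are made up.
def _example_soc_spinup():
    n = 2  # number of sites
    litterfall = np.full((n, 81), 1.5)        # mean daily litterfall
    kmult_clim = np.full((365, n, 81), 0.5)   # K_mult climatology
    fmet = np.full((n, 81), 0.7)
    fstr = np.full((n, 81), 0.3)
    decay_rates = np.stack([
        np.full((n, 81), rate) for rate in (0.03, 0.005, 0.001)])
    return soc_analytical_spinup(
        litterfall, kmult_clim, fmet, fstr, decay_rates)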
def tridiag_solver(tri, r, kl = 1, ku = 1, banded = None):
'''
Solution to the tridiagonal equation by solving the system of equations
in sparse form. Creates a banded matrix consisting of the diagonals,
starting with the lowest diagonal and moving up, e.g., for matrix:
A = [[10., 2., 0., 0.],
[ 3., 10., 4., 0.],
[ 0., 1., 7., 5.],
[ 0., 0., 3., 4.]]
banded = [[ 3., 1., 3., 0.],
[10., 10., 7., 4.],
[ 0., 2., 4., 5.]]
    The banded matrix is what should be provided to the optional "banded"
argument, which should be used if the banded matrix can be created faster
than `scipy.sparse.dia_matrix()`.
Parameters
----------
tri : numpy.ndarray
A tridiagonal matrix (N x N)
r : numpy.ndarray
Vector of solutions to the system, Ax = r, where A is the tridiagonal
matrix
kl : int
Lower bandwidth (number of lower diagonals) (Default: 1)
ku : int
Upper bandwidth (number of upper diagonals) (Default: 1)
banded : numpy.ndarray
(Optional) Provide the banded matrix with diagonals along the rows;
this can be faster than scipy.sparse.dia_matrix()
Returns
-------
numpy.ndarray
'''
assert tri.ndim == 2 and (tri.shape[0] == tri.shape[1]),\
'Only supports 2-dimensional square matrices'
if banded is None:
banded = dia_matrix(tri).data
# If it is necessary, in a future implementation, to extract diagonals;
# this is a starting point for problems where kl = ku = 1
# n = tri.shape[0]
# a, b, c = [ # (n-1, n, n-1) refer to the lengths of each vector
# sparse[(i+1),(max(0,i)):j]
# for i, j in zip(range(-1, 2), (n-1, n, n+1))
# ]
return solve_banded((kl, ku), np.flipud(banded), r)
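# NOTE (editorial sketch): `_example_tridiag_solver()` below is an
# illustrative addition, not part of the original module; it solves the
# 4 x 4 system shown in the docstring for a made-up right-hand side.
def _example_tridiag_solver():
    tri = np.array([
        [10., 2., 0., 0.],
        [ 3., 10., 4., 0.],
        [ 0., 1., 7., 5.],
        [ 0., 0., 3., 4.]])
    r = np.array([3., 4., 5., 6.])
    return tridiag_solver(tri, r)  # solution x such that tri @ x = r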
def vpd(qv2m, ps, temp_k):
r'''
    Calculates vapor pressure deficit (VPD); unfortunately, this formula
    cannot be properly attributed to an original source. It is taken from the
    SMAP L4C Science code base, so it is exactly how L4C calculates VPD.
$$
\mathrm{VPD} = 610.7 \times \mathrm{exp}\left(
\frac{17.38 \times T_C}{239 + T_C}
    \right) - \frac{P \times [\mathrm{QV2M}]}{0.622 + (0.378 \times [\mathrm{QV2M}])}
$$
Where P is the surface pressure (Pa), QV2M is the water vapor mixing
ratio at 2-meter height, and T is the temperature in degrees C (though
this function requires units of Kelvin when called).
NOTE: A variation on this formula can be found in the text:
<NAME>. and <NAME>. 1990.
Principles of Environmental Physics, 2nd. Ed. Edward Arnold Publisher.
See also:
https://glossary.ametsoc.org/wiki/Mixing_ratio
Parameters
----------
qv2m : numpy.ndarray or float
QV2M, the water vapor mixing ratio at 2-m height
ps : numpy.ndarray or float
The surface pressure, in Pascals
temp_k : numpy.ndarray or float
The temperature at 2-m height in degrees Kelvin
Returns
-------
numpy.ndarray or float
VPD in Pascals
'''
temp_c = temp_k - 273.15 # Convert temperature to degrees C
avp = np.divide(np.multiply(qv2m, ps), 0.622 + (0.378 * qv2m))
x = np.divide(17.38 * temp_c, (239 + temp_c))
esat = 610.7 * np.exp(x)
return np.subtract(esat, avp)
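# NOTE (editorial sketch): `_example_vpd()` below is an illustrative addition,
# not part of the original module; the meteorological inputs are made up.
def _example_vpd():
    qv2m = 0.01        # water vapor mixing ratio at 2-m height (kg kg-1)
    ps = 101325.0      # surface pressure (Pa)
    temp_k = 298.15    # 2-m air temperature (deg K)
    return vpd(qv2m, ps, temp_k)  # VPD in Pascals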
|
[
"numpy.sum",
"numpy.ones",
"numpy.isnan",
"numpy.exp",
"numpy.unique",
"numpy.nanmean",
"numpy.multiply",
"pyl4c.stats.linear_constraint",
"numpy.power",
"numpy.isfinite",
"numpy.place",
"numpy.apply_along_axis",
"pyl4c.utils.get_pft_array",
"scipy.sparse.dia_matrix",
"numpy.var",
"numpy.stack",
"functools.partial",
"numpy.divide",
"numpy.flipud",
"numpy.sort",
"scipy.ndimage.generic_filter",
"pyl4c.utils.subset",
"numpy.subtract",
"numpy.logical_and",
"numpy.log",
"numpy.zeros",
"numpy.nanmin",
"numpy.where",
"numpy.logical_or"
] |
[((4740, 4761), 'numpy.sort', 'np.sort', (['series[:, 0]'], {}), '(series[:, 0])\n', (4747, 4761), True, 'import numpy as np\n'), ((4769, 4790), 'numpy.sort', 'np.sort', (['series[:, 1]'], {}), '(series[:, 1])\n', (4776, 4790), True, 'import numpy as np\n'), ((9339, 9367), 'numpy.zeros', 'np.zeros', (['arr_24hr.shape[1:]'], {}), '(arr_24hr.shape[1:])\n', (9347, 9367), True, 'import numpy as np\n'), ((9552, 9605), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda x: x[1] > x[0])', '(0)', 'updown'], {}), '(lambda x: x[1] > x[0], 0, updown)\n', (9571, 9605), True, 'import numpy as np\n'), ((9651, 9709), 'numpy.logical_and', 'np.logical_and', (['(updown[0, ...] == -1)', '(updown[1, ...] == -1)'], {}), '(updown[0, ...] == -1, updown[1, ...] == -1)\n', (9665, 9709), True, 'import numpy as np\n'), ((11422, 11460), 'numpy.stack', 'np.stack', (['(arr_daytime, arr_nighttime)'], {}), '((arr_daytime, arr_nighttime))\n', (11430, 11460), True, 'import numpy as np\n'), ((12901, 12952), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['tmin0']", "params['tmin1']"], {}), "(params['tmin0'], params['tmin1'])\n", (12918, 12952), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((12966, 13027), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['vpd0']", "params['vpd1']", '"""reversed"""'], {}), "(params['vpd0'], params['vpd1'], 'reversed')\n", (12983, 13027), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((13041, 13092), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['smrz0']", "params['smrz1']"], {}), "(params['smrz0'], params['smrz1'])\n", (13058, 13092), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((13106, 13153), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['ft0']", '(1.0)', '"""binary"""'], {}), "(params['ft0'], 1.0, 'binary')\n", (13123, 13153), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((14066, 14107), 'functools.partial', 'partial', (['arrhenius'], {'beta0': "params['tsoil']"}), "(arrhenius, beta0=params['tsoil'])\n", (14073, 14107), False, 'from functools import partial\n'), ((14124, 14175), 'pyl4c.stats.linear_constraint', 'linear_constraint', (["params['smsf0']", "params['smsf1']"], {}), "(params['smsf0'], params['smsf1'])\n", (14141, 14175), False, 'from pyl4c.stats import ols, ols_variance, linear_constraint\n'), ((16701, 16739), 'numpy.apply_along_axis', 'np.apply_along_axis', (['leaf_loss', '(0)', 'lai'], {}), '(leaf_loss, 0, lai)\n', (16720, 16739), True, 'import numpy as np\n'), ((18932, 18974), 'numpy.logical_or', 'np.logical_or', (['(soc == nodata)', '(rh == nodata)'], {}), '(soc == nodata, rh == nodata)\n', (18945, 18974), True, 'import numpy as np\n'), ((18985, 19003), 'numpy.divide', 'np.divide', (['soc', 'rh'], {}), '(soc, rh)\n', (18994, 19003), True, 'import numpy as np\n'), ((19144, 19171), 'numpy.place', 'np.place', (['mrt', 'mask', 'nodata'], {}), '(mrt, mask, nodata)\n', (19152, 19171), True, 'import numpy as np\n'), ((20608, 20635), 'numpy.multiply', 'np.multiply', (['gpp', 'cue_array'], {}), '(gpp, cue_array)\n', (20619, 20635), True, 'import numpy as np\n'), ((22281, 22324), 'numpy.where', 'np.where', (['(smrz0 < smrz_min)', 'smrz_min', 'smrz0'], {}), '(smrz0 < smrz_min, smrz_min, smrz0)\n', (22289, 22324), True, 'import numpy as np\n'), ((22337, 22380), 'numpy.where', 'np.where', (['(smrz0 > smrz_max)', 'smrz_max', 'smrz0'], {}), '(smrz0 > smrz_max, smrz_max, smrz0)\n', (22345, 22380), True, 
'import numpy as np\n'), ((24538, 24601), 'numpy.divide', 'np.divide', (['(fstr * decay_rates[1, ...] * c1)', 'decay_rates[2, ...]'], {}), '(fstr * decay_rates[1, ...] * c1, decay_rates[2, ...])\n', (24547, 24601), True, 'import numpy as np\n'), ((27992, 28031), 'numpy.divide', 'np.divide', (['(17.38 * temp_c)', '(239 + temp_c)'], {}), '(17.38 * temp_c, 239 + temp_c)\n', (28001, 28031), True, 'import numpy as np\n'), ((28068, 28090), 'numpy.subtract', 'np.subtract', (['esat', 'avp'], {}), '(esat, avp)\n', (28079, 28090), True, 'import numpy as np\n'), ((1266, 1291), 'numpy.subtract', 'np.subtract', (['tsoil', 'beta2'], {}), '(tsoil, beta2)\n', (1277, 1291), True, 'import numpy as np\n'), ((1488, 1511), 'numpy.where', 'np.where', (['(y0 < 0)', '(0)', 'y0'], {}), '(y0 < 0, 0, y0)\n', (1496, 1511), True, 'import numpy as np\n'), ((10509, 10543), 'numpy.logical_or', 'np.logical_or', (['never_up', 'sun_is_up'], {}), '(never_up, sun_is_up)\n', (10522, 10543), True, 'import numpy as np\n'), ((11132, 11168), 'numpy.divide', 'np.divide', (['arr_daytime', 'daylight_hrs'], {}), '(arr_daytime, daylight_hrs)\n', (11141, 11168), True, 'import numpy as np\n'), ((11193, 11236), 'numpy.divide', 'np.divide', (['arr_nighttime', '(24 - daylight_hrs)'], {}), '(arr_nighttime, 24 - daylight_hrs)\n', (11202, 11236), True, 'import numpy as np\n'), ((16454, 16552), 'scipy.ndimage.generic_filter', 'generic_filter', (['lai', '(lambda x: 0.5 * x[0] + x[1] - (x[3] + 0.5 * x[4]))'], {'size': '(5)', 'mode': '"""mirror"""'}), "(lai, lambda x: 0.5 * x[0] + x[1] - (x[3] + 0.5 * x[4]), size\n =5, mode='mirror')\n", (16468, 16552), False, 'from scipy.ndimage import generic_filter\n'), ((16594, 16617), 'numpy.where', 'np.where', (['(ll < 0)', '(0)', 'll'], {}), '(ll < 0, 0, ll)\n', (16602, 16617), True, 'import numpy as np\n'), ((16758, 16770), 'numpy.isnan', 'np.isnan', (['ll'], {}), '(ll)\n', (16766, 16770), True, 'import numpy as np\n'), ((18619, 18673), 'pyl4c.utils.subset', 'subset', (['hdf', 'soc_path', 'None', 'None'], {'subset_id': 'subset_id'}), '(hdf, soc_path, None, None, subset_id=subset_id)\n', (18625, 18673), False, 'from pyl4c.utils import get_pft_array, subset\n'), ((18708, 18761), 'pyl4c.utils.subset', 'subset', (['hdf', 'rh_path', 'None', 'None'], {'subset_id': 'subset_id'}), '(hdf, rh_path, None, None, subset_id=subset_id)\n', (18714, 18761), False, 'from pyl4c.utils import get_pft_array, subset\n'), ((19118, 19139), 'numpy.divide', 'np.divide', (['mrt', '(365.0)'], {}), '(mrt, 365.0)\n', (19127, 19139), True, 'import numpy as np\n'), ((20166, 20209), 'pyl4c.utils.get_pft_array', 'get_pft_array', (['grid', 'subset_id', 'subset_bbox'], {}), '(grid, subset_id, subset_bbox)\n', (20179, 20209), False, 'from pyl4c.utils import get_pft_array, subset\n'), ((24624, 24636), 'numpy.isnan', 'np.isnan', (['c0'], {}), '(c0)\n', (24632, 24636), True, 'import numpy as np\n'), ((24649, 24661), 'numpy.isnan', 'np.isnan', (['c1'], {}), '(c1)\n', (24657, 24661), True, 'import numpy as np\n'), ((24674, 24686), 'numpy.isnan', 'np.isnan', (['c2'], {}), '(c2)\n', (24682, 24686), True, 'import numpy as np\n'), ((26576, 26593), 'numpy.flipud', 'np.flipud', (['banded'], {}), '(banded)\n', (26585, 26593), True, 'import numpy as np\n'), ((27940, 27961), 'numpy.multiply', 'np.multiply', (['qv2m', 'ps'], {}), '(qv2m, ps)\n', (27951, 27961), True, 'import numpy as np\n'), ((28050, 28059), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (28056, 28059), True, 'import numpy as np\n'), ((1387, 1404), 'numpy.subtract', 'np.subtract', (['a', 'b'], 
{}), '(a, b)\n', (1398, 1404), True, 'import numpy as np\n'), ((4279, 4307), 'numpy.power', 'np.power', (['x[:, 0]', '(npoly + 1)'], {}), '(x[:, 0], npoly + 1)\n', (4287, 4307), True, 'import numpy as np\n'), ((4445, 4456), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (4453, 4456), True, 'import numpy as np\n'), ((4927, 4936), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (4933, 4936), True, 'import numpy as np\n'), ((4945, 4954), 'numpy.var', 'np.var', (['x'], {}), '(x)\n', (4951, 4954), True, 'import numpy as np\n'), ((5087, 5096), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (5093, 5096), True, 'import numpy as np\n'), ((5106, 5115), 'numpy.var', 'np.var', (['x'], {}), '(x)\n', (5112, 5115), True, 'import numpy as np\n'), ((10559, 10595), 'numpy.where', 'np.where', (['mask', 'arr_24hr[hr, ...]', '(0)'], {}), '(mask, arr_24hr[hr, ...], 0)\n', (10567, 10595), True, 'import numpy as np\n'), ((10656, 10693), 'numpy.where', 'np.where', (['(~mask)', 'arr_24hr[hr, ...]', '(0)'], {}), '(~mask, arr_24hr[hr, ...], 0)\n', (10664, 10693), True, 'import numpy as np\n'), ((10976, 10996), 'numpy.where', 'np.where', (['mask', '(1)', '(0)'], {}), '(mask, 1, 0)\n', (10984, 10996), True, 'import numpy as np\n'), ((16830, 16846), 'numpy.unique', 'np.unique', (['years'], {}), '(years)\n', (16839, 16846), True, 'import numpy as np\n'), ((20320, 20393), 'pyl4c.utils.subset', 'subset', (['hdf', '"""GPP/gpp_mean"""'], {'subset_id': 'subset_id', 'subset_bbox': 'subset_bbox'}), "(hdf, 'GPP/gpp_mean', subset_id=subset_id, subset_bbox=subset_bbox)\n", (20326, 20393), False, 'from pyl4c.utils import get_pft_array, subset\n'), ((24387, 24409), 'numpy.sum', 'np.sum', (['k_mult'], {'axis': '(0)'}), '(k_mult, axis=0)\n', (24393, 24409), True, 'import numpy as np\n'), ((24503, 24525), 'numpy.sum', 'np.sum', (['k_mult'], {'axis': '(0)'}), '(k_mult, axis=0)\n', (24509, 24525), True, 'import numpy as np\n'), ((26188, 26203), 'scipy.sparse.dia_matrix', 'dia_matrix', (['tri'], {}), '(tri)\n', (26198, 26203), False, 'from scipy.sparse import dia_matrix\n'), ((4466, 4487), 'numpy.ones', 'np.ones', (['(npoly + 1,)'], {}), '((npoly + 1,))\n', (4473, 4487), True, 'import numpy as np\n'), ((7369, 7411), 'numpy.nanmean', 'np.nanmean', (['x[ordinal == day, ...]'], {'axis': '(0)'}), '(x[ordinal == day, ...], axis=0)\n', (7379, 7411), True, 'import numpy as np\n'), ((10145, 10203), 'numpy.logical_and', 'np.logical_and', (['(updown[0, ...] <= hr)', '(hr <= updown[1, ...])'], {}), '(updown[0, ...] <= hr, hr <= updown[1, ...])\n', (10159, 10203), True, 'import numpy as np\n'), ((10287, 10344), 'numpy.logical_or', 'np.logical_or', (['(updown[0, ...] <= hr)', '(hr <= updown[1, ...])'], {}), '(updown[0, ...] 
<= hr, hr <= updown[1, ...])\n', (10300, 10344), True, 'import numpy as np\n'), ((11379, 11405), 'numpy.isfinite', 'np.isfinite', (['arr_nighttime'], {}), '(arr_nighttime)\n', (11390, 11405), True, 'import numpy as np\n'), ((22440, 22468), 'numpy.subtract', 'np.subtract', (['smrz0', 'smrz_min'], {}), '(smrz0, smrz_min)\n', (22451, 22468), True, 'import numpy as np\n'), ((22478, 22509), 'numpy.subtract', 'np.subtract', (['smrz_max', 'smrz_min'], {}), '(smrz_max, smrz_min)\n', (22489, 22509), True, 'import numpy as np\n'), ((22670, 22687), 'numpy.log', 'np.log', (['smrz_norm'], {}), '(smrz_norm)\n', (22676, 22687), True, 'import numpy as np\n'), ((22689, 22700), 'numpy.log', 'np.log', (['(101)'], {}), '(101)\n', (22695, 22700), True, 'import numpy as np\n'), ((4631, 4652), 'numpy.ones', 'np.ones', (['(npoly + 1,)'], {}), '((npoly + 1,))\n', (4638, 4652), True, 'import numpy as np\n'), ((17161, 17173), 'numpy.nanmin', 'np.nanmin', (['x'], {}), '(x)\n', (17170, 17173), True, 'import numpy as np\n'), ((17176, 17189), 'numpy.nanmean', 'np.nanmean', (['x'], {}), '(x)\n', (17186, 17189), True, 'import numpy as np\n')]
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Scipp contributors (https://github.com/scipp)
# @author <NAME>
from .view import PlotView
from ..core import zeros, scalar
import numpy as np
from matplotlib.collections import PathCollection
class PlotView2d(PlotView):
"""
View object for 2 dimensional plots. Contains a `PlotFigure2d`.
The difference between `PlotView2d` and `PlotFigure2d` is that
    `PlotView2d` also handles the communication with the `PlotController`
    that concerns the `PlotProfile` plot displayed below the `PlotFigure2d`.
    In addition, `PlotView2d` provides dynamic image resampling for large
    input data.
"""
def __init__(self, figure, formatters):
super().__init__(figure=figure, formatters=formatters)
self._axes = ['y', 'x']
self._marker_index = []
self._marks_scatter = None
self._lim_updated = False
self.current_lims = {}
self.global_lims = {}
for event in ['xlim_changed', 'ylim_changed']:
self.figure.ax.callbacks.connect(event, self._lims_changed)
def _make_data(self, new_values, mask_info):
dims = new_values.dims
for dim in dims:
xmin = new_values.coords[dim].values[0]
xmax = new_values.coords[dim].values[-1]
if dim not in self.global_lims:
self.global_lims[dim] = [xmin, xmax]
self.current_lims[dim] = [xmin, xmax]
values = new_values.values
slice_values = {
"values":
values,
"extent":
np.array([self.current_lims[dims[1]],
self.current_lims[dims[0]]]).flatten()
}
mask_info = next(iter(mask_info.values()))
if len(mask_info) > 0:
# Use automatic broadcasting in Scipp variables
msk = zeros(sizes=new_values.sizes, dtype='int32', unit=None)
for m, val in mask_info.items():
if val:
msk += new_values.masks[m].astype(msk.dtype)
slice_values["masks"] = msk.values
return slice_values
def _lims_changed(self, *args):
"""
Update limits and resample the image according to new viewport.
When we use the zoom tool, the event listener on the displayed axes
limits detects two separate events: one for the x axis and another for
the y axis. We use a small locking mechanism here to trigger only a
single resampling update by waiting for the y limits to also change.
"""
for dim in self.dims:
if dim not in self.global_lims:
return
if not self._lim_updated:
self._lim_updated = True
return
self._lim_updated = False
# Make sure we don't overrun the original array bounds
dimx = self.dims[1]
dimy = self.dims[0]
xylims = {
dimx: np.clip(self.figure.ax.get_xlim(), *sorted(self.global_lims[dimx])),
dimy: np.clip(self.figure.ax.get_ylim(), *sorted(self.global_lims[dimy]))
}
dx = np.abs(self.current_lims[dimx][1] - self.current_lims[dimx][0])
dy = np.abs(self.current_lims[dimy][1] - self.current_lims[dimy][0])
diffx = np.abs(self.current_lims[dimx] - xylims[dimx]) / dx
diffy = np.abs(self.current_lims[dimy] - xylims[dimy]) / dy
diff = diffx.sum() + diffy.sum()
# Only resample image if the changes in axes limits are large enough to
# avoid too many updates while panning.
if diff > 0.1:
self.current_lims.update(xylims)
self.controller.update_data(slices=self.current_limits)
# If we are zooming, rescale to data?
# TODO This will trigger a second call to view.refresh and thus
# self.update_data. Why does the controller have to call refresh
# to make view.rescale_to_data take effect?
if self.figure.rescale_on_zoom():
self.controller.rescale_to_data()
@property
def current_limits(self):
limits = {}
for dim in self.dims:
low, high = self.current_lims[dim]
unit = self._data.coords[dim].unit
limits[dim] = [scalar(low, unit=unit), scalar(high, unit=unit)]
return limits
@property
def global_limits(self):
limits = {}
for dim in self.dims:
low, high = self.global_lims[dim]
unit = self._data.coords[dim].unit
limits[dim] = [scalar(low, unit=unit), scalar(high, unit=unit)]
return limits
def _update_axes(self):
"""
Update the current and global axes limits, before updating the figure
axes.
"""
super()._update_axes()
self.clear_marks()
def clear_marks(self):
"""
Reset all scatter markers when a profile is reset.
"""
if self._marks_scatter is not None:
self._marks_scatter = None
self.figure.ax.collections = []
self.figure.draw()
def _do_handle_pick(self, event):
"""
Return the index of the picked scatter point, None if something else
is picked.
"""
if isinstance(event.artist, PathCollection):
return self._marker_index[event.ind[0]]
def _do_mark(self, index, color, x, y):
"""
Add a marker (colored scatter point).
"""
if self._marks_scatter is None:
self._marks_scatter = self.figure.ax.scatter([x], [y],
c=[color],
edgecolors="w",
picker=5,
zorder=10)
else:
new_offsets = np.concatenate((self._marks_scatter.get_offsets(), [[x, y]]),
axis=0)
new_colors = np.concatenate((self._marks_scatter.get_facecolors(), [color]),
axis=0)
self._marks_scatter.set_offsets(new_offsets)
self._marks_scatter.set_facecolors(new_colors)
self._marker_index.append(index)
self.figure.draw()
def remove_mark(self, index):
"""
Remove a marker (scatter point).
"""
i = self._marker_index.index(index)
xy = np.delete(self._marks_scatter.get_offsets(), i, axis=0)
c = np.delete(self._marks_scatter.get_facecolors(), i, axis=0)
self._marks_scatter.set_offsets(xy)
self._marks_scatter.set_facecolors(c)
self._marker_index.remove(index)
self.figure.draw()
|
[
"numpy.array",
"numpy.abs"
] |
[((3143, 3206), 'numpy.abs', 'np.abs', (['(self.current_lims[dimx][1] - self.current_lims[dimx][0])'], {}), '(self.current_lims[dimx][1] - self.current_lims[dimx][0])\n', (3149, 3206), True, 'import numpy as np\n'), ((3220, 3283), 'numpy.abs', 'np.abs', (['(self.current_lims[dimy][1] - self.current_lims[dimy][0])'], {}), '(self.current_lims[dimy][1] - self.current_lims[dimy][0])\n', (3226, 3283), True, 'import numpy as np\n'), ((3300, 3346), 'numpy.abs', 'np.abs', (['(self.current_lims[dimx] - xylims[dimx])'], {}), '(self.current_lims[dimx] - xylims[dimx])\n', (3306, 3346), True, 'import numpy as np\n'), ((3368, 3414), 'numpy.abs', 'np.abs', (['(self.current_lims[dimy] - xylims[dimy])'], {}), '(self.current_lims[dimy] - xylims[dimy])\n', (3374, 3414), True, 'import numpy as np\n'), ((1610, 1676), 'numpy.array', 'np.array', (['[self.current_lims[dims[1]], self.current_lims[dims[0]]]'], {}), '([self.current_lims[dims[1]], self.current_lims[dims[0]]])\n', (1618, 1676), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Measure Rabi oscillation by changing the amplitude of the control pulse.
The control pulse has a sin^2 envelope, while the readout pulse is square.
"""
import ast
import math
import os
import time
import h5py
import numpy as np
from numpy.typing import ArrayLike
from mla_server import set_dc_bias
from presto.hardware import AdcFSample, AdcMode, DacFSample, DacMode
from presto import pulsed
from presto.utils import get_sourcecode, sin2
class RabiAmp:
def __init__(
self,
readout_freq: float,
control_freq: float,
readout_port: int,
control_port: int,
readout_amp: float,
readout_duration: float,
control_duration: float,
sample_duration: float,
sample_port: int,
control_amp_arr: ArrayLike,
wait_delay: float,
readout_sample_delay: float,
num_averages: int,
jpa_params=None,
):
self.readout_freq = readout_freq
self.control_freq = control_freq
self.readout_port = readout_port
self.control_port = control_port
self.readout_amp = readout_amp
self.readout_duration = readout_duration
self.control_duration = control_duration
self.sample_duration = sample_duration
self.sample_port = sample_port
self.control_amp_arr = control_amp_arr
self.wait_delay = wait_delay
self.readout_sample_delay = readout_sample_delay
self.num_averages = num_averages
self.rabi_n = len(control_amp_arr)
self.t_arr = None # replaced by run
self.store_arr = None # replaced by run
self.jpa_params = jpa_params
def run(
self,
presto_address,
presto_port=None,
ext_ref_clk=False,
):
# Instantiate interface class
with pulsed.Pulsed(
address=presto_address,
port=presto_port,
ext_ref_clk=ext_ref_clk,
adc_mode=AdcMode.Mixed,
adc_fsample=AdcFSample.G2,
dac_mode=[DacMode.Mixed42, DacMode.Mixed02, DacMode.Mixed02, DacMode.Mixed02],
dac_fsample=[DacFSample.G10, DacFSample.G6, DacFSample.G6, DacFSample.G6],
) as pls:
pls.hardware.set_adc_attenuation(self.sample_port, 0.0)
pls.hardware.set_dac_current(self.readout_port, 32_000)
pls.hardware.set_dac_current(self.control_port, 32_000)
pls.hardware.set_inv_sinc(self.readout_port, 0)
pls.hardware.set_inv_sinc(self.control_port, 0)
pls.hardware.configure_mixer(
freq=self.readout_freq,
in_ports=self.sample_port,
out_ports=self.readout_port,
sync=False, # sync in next call
)
pls.hardware.configure_mixer(
freq=self.control_freq,
out_ports=self.control_port,
sync=True, # sync here
)
if self.jpa_params is not None:
pls.hardware.set_lmx(self.jpa_params['jpa_pump_freq'], self.jpa_params['jpa_pump_pwr'])
set_dc_bias(self.jpa_params['jpa_bias_port'], self.jpa_params['jpa_bias'])
time.sleep(1.0)
# ************************************
# *** Setup measurement parameters ***
# ************************************
# Setup lookup tables for frequencies
pls.setup_freq_lut(
output_ports=self.readout_port,
group=0,
frequencies=0.0,
phases=0.0,
phases_q=0.0,
)
pls.setup_freq_lut(
output_ports=self.control_port,
group=0,
frequencies=0.0,
phases=0.0,
phases_q=0.0,
)
# Setup lookup tables for amplitudes
pls.setup_scale_lut(
output_ports=self.readout_port,
group=0,
scales=self.readout_amp,
)
pls.setup_scale_lut(
output_ports=self.control_port,
group=0,
scales=self.control_amp_arr,
)
# Setup readout and control pulses
# use setup_long_drive to create a pulse with square envelope
# setup_long_drive supports smooth rise and fall transitions for the pulse,
# but we keep it simple here
readout_pulse = pls.setup_long_drive(
output_port=self.readout_port,
group=0,
duration=self.readout_duration,
amplitude=1.0,
amplitude_q=1.0,
rise_time=0e-9,
fall_time=0e-9,
)
# For the control pulse we create a sine-squared envelope,
# and use setup_template to use the user-defined envelope
control_ns = int(round(self.control_duration *
pls.get_fs("dac"))) # number of samples in the control template
control_envelope = sin2(control_ns)
control_pulse = pls.setup_template(
output_port=self.control_port,
group=0,
template=control_envelope,
template_q=control_envelope,
envelope=True,
)
# Setup sampling window
pls.set_store_ports(self.sample_port)
pls.set_store_duration(self.sample_duration)
# ******************************
# *** Program pulse sequence ***
# ******************************
T = 0.0 # s, start at time zero ...
# Control pulse
pls.reset_phase(T, self.control_port)
pls.output_pulse(T, control_pulse)
# Readout pulse starts right after control pulse
T += self.control_duration
pls.reset_phase(T, self.readout_port)
pls.output_pulse(T, readout_pulse)
# Sampling window
pls.store(T + self.readout_sample_delay)
# Move to next Rabi amplitude
T += self.readout_duration
pls.next_scale(T, self.control_port) # every iteration will have a different amplitude
# Wait for decay
T += self.wait_delay
# **************************
# *** Run the experiment ***
# **************************
# repeat the whole sequence `rabi_n` times
# then average `num_averages` times
pls.run(
period=T,
repeat_count=self.rabi_n,
num_averages=self.num_averages,
print_time=True,
)
t_arr, (data_I, data_Q) = pls.get_store_data()
if self.jpa_params is not None:
pls.hardware.set_lmx(0.0, 0.0)
set_dc_bias(self.jpa_params['jpa_bias_port'], 0.0)
self.t_arr = t_arr
self.store_arr = data_I + 1j * data_Q
return self.save()
def save(self, save_filename=None):
# *************************
# *** Save data to HDF5 ***
# *************************
if save_filename is None:
script_path = os.path.realpath(__file__) # full path of current script
current_dir, script_basename = os.path.split(script_path)
script_filename = os.path.splitext(script_basename)[0] # name of current script
timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) # current date and time
save_basename = f"{script_filename:s}_{timestamp:s}.h5" # name of save file
save_path = os.path.join(current_dir, "data", save_basename) # full path of save file
else:
save_path = os.path.realpath(save_filename)
source_code = get_sourcecode(__file__) # save also the sourcecode of the script for future reference
with h5py.File(save_path, "w") as h5f:
dt = h5py.string_dtype(encoding='utf-8')
ds = h5f.create_dataset("source_code", (len(source_code), ), dt)
for ii, line in enumerate(source_code):
ds[ii] = line
for attribute in self.__dict__:
print(f"{attribute}: {self.__dict__[attribute]}")
if attribute.startswith("_"):
# don't save private attributes
continue
if attribute == "jpa_params":
h5f.attrs[attribute] = str(self.__dict__[attribute])
elif np.isscalar(self.__dict__[attribute]):
h5f.attrs[attribute] = self.__dict__[attribute]
else:
h5f.create_dataset(attribute, data=self.__dict__[attribute])
print(f"Data saved to: {save_path}")
return save_path
@classmethod
def load(cls, load_filename):
with h5py.File(load_filename, "r") as h5f:
num_averages = h5f.attrs["num_averages"]
control_freq = h5f.attrs["control_freq"]
readout_freq = h5f.attrs["readout_freq"]
readout_duration = h5f.attrs["readout_duration"]
control_duration = h5f.attrs["control_duration"]
readout_amp = h5f.attrs["readout_amp"]
sample_duration = h5f.attrs["sample_duration"]
# rabi_n = h5f.attrs["rabi_n"]
wait_delay = h5f.attrs["wait_delay"]
readout_sample_delay = h5f.attrs["readout_sample_delay"]
control_amp_arr = h5f["control_amp_arr"][()]
t_arr = h5f["t_arr"][()]
store_arr = h5f["store_arr"][()]
# source_code = h5f["source_code"][()]
# these were added later
try:
readout_port = h5f.attrs["readout_port"]
except KeyError:
readout_port = 0
try:
control_port = h5f.attrs["control_port"]
except KeyError:
control_port = 0
try:
sample_port = h5f.attrs["sample_port"]
except KeyError:
sample_port = 0
try:
jpa_params = ast.literal_eval(h5f.attrs["jpa_params"])
except KeyError:
jpa_params = None
self = cls(
readout_freq,
control_freq,
readout_port,
control_port,
readout_amp,
readout_duration,
control_duration,
sample_duration,
sample_port,
control_amp_arr,
wait_delay,
readout_sample_delay,
num_averages,
jpa_params,
)
self.control_amp_arr = control_amp_arr
self.t_arr = t_arr
self.store_arr = store_arr
return self
def analyze(self, all_plots=False):
if self.t_arr is None:
raise RuntimeError
if self.store_arr is None:
raise RuntimeError
import matplotlib.pyplot as plt
from presto.utils import rotate_opt
ret_fig = []
t_low = 1500 * 1e-9
t_high = 2000 * 1e-9
# t_span = t_high - t_low
idx_low = np.argmin(np.abs(self.t_arr - t_low))
idx_high = np.argmin(np.abs(self.t_arr - t_high))
idx = np.arange(idx_low, idx_high)
# nr_samples = len(idx)
if all_plots:
# Plot raw store data for first iteration as a check
fig1, ax1 = plt.subplots(2, 1, sharex=True, tight_layout=True)
ax11, ax12 = ax1
ax11.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
ax12.axvspan(1e9 * t_low, 1e9 * t_high, facecolor="#dfdfdf")
ax11.plot(1e9 * self.t_arr, np.abs(self.store_arr[0, 0, :]))
ax12.plot(1e9 * self.t_arr, np.angle(self.store_arr[0, 0, :]))
ax12.set_xlabel("Time [ns]")
fig1.show()
ret_fig.append(fig1)
# Analyze Rabi
resp_arr = np.mean(self.store_arr[:, 0, idx], axis=-1)
data = rotate_opt(resp_arr)
# Fit data
popt_x, perr_x = _fit_period(self.control_amp_arr, np.real(data))
period = popt_x[3]
period_err = perr_x[3]
pi_amp = period / 2
pi_2_amp = period / 4
print("Tau pulse amplitude: {} +- {} FS".format(period, period_err))
print("Pi pulse amplitude: {} +- {} FS".format(pi_amp, period_err / 2))
print("Pi/2 pulse amplitude: {} +- {} FS".format(pi_2_amp, period_err / 4))
if all_plots:
fig2, ax2 = plt.subplots(4, 1, sharex=True, figsize=(6.4, 6.4), tight_layout=True)
ax21, ax22, ax23, ax24 = ax2
ax21.plot(self.control_amp_arr, np.abs(data))
ax22.plot(self.control_amp_arr, np.angle(data))
ax23.plot(self.control_amp_arr, np.real(data))
ax23.plot(self.control_amp_arr, _func(self.control_amp_arr, *popt_x), '--')
ax24.plot(self.control_amp_arr, np.imag(data))
ax21.set_ylabel("Amplitude [FS]")
ax22.set_ylabel("Phase [rad]")
ax23.set_ylabel("I [FS]")
ax24.set_ylabel("Q [FS]")
ax2[-1].set_xlabel("Pulse amplitude [FS]")
fig2.show()
ret_fig.append(fig2)
data_max = np.abs(data).max()
unit = ""
mult = 1.0
if data_max < 1e-6:
unit = "n"
mult = 1e9
elif data_max < 1e-3:
unit = "μ"
mult = 1e6
elif data_max < 1e0:
unit = "m"
mult = 1e3
fig3, ax3 = plt.subplots(tight_layout=True)
ax3.plot(self.control_amp_arr, mult * np.real(data), '.')
ax3.plot(self.control_amp_arr, mult * _func(self.control_amp_arr, *popt_x), '--')
ax3.set_ylabel(f"I quadrature [{unit:s}FS]")
ax3.set_xlabel("Pulse amplitude [FS]")
fig3.show()
ret_fig.append(fig3)
return ret_fig
def _func(t, offset, amplitude, T2, period, phase):
frequency = 1 / period
return offset + amplitude * np.exp(-t / T2) * np.cos(math.tau * frequency * t + phase)
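# Illustrative note, not part of the original script: `_func` models the Rabi
# response as a damped cosine in the drive amplitude t,
#     offset + amplitude * exp(-t / T2) * cos(2*pi * t / period + phase),
# so the fitted `period` is the control amplitude that produces a full 2*pi
# rotation. That is why analyze() reports period / 2 as the pi-pulse amplitude
# and period / 4 as the pi/2-pulse amplitude; e.g. a fitted period of 0.8 FS
# would imply a pi pulse at 0.4 FS and a pi/2 pulse at 0.2 FS.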
def _fit_period(x: list[float], y: list[float]) -> tuple[list[float], list[float]]:
from scipy.optimize import curve_fit
# from scipy.optimize import least_squares
pkpk = np.max(y) - np.min(y)
offset = np.min(y) + pkpk / 2
amplitude = 0.5 * pkpk
T2 = 0.5 * (np.max(x) - np.min(x))
freqs = np.fft.rfftfreq(len(x), x[1] - x[0])
fft = np.fft.rfft(y)
frequency = freqs[1 + np.argmax(np.abs(fft[1:]))]
period = 1 / frequency
first = (y[0] - offset) / amplitude
if first > 1.:
first = 1.
elif first < -1.:
first = -1.
phase = np.arccos(first)
p0 = (
offset,
amplitude,
T2,
period,
phase,
)
res = curve_fit(_func, x, y, p0=p0)
popt = res[0]
pcov = res[1]
perr = np.sqrt(np.diag(pcov))
offset, amplitude, T2, period, phase = popt
return popt, perr
# def _residuals(p, x, y):
# offset, amplitude, T2, period, phase = p
# return _func(x, offset, amplitude, T2, period, phase) - y
# res = least_squares(_residuals, p0, args=(x, y))
# # offset, amplitude, T2, period, phase = res.x
# return res.x, np.zeros_like(res.x)
if __name__ == "__main__":
WHICH_QUBIT = 2 # 1 (higher resonator) or 2 (lower resonator)
USE_JPA = False
WITH_COUPLER = False
# Presto's IP address or hostname
# ADDRESS = "172.16.17.32"
# PORT = 42874
ADDRESS = "127.0.0.1"
PORT = 7878
EXT_REF_CLK = False # set to True to lock to an external reference clock
jpa_bias_port = 1
if WHICH_QUBIT == 1:
if WITH_COUPLER:
readout_freq = 6.167_009 * 1e9 # Hz, frequency for resonator readout
control_freq = 3.556_520 * 1e9 # Hz
else:
readout_freq = 6.166_600 * 1e9 # Hz, frequency for resonator readout
control_freq = 3.557_866 * 1e9 # Hz
control_port = 3
jpa_pump_freq = 2 * 6.169e9 # Hz
jpa_pump_pwr = 11 # lmx units
jpa_bias = +0.437 # V
elif WHICH_QUBIT == 2:
if WITH_COUPLER:
readout_freq = 6.029_130 * 1e9 # Hz, frequency for resonator readout
control_freq = 4.093_042 * 1e9 # Hz
else:
readout_freq = 6.028_450 * 1e9 # Hz, frequency for resonator readout
control_freq = 4.093_372 * 1e9 # Hz
control_port = 4
jpa_pump_freq = 2 * 6.031e9 # Hz
jpa_pump_pwr = 9 # lmx units
jpa_bias = +0.449 # V
else:
raise ValueError
# cavity drive: readout
readout_amp = 0.4 # FS
readout_duration = 2e-6 # s, duration of the readout pulse
readout_port = 1
# qubit drive: control
control_duration = 20e-9 # s, duration of the control pulse
# cavity readout: sample
sample_duration = 4 * 1e-6 # s, duration of the sampling window
sample_port = 1
# Rabi experiment
num_averages = 1_000
    rabi_n = 128  # number of steps when changing the amplitude of the control pulse
control_amp_arr = np.linspace(0.0, 1.0, rabi_n) # FS, amplitudes for control pulse
wait_delay = 200e-6 # s, delay between repetitions to allow the qubit to decay
readout_sample_delay = 290 * 1e-9 # s, delay between readout pulse and sample window to account for latency
jpa_params = {
'jpa_bias': jpa_bias,
'jpa_bias_port': jpa_bias_port,
'jpa_pump_freq': jpa_pump_freq,
'jpa_pump_pwr': jpa_pump_pwr,
} if USE_JPA else None
rabi = RabiAmp(
readout_freq,
control_freq,
readout_port,
control_port,
readout_amp,
readout_duration,
control_duration,
sample_duration,
sample_port,
control_amp_arr,
wait_delay,
readout_sample_delay,
num_averages,
jpa_params,
)
rabi.run(ADDRESS, PORT, EXT_REF_CLK)
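    # Illustrative follow-up, not in the original script: run() returns the
    # path of the saved HDF5 file, so the measurement can be reloaded and
    # fitted later, e.g.:
    #     save_path = rabi.run(ADDRESS, PORT, EXT_REF_CLK)
    #     rabi_loaded = RabiAmp.load(save_path)
    #     rabi_loaded.analyze(all_plots=False)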
|
[
"presto.pulsed.Pulsed",
"numpy.fft.rfft",
"presto.utils.rotate_opt",
"numpy.abs",
"numpy.angle",
"numpy.imag",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.diag",
"os.path.join",
"presto.utils.sin2",
"numpy.max",
"numpy.linspace",
"numpy.real",
"mla_server.set_dc_bias",
"numpy.arccos",
"matplotlib.pyplot.subplots",
"time.localtime",
"presto.utils.get_sourcecode",
"h5py.File",
"os.path.realpath",
"h5py.string_dtype",
"scipy.optimize.curve_fit",
"time.sleep",
"numpy.min",
"numpy.cos",
"numpy.isscalar",
"os.path.splitext",
"ast.literal_eval",
"os.path.split"
] |
[((14644, 14658), 'numpy.fft.rfft', 'np.fft.rfft', (['y'], {}), '(y)\n', (14655, 14658), True, 'import numpy as np\n'), ((14872, 14888), 'numpy.arccos', 'np.arccos', (['first'], {}), '(first)\n', (14881, 14888), True, 'import numpy as np\n'), ((14994, 15023), 'scipy.optimize.curve_fit', 'curve_fit', (['_func', 'x', 'y'], {'p0': 'p0'}), '(_func, x, y, p0=p0)\n', (15003, 15023), False, 'from scipy.optimize import curve_fit\n'), ((17298, 17327), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'rabi_n'], {}), '(0.0, 1.0, rabi_n)\n', (17309, 17327), True, 'import numpy as np\n'), ((7958, 7982), 'presto.utils.get_sourcecode', 'get_sourcecode', (['__file__'], {}), '(__file__)\n', (7972, 7982), False, 'from presto.utils import get_sourcecode, sin2\n'), ((11440, 11468), 'numpy.arange', 'np.arange', (['idx_low', 'idx_high'], {}), '(idx_low, idx_high)\n', (11449, 11468), True, 'import numpy as np\n'), ((12128, 12171), 'numpy.mean', 'np.mean', (['self.store_arr[:, 0, idx]'], {'axis': '(-1)'}), '(self.store_arr[:, 0, idx], axis=-1)\n', (12135, 12171), True, 'import numpy as np\n'), ((12187, 12207), 'presto.utils.rotate_opt', 'rotate_opt', (['resp_arr'], {}), '(resp_arr)\n', (12197, 12207), False, 'from presto.utils import rotate_opt\n'), ((13742, 13773), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'tight_layout': '(True)'}), '(tight_layout=True)\n', (13754, 13773), True, 'import matplotlib.pyplot as plt\n'), ((14463, 14472), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (14469, 14472), True, 'import numpy as np\n'), ((14475, 14484), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (14481, 14484), True, 'import numpy as np\n'), ((14498, 14507), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (14504, 14507), True, 'import numpy as np\n'), ((15079, 15092), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (15086, 15092), True, 'import numpy as np\n'), ((1850, 2153), 'presto.pulsed.Pulsed', 'pulsed.Pulsed', ([], {'address': 'presto_address', 'port': 'presto_port', 'ext_ref_clk': 'ext_ref_clk', 'adc_mode': 'AdcMode.Mixed', 'adc_fsample': 'AdcFSample.G2', 'dac_mode': '[DacMode.Mixed42, DacMode.Mixed02, DacMode.Mixed02, DacMode.Mixed02]', 'dac_fsample': '[DacFSample.G10, DacFSample.G6, DacFSample.G6, DacFSample.G6]'}), '(address=presto_address, port=presto_port, ext_ref_clk=\n ext_ref_clk, adc_mode=AdcMode.Mixed, adc_fsample=AdcFSample.G2,\n dac_mode=[DacMode.Mixed42, DacMode.Mixed02, DacMode.Mixed02, DacMode.\n Mixed02], dac_fsample=[DacFSample.G10, DacFSample.G6, DacFSample.G6,\n DacFSample.G6])\n', (1863, 2153), False, 'from presto import pulsed\n'), ((5171, 5187), 'presto.utils.sin2', 'sin2', (['control_ns'], {}), '(control_ns)\n', (5175, 5187), False, 'from presto.utils import get_sourcecode, sin2\n'), ((7358, 7384), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (7374, 7384), False, 'import os\n'), ((7459, 7485), 'os.path.split', 'os.path.split', (['script_path'], {}), '(script_path)\n', (7472, 7485), False, 'import os\n'), ((7790, 7838), 'os.path.join', 'os.path.join', (['current_dir', '"""data"""', 'save_basename'], {}), "(current_dir, 'data', save_basename)\n", (7802, 7838), False, 'import os\n'), ((7903, 7934), 'os.path.realpath', 'os.path.realpath', (['save_filename'], {}), '(save_filename)\n', (7919, 7934), False, 'import os\n'), ((8059, 8084), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (8068, 8084), False, 'import h5py\n'), ((8110, 8145), 'h5py.string_dtype', 'h5py.string_dtype', ([], {'encoding': '"""utf-8"""'}), 
"(encoding='utf-8')\n", (8127, 8145), False, 'import h5py\n'), ((9028, 9057), 'h5py.File', 'h5py.File', (['load_filename', '"""r"""'], {}), "(load_filename, 'r')\n", (9037, 9057), False, 'import h5py\n'), ((11340, 11366), 'numpy.abs', 'np.abs', (['(self.t_arr - t_low)'], {}), '(self.t_arr - t_low)\n', (11346, 11366), True, 'import numpy as np\n'), ((11397, 11424), 'numpy.abs', 'np.abs', (['(self.t_arr - t_high)'], {}), '(self.t_arr - t_high)\n', (11403, 11424), True, 'import numpy as np\n'), ((11613, 11663), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)', 'tight_layout': '(True)'}), '(2, 1, sharex=True, tight_layout=True)\n', (11625, 11663), True, 'import matplotlib.pyplot as plt\n'), ((12287, 12300), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', (12294, 12300), True, 'import numpy as np\n'), ((12706, 12776), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'sharex': '(True)', 'figsize': '(6.4, 6.4)', 'tight_layout': '(True)'}), '(4, 1, sharex=True, figsize=(6.4, 6.4), tight_layout=True)\n', (12718, 12776), True, 'import matplotlib.pyplot as plt\n'), ((14234, 14274), 'numpy.cos', 'np.cos', (['(math.tau * frequency * t + phase)'], {}), '(math.tau * frequency * t + phase)\n', (14240, 14274), True, 'import numpy as np\n'), ((14562, 14571), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (14568, 14571), True, 'import numpy as np\n'), ((14574, 14583), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (14580, 14583), True, 'import numpy as np\n'), ((3169, 3243), 'mla_server.set_dc_bias', 'set_dc_bias', (["self.jpa_params['jpa_bias_port']", "self.jpa_params['jpa_bias']"], {}), "(self.jpa_params['jpa_bias_port'], self.jpa_params['jpa_bias'])\n", (3180, 3243), False, 'from mla_server import set_dc_bias\n'), ((3260, 3275), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (3270, 3275), False, 'import time\n'), ((6996, 7046), 'mla_server.set_dc_bias', 'set_dc_bias', (["self.jpa_params['jpa_bias_port']", '(0.0)'], {}), "(self.jpa_params['jpa_bias_port'], 0.0)\n", (7007, 7046), False, 'from mla_server import set_dc_bias\n'), ((7516, 7549), 'os.path.splitext', 'os.path.splitext', (['script_basename'], {}), '(script_basename)\n', (7532, 7549), False, 'import os\n'), ((7634, 7650), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7648, 7650), False, 'import time\n'), ((10298, 10339), 'ast.literal_eval', 'ast.literal_eval', (["h5f.attrs['jpa_params']"], {}), "(h5f.attrs['jpa_params'])\n", (10314, 10339), False, 'import ast\n'), ((11879, 11910), 'numpy.abs', 'np.abs', (['self.store_arr[0, 0, :]'], {}), '(self.store_arr[0, 0, :])\n', (11885, 11910), True, 'import numpy as np\n'), ((11952, 11985), 'numpy.angle', 'np.angle', (['self.store_arr[0, 0, :]'], {}), '(self.store_arr[0, 0, :])\n', (11960, 11985), True, 'import numpy as np\n'), ((12862, 12874), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (12868, 12874), True, 'import numpy as np\n'), ((12920, 12934), 'numpy.angle', 'np.angle', (['data'], {}), '(data)\n', (12928, 12934), True, 'import numpy as np\n'), ((12980, 12993), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', (12987, 12993), True, 'import numpy as np\n'), ((13127, 13140), 'numpy.imag', 'np.imag', (['data'], {}), '(data)\n', (13134, 13140), True, 'import numpy as np\n'), ((13440, 13452), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (13446, 13452), True, 'import numpy as np\n'), ((13820, 13833), 'numpy.real', 'np.real', (['data'], {}), '(data)\n', (13827, 13833), True, 'import numpy as np\n'), ((14216, 14231), 
'numpy.exp', 'np.exp', (['(-t / T2)'], {}), '(-t / T2)\n', (14222, 14231), True, 'import numpy as np\n'), ((14695, 14710), 'numpy.abs', 'np.abs', (['fft[1:]'], {}), '(fft[1:])\n', (14701, 14710), True, 'import numpy as np\n'), ((8683, 8720), 'numpy.isscalar', 'np.isscalar', (['self.__dict__[attribute]'], {}), '(self.__dict__[attribute])\n', (8694, 8720), True, 'import numpy as np\n')]
|
import copy
import warnings
from collections.abc import Iterable
from inspect import Parameter, signature
import numpy as np
from sklearn.utils.validation import (
check_array,
column_or_1d,
assert_all_finite,
check_consistent_length,
check_random_state as check_random_state_sklearn,
)
from ._label import MISSING_LABEL, check_missing_label, is_unlabeled
def check_scalar(
x,
name,
target_type,
min_inclusive=True,
max_inclusive=True,
min_val=None,
max_val=None,
):
"""Validate scalar parameters type and value.
Parameters
----------
x : object
The scalar parameter to validate.
name : str
The name of the parameter to be printed in error messages.
target_type : type or tuple
Acceptable data types for the parameter.
min_val : float or int, optional (default=None)
The minimum valid value the parameter can take. If None (default) it
is implied that the parameter does not have a lower bound.
min_inclusive : bool, optional (default=True)
If true, the minimum valid value is inclusive, otherwise exclusive.
max_val : float or int, optional (default=None)
The maximum valid value the parameter can take. If None (default) it
is implied that the parameter does not have an upper bound.
max_inclusive : bool, optional (default=True)
If true, the maximum valid value is inclusive, otherwise exclusive.
Raises
    ------
TypeError
If the parameter's type does not match the desired type.
ValueError
If the parameter's value violates the given bounds.
"""
if not isinstance(x, target_type):
raise TypeError(
"`{}` must be an instance of {}, not {}.".format(
name, target_type, type(x)
)
)
if min_inclusive:
if min_val is not None and x < min_val:
raise ValueError(
"`{}`= {}, must be >= " "{}.".format(name, x, min_val)
)
else:
if min_val is not None and x <= min_val:
raise ValueError(
"`{}`= {}, must be > " "{}.".format(name, x, min_val)
)
if max_inclusive:
if max_val is not None and x > max_val:
raise ValueError(
"`{}`= {}, must be <= " "{}.".format(name, x, max_val)
)
else:
if max_val is not None and x >= max_val:
raise ValueError(
"`{}`= {}, must be < " "{}.".format(name, x, max_val)
)
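# Illustrative usage, not part of the original module. Note that `min_val` and
# `max_val` are passed by keyword because they follow the *_inclusive flags:
#     check_scalar(8, name="n_classes", target_type=int, min_val=1)   # passes
#     check_scalar(0.5, name="n_classes", target_type=int)            # TypeError
#     check_scalar(0, name="n_classes", target_type=int, min_val=1)   # ValueError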
def check_classifier_params(classes, missing_label, cost_matrix=None):
"""Check whether the parameters are compatible to each other (only if
`classes` is not None).
Parameters
----------
classes : array-like, shape (n_classes)
Array of class labels.
missing_label : {number, str, None, np.nan}
Symbol to represent a missing label.
cost_matrix : array-like, shape (n_classes, n_classes), default=None
Cost matrix. If None, cost matrix will be not checked.
"""
check_missing_label(missing_label)
if classes is not None:
check_classes(classes)
dtype = np.array(classes).dtype
check_missing_label(missing_label, target_type=dtype, name="classes")
n_labeled = is_unlabeled(y=classes, missing_label=missing_label).sum()
if n_labeled > 0:
raise ValueError(
f"`classes={classes}` contains "
f"`missing_label={missing_label}.`"
)
if cost_matrix is not None:
check_cost_matrix(cost_matrix=cost_matrix, n_classes=len(classes))
else:
if cost_matrix is not None:
raise ValueError(
"You cannot specify 'cost_matrix' without "
"specifying 'classes'."
)
def check_classes(classes):
"""Check whether class labels are uniformly strings or numbers.
Parameters
----------
classes : array-like, shape (n_classes)
Array of class labels.
"""
if not isinstance(classes, Iterable):
raise TypeError(
"'classes' is not iterable. Got {}".format(type(classes))
)
try:
classes_sorted = np.array(sorted(set(classes)))
if len(classes) != len(classes_sorted):
raise ValueError("Duplicate entries in 'classes'.")
except TypeError:
types = sorted(t.__qualname__ for t in set(type(v) for v in classes))
raise TypeError(
"'classes' must be uniformly strings or numbers. Got {}".format(
types
)
)
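# Illustrative usage, not part of the original module:
#     check_classes(["no", "yes"])   # passes (uniform strings)
#     check_classes([0, 1, 1])       # ValueError: duplicate entries
#     check_classes([0, "yes"])      # TypeError: mixed strings and numbers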
def check_class_prior(class_prior, n_classes):
"""Check if the class_prior is a valid prior.
Parameters
----------
class_prior : numeric | array_like, shape (n_classes)
A class prior.
n_classes : int
The number of classes.
Returns
-------
class_prior : np.ndarray, shape (n_classes)
Numpy array as prior.
"""
if class_prior is None:
raise TypeError("'class_prior' must not be None.")
check_scalar(n_classes, name="n_classes", target_type=int, min_val=1)
if np.isscalar(class_prior):
check_scalar(
class_prior,
name="class_prior",
target_type=(int, float),
min_val=0,
)
class_prior = np.array([class_prior] * n_classes)
else:
class_prior = check_array(class_prior, ensure_2d=False)
is_negative = np.sum(class_prior < 0)
if class_prior.shape != (n_classes,) or is_negative:
raise ValueError(
"`class_prior` must be either a non-negative"
"float or a list of `n_classes` non-negative "
"floats."
)
return class_prior.reshape(-1)
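# Illustrative usage, not part of the original module: a scalar prior is
# broadcast to all classes, while an array-like prior is checked for shape
# and non-negativity:
#     check_class_prior(0.1, n_classes=3)        # -> array([0.1, 0.1, 0.1])
#     check_class_prior([0.2, 0.3, 0.5], 3)      # -> array([0.2, 0.3, 0.5])
#     check_class_prior([0.2, -0.3], 2)          # -> ValueError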
def check_cost_matrix(
cost_matrix,
n_classes,
only_non_negative=False,
contains_non_zero=False,
diagonal_is_zero=False,
):
"""Check whether cost matrix has shape `(n_classes, n_classes)`.
Parameters
----------
cost_matrix : array-like, shape (n_classes, n_classes)
Cost matrix.
n_classes : int
Number of classes.
    only_non_negative : bool, optional (default=False)
        This parameter determines whether the matrix must contain only
        non-negative cost entries.
    contains_non_zero : bool, optional (default=False)
        This parameter determines whether the matrix must contain at least one
        non-zero cost entry.
    diagonal_is_zero : bool, optional (default=False)
        This parameter determines whether the diagonal cost entries must be
        zero.
Returns
-------
cost_matrix_new : np.ndarray, shape (n_classes, n_classes)
Numpy array as cost matrix.
"""
check_scalar(n_classes, target_type=int, name="n_classes", min_val=1)
cost_matrix_new = check_array(
np.array(cost_matrix, dtype=float), ensure_2d=True
)
if cost_matrix_new.shape != (n_classes, n_classes):
raise ValueError(
"'cost_matrix' must have shape ({}, {}). "
"Got {}.".format(n_classes, n_classes, cost_matrix_new.shape)
)
if np.sum(cost_matrix_new < 0) > 0:
if only_non_negative:
raise ValueError(
"'cost_matrix' must contain only non-negative cost entries."
)
else:
warnings.warn("'cost_matrix' contains negative cost entries.")
if n_classes != 1 and np.sum(cost_matrix_new != 0) == 0:
if contains_non_zero:
raise ValueError(
"'cost_matrix' must contain at least one non-zero cost "
"entry."
)
else:
            warnings.warn(
                "'cost_matrix' contains no non-zero cost entry."
            )
if np.sum(np.diag(cost_matrix_new) != 0) > 0:
if diagonal_is_zero:
raise ValueError(
"'cost_matrix' must contain only cost entries being zero on "
"its diagonal."
)
else:
warnings.warn(
"'cost_matrix' contains non-zero cost entries on its diagonal."
)
return cost_matrix_new
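# Illustrative usage, not part of the original module:
#     check_cost_matrix([[0, 1], [1, 0]], n_classes=2)   # -> array([[0., 1.], [1., 0.]])
#     check_cost_matrix([[0, 1], [1, 0]], n_classes=3)   # ValueError: wrong shape
#     check_cost_matrix([[0, -1], [1, 0]], 2, only_non_negative=True)   # ValueError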
def check_X_y(
X=None,
y=None,
X_cand=None,
sample_weight=None,
sample_weight_cand=None,
accept_sparse=False,
*,
accept_large_sparse=True,
dtype="numeric",
order=None,
copy=False,
force_all_finite=True,
ensure_2d=True,
allow_nd=False,
multi_output=False,
allow_nan=None,
ensure_min_samples=1,
ensure_min_features=1,
y_numeric=False,
estimator=None,
missing_label=MISSING_LABEL,
):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X to be 2D and y 1D. By
default, X is checked to be non-empty and containing only finite values.
Standard input checks are also applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2D and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Labeled input data.
y : nd-array, list or sparse matrix
Labels for X.
X_cand : nd-array, list or sparse matrix (default=None)
Unlabeled input data
sample_weight : array-like of shape (n_samples,) (default=None)
Sample weights.
sample_weight_cand : array-like of shape (n_candidates,) (default=None)
Sample weights of the candidates.
accept_sparse : string, boolean or list of string (default=False)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
accept_large_sparse : bool (default=True)
If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by
accept_sparse, accept_large_sparse will cause it to be accepted only
if its indices are stored with a 32-bit dtype.
.. versionadded:: 0.20
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean or 'allow-nan', (default=True)
Whether to raise an error on np.inf, np.nan, pd.NA in X. This parameter
does not influence whether y can have np.inf, np.nan, pd.NA values.
The possibilities are:
- True: Force all values of X to be finite.
- False: accepts np.inf, np.nan, pd.NA in X.
- 'allow-nan': accepts only np.nan or pd.NA values in X. Values cannot
be infinite.
.. versionadded:: 0.20
``force_all_finite`` accepts the string ``'allow-nan'``.
.. versionchanged:: 0.23
Accepts `pd.NA` and converts it into `np.nan`
ensure_2d : boolean (default=True)
Whether to raise a value error if X is not 2D.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2D y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
allow_nan : boolean (default=None)
Whether to allow np.nan in y.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
missing_label : {scalar, string, np.nan, None}, (default=np.nan)
Value to represent a missing label.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
candidates : object
The converted and validated candidates
Only returned if candidates is not None.
sample_weight : np.ndarray
The converted and validated sample_weight.
sample_weight_cand : np.ndarray
The converted and validated sample_weight_cand.
Only returned if candidates is not None.
"""
if allow_nan is None:
allow_nan = True if missing_label is np.nan else False
if X is not None:
X = check_array(
X,
accept_sparse=accept_sparse,
accept_large_sparse=accept_large_sparse,
dtype=dtype,
order=order,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=ensure_2d,
allow_nd=allow_nd,
ensure_min_samples=ensure_min_samples,
ensure_min_features=ensure_min_features,
estimator=estimator,
)
if y is not None:
if multi_output:
y = check_array(
y,
accept_sparse="csr",
force_all_finite=True,
ensure_2d=False,
dtype=None,
)
else:
y = column_or_1d(y, warn=True)
assert_all_finite(y, allow_nan=allow_nan)
if y_numeric and y.dtype.kind == "O":
y = y.astype(np.float64)
if X is not None and y is not None:
check_consistent_length(X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape)
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
if (
y.ndim > 1
and y.shape[1] > 1
or sample_weight.ndim > 1
and sample_weight.shape[1] > 1
):
check_consistent_length(y.T, sample_weight.T)
if X_cand is not None:
X_cand = check_array(
X_cand,
accept_sparse=accept_sparse,
accept_large_sparse=accept_large_sparse,
dtype=dtype,
order=order,
copy=copy,
force_all_finite=force_all_finite,
ensure_2d=ensure_2d,
allow_nd=allow_nd,
ensure_min_samples=ensure_min_samples,
ensure_min_features=ensure_min_features,
estimator=estimator,
)
if X is not None and X_cand.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of candidates does not match"
"the number of features of X"
)
if sample_weight_cand is None:
sample_weight_cand = np.ones(len(X_cand))
sample_weight_cand = check_array(sample_weight_cand, ensure_2d=False)
check_consistent_length(X_cand, sample_weight_cand)
if X_cand is None:
return X, y, sample_weight
else:
return X, y, X_cand, sample_weight, sample_weight_cand
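# Illustrative usage, not part of the original module, assuming np.nan is used
# to mark missing labels:
#     X = [[1.0], [2.0], [3.0]]
#     y = [0, np.nan, 1]              # the second sample is unlabeled
#     X, y, w = check_X_y(X, y, missing_label=np.nan)
#     # X -> (3, 1) float array, y -> 1d float array, w -> array([1., 1., 1.])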
def check_random_state(random_state, seed_multiplier=None):
"""Check validity of the given random state.
Parameters
----------
random_state : None | int | instance of RandomState
If random_state is None, return the RandomState singleton used by
np.random.
If random_state is an int, return a new RandomState.
If random_state is already a RandomState instance, return it.
Otherwise raise ValueError.
seed_multiplier : None | int, optional (default=None)
If the random_state and seed_multiplier are not None, draw a new int
from the random state, multiply it with the multiplier, and use the
product as the seed of a new random state.
Returns
-------
random_state: instance of RandomState
The validated random state.
"""
if random_state is None or seed_multiplier is None:
return check_random_state_sklearn(random_state)
check_scalar(
seed_multiplier, name="seed_multiplier", target_type=int, min_val=1
)
random_state = copy.deepcopy(random_state)
random_state = check_random_state_sklearn(random_state)
seed = (random_state.randint(1, 2**31) * seed_multiplier) % (2**31)
return np.random.RandomState(seed)
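# Illustrative usage, not part of the original module:
#     rng = check_random_state(42)                        # RandomState seeded with 42
#     child = check_random_state(rng, seed_multiplier=3)  # new derived RandomState
#     # `rng` is deep-copied before drawing the derived seed, so calling this
#     # twice with the same parent state yields the same child state.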
def check_indices(indices, A, dim="adaptive", unique=True):
"""Check if indices fit to array.
Parameters
----------
indices : array-like of shape (n_indices, n_dim) or (n_indices,)
The considered indices, where for every `i = 0, ..., n_indices - 1`
`indices[i]` is interpreted as an index to the array `A`.
A : array-like
The array that is indexed.
dim : int or tuple of ints
The dimensions of the array that are indexed.
        If `dim` equals `'adaptive'`, `dim` is inferred from the shape of
        `indices`. E.g., if `indices` is of shape (n_indices,), `dim` is set
        to `0`.
    unique : bool or 'check_unique', optional (default=True)
If `unique` is `True` unique indices are returned. If `unique` is
`'check_unique'` an exception is raised if the indices are not unique.
Returns
-------
indices: tuple of np.ndarrays or np.ndarray
The validated indices.
"""
indices = check_array(indices, dtype=int, ensure_2d=False)
A = check_array(A, allow_nd=True, force_all_finite=False, ensure_2d=False)
if unique == "check_unique":
if indices.ndim == 1:
n_unique_indices = len(np.unique(indices))
else:
n_unique_indices = len(np.unique(indices, axis=0))
if n_unique_indices < len(indices):
            raise ValueError(
                "`indices` contains duplicate entries, but all indices "
                "must be unique."
            )
elif unique:
if indices.ndim == 1:
indices = np.unique(indices)
else:
indices = np.unique(indices, axis=0)
check_type(dim, "dim", int, tuple, target_vals=["adaptive"])
if dim == "adaptive":
if indices.ndim == 1:
dim = 0
else:
dim = tuple(range(indices.shape[1]))
if isinstance(dim, tuple):
for n in dim:
check_type(n, "entry of `dim`", int)
if A.ndim <= max(dim):
raise ValueError(
f"`dim` contains entry of value {max(dim)}, but all"
f"entries of dim must be smaller than {A.ndim}."
)
if len(dim) != indices.shape[1]:
            raise ValueError(
                f"shape of `indices` along dimension 1 is "
                f"{indices.shape[1]}, but must be {len(dim)}"
            )
indices = tuple(indices.T)
for (i, n) in enumerate(indices):
if np.any(indices[i] >= A.shape[dim[i]]):
raise ValueError(
f"`indices[{i}]` contains index of value "
f"{np.max(indices[i])} but all indices must be"
f" less than {A.shape[dim[i]]}."
)
return indices
else:
if A.ndim <= dim:
raise ValueError(
f"`dim` has value {dim}, but must be smaller than "
f"{A.ndim}."
)
if np.any(indices >= A.shape[dim]):
raise ValueError(
f"`indices` contains index of value "
f"{np.max(indices)} but all indices must be"
f" less than {A.shape[dim]}."
)
return indices
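# Illustrative usage, not part of the original module:
#     A = np.arange(6).reshape(2, 3)
#     idx = check_indices([[0, 1], [1, 2]], A)   # -> (array([0, 1]), array([1, 2]))
#     A[idx]                                     # -> array([1, 5])
#     check_indices([2, 0, 2], np.arange(5))     # -> array([0, 2]) (made unique)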
def check_type(
obj, name, *target_types, target_vals=None, indicator_funcs=None
):
"""Check if obj is one of the given types. It is also possible to allow
    specific values. Further, it is possible to pass indicator functions
    that can also accept obj. Thus, obj must either have a correct type,
    equal an allowed value, or be accepted by an indicator function.
Parameters
----------
obj: object
The object to be checked.
name: str
The variable name of the object.
target_types : iterable
The possible types.
target_vals : iterable, optional (default=None)
Possible further values that the object is allowed to equal.
indicator_funcs : iterable, optional (default=None)
Possible further custom indicator (boolean) functions that accept
the object by returning `True` if the object is passed as a parameter.
"""
target_vals = target_vals if target_vals is not None else []
indicator_funcs = indicator_funcs if indicator_funcs is not None else []
wrong_type = not isinstance(obj, target_types)
wrong_value = obj not in target_vals
wrong_index = all(not i_func(obj) for i_func in indicator_funcs)
if wrong_type and wrong_value and wrong_index:
error_str = f"`{name}` "
if len(target_types) == 0 and len(target_vals) == 0:
error_str += f" must"
if len(target_vals) == 0 and len(target_types) > 0:
error_str += f" has type `{type(obj)}`, but must"
elif len(target_vals) > 0 and len(target_types) == 0:
error_str += f" has value `{obj}`, but must"
else:
error_str += f" has type `{type(obj)}` and value `{obj}`, but must"
if len(target_types) == 1:
error_str += f" have type `{target_types[0]}`"
elif 1 <= len(target_types) <= 3:
error_str += " have type"
for i in range(len(target_types) - 1):
error_str += f" `{target_types[i]}`,"
error_str += f" or `{target_types[len(target_types) - 1]}`"
elif len(target_types) > 3:
error_str += (
f" have one of the following types: {set(target_types)}"
)
if len(target_vals) > 0:
if len(target_types) > 0 and len(indicator_funcs) == 0:
error_str += " or"
elif len(target_types) > 0 and len(indicator_funcs) > 0:
error_str += ","
error_str += (
f" equal one of the following values: {set(target_vals)}"
)
if len(indicator_funcs) > 0:
if len(target_types) > 0 or len(target_vals) > 0:
error_str += " or"
error_str += (
f" be accepted by one of the following custom boolean "
f"functions: {set(i_f.__name__ for i_f in indicator_funcs)}"
)
raise TypeError(error_str + ".")
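# Illustrative usage, not part of the original module:
#     check_type(3, "n_samples", int)                              # passes
#     check_type("auto", "n_samples", int, target_vals=["auto"])   # passes via value
#     check_type(3.5, "n_samples", int)                            # TypeError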
def check_callable(func, name, n_free_parameters=None):
"""Checks if function is a callable and if the number of free parameters is
correct.
Parameters
----------
func: callable
The functions to be validated.
name: str
The name of the function
n_free_parameters: int, optional (default=None)
The number of free parameters. If `n_free_parameters` is `None`,
`n_free_parameters` is set to `1`.
"""
if n_free_parameters is None:
n_free_parameters = 1
if not callable(func):
raise TypeError(
f"`{name}` must be callable. " f"`{name}` is of type {type(func)}"
)
# count the number of arguments that have no default value
n_free_params = len(
list(
filter(
lambda x: x.default == Parameter.empty,
signature(func).parameters.values(),
)
)
)
if n_free_params != n_free_parameters:
raise ValueError(
f"The number of free parameters of the callable has to "
f"equal {n_free_parameters}. "
f"The number of free parameters is {n_free_params}."
)
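# Illustrative usage, not part of the original module:
#     check_callable(lambda x: x ** 2, "transform")                        # passes
#     check_callable(lambda x, y: x + y, "metric", n_free_parameters=2)    # passes
#     check_callable(lambda x, y=0: x + y, "metric", n_free_parameters=2)  # ValueError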
def check_bound(
bound=None, X=None, ndim=2, epsilon=0, bound_must_be_given=False
):
"""Validates bound and returns the bound of X if bound is None.
`bound` or `X` must not be None.
Parameters
----------
bound: array-like, shape (2, ndim), optional (default=None)
The given bound of shape
[[x1_min, x2_min, ..., xndim_min], [x1_max, x2_max, ..., xndim_max]]
X: matrix-like, shape (n_samples, ndim), optional (default=None)
The sample matrix X is the feature matrix representing samples.
ndim: int, optional (default=2)
The number of dimensions.
epsilon: float, optional (default=0)
The minimal distance between the returned bound and the values of `X`,
if `bound` is not specified.
bound_must_be_given: bool, optional (default=False)
Whether it is allowed for the bound to be `None` and to be inferred by
`X`.
Returns
-------
bound: array-like, shape (2, ndim), optional (default=None)
The given bound or bound of X.
"""
if X is not None:
X = check_array(X)
if X.shape[1] != ndim:
raise ValueError(
f"`X` along axis 1 must be of length {ndim}. "
f"`X` along axis 1 is of length {X.shape[1]}."
)
if bound is not None:
bound = check_array(bound)
if bound.shape != (2, ndim):
raise ValueError(
f"Shape of `bound` must be (2, {ndim}). "
f"Shape of `bound` is {bound.shape}."
)
elif bound_must_be_given:
raise ValueError("`bound` must not be `None`.")
if bound is None and X is not None:
minima = np.nanmin(X, axis=0) - epsilon
maxima = np.nanmax(X, axis=0) + epsilon
bound = np.append(minima.reshape(1, -1), maxima.reshape(1, -1), axis=0)
return bound
elif bound is not None and X is not None:
if np.any(np.logical_or(bound[0] > X, X > bound[1])):
warnings.warn("`X` contains values not within range of `bound`.")
return bound
elif bound is not None:
return bound
else:
raise ValueError("`X` or `bound` must not be None.")
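# Illustrative usage, not part of the original module: when no explicit bound
# is given, the per-feature minima and maxima of X are used:
#     X = np.array([[0.0, 1.0], [2.0, 3.0]])
#     check_bound(X=X)               # -> array([[0., 1.], [2., 3.]])
#     check_bound(X=X, epsilon=0.5)  # -> array([[-0.5, 0.5], [2.5, 3.5]])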
def check_budget_manager(
budget,
budget_manager,
default_budget_manager_class,
default_budget_manager_dict=None,
):
"""Validate if budget manager is a budgetmanager class and create a
copy 'budget_manager_'.
"""
if default_budget_manager_dict is None:
default_budget_manager_dict = {}
if budget_manager is None:
budget_manager_ = default_budget_manager_class(
budget=budget, **default_budget_manager_dict
)
else:
if budget is not None and budget != budget_manager.budget:
            warnings.warn(
                "budget_manager is already given, so the given budget "
                "is not used. The given budget differs from the "
                "budget_manager's budget."
            )
budget_manager_ = copy.deepcopy(budget_manager)
return budget_manager_
|
[
"numpy.sum",
"numpy.ones",
"sklearn.utils.validation.check_consistent_length",
"numpy.diag",
"numpy.unique",
"numpy.random.RandomState",
"numpy.max",
"inspect.signature",
"sklearn.utils.validation.check_array",
"copy.deepcopy",
"sklearn.utils.validation.column_or_1d",
"numpy.nanmax",
"numpy.isscalar",
"numpy.nanmin",
"numpy.any",
"sklearn.utils.validation.check_random_state",
"numpy.array",
"numpy.logical_or",
"sklearn.utils.validation.assert_all_finite",
"warnings.warn"
] |
[((5175, 5199), 'numpy.isscalar', 'np.isscalar', (['class_prior'], {}), '(class_prior)\n', (5186, 5199), True, 'import numpy as np\n'), ((17167, 17194), 'copy.deepcopy', 'copy.deepcopy', (['random_state'], {}), '(random_state)\n', (17180, 17194), False, 'import copy\n'), ((17214, 17254), 'sklearn.utils.validation.check_random_state', 'check_random_state_sklearn', (['random_state'], {}), '(random_state)\n', (17240, 17254), True, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((17339, 17366), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (17360, 17366), True, 'import numpy as np\n'), ((18350, 18398), 'sklearn.utils.validation.check_array', 'check_array', (['indices'], {'dtype': 'int', 'ensure_2d': '(False)'}), '(indices, dtype=int, ensure_2d=False)\n', (18361, 18398), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((18407, 18477), 'sklearn.utils.validation.check_array', 'check_array', (['A'], {'allow_nd': '(True)', 'force_all_finite': '(False)', 'ensure_2d': '(False)'}), '(A, allow_nd=True, force_all_finite=False, ensure_2d=False)\n', (18418, 18477), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((5373, 5408), 'numpy.array', 'np.array', (['([class_prior] * n_classes)'], {}), '([class_prior] * n_classes)\n', (5381, 5408), True, 'import numpy as np\n'), ((5441, 5482), 'sklearn.utils.validation.check_array', 'check_array', (['class_prior'], {'ensure_2d': '(False)'}), '(class_prior, ensure_2d=False)\n', (5452, 5482), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((5505, 5528), 'numpy.sum', 'np.sum', (['(class_prior < 0)'], {}), '(class_prior < 0)\n', (5511, 5528), True, 'import numpy as np\n'), ((6906, 6940), 'numpy.array', 'np.array', (['cost_matrix'], {'dtype': 'float'}), '(cost_matrix, dtype=float)\n', (6914, 6940), True, 'import numpy as np\n'), ((7191, 7218), 'numpy.sum', 'np.sum', (['(cost_matrix_new < 0)'], {}), '(cost_matrix_new < 0)\n', (7197, 7218), True, 'import numpy as np\n'), ((13632, 13948), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {'accept_sparse': 'accept_sparse', 'accept_large_sparse': 'accept_large_sparse', 'dtype': 'dtype', 'order': 'order', 'copy': 'copy', 'force_all_finite': 'force_all_finite', 'ensure_2d': 'ensure_2d', 'allow_nd': 'allow_nd', 'ensure_min_samples': 'ensure_min_samples', 'ensure_min_features': 'ensure_min_features', 'estimator': 'estimator'}), '(X, accept_sparse=accept_sparse, accept_large_sparse=\n accept_large_sparse, dtype=dtype, order=order, copy=copy,\n force_all_finite=force_all_finite, ensure_2d=ensure_2d, allow_nd=\n allow_nd, ensure_min_samples=ensure_min_samples, ensure_min_features=\n ensure_min_features, estimator=estimator)\n', (13643, 13948), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14573, 14602), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['X', 'y'], {}), '(X, y)\n', (14596, 14602), False, 'from sklearn.utils.validation import check_array, 
column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14706, 14749), 'sklearn.utils.validation.check_array', 'check_array', (['sample_weight'], {'ensure_2d': '(False)'}), '(sample_weight, ensure_2d=False)\n', (14717, 14749), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14758, 14799), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['y', 'sample_weight'], {}), '(y, sample_weight)\n', (14781, 14799), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((15062, 15383), 'sklearn.utils.validation.check_array', 'check_array', (['X_cand'], {'accept_sparse': 'accept_sparse', 'accept_large_sparse': 'accept_large_sparse', 'dtype': 'dtype', 'order': 'order', 'copy': 'copy', 'force_all_finite': 'force_all_finite', 'ensure_2d': 'ensure_2d', 'allow_nd': 'allow_nd', 'ensure_min_samples': 'ensure_min_samples', 'ensure_min_features': 'ensure_min_features', 'estimator': 'estimator'}), '(X_cand, accept_sparse=accept_sparse, accept_large_sparse=\n accept_large_sparse, dtype=dtype, order=order, copy=copy,\n force_all_finite=force_all_finite, ensure_2d=ensure_2d, allow_nd=\n allow_nd, ensure_min_samples=ensure_min_samples, ensure_min_features=\n ensure_min_features, estimator=estimator)\n', (15073, 15383), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((15863, 15911), 'sklearn.utils.validation.check_array', 'check_array', (['sample_weight_cand'], {'ensure_2d': '(False)'}), '(sample_weight_cand, ensure_2d=False)\n', (15874, 15911), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((15920, 15971), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['X_cand', 'sample_weight_cand'], {}), '(X_cand, sample_weight_cand)\n', (15943, 15971), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((17006, 17046), 'sklearn.utils.validation.check_random_state', 'check_random_state_sklearn', (['random_state'], {}), '(random_state)\n', (17032, 17046), True, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((20312, 20343), 'numpy.any', 'np.any', (['(indices >= A.shape[dim])'], {}), '(indices >= A.shape[dim])\n', (20318, 20343), True, 'import numpy as np\n'), ((25798, 25812), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (25809, 25812), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((26056, 26074), 'sklearn.utils.validation.check_array', 'check_array', (['bound'], {}), '(bound)\n', (26067, 26074), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((27724, 27753), 'copy.deepcopy', 'copy.deepcopy', (['budget_manager'], {}), 
'(budget_manager)\n', (27737, 27753), False, 'import copy\n'), ((3193, 3210), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (3201, 3210), True, 'import numpy as np\n'), ((7401, 7463), 'warnings.warn', 'warnings.warn', (['"""\'cost_matrix\' contains negative cost entries."""'], {}), '("\'cost_matrix\' contains negative cost entries.")\n', (7414, 7463), False, 'import warnings\n'), ((7490, 7518), 'numpy.sum', 'np.sum', (['(cost_matrix_new != 0)'], {}), '(cost_matrix_new != 0)\n', (7496, 7518), True, 'import numpy as np\n'), ((7723, 7795), 'warnings.warn', 'warnings.warn', (['"""\'cost_matrix\' contains contains no non-zero cost entry."""'], {}), '("\'cost_matrix\' contains contains no non-zero cost entry.")\n', (7736, 7795), False, 'import warnings\n'), ((8085, 8163), 'warnings.warn', 'warnings.warn', (['"""\'cost_matrix\' contains non-zero cost entries on its diagonal."""'], {}), '("\'cost_matrix\' contains non-zero cost entries on its diagonal.")\n', (8098, 8163), False, 'import warnings\n'), ((14148, 14239), 'sklearn.utils.validation.check_array', 'check_array', (['y'], {'accept_sparse': '"""csr"""', 'force_all_finite': '(True)', 'ensure_2d': '(False)', 'dtype': 'None'}), "(y, accept_sparse='csr', force_all_finite=True, ensure_2d=False,\n dtype=None)\n", (14159, 14239), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14361, 14387), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y'], {'warn': '(True)'}), '(y, warn=True)\n', (14373, 14387), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14400, 14441), 'sklearn.utils.validation.assert_all_finite', 'assert_all_finite', (['y'], {'allow_nan': 'allow_nan'}), '(y, allow_nan=allow_nan)\n', (14417, 14441), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((14665, 14681), 'numpy.ones', 'np.ones', (['y.shape'], {}), '(y.shape)\n', (14672, 14681), True, 'import numpy as np\n'), ((14971, 15016), 'sklearn.utils.validation.check_consistent_length', 'check_consistent_length', (['y.T', 'sample_weight.T'], {}), '(y.T, sample_weight.T)\n', (14994, 15016), False, 'from sklearn.utils.validation import check_array, column_or_1d, assert_all_finite, check_consistent_length, check_random_state as check_random_state_sklearn\n'), ((19826, 19863), 'numpy.any', 'np.any', (['(indices[i] >= A.shape[dim[i]])'], {}), '(indices[i] >= A.shape[dim[i]])\n', (19832, 19863), True, 'import numpy as np\n'), ((26412, 26432), 'numpy.nanmin', 'np.nanmin', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (26421, 26432), True, 'import numpy as np\n'), ((26460, 26480), 'numpy.nanmax', 'np.nanmax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (26469, 26480), True, 'import numpy as np\n'), ((27490, 27639), 'warnings.warn', 'warnings.warn', (['"""budgetmanager is already given such that the budget is not used. The given budget differs from the budget_managers budget."""'], {}), "(\n 'budgetmanager is already given such that the budget is not used. 
The given budget differs from the budget_managers budget.'\n )\n", (27503, 27639), False, 'import warnings\n'), ((7840, 7864), 'numpy.diag', 'np.diag', (['cost_matrix_new'], {}), '(cost_matrix_new)\n', (7847, 7864), True, 'import numpy as np\n'), ((18576, 18594), 'numpy.unique', 'np.unique', (['indices'], {}), '(indices)\n', (18585, 18594), True, 'import numpy as np\n'), ((18645, 18671), 'numpy.unique', 'np.unique', (['indices'], {'axis': '(0)'}), '(indices, axis=0)\n', (18654, 18671), True, 'import numpy as np\n'), ((18929, 18947), 'numpy.unique', 'np.unique', (['indices'], {}), '(indices)\n', (18938, 18947), True, 'import numpy as np\n'), ((18984, 19010), 'numpy.unique', 'np.unique', (['indices'], {'axis': '(0)'}), '(indices, axis=0)\n', (18993, 19010), True, 'import numpy as np\n'), ((26656, 26697), 'numpy.logical_or', 'np.logical_or', (['(bound[0] > X)', '(X > bound[1])'], {}), '(bound[0] > X, X > bound[1])\n', (26669, 26697), True, 'import numpy as np\n'), ((26712, 26777), 'warnings.warn', 'warnings.warn', (['"""`X` contains values not within range of `bound`."""'], {}), "('`X` contains values not within range of `bound`.')\n", (26725, 26777), False, 'import warnings\n'), ((20448, 20463), 'numpy.max', 'np.max', (['indices'], {}), '(indices)\n', (20454, 20463), True, 'import numpy as np\n'), ((19985, 20003), 'numpy.max', 'np.max', (['indices[i]'], {}), '(indices[i])\n', (19991, 20003), True, 'import numpy as np\n'), ((24386, 24401), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (24395, 24401), False, 'from inspect import Parameter, signature\n')]
|
#!/usr/bin/env python
# coding: utf-8
import argparse
import concurrent.futures
import logging
import numpy as np
import pandas as pd
import pyBigWig
import pysam
import os
import re
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from collections import Counter
from numpy.lib.stride_tricks import sliding_window_view
from operator import itemgetter
from tqdm import tqdm
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def get_args():
"""
Get arguments from command line with argparse.
"""
parser = argparse.ArgumentParser(
prog='aligned_bam_to_cpg_scores.py',
description="""Calculate CpG positions and scores from an aligned bam file. Outputs raw and
coverage-filtered results in bed and bigwig format, including haplotype-specific results (when available).""")
parser.add_argument("-b", "--bam",
required=True,
metavar="input.bam",
help="The aligned BAM file.")
parser.add_argument("-f", "--fasta",
required=True,
metavar="ref.fasta",
help="The reference fasta file.")
parser.add_argument("-o", "--output_label",
required=True,
metavar="label",
help="Label for output files, which results in [label].bed/bw.")
parser.add_argument("-p", "--pileup_mode",
required=False,
choices=["model", "count"],
default="model",
help="Use a model-based approach to score modifications across sites (model) "
"or a simple count-based approach (count). [default = %(default)s]")
parser.add_argument("-d", "--model_dir",
required=False,
default=None,
metavar="/path/to/model/dir",
help="Full path to the directory containing the model (*.pb files) to load. [default = None]")
parser.add_argument("-m", "--modsites",
required=False,
choices=["denovo", "reference"],
default="denovo",
help="Only output CG sites with a modification probability > 0 "
"(denovo), or output all CG sites based on the "
"supplied reference fasta (reference). [default = %(default)s]")
parser.add_argument("-c", "--min_coverage",
required=False,
default=4,
type=int,
metavar="int",
help="Minimum coverage required for filtered outputs. [default: %(default)d]")
parser.add_argument("-q", "--min_mapq",
required=False,
default=0,
type=int,
metavar="int",
help="Ignore alignments with MAPQ < N. [default: %(default)d]")
parser.add_argument("-a", "--hap_tag",
required=False,
default="HP",
metavar="TAG",
help="The SAM tag containing haplotype information. [default: %(default)s]")
parser.add_argument("-s", "--chunksize",
required=False,
default=500000,
type=int,
metavar="int",
help="Break reference regions into chunks "
"of this size for parallel processing. [default = %(default)d]")
parser.add_argument("-t", "--threads",
required=False,
default=1,
type=int,
metavar="int",
help="Number of threads for parallel processing. [default = %(default)d]")
return parser.parse_args()
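# Illustrative invocation, not from the original source (file paths and label
# are placeholders):
#     python aligned_bam_to_cpg_scores.py -b aligned.bam -f ref.fasta \
#         -o sample1 -p model -d /path/to/model -t 8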
def setup_logging(output_label):
"""
Set up logging to file.
"""
logname = "{}-aligned_bam_to_cpg_scores.log".format(output_label)
# ensure logging file does not exist, if so remove
if os.path.exists(logname):
os.remove(logname)
# set up logging to file
logging.basicConfig(filename=logname,
format="%(asctime)s: %(levelname)s: %(message)s",
datefmt='%d-%b-%y %H:%M:%S',
level=logging.DEBUG)
def log_args(args):
"""
Record argument settings in log file.
"""
logging.info("Using following argument settings:")
for arg, val in vars(args).items():
logging.info("\t--{}: {}".format(arg, val))
def get_regions_to_process(input_bam, input_fasta, chunksize, modsites, pileup_mode, model_dir, min_mapq, hap_tag):
"""
Breaks reference regions into smaller regions based on chunk
size specified. Returns a list of lists that can be used for
multiprocessing. Each sublist contains:
[bam path (str), fasta path (str), modsites (str),
reference name (str), start coordinate (int), stop coordinate (int)]
:param input_bam: Path to input bam file. (str)
:param input_fasta: Path to reference fasta file. (str)
:param chunksize: Chunk size (default = 500000). (int)
:param modsites: Filtering method. (str: "denovo", "reference")
:param pileup_mode: Site modification calling method. (str: "model", "count")
:param model_dir: Full path to model directory to load (if supplied), otherwise is None.
:param min_mapq: Minimum mapping quality score. (int)
:param hap_tag: The SAM tag label containing haplotype information. (str)
:return regions_to_process: List of lists containing region sizes. (list)
"""
logging.info("get_regions_to_process: Starting chunking.")
# open the input bam file with pysam
bamIn = pysam.AlignmentFile(input_bam, 'rb')
# empty list to store sublists with region information
regions_to_process = []
# iterate over reference names and their corresponding lengths
references = zip(bamIn.references, bamIn.lengths)
for ref, length in references:
start = 1
while start < length:
end = start + chunksize
if end < length:
regions_to_process.append([input_bam, input_fasta, modsites, pileup_mode, model_dir, ref, start, end - 1, min_mapq, hap_tag])
else:
regions_to_process.append([input_bam, input_fasta, modsites, pileup_mode, model_dir, ref, start, length, min_mapq, hap_tag])
start = start + chunksize
# close bam
bamIn.close()
logging.info("get_regions_to_process: Created {:,} region chunks.\n".format(len(regions_to_process)))
return regions_to_process
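# Illustration of the chunking above (contig length and chunksize assumed for the
# example only): a 1,200,000 bp contig with the default chunksize of 500,000 gives
# the (start, stop) chunks (1, 500000), (500001, 1000000), (1000001, 1200000),
# i.e. every chunk except the last one spans exactly `chunksize` positions.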
def cg_sites_from_fasta(input_fasta, ref):
"""
    Gets all CG site positions from a given reference region and
    collects them into a set of 0-based reference coordinates.
:param input_fasta: A path to reference fasta file. (str)
:param ref: Reference name. (str)
:return cg_sites_ref_set: Set with all CG ref positions. (set)
"""
# open fasta with BioPython and iterated over records
with open(input_fasta) as fh:
for record in SeqIO.parse(fh, "fasta"):
# if record name matches this particular ref,
if record.id == ref:
# use regex to find all indices for 'CG' in the reference seq, e.g. the C positions
cg_sites_ref_set = {i.start() for i in re.finditer('CG', str(record.seq.upper()))}
# there may be some stretches without any CpGs in a reference region
# handle these edge cases by adding a dummy value of -1 (an impossible coordinate)
if not cg_sites_ref_set:
cg_sites_ref_set.add(-1)
# once seq is found, stop iterating
break
# make sure the ref region was matched to a ref fasta seq
if not cg_sites_ref_set:
logging.error("cg_sites_from_fasta: The sequence '{}' was not found in the reference fasta file.".format(ref))
raise ValueError('The sequence "{}" was not found in the reference fasta file!'.format(ref))
return cg_sites_ref_set
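# Illustration of the CG lookup above (sequence assumed for the example only):
# for a reference sequence 'ACGTTACG', re.finditer('CG', ...) matches at indices
# 1 and 6, so cg_sites_ref_set would be {1, 6} -- the 0-based position of the C
# in each CG dinucleotide.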
def get_mod_sequence(integers):
"""
    A generator that takes an iterable of integers coding mod bases from the SAM Mm tags and yields the
    cumulative 1-based positions of the modified bases within the set of that base in the read.
Example: [5, 12, 0] -> [6, 19, 20]
    In the above example the 6th C, 19th C, and 20th C are modified
See this example described in: https://samtools.github.io/hts-specs/SAMtags.pdf; Dec 9 2021
:param integers: Iterable of integers (parsed from SAM Mm tag). (iter)
:return mod_sequence: Iterator of integers, 1-based counts of position of modified base in set of bases. (iter)
"""
base_count = 0
for i in integers:
base_count += i + 1
yield base_count
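# Doctest-style check of the cumulative positions computed above:
# >>> list(get_mod_sequence([5, 12, 0]))
# [6, 19, 20]
# matching the worked example in the docstring (the 6th, 19th and 20th C are modified).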
def get_base_indices(query_seq, base, reverse):
"""
Find all occurrences of base in query sequence and make a list of their
indices. Return the list of indices.
:param query_seq: The original read sequence (not aligned read sequence). (str)
:param base: The nucleotide modifications occur on ('C'). (str)
:param reverse: True/False whether sequence is reversed. (Boolean)
:return: List of integers, 0-based indices of all bases in query seq. (list)
"""
if reverse == False:
return [i.start() for i in re.finditer(base, query_seq)]
# if seq stored in reverse, need reverse complement to get correct indices for base
# use biopython for this (convert to Seq, get RC, convert to string)
else:
return [i.start() for i in re.finditer(base, str(Seq(query_seq).reverse_complement()))]
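# Small example of the index lookup above (read sequence assumed for the example only):
# get_base_indices('ACCGT', 'C', False) -> [1, 2]
# get_base_indices('ACCGT', 'C', True)  -> [1], because the reverse complement
# 'ACGGT' contains a single C, at index 1.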
def parse_mmtag(query_seq, mmtag, modcode, base, reverse):
"""
Get a generator of the 0-based indices of the modified bases in the query sequence.
:param query_seq: The original read sequence (not aligned read sequence). (str)
:param mmtag: The Mm tag obtained for the read ('C+m,5,12,0;'). (str)
:param modcode: The modification code to search for in the tag ('C+m'). (str)
:param base: The nucleotide modifications occur on ('C'). (str)
:param reverse: True/False whether sequence is reversed. (Boolean)
:return mod_base_indices: Generator of integers, 0-based indices of all mod bases in query seq. (iter)
"""
try:
# tags are written as: C+m,5,12,0;C+h,5,12,0;
# if multiple mod types present in tag, must find relevant one first
modline = next(x[len(modcode)+1:] for x in mmtag.split(';') if x.startswith(modcode))
# first get the sequence of the mod bases from tag integers
# this is a 1-based position of each mod base in the complete set of this base from this read
# e.g., [6, 19, 20] = the 6th, 19th, and 20th C bases are modified in the set of Cs
mod_sequence = get_mod_sequence((int(x) for x in modline.split(',')))
# get all 0-based indices of this base in this read, e.g. every C position
base_indices = get_base_indices(query_seq, base, reverse)
# use the mod sequence to identify indices of the mod bases in the read
return (base_indices[i - 1] for i in mod_sequence)
except:
return iter(())
def parse_mltag(mltag):
"""
    Convert the 0-255 integer codes into mod scores between 0 and 1, returned as a generator.
This is NOT designed to handle interleaved Ml format for multiple mod types!
:param mltag: The Ml tag obtained for the read with('Ml:B:C,204,89,26'). (str)
:return: Generator of floats, probabilities of all mod bases in query seq. (iter)
"""
return (round(x / 256, 3) if x > 0 else 0 for x in mltag)
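# Example of the 0-255 -> probability conversion above (tag values assumed):
# parse_mltag([204, 89, 26, 0]) yields 0.797, 0.348, 0.102, 0
# since e.g. round(204 / 256, 3) == 0.797, while a stored 0 is passed through as 0.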
def get_mod_dict(query_seq, mmtag, modcode, base, mltag, reverse):
"""
Make a dictionary from the Mm and Ml tags, in which the
modified base index (in the query seq) is the key and the
mod score is the value.
This is NOT designed to handle interleaved Ml format for multiple mod types!
:param query_seq: The original read sequence (not aligned read sequence). (str)
:param mmtag: The Mm tag obtained for the read ('C+m,5,12,0;'). (str)
:param modcode: The modification code to search for in the tag ('C+m'). (str)
:param base: The nucleotide modifications occur on ('C'). (str)
:param mltag: The Ml tag obtained for the read with('Ml:B:C,204,89,26'). (str)
:param reverse: True/False whether sequence is reversed. (Boolean)
:return mod_dict: Dictionary with mod positions and scores. (dict)
"""
mod_base_indices = parse_mmtag(query_seq, mmtag, modcode, base, reverse)
mod_scores = parse_mltag(mltag)
mod_dict = dict(zip(mod_base_indices, mod_scores))
return mod_dict
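# Sketch of the combined lookup above (read sequence, tags and score assumed):
# for query_seq='ACCGT', mmtag='C+m,1;', mltag=[230] and reverse=False, the Mm tag
# skips one C, so the second C (index 2) is the modified base and
# mod_dict == {2: 0.898}.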
def pileup_from_reads(bamIn, ref, pos_start, pos_stop, min_mapq, hap_tag, modsites):
"""
For a given region, retrieve all reads.
For each read, iterate over positions aligned to this region.
Build a list with an entry for each ref position in the region. Each entry has a list of 3-tuples, each of which
    includes information from a read base aligned to that site. The 3-tuple contains strand information,
modification score, and haplotype.
(strand symbol (str), mod score (float), haplotype (int))
Return the unfiltered list of base modification data.
:param bamIn: AlignmentFile object of input bam file.
:param ref: Reference name. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param min_mapq: Minimum mapping quality score. (int)
:param hap_tag: Name of SAM tag containing haplotype information. (str)
:param modsites: Filtering method. (str: "denovo", "reference")
:return basemod_data: Unfiltered list of base modification data (list)
:return cg_sites_read_set: Set of positions in read consensus sequence with CG, given as reference position. The
set is empty unless modsites is 'denovo' (set)
"""
logging.debug("coordinates {}: {:,}-{:,}: (2) pileup_from_reads".format(ref, pos_start, pos_stop))
basemod_data = []
# These structures are only used for modsites denovo mode
pos_pileup = []
pos_pileup_hap1 = []
pos_pileup_hap2 = []
is_denovo_modsites = modsites == "denovo"
# iterate over all reads present in this region
for read in bamIn.fetch(contig=ref, start=pos_start, stop=pos_stop):
# check if passes minimum mapping quality score
if read.mapping_quality < min_mapq:
#logging.warning("pileup_from_reads: read did not pass minimum mapQV: {}".format(read.query_name))
continue
# identify the haplotype tag, if any (default tag = HP)
# values are 1 or 2 (for haplotypes), or 0 (no haplotype)
# an integer is expected but custom tags can produce strings instead
try:
hap_val = read.get_tag(hap_tag)
try:
hap = int(hap_val)
except ValueError:
logging.error("coordinates {}: {:,}-{:,}: (2) pileup_from_reads: illegal haplotype value {}".format(ref, pos_start, pos_stop, hap_val))
except KeyError:
hap = 0
# check for SAM-spec methylation tags
# draft tags were Ml and Mm, accepted tags are now ML and MM
# check for both types, set defaults to None and change if found
mmtag, mltag = None, None
try:
mmtag = read.get_tag('Mm')
mltag = read.get_tag('Ml')
except KeyError:
pass
try:
mmtag = read.get_tag('MM')
mltag = read.get_tag('ML')
except KeyError:
pass
if mmtag is not None and mltag is not None:
if not basemod_data:
ref_pos_count = 1 + pos_stop - pos_start
basemod_data = [[] for _ in range(ref_pos_count)]
if is_denovo_modsites:
pos_pileup = [[] for _ in range(ref_pos_count)]
pos_pileup_hap1 = [[] for _ in range(ref_pos_count)]
pos_pileup_hap2 = [[] for _ in range(ref_pos_count)]
is_reverse = bool(read.is_reverse)
strand = "+"
if is_reverse :
strand = "-"
rev_strand_offset = len(read.query_sequence) - 2
# note that this could potentially be used for other mod types, but
# the Mm and Ml parsing functions are not set up for the interleaved format
            # e.g., 'Mm:Z:C+mh,5,12; Ml:B:C,204,26,89,130' does NOT work
# to work it must be one mod type, and one score per mod position
mod_dict = get_mod_dict(read.query_sequence, mmtag, 'C+m', 'C', mltag, is_reverse)
if True:
# iterate over positions
for query_pos, ref_pos in read.get_aligned_pairs(matches_only=True)[20:-20]:
# make sure ref position is in range of ref target region
if ref_pos >= pos_start and ref_pos <= pos_stop:
ref_offset = ref_pos - pos_start
# building a consensus is MUCH faster when we iterate over reads (vs. by column then by read)
# we are building a dictionary with ref position as key and list of bases as val
if is_denovo_modsites:
query_base = read.query_sequence[query_pos]
pos_pileup[ref_offset].append(query_base)
if hap == 1:
pos_pileup_hap1[ref_offset].append(query_base)
elif hap == 2:
pos_pileup_hap2[ref_offset].append(query_base)
# identify if read is reverse strand or forward to set correct location
if is_reverse:
location = (rev_strand_offset - query_pos)
else:
location = query_pos
# check if this position has a mod score in the dictionary,
# if not assign score of zero
score = mod_dict.get(location, 0)
# Add tuple with strand, modification score, and haplotype to the list for this position
basemod_data[ref_offset].append((strand, score, hap))
# if no SAM-spec methylation tags present, ignore read and log
else:
logging.warning("pileup_from_reads: read missing MM and/or ML tag(s): {}".format(read.query_name))
cg_sites_read_set = set()
if is_denovo_modsites:
for refpos_list in (pos_pileup, pos_pileup_hap1, pos_pileup_hap2):
last_base = 'N'
last_index = 0
for index,v in enumerate(refpos_list):
# find the most common base, if no reads present use N
if len(v):
base = Counter(v).most_common(1)[0][0]
else:
base = 'N'
if last_base == 'C' and base == 'G' :
cg_sites_read_set.add(pos_start+last_index)
# This restriction recreates the original code behavior:
# - Advantage: Method can find a CpG aligning across a deletion in the reference
# - Disadvantage: Method will find 'fake' CpG across gaps in the haplotype phasing
#
# The disadvantage is fixable, but first focus on identical output to make verification easy
if base != 'N':
last_base = base
last_index = index
return basemod_data, cg_sites_read_set
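# Sketch of the structure returned above (values assumed): for a 3 bp region
# covered by two reads, basemod_data could look like
#   [[('+', 0.92, 1), ('-', 0.102, 2)], [], [('+', 0, 0)]]
# one inner list per reference position, one (strand, score, haplotype) tuple per
# aligned read base at that position.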
def filter_basemod_data(basemod_data, cg_sites_read_set, ref, pos_start, pos_stop, input_fasta, modsites):
"""
Filter the per-position base modification data, based on the modsites option selected:
"reference": Keep all sites that match a reference CG site (this includes both
modified and unmodified sites). It will exclude all modified sites
that are not CG sites, according to the ref sequence.
"denovo": Keep all sites which have at least one modification score > 0, per strand.
This can include sites that are CG in the reads, but not in the reference.
It can exclude CG sites with no modifications on either strand from being
written to the bed file.
Return the filtered list.
:param basemod_data: List of base modification data per position, offset by pos_start. (list)
:param cg_sites_read_set: Set with reference coordinates for all CG sites in consensus from reads. (set)
:param ref: A path to reference fasta file. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param modsites: Filtering method. (str: "denovo", "reference")
:param ref: Reference name. (str)
:return filtered_basemod_data: List of 2-tuples for each position retained after filtering. Each 2-tuple is the
reference position and base mod data list. The list is sorted by reference position (list)
"""
filtered_basemod_data = []
if modsites == "reference":
if basemod_data:
# Get CG positions in reference
cg_sites_ref_set = cg_sites_from_fasta(input_fasta, ref)
# Keep all sites that match a reference CG position and have at least one basemod observation.
filtered_basemod_data=[(i+pos_start,v) for i, v in enumerate(basemod_data) if (i + pos_start) in cg_sites_ref_set and v]
logging.debug("coordinates {}: {:,}-{:,}: (3) filter_basemod_data: sites kept = {:,}".format(ref, pos_start, pos_stop, len(filtered_basemod_data)))
elif modsites == "denovo":
if basemod_data:
# Keep all sites that match position of a read consensus CG site.
filtered_basemod_data=[(i+pos_start,v) for i, v in enumerate(basemod_data) if (i + pos_start) in cg_sites_read_set]
logging.debug("coordinates {}: {:,}-{:,}: (3) filter_basemod_data: sites kept = {:,}".format(ref, pos_start, pos_stop, len(filtered_basemod_data)))
del basemod_data
del cg_sites_read_set
return filtered_basemod_data
def calc_stats(df):
"""
    Gets summary stats from a given dataframe.
:param df: Pandas dataframe.
:return: Summary statistics
"""
total = df.shape[0]
mod = df[df['prob'] > 0.5].shape[0]
unMod = df[df['prob'] <= 0.5].shape[0]
modScore = "." if mod == 0 else str(round(df[df['prob'] > 0.5]['prob'].mean(), 3))
unModScore = "." if unMod == 0 else str(round(df[df['prob'] <= 0.5]['prob'].mean(), 3))
percentMod = 0.0 if mod == 0 else round((mod / total) * 100, 1)
return percentMod, mod, unMod, modScore, unModScore
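# Worked example for the summary above (probabilities assumed):
# for a dataframe whose 'prob' column holds [0.9, 0.8, 0.2], the call returns
# (66.7, 2, 1, '0.85', '0.2') -- two modified reads, one unmodified read.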
def collect_bed_results_count(ref, pos_start, pos_stop, filtered_basemod_data):
"""
Iterates over reference positions and for each position, makes a pandas dataframe from the sublists.
The dataframe is filtered for strands and haplotypes, and summary statistics are
calculated with calc_stats().
    For each position and strand/haplotype combination, a sublist of summary information
is appended to the bed_results list:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) mod score, (9) unmod score]
This information is used to write the output bed file.
:param ref: Reference name. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param filtered_basemod_data: List of 2-tuples for each position remaining after filtration. Each 2-tuple is the
    reference position and base mod data. The list is sorted by reference position (list)
:return bed_results: List of sublists with information to write the output bed file. (list)
"""
logging.debug("coordinates {}: {:,}-{:,}: (4) collect_bed_results_count".format(ref, pos_start, pos_stop))
# intiate empty list to store bed sublists
bed_results = []
# iterate over the ref positions and corresponding vals
for (refPosition, modinfoList) in filtered_basemod_data:
# create pandas dataframe from this list of sublists
df = pd.DataFrame(modinfoList, columns=['strand', 'prob', 'hap'])
# Filter dataframe based on strand/haplotype combinations, get information,
# and create sublists and append to bed_results.
# merged strands / haplotype 1
percentMod, mod, unMod, modScore, unModScore = calc_stats(df[df['hap'] == 1])
if mod + unMod >= 1:
bed_results.append([ref, refPosition, (refPosition + 1), percentMod,
"hap1", mod + unMod, mod, unMod, modScore, unModScore])
# merged strands / haplotype 2
percentMod, mod, unMod, modScore, unModScore = calc_stats(df[df['hap'] == 2])
if mod + unMod >= 1:
bed_results.append([ref, refPosition, (refPosition + 1), percentMod,
"hap2", mod + unMod, mod, unMod, modScore, unModScore])
# merged strands / both haplotypes
percentMod, mod, unMod, modScore, unModScore = calc_stats(df)
if mod + unMod >= 1:
bed_results.append([ref, refPosition, (refPosition + 1), percentMod,
"Total", mod + unMod, mod, unMod, modScore, unModScore])
return bed_results
def get_normalized_histo(probs, adj):
"""
Create the array data structure needed to apply the model, for a given site.
:param probs: List of methylation probabilities. (list)
:param adj: A 0 or 1 indicating whether previous position was a CG. (int)
:return: List with normalized histogram and coverage (if min coverage met), else returns empty list. (list)
"""
cov = len(probs)
if (cov >= 4):
hist = np.histogram(probs, bins=20, range=[0, 1])[0]
norm = np.linalg.norm(hist)
# divide hist by norm and add values to array
# add either 0 (not adjacent to a prior CG) or 1 (adjacent to a prior CG) to final spot in array
norm_hist = np.append(hist / norm, adj)
return [norm_hist, cov]
else:
return []
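# Shape sketch for the feature construction above (probabilities assumed):
# with probs=[0.9, 0.9, 0.9, 0.9] and adj=0, all four values land in a single one
# of the twenty 0.05-wide bins, the L2 norm is 4.0, and the returned feature vector
# has 21 entries (20 normalized bins plus the adjacency flag) with coverage 4.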
def discretize_score(score, coverage):
"""
Apply a small correction to the model probability to make it
compatible with the number of reads at that site. Allows the number
of modified and unmodified reads to be estimated.
:param score: Modification probability, from model. (float)
:param coverage: Number of reads. (int)
:return mod_reads: Estimated number of modified reads. (int)
:return unmod_reads: Estimated number of unmodified reads. (int)
:return adjusted_score: Adjusted probability score, based on percent modified reads. (float)
"""
# need to round up or round down modified read numbers based on score
# which allows a push towards 0/50/100 for adjusted score
if score > 50:
if score < 65:
mod_reads = int(np.floor(score/100 * float(coverage)))
else:
mod_reads = int(np.ceil(score/100 * float(coverage)))
else:
if score > 35:
mod_reads = int(np.ceil(score/100 * float(coverage)))
else:
mod_reads = int(np.floor(score/100 * float(coverage)))
unmod_reads = int(coverage) - mod_reads
if mod_reads == 0:
adjusted_score = 0.0
else:
adjusted_score = round((mod_reads / (mod_reads + unmod_reads)) * 100, 1)
return mod_reads, unmod_reads, adjusted_score
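# Worked example of the rounding correction above (score and coverage assumed):
# discretize_score(66, 7) -> ceil(0.66 * 7) = 5 modified reads, 2 unmodified reads,
# and an adjusted score of round(5 / 7 * 100, 1) = 71.4.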
def apply_model(refpositions, normhistos, coverages, ref, pos_start, pos_stop, model, hap, bed_results):
"""
Apply model to make modification calls for all sites using a sliding window approach.
Append to a list of results, ultimately for bed file:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) adjusted probability]
:param refpositions: List with all CG positions. (list)
:param normhistos: List with all normalized histogram data structures. (list)
:param coverages: List with all CG coverages. (list)
:param ref: Reference contig name. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param model: The tensorflow model object.
:param hap: Label of haplotype (hap1, hap2, or Total). (str)
:param bed_results: List of bed results to which these model results will be appended (list)
"""
if len(normhistos) > 11:
featPad = np.pad(np.stack(normhistos), pad_width=((6, 4), (0, 0)), mode='constant', constant_values=0)
featuresWindow = sliding_window_view(featPad, 11, axis=0)
featuresWindow = np.swapaxes(featuresWindow, 1, 2)
predict = model.predict(featuresWindow)
predict = np.clip(predict, 0, 1)
for i, position in enumerate(refpositions):
model_score = round(predict[i][0] * 100, 1)
mod_reads, unmod_reads, adjusted_score = discretize_score(model_score, coverages[i])
bed_results.append((ref, position, (position + 1), model_score, hap, coverages[i], mod_reads, unmod_reads, adjusted_score))
else:
logging.warning("coordinates {}: {:,}-{:,}: apply_model: insufficient data for {}".format(ref, pos_start, pos_stop, hap))
def collect_bed_results_model(ref, pos_start, pos_stop, filtered_basemod_data, model_dir):
"""
Iterates over reference positions and creates normalized histograms of scores,
feeds all sites and scores into model function to assign modification probabilities,
and creates a list of sublists for writing bed files:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) adjusted probability]
This information is returned and ultimately used to write the output bed file.
:param ref: Reference name. (str)
:param pos_start: Start coordinate for region. (int)
:param pos_stop: Stop coordinate for region. (int)
:param filtered_basemod_data: List of 2-tuples for each position remaining after filtration. Each 2-tuple is the
    reference position and base mod data. The list is sorted by reference position (list)
:param model_dir: Full path to directory containing model. (str)
:return bed_results: List of sublists with information to write the output bed file. (list)
"""
logging.debug("coordinates {}: {:,}-{:,}: (4) collect_bed_results_model".format(ref, pos_start, pos_stop))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
logging.getLogger('tensorflow').setLevel(logging.ERROR)
# this may or may not do anything to help with the greedy thread situation...
#tf.config.threading.set_intra_op_parallelism_threads(1)
#tf.config.threading.set_inter_op_parallelism_threads(1)
model = tf.keras.models.load_model(model_dir, compile=False)
total_refpositions, total_normhistos, total_coverages = [], [], []
hap1_refpositions, hap1_normhistos, hap1_coverages = [], [], []
hap2_refpositions, hap2_normhistos, hap2_coverages = [], [], []
# set initial C index for CG location to 0
previousLocation = 0
# iterate over reference positions and values (list containing [strand, score, hap]) in filtered_basemod_data
for (refPosition, modinfoList) in filtered_basemod_data:
# determine if there is an adjacent prior CG, score appropriately
if (refPosition - previousLocation) == 2:
adj = 1
else:
adj = 0
# update CG position
previousLocation = refPosition
# build lists for combined haplotypes
# returns [norm_hist, cov] if min coverage met, otherwise returns empty list
total_result_list = get_normalized_histo([x[1] for x in modinfoList], adj)
if total_result_list:
total_normhistos.append(total_result_list[0])
total_coverages.append(total_result_list[1])
total_refpositions.append(refPosition)
# build lists for hap1
hap1_result_list = get_normalized_histo([x[1] for x in modinfoList if x[2] == 1], adj)
if hap1_result_list:
hap1_normhistos.append(hap1_result_list[0])
hap1_coverages.append(hap1_result_list[1])
hap1_refpositions.append(refPosition)
# build lists for hap2
hap2_result_list = get_normalized_histo([x[1] for x in modinfoList if x[2] == 2], adj)
if hap2_result_list:
hap2_normhistos.append(hap2_result_list[0])
hap2_coverages.append(hap2_result_list[1])
hap2_refpositions.append(refPosition)
# initiate empty list to store all bed results
bed_results = []
# run model for total, hap1, hap2, and add to bed results if non-empty list was returned
apply_model(total_refpositions, total_normhistos, total_coverages, ref, pos_start, pos_stop, model, "Total", bed_results)
apply_model(hap1_refpositions, hap1_normhistos, hap1_coverages, ref, pos_start, pos_stop, model, "hap1", bed_results)
apply_model(hap2_refpositions, hap2_normhistos, hap2_coverages, ref, pos_start, pos_stop, model, "hap2", bed_results)
return bed_results
def run_process_region(arguments):
"""
Process a given reference region to identify modified bases.
Uses pickled args (input_file, ref, pos_start, pos_stop) to run
pileup_from_reads() to get all desired sites (based on modsites option),
then runs collect_bed_results() to summarize information.
The sublists will differ between model or count method, but they always share the first 7 elements:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage, ...]
:param arguments: Pickled list. (list)
:return bed_results: List of sublists with information to write the output bed file. (list)
"""
# unpack pickled items:
# [bam path (str), fasta path (str), modsites option (str),
# pileup_mode option (str), model directory path (str),
# reference contig name (str), start coordinate (int),
# stop coordinate (int), minimum mapping QV (int), haplotype tag name (str)]
input_bam, input_fasta, modsites, pileup_mode, model_dir, ref, pos_start, pos_stop, min_mapq, hap_tag = arguments
logging.debug("coordinates {}: {:,}-{:,}: (1) run_process_region: start".format(ref, pos_start, pos_stop))
# open the input bam file with pysam
bamIn = pysam.AlignmentFile(input_bam, 'rb')
# get all ref sites with mods and information from corresponding aligned reads
basemod_data, cg_sites_read_set = pileup_from_reads(bamIn, ref, pos_start, pos_stop, min_mapq, hap_tag, modsites)
# filter based on denovo or reference sites
filtered_basemod_data = filter_basemod_data(basemod_data, cg_sites_read_set, ref, pos_start, pos_stop, input_fasta, modsites)
# bam object no longer needed, close file
bamIn.close()
if filtered_basemod_data:
# summarize the mod results, depends on pileup_mode option selected
if pileup_mode == "count":
bed_results = collect_bed_results_count(ref, pos_start, pos_stop, filtered_basemod_data)
elif pileup_mode == "model":
bed_results = collect_bed_results_model(ref, pos_start, pos_stop, filtered_basemod_data, model_dir)
else:
bed_results = []
logging.debug("coordinates {}: {:,}-{:,}: (5) run_process_region: finish".format(ref, pos_start, pos_stop))
if len(bed_results) > 1:
return bed_results
else:
return
def run_process_region_wrapper(arguments):
try:
return run_process_region(arguments)
except Exception as e:
sys.stderr.write("Exception thrown in worker process {}: {}\n".format(os.getpid(),e))
raise
def run_all_pileup_processing(regions_to_process, threads):
"""
Function to distribute jobs based on reference regions created.
Collects results and returns list for writing output bed file.
The bed results will differ based on model or count method, but they always share the first 7 elements:
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage, ...]
:param regions_to_process: List of sublists defining regions (input_file, ref, pos_start, pos_stop). (list)
:param threads: Number of threads to use for multiprocessing. (int)
:return filtered_bed_results: List of sublists with information to write the output bed file. (list)
"""
logging.info("run_all_pileup_processing: Starting parallel processing.\n")
# run all jobs
progress_bar = None
if sys.stderr.isatty():
progress_bar = tqdm(total=len(regions_to_process), miniters=1, smoothing=0)
bed_results = []
with concurrent.futures.ProcessPoolExecutor(max_workers=threads) as executor:
futures = [executor.submit(run_process_region_wrapper, r) for r in regions_to_process]
# Process results in order of completion
for future in concurrent.futures.as_completed(futures):
bed_result = future.result()
bed_results.append(bed_result)
if progress_bar:
progress_bar.update(1)
if progress_bar:
progress_bar.close()
logging.info("run_all_pileup_processing: Finished parallel processing.\n")
# results is a list of sublists, may contain None, remove these
filtered_bed_results = [i for i in bed_results if i]
# turn list of lists of sublists into list of sublists
flattened_bed_results = [i for sublist in filtered_bed_results for i in sublist]
# ensure bed results are sorted by ref contig name, start position
logging.info("run_all_pileup_processing: Starting sort for bed results.\n")
if flattened_bed_results:
flattened_bed_results.sort(key=itemgetter(0, 1))
logging.info("run_all_pileup_processing: Finished sort for bed results.\n")
return flattened_bed_results
def write_output_bed(label, modsites, min_coverage, bed_results):
"""
    Writes output bed file(s) based on the information in bed_results.
Separates results into total, hap1, and hap2. If haplotypes not available,
only total is produced.
    The bed_results list will contain slightly different information depending on the pileup_mode option,
but the first 7 fields will be identical:
count-based list
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) mod score, (9) unmod score]
OR
model-based list
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) adjusted probability]
    :param label: Label used to name the output bed files. (str)
:param modsites: "reference" or "denovo", for the CpG detection mode. (str)
:param min_coverage: Minimum coverage to retain a site. (int)
:param bed_results: List of sublists with information to write the output bed file. (list)
:return output_files: List of output bed file names that were successfully written. (list)
"""
logging.info("write_output_bed: Writing unfiltered output bed files.\n")
out_total = "{}.combined.{}.bed".format(label, modsites)
out_hap1 = "{}.hap1.{}.bed".format(label, modsites)
out_hap2 = "{}.hap2.{}.bed".format(label, modsites)
cov_total = "{}.combined.{}.mincov{}.bed".format(label, modsites, min_coverage)
cov_hap1 = "{}.hap1.{}.mincov{}.bed".format(label, modsites, min_coverage)
cov_hap2 = "{}.hap2.{}.mincov{}.bed".format(label, modsites, min_coverage)
# remove any previous version of output files
for f in [out_total, out_hap1, out_hap2, cov_total, cov_hap1, cov_hap2]:
if os.path.exists(f):
os.remove(f)
with open(out_total, 'a') as fh_total:
with open(out_hap1, 'a') as fh_hap1:
with open(out_hap2, 'a') as fh_hap2:
for i in bed_results:
if i[4] == "Total":
fh_total.write("{}\n".format("\t".join([str(j) for j in i])))
elif i[4] == "hap1":
fh_hap1.write("{}\n".format("\t".join([str(j) for j in i])))
elif i[4] == "hap2":
fh_hap2.write("{}\n".format("\t".join([str(j) for j in i])))
# write coverage-filtered versions of bed files
logging.info("write_output_bed: Writing coverage-filtered output bed files, using min coverage = {}.\n".format(min_coverage))
output_files = []
for inBed, covBed in [(out_total, cov_total), (out_hap1, cov_hap1), (out_hap2, cov_hap2)]:
# if haplotypes not present, the bed files are empty, remove and do not write cov-filtered version
if os.stat(inBed).st_size == 0:
os.remove(inBed)
else:
output_files.append(inBed)
# write coverage filtered bed file
with open(inBed, 'r') as fh_in, open(covBed, 'a') as fh_out:
for line in fh_in:
if int(line.split('\t')[5]) >= min_coverage:
fh_out.write(line)
# check to ensure some sites were written, otherwise remove
if os.stat(covBed).st_size == 0:
os.remove(covBed)
else:
output_files.append(covBed)
return output_files
def make_bed_df(bed, pileup_mode):
"""
Construct a pandas dataframe from a bed file.
count-based list
[(0) ref name, (1) start coord, (2) stop coord, (3) % mod sites, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) mod score, (9) unmod score]
OR
model-based list
[(0) ref name, (1) start coord, (2) stop coord, (3) mod probability, (4) haplotype, (5) coverage,
(6) mod sites, (7) unmod sites, (8) adjusted probability]
:param bed: Name of bed file.
:param pileup_mode: Site modification calling method. (str: "model", "count")
:return df: Pandas dataframe.
"""
logging.debug("make_bed_df: Converting '{}' to pandas dataframe.\n".format(bed))
if pileup_mode == "count":
df = pd.read_csv(bed, sep='\t', header=None,
names = ['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage',
'modified_bases', 'unmodified_bases', 'mod_score', 'unmod_score'])
df.drop(columns=['modified_bases', 'unmodified_bases', 'mod_score', 'unmod_score', 'haplotype', 'coverage'], inplace=True)
elif pileup_mode == "model":
df = pd.read_csv(bed, sep='\t', header=None,
names = ['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage',
'modified_bases', 'unmodified_bases', 'adj_prob'])
df.drop(columns=['haplotype', 'coverage', 'modified_bases', 'unmodified_bases', 'adj_prob'], inplace=True)
#df.sort_values(by=['chromosome', 'start'], inplace=True)
return df
def get_bigwig_header_info(input_fasta):
"""
Get chromosome names and lengths from reference fasta.
:param input_fasta: Name of reference fasta file.
:return header: List of tuples, containing [ (ref1, length1), (ref2, length2), ...] .
"""
logging.debug("get_bigwig_header_info: Getting ref:length info from reference fasta.\n")
header = []
with open(input_fasta) as fh:
for record in SeqIO.parse(fh, "fasta"):
header.append((record.id, len(record.seq)))
return header
def write_bigwig_from_df(df, header, outname):
"""
Function to write a bigwig file using a pandas dataframe from a bed file.
:param df: Pandas dataframe object (created from bed file).
:param header: List containing (ref name, length) information. (list of tuples)
:param outname: Name of bigwig output file to write (OUT.bw).
"""
logging.debug("write_bigwig_from_df: Writing bigwig file for '{}'.\n".format(outname))
# first filter reference contigs to match those in bed file
# get all unique ref contig names from bed
chroms_present = list(df["chromosome"].unique())
# header is a list of tuples, filter to keep only those present in bed
# must also sort reference contigs by name
filtered_header = sorted([x for x in header if x[0] in chroms_present], key=itemgetter(0))
for i,j in filtered_header:
logging.debug("\tHeader includes: '{}', '{}'.".format(i,j))
# raise error if no reference contig names match
if not filtered_header:
logging.error("No reference contig names match between bed file and reference fasta!")
raise ValueError("No reference contig names match between bed file and reference fasta!")
# open bigwig object, enable writing mode (default is read only)
bw = pyBigWig.open(outname, "w")
# must add header to bigwig prior to writing entries
bw.addHeader(filtered_header)
# iterate over ref contig names
for chrom, length in filtered_header:
logging.debug("\tAdding entries for '{}'.".format(chrom))
# subset dataframe by chromosome name
temp_df = df[df["chromosome"] == chrom]
logging.debug("\tNumber of entries = {:,}.".format(temp_df.shape[0]))
# add entries in order specified for bigwig objects:
# list of chr names: ["chr1", "chr1", "chr1"]
# list of start coords: [1, 100, 125]
# list of stop coords: ends=[6, 120, 126]
# list of vals: values=[0.0, 1.0, 200.0]
bw.addEntries(list(temp_df["chromosome"]),
list(temp_df["start"]),
ends=list(temp_df["stop"]),
values=list(temp_df["mod_probability"]))
logging.debug("\tFinished entries for '{}'.\n".format(chrom))
# close bigwig object
bw.close()
def convert_bed_to_bigwig(bed_files, fasta, pileup_mode):
"""
Write bigwig files for each output bed file.
:param bed_files: List of output bed file names. (list)
:param fasta: A path to reference fasta file. (str)
:param pileup_mode: Site modification calling method. (str: "model", "count")
"""
logging.info("convert_bed_to_bigwig: Converting {} bed files to bigwig files.\n".format(len(bed_files)))
header = get_bigwig_header_info(fasta)
for bed in bed_files:
outname = "{}.bw".format(bed.split(".bed")[0])
df = make_bed_df(bed, pileup_mode)
write_bigwig_from_df(df, header, outname)
def main():
args = get_args()
setup_logging(args.output_label)
log_args(args)
if args.pileup_mode == "model":
if args.model_dir == None:
logging.error("Must supply a model to use when running model-based scoring!")
raise ValueError("Must supply a model to use when running model-based scoring!")
else:
if not os.path.isdir(args.model_dir):
logging.error("{} is not a valid directory path!".format(args.model_dir))
raise ValueError("{} is not a valid directory path!".format(args.model_dir))
print("\nChunking regions for multiprocessing.")
regions_to_process = get_regions_to_process(args.bam, args.fasta, args.chunksize, args.modsites,
args.pileup_mode, args.model_dir, args.min_mapq, args.hap_tag)
print("Running multiprocessing on {:,} chunks.".format(len(regions_to_process)))
bed_results = run_all_pileup_processing(regions_to_process, args.threads)
print("Finished multiprocessing.\nWriting bed files.")
bed_files = write_output_bed(args.output_label, args.modsites, args.min_coverage, bed_results)
print("Writing bigwig files.")
convert_bed_to_bigwig(bed_files, args.fasta, args.pileup_mode)
print("Finished.\n")
if __name__ == '__main__':
main()
|
[
"os.remove",
"Bio.Seq.Seq",
"argparse.ArgumentParser",
"pandas.read_csv",
"re.finditer",
"numpy.clip",
"numpy.histogram",
"numpy.linalg.norm",
"pandas.DataFrame",
"logging.error",
"sys.stderr.isatty",
"os.path.exists",
"numpy.append",
"numpy.swapaxes",
"collections.Counter",
"numpy.stack",
"tensorflow.keras.models.load_model",
"Bio.SeqIO.parse",
"os.stat",
"pysam.AlignmentFile",
"numpy.lib.stride_tricks.sliding_window_view",
"logging.debug",
"os.getpid",
"logging.basicConfig",
"os.path.isdir",
"logging.info",
"pyBigWig.open",
"operator.itemgetter",
"logging.getLogger"
] |
[((519, 801), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""aligned_bam_to_cpg_scores.py"""', 'description': '"""Calculate CpG positions and scores from an aligned bam file. Outputs raw and \n coverage-filtered results in bed and bigwig format, including haplotype-specific results (when available)."""'}), '(prog=\'aligned_bam_to_cpg_scores.py\', description=\n """Calculate CpG positions and scores from an aligned bam file. Outputs raw and \n coverage-filtered results in bed and bigwig format, including haplotype-specific results (when available)."""\n )\n', (542, 801), False, 'import argparse\n'), ((4240, 4263), 'os.path.exists', 'os.path.exists', (['logname'], {}), '(logname)\n', (4254, 4263), False, 'import os\n'), ((4326, 4472), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'logname', 'format': '"""%(asctime)s: %(levelname)s: %(message)s"""', 'datefmt': '"""%d-%b-%y %H:%M:%S"""', 'level': 'logging.DEBUG'}), "(filename=logname, format=\n '%(asctime)s: %(levelname)s: %(message)s', datefmt='%d-%b-%y %H:%M:%S',\n level=logging.DEBUG)\n", (4345, 4472), False, 'import logging\n'), ((4619, 4669), 'logging.info', 'logging.info', (['"""Using following argument settings:"""'], {}), "('Using following argument settings:')\n", (4631, 4669), False, 'import logging\n'), ((5830, 5888), 'logging.info', 'logging.info', (['"""get_regions_to_process: Starting chunking."""'], {}), "('get_regions_to_process: Starting chunking.')\n", (5842, 5888), False, 'import logging\n'), ((5942, 5978), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['input_bam', '"""rb"""'], {}), "(input_bam, 'rb')\n", (5961, 5978), False, 'import pysam\n'), ((31278, 31330), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_dir'], {'compile': '(False)'}), '(model_dir, compile=False)\n', (31304, 31330), True, 'import tensorflow as tf\n'), ((34891, 34927), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['input_bam', '"""rb"""'], {}), "(input_bam, 'rb')\n", (34910, 34927), False, 'import pysam\n'), ((36951, 37025), 'logging.info', 'logging.info', (['"""run_all_pileup_processing: Starting parallel processing.\n"""'], {}), "('run_all_pileup_processing: Starting parallel processing.\\n')\n", (36963, 37025), False, 'import logging\n'), ((37077, 37096), 'sys.stderr.isatty', 'sys.stderr.isatty', ([], {}), '()\n', (37094, 37096), False, 'import sys\n'), ((37703, 37777), 'logging.info', 'logging.info', (['"""run_all_pileup_processing: Finished parallel processing.\n"""'], {}), "('run_all_pileup_processing: Finished parallel processing.\\n')\n", (37715, 37777), False, 'import logging\n'), ((38123, 38198), 'logging.info', 'logging.info', (['"""run_all_pileup_processing: Starting sort for bed results.\n"""'], {}), "('run_all_pileup_processing: Starting sort for bed results.\\n')\n", (38135, 38198), False, 'import logging\n'), ((39637, 39709), 'logging.info', 'logging.info', (['"""write_output_bed: Writing unfiltered output bed files.\n"""'], {}), "('write_output_bed: Writing unfiltered output bed files.\\n')\n", (39649, 39709), False, 'import logging\n'), ((43792, 43885), 'logging.debug', 'logging.debug', (['"""get_bigwig_header_info: Getting ref:length info from reference fasta.\n"""'], {}), "(\n 'get_bigwig_header_info: Getting ref:length info from reference fasta.\\n')\n", (43805, 43885), False, 'import logging\n'), ((45335, 45362), 'pyBigWig.open', 'pyBigWig.open', (['outname', '"""w"""'], {}), "(outname, 'w')\n", (45348, 45362), False, 'import pyBigWig\n'), ((4273, 4291), 
'os.remove', 'os.remove', (['logname'], {}), '(logname)\n', (4282, 4291), False, 'import os\n'), ((7315, 7339), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fh', '"""fasta"""'], {}), "(fh, 'fasta')\n", (7326, 7339), False, 'from Bio import SeqIO\n'), ((24544, 24604), 'pandas.DataFrame', 'pd.DataFrame', (['modinfoList'], {'columns': "['strand', 'prob', 'hap']"}), "(modinfoList, columns=['strand', 'prob', 'hap'])\n", (24556, 24604), True, 'import pandas as pd\n'), ((26235, 26255), 'numpy.linalg.norm', 'np.linalg.norm', (['hist'], {}), '(hist)\n', (26249, 26255), True, 'import numpy as np\n'), ((26435, 26462), 'numpy.append', 'np.append', (['(hist / norm)', 'adj'], {}), '(hist / norm, adj)\n', (26444, 26462), True, 'import numpy as np\n'), ((29027, 29067), 'numpy.lib.stride_tricks.sliding_window_view', 'sliding_window_view', (['featPad', '(11)'], {'axis': '(0)'}), '(featPad, 11, axis=0)\n', (29046, 29067), False, 'from numpy.lib.stride_tricks import sliding_window_view\n'), ((29094, 29127), 'numpy.swapaxes', 'np.swapaxes', (['featuresWindow', '(1)', '(2)'], {}), '(featuresWindow, 1, 2)\n', (29105, 29127), True, 'import numpy as np\n'), ((29195, 29217), 'numpy.clip', 'np.clip', (['predict', '(0)', '(1)'], {}), '(predict, 0, 1)\n', (29202, 29217), True, 'import numpy as np\n'), ((38294, 38369), 'logging.info', 'logging.info', (['"""run_all_pileup_processing: Finished sort for bed results.\n"""'], {}), "('run_all_pileup_processing: Finished sort for bed results.\\n')\n", (38306, 38369), False, 'import logging\n'), ((40264, 40281), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (40278, 40281), False, 'import os\n'), ((42671, 42867), 'pandas.read_csv', 'pd.read_csv', (['bed'], {'sep': '"""\t"""', 'header': 'None', 'names': "['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage',\n 'modified_bases', 'unmodified_bases', 'mod_score', 'unmod_score']"}), "(bed, sep='\\t', header=None, names=['chromosome', 'start',\n 'stop', 'mod_probability', 'haplotype', 'coverage', 'modified_bases',\n 'unmodified_bases', 'mod_score', 'unmod_score'])\n", (42682, 42867), True, 'import pandas as pd\n'), ((43953, 43977), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fh', '"""fasta"""'], {}), "(fh, 'fasta')\n", (43964, 43977), False, 'from Bio import SeqIO\n'), ((45071, 45162), 'logging.error', 'logging.error', (['"""No reference contig names match between bed file and reference fasta!"""'], {}), "(\n 'No reference contig names match between bed file and reference fasta!')\n", (45084, 45162), False, 'import logging\n'), ((26174, 26216), 'numpy.histogram', 'np.histogram', (['probs'], {'bins': '(20)', 'range': '[0, 1]'}), '(probs, bins=20, range=[0, 1])\n', (26186, 26216), True, 'import numpy as np\n'), ((28916, 28936), 'numpy.stack', 'np.stack', (['normhistos'], {}), '(normhistos)\n', (28924, 28936), True, 'import numpy as np\n'), ((31004, 31035), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (31021, 31035), False, 'import logging\n'), ((40295, 40307), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (40304, 40307), False, 'import os\n'), ((41321, 41337), 'os.remove', 'os.remove', (['inBed'], {}), '(inBed)\n', (41330, 41337), False, 'import os\n'), ((43099, 43279), 'pandas.read_csv', 'pd.read_csv', (['bed'], {'sep': '"""\t"""', 'header': 'None', 'names': "['chromosome', 'start', 'stop', 'mod_probability', 'haplotype', 'coverage',\n 'modified_bases', 'unmodified_bases', 'adj_prob']"}), "(bed, sep='\\t', header=None, names=['chromosome', 'start',\n 'stop', 
'mod_probability', 'haplotype', 'coverage', 'modified_bases',\n 'unmodified_bases', 'adj_prob'])\n", (43110, 43279), True, 'import pandas as pd\n'), ((44867, 44880), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (44877, 44880), False, 'from operator import itemgetter\n'), ((47175, 47252), 'logging.error', 'logging.error', (['"""Must supply a model to use when running model-based scoring!"""'], {}), "('Must supply a model to use when running model-based scoring!')\n", (47188, 47252), False, 'import logging\n'), ((9548, 9576), 're.finditer', 're.finditer', (['base', 'query_seq'], {}), '(base, query_seq)\n', (9559, 9576), False, 'import re\n'), ((38268, 38284), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (38278, 38284), False, 'from operator import itemgetter\n'), ((41280, 41294), 'os.stat', 'os.stat', (['inBed'], {}), '(inBed)\n', (41287, 41294), False, 'import os\n'), ((41787, 41804), 'os.remove', 'os.remove', (['covBed'], {}), '(covBed)\n', (41796, 41804), False, 'import os\n'), ((47379, 47408), 'os.path.isdir', 'os.path.isdir', (['args.model_dir'], {}), '(args.model_dir)\n', (47392, 47408), False, 'import os\n'), ((36198, 36209), 'os.getpid', 'os.getpid', ([], {}), '()\n', (36207, 36209), False, 'import os\n'), ((41741, 41756), 'os.stat', 'os.stat', (['covBed'], {}), '(covBed)\n', (41748, 41756), False, 'import os\n'), ((9806, 9820), 'Bio.Seq.Seq', 'Seq', (['query_seq'], {}), '(query_seq)\n', (9809, 9820), False, 'from Bio.Seq import Seq\n'), ((19141, 19151), 'collections.Counter', 'Counter', (['v'], {}), '(v)\n', (19148, 19151), False, 'from collections import Counter\n')]
|
import numpy as np
def get_monthly_rate(rate) -> float:
"""
    computes the monthly interest rate based on the yearly interest rate
:param float rate: the yearly interest rate
:return: the monthly interest rate
    This computation uses the 12th root of the growth factor
"""
growth_year = rate + 1
growth_month = np.power(growth_year, 1./12)
rate_month = growth_month - 1
return rate_month
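# Quick check of the 12th-root conversion above (rate value assumed):
# get_monthly_rate(0.12) ~= 0.00949, i.e. 1.12 ** (1 / 12) - 1, slightly below the
# naive 0.12 / 12 = 0.01 because compounding within the year is taken into account.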
|
[
"numpy.power"
] |
[((343, 374), 'numpy.power', 'np.power', (['growth_year', '(1.0 / 12)'], {}), '(growth_year, 1.0 / 12)\n', (351, 374), True, 'import numpy as np\n')]
|
import numpy as np, pyemma as py
# from msmbuilder.decomposition.tica import tICA
from sklearn.kernel_approximation import Nystroem
class Kernel_tica(object):
def __init__(self, n_components, lag_time,
gamma, # gamma value for rbf kernel
n_components_nystroem=100, # number of components for Nystroem kernel approximation
landmarks = None,
shrinkage = None,
weights='empirical' # if 'koopman', use Koopman reweighting for tICA (see Wu, Hao, et al. "Variational Koopman models: slow collective variables and molecular kinetics from short off-equilibrium simulations." The Journal of Chemical Physics 146.15 (2017): 154104.)
):
self._n_components = n_components
self._lag_time = lag_time
self._n_components_nystroem = n_components_nystroem
self._landmarks = landmarks
self._gamma = gamma
self._nystroem = Nystroem(gamma=gamma, n_components=n_components_nystroem)
self._weights = weights
# self._tica = tICA(n_components=n_components, lag_time=lag_time, shrinkage=shrinkage)
self._shrinkage = shrinkage
return
def fit(self, sequence_list):
if self._landmarks is None:
self._nystroem.fit(np.concatenate(sequence_list))
else:
print("using landmarks")
self._nystroem.fit(self._landmarks)
sequence_transformed = [self._nystroem.transform(item) for item in sequence_list]
# define tica object at fit() with sequence_list supplied for initialization, as it is required by
# Koopman reweighting
self._tica = py.coordinates.tica(sequence_transformed, lag=self._lag_time,
dim=self._n_components, kinetic_map=True,
weights=self._weights)
return
def transform(self, sequence_list):
return self._tica.transform(
[self._nystroem.transform(item) for item in sequence_list])
def fit_transform(self, sequence_list):
self.fit(sequence_list)
return self.transform(sequence_list)
def score(self, sequence_list):
model = self.__class__(n_components = self._n_components, lag_time=self._lag_time, gamma=self._gamma,
n_components_nystroem=self._n_components_nystroem, landmarks=self._landmarks,
shrinkage=self._shrinkage)
model.fit(sequence_list)
return np.sum(model._tica.eigenvalues)
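# Hedged usage sketch for the class above (array shapes and values assumed, relying
# on the scikit-learn and pyemma behaviour already used in the methods):
#   trajs = [np.random.rand(1000, 3), np.random.rand(800, 3)]
#   ktica = Kernel_tica(n_components=2, lag_time=10, gamma=0.5)
#   projected = ktica.fit_transform(trajs)  # per-trajectory arrays with 2 tICA dims
# Each frame is first mapped into a 100-dimensional Nystroem feature space, and the
# tICA projection is computed on those features.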
|
[
"sklearn.kernel_approximation.Nystroem",
"pyemma.coordinates.tica",
"numpy.sum",
"numpy.concatenate"
] |
[((975, 1032), 'sklearn.kernel_approximation.Nystroem', 'Nystroem', ([], {'gamma': 'gamma', 'n_components': 'n_components_nystroem'}), '(gamma=gamma, n_components=n_components_nystroem)\n', (983, 1032), False, 'from sklearn.kernel_approximation import Nystroem\n'), ((1691, 1822), 'pyemma.coordinates.tica', 'py.coordinates.tica', (['sequence_transformed'], {'lag': 'self._lag_time', 'dim': 'self._n_components', 'kinetic_map': '(True)', 'weights': 'self._weights'}), '(sequence_transformed, lag=self._lag_time, dim=self.\n _n_components, kinetic_map=True, weights=self._weights)\n', (1710, 1822), True, 'import numpy as np, pyemma as py\n'), ((2549, 2580), 'numpy.sum', 'np.sum', (['model._tica.eigenvalues'], {}), '(model._tica.eigenvalues)\n', (2555, 2580), True, 'import numpy as np, pyemma as py\n'), ((1313, 1342), 'numpy.concatenate', 'np.concatenate', (['sequence_list'], {}), '(sequence_list)\n', (1327, 1342), True, 'import numpy as np, pyemma as py\n')]
|
import sys
sys.path.append('../src/')
import os
import numpy as np
from mask_rcnn.mrcnn import utils
import mask_rcnn.mrcnn.model as modellib
from mask_rcnn.samples.coco import coco
import cv2
import argparse as ap
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
def get_mask_rcnn(model, image, COCO_MODEL_PATH):
# Run detection
results = model.detect([image], verbose=1)
r = results[0]
idx = np.where(r['class_ids'] != 0) #select non-background
boxes = r['rois'][idx]
scores = r['scores'][idx]
classes = r['class_ids'][idx]
#score threshold = 0.7
idxs = np.where(scores > 0.7)
boxes = boxes[idxs]
people_scores = scores[idxs]
classes = classes[idxs]
return boxes, scores, classes
def run(read_direc, save_direc, model, COCO_MODEL_PATH, class_names, save_image=False):
if os.path.exists('./processed_images_mask.txt'):
with open('./processed_images_mask.txt', 'r') as f:
processed_files = f.readlines()
else:
processed_files = []
print('Started:', save_direc, read_direc)
if not os.path.exists(save_direc+'/'):
os.mkdir(save_direc+'/')
if save_image:
if not os.path.exists(save_direc+'/images_mask/'):
os.mkdir(save_direc + '/images_mask/')
i=0
for fi in os.listdir(read_direc):
if fi + '\n' in processed_files:
print('Skipping ', fi)
continue
image = cv2.imread(read_direc +fi)
#histogram equalization
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
image[:,:,2] = cv2.equalizeHist(image[:,:,2])
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
i = i+1
if i % 1000 == 0:
print('Processed ' + str(i) + 'images')
scaler_y = np.shape(image)[0]/960
scaler_x = np.shape(image)[1]/540
image1 = cv2.resize(image, (540, 960))
mask_boxes, mask_scores, mask_classes = get_mask_rcnn(model, image1, COCO_MODEL_PATH)
for bbox, score, classid in zip(mask_boxes, mask_scores, mask_classes):
bbox[1] = int(bbox[1])*scaler_x
bbox[0] = int(bbox[0])*scaler_y
bbox[3] = int(bbox[3])*scaler_x
bbox[2] = int(bbox[2])*scaler_y
with open(save_direc+'/groundtruth_boxes_mask.txt', 'a') as f:
f.write(str(fi) + ' ' + str(bbox[1])+ ' ' + str(bbox[0]) + ' ' + str(bbox[3]) + ' ' + str(bbox[2]) + ' ' + str(score) + ' ' + class_names[classid] + '\n')
if save_image:
cv2.rectangle(image, (int(bbox[1]+1), int(bbox[0]+1)), (int(bbox[3]+1), int(bbox[2]+1)), (0,255,0), 3)
cv2.putText(image, class_names[classid], (round(float(bbox[1])), round(float(bbox[0]))), cv2.FONT_HERSHEY_SIMPLEX, 4,(0,0,255),10,cv2.LINE_AA)
with open('./processed_images_mask.txt', 'a') as f:
f.write(fi + '\n')
if save_image:
cv2.imwrite(save_direc+'/images_mask/' + str(i) + '.jpg', image)
if __name__ == '__main__':
parser = ap.ArgumentParser()
parser.add_argument('-r', "--readdir", help="Directory with images")
parser.add_argument('-s', "--savedir", help="Directory for saving the detection results")
parser.add_argument('-i', "--saveimage", action='store_true', help="Save image with predicted bounding box or not")
args = vars(parser.parse_args())
read_direc = args['readdir']
save_direc = args['savedir']
COCO_MODEL_PATH = "../src/models/mask_rcnn_coco.h5"
MODEL_DIR = os.path.join('mask_rcnn/', "logs")
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'trafficlight',
'fire hydrant', 'stop sign', 'parkingmeter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sportsball',
'kite', 'baseballbat', 'baseballglove', 'skateboard',
'surfboard', 'tennisracket', 'bottle', 'wineglass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hotdog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'pottedplant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cellphone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddybear', 'hairdrier', 'toothbrush']
config = InferenceConfig()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
run(read_direc, save_direc, model, COCO_MODEL_PATH, class_names, args['saveimage'])
print('Finished')
|
[
"sys.path.append",
"os.mkdir",
"cv2.equalizeHist",
"argparse.ArgumentParser",
"cv2.cvtColor",
"os.path.exists",
"numpy.shape",
"cv2.imread",
"numpy.where",
"mask_rcnn.mrcnn.model.MaskRCNN",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((11, 37), 'sys.path.append', 'sys.path.append', (['"""../src/"""'], {}), "('../src/')\n", (26, 37), False, 'import sys\n'), ((591, 620), 'numpy.where', 'np.where', (["(r['class_ids'] != 0)"], {}), "(r['class_ids'] != 0)\n", (599, 620), True, 'import numpy as np\n'), ((831, 853), 'numpy.where', 'np.where', (['(scores > 0.7)'], {}), '(scores > 0.7)\n', (839, 853), True, 'import numpy as np\n'), ((1112, 1157), 'os.path.exists', 'os.path.exists', (['"""./processed_images_mask.txt"""'], {}), "('./processed_images_mask.txt')\n", (1126, 1157), False, 'import os\n'), ((1605, 1627), 'os.listdir', 'os.listdir', (['read_direc'], {}), '(read_direc)\n', (1615, 1627), False, 'import os\n'), ((3511, 3530), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {}), '()\n', (3528, 3530), True, 'import argparse as ap\n'), ((4013, 4047), 'os.path.join', 'os.path.join', (['"""mask_rcnn/"""', '"""logs"""'], {}), "('mask_rcnn/', 'logs')\n", (4025, 4047), False, 'import os\n'), ((5501, 5572), 'mask_rcnn.mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'model_dir': 'MODEL_DIR', 'config': 'config'}), "(mode='inference', model_dir=MODEL_DIR, config=config)\n", (5518, 5572), True, 'import mask_rcnn.mrcnn.model as modellib\n'), ((1389, 1421), 'os.path.exists', 'os.path.exists', (["(save_direc + '/')"], {}), "(save_direc + '/')\n", (1403, 1421), False, 'import os\n'), ((1429, 1455), 'os.mkdir', 'os.mkdir', (["(save_direc + '/')"], {}), "(save_direc + '/')\n", (1437, 1455), False, 'import os\n'), ((1779, 1806), 'cv2.imread', 'cv2.imread', (['(read_direc + fi)'], {}), '(read_direc + fi)\n', (1789, 1806), False, 'import cv2\n'), ((1871, 1909), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (1883, 1909), False, 'import cv2\n'), ((1933, 1965), 'cv2.equalizeHist', 'cv2.equalizeHist', (['image[:, :, 2]'], {}), '(image[:, :, 2])\n', (1949, 1965), False, 'import cv2\n'), ((1980, 2018), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_HSV2BGR'], {}), '(image, cv2.COLOR_HSV2BGR)\n', (1992, 2018), False, 'import cv2\n'), ((2214, 2243), 'cv2.resize', 'cv2.resize', (['image', '(540, 960)'], {}), '(image, (540, 960))\n', (2224, 2243), False, 'import cv2\n'), ((1488, 1532), 'os.path.exists', 'os.path.exists', (["(save_direc + '/images_mask/')"], {}), "(save_direc + '/images_mask/')\n", (1502, 1532), False, 'import os\n'), ((1544, 1582), 'os.mkdir', 'os.mkdir', (["(save_direc + '/images_mask/')"], {}), "(save_direc + '/images_mask/')\n", (1552, 1582), False, 'import os\n'), ((2132, 2147), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2140, 2147), True, 'import numpy as np\n'), ((2174, 2189), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (2182, 2189), True, 'import numpy as np\n')]
|
from abc import ABC, abstractmethod
from collections import OrderedDict
from functools import reduce
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gym
import matplotlib.pyplot as plt
class Params():
"""
    Policy that outputs its policy parameters directly, i.e. for direct optimization.
"""
def __init__(self, dim_in=7, dim_act=6):
self.dim_act = dim_act
self.init_params()
def init_params(self):
self.params = np.random.randn(self.dim_act)/3 - 1.75
self.num_params = self.dim_act
def forward(self, obs):
return self.get_params()
def get_params(self):
return self.params
def set_params(self, params):
assert params.shape == self.params.shape
self.params = params
def reset(self):
pass
if __name__ == "__main__":
# run tests
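    # Minimal smoke test added for illustration; it only exercises the Params API
    # defined above and makes no assumptions beyond it.
    p = Params(dim_act=6)
    assert p.get_params().shape == (6,)
    p.set_params(np.zeros(6))
    assert np.allclose(p.forward(None), np.zeros(6))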
print("OK")
|
[
"numpy.random.randn"
] |
[((517, 546), 'numpy.random.randn', 'np.random.randn', (['self.dim_act'], {}), '(self.dim_act)\n', (532, 546), True, 'import numpy as np\n')]
|
import socket
import nengo
import numpy as np
import pytest
from nengo.exceptions import SimulationError
from nengo_loihi.block import Axon, LoihiBlock, Synapse
from nengo_loihi.builder.builder import Model
from nengo_loihi.builder.discretize import discretize_model
from nengo_loihi.hardware import interface as hardware_interface
from nengo_loihi.hardware.allocators import Greedy
from nengo_loihi.hardware.builder import build_board
from nengo_loihi.hardware.nxsdk_shim import NxsdkBoard
class MockNxsdk:
def __init__(self):
self.__version__ = None
def test_error_on_old_version(monkeypatch):
mock = MockNxsdk()
mock.__version__ = "0.5.5"
monkeypatch.setattr(hardware_interface, "nxsdk", mock)
with pytest.raises(ImportError, match="nxsdk"):
hardware_interface.HardwareInterface.check_nxsdk_version()
def test_no_warn_on_current_version(monkeypatch):
mock = MockNxsdk()
mock.__version__ = str(hardware_interface.HardwareInterface.max_nxsdk_version)
monkeypatch.setattr(hardware_interface, "nxsdk", mock)
monkeypatch.setattr(hardware_interface, "assert_nxsdk", lambda: True)
with pytest.warns(None) as record:
hardware_interface.HardwareInterface.check_nxsdk_version()
assert len(record) == 0
def test_warn_on_future_version(monkeypatch):
mock = MockNxsdk()
mock.__version__ = "100.0.0"
monkeypatch.setattr(hardware_interface, "nxsdk", mock)
monkeypatch.setattr(hardware_interface, "assert_nxsdk", lambda: True)
with pytest.warns(UserWarning):
hardware_interface.HardwareInterface.check_nxsdk_version()
def test_builder_poptype_errors():
pytest.importorskip("nxsdk")
# Test error in build_synapse
model = Model()
block = LoihiBlock(1)
block.compartment.configure_lif()
model.add_block(block)
synapse = Synapse(1)
synapse.set_weights([[1]])
synapse.pop_type = 8
block.add_synapse(synapse)
discretize_model(model)
allocator = Greedy() # one core per ensemble
board = allocator(model, n_chips=1)
with pytest.raises(ValueError, match="unrecognized pop_type"):
build_board(board)
# Test error in build_axon
model = Model()
block0 = LoihiBlock(1)
block0.compartment.configure_lif()
model.add_block(block0)
block1 = LoihiBlock(1)
block1.compartment.configure_lif()
model.add_block(block1)
axon = Axon(1)
block0.add_axon(axon)
synapse = Synapse(1)
synapse.set_weights([[1]])
synapse.pop_type = 8
axon.target = synapse
block1.add_synapse(synapse)
discretize_model(model)
board = allocator(model, n_chips=1)
with pytest.raises(ValueError, match="unrecognized pop_type"):
build_board(board)
def test_host_snip_recv_bytes():
host_snip = hardware_interface.HostSnip(None)
# We bypass the host_snip.connect method and connect manually
host_address = "127.0.0.1" # Standard loopback interface address
# Configure socket to send data to itself
host_snip.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host_snip.socket.bind((host_address, host_snip.port))
host_snip.socket.connect((host_address, host_snip.port))
# Generate random data to send
data = np.random.randint(0, 8192, size=1100, dtype=np.int32)
# Correctly receive data in two chunks
# Note that chunks are 4096 bytes at the smallest (HostSnip.recv_size)
host_snip.send_all(data)
received = host_snip.recv_bytes(1024 * 4)
assert np.all(received == data[:1024])
rest = 1100 - 1024
received = host_snip.recv_bytes(rest * 4)
assert np.all(received == data[-rest:])
# Send too little data
host_snip.send_all(data)
with pytest.raises(RuntimeError, match="less than expected"):
host_snip.recv_bytes(1536 * 4)
# Send shutdown signal at the end
data[-1] = -1
host_snip.send_all(data)
with pytest.raises(RuntimeError, match="shutdown signal from chip"):
host_snip.recv_bytes(1100 * 4)
# Too little data with shutdown signal still raises too little data
host_snip.send_all(data)
with pytest.raises(RuntimeError, match="less than expected"):
host_snip.recv_bytes(2048 * 4)
@pytest.mark.target_loihi
def test_interface_connection_errors(Simulator, monkeypatch):
with nengo.Network() as net:
nengo.Ensemble(2, 1)
# test opening closed interface error
sim = Simulator(net)
interface = sim.sims["loihi"]
interface.close()
with pytest.raises(SimulationError, match="cannot be reopened"):
with interface:
pass
sim.close()
# test failed connection error
def start(*args, **kwargs):
raise Exception("Mock failure to connect")
monkeypatch.setattr(NxsdkBoard, "start", start)
with pytest.raises(SimulationError, match="Mock failure to connect"):
with Simulator(net):
pass
@pytest.mark.filterwarnings("ignore:Model is precomputable.")
@pytest.mark.target_loihi
def test_snip_input_count(Simulator, seed, plt):
with nengo.Network(seed=seed) as model:
a = nengo.Ensemble(100, 1)
for i in range(30):
stim = nengo.Node(0.5)
nengo.Connection(stim, a, synapse=None)
with Simulator(model, precompute=False) as sim:
with pytest.warns(UserWarning, match="Too many spikes"):
sim.run(0.01)
|
[
"nengo_loihi.builder.builder.Model",
"nengo_loihi.block.Axon",
"numpy.random.randint",
"nengo.Connection",
"nengo_loihi.hardware.builder.build_board",
"pytest.warns",
"nengo.Node",
"nengo_loihi.block.LoihiBlock",
"nengo_loihi.hardware.interface.HostSnip",
"pytest.raises",
"nengo.Network",
"nengo_loihi.hardware.allocators.Greedy",
"nengo.Ensemble",
"numpy.all",
"pytest.importorskip",
"nengo_loihi.block.Synapse",
"nengo_loihi.hardware.interface.HardwareInterface.check_nxsdk_version",
"pytest.mark.filterwarnings",
"nengo_loihi.builder.discretize.discretize_model"
] |
[((4928, 4988), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Model is precomputable."""'], {}), "('ignore:Model is precomputable.')\n", (4954, 4988), False, 'import pytest\n'), ((1655, 1683), 'pytest.importorskip', 'pytest.importorskip', (['"""nxsdk"""'], {}), "('nxsdk')\n", (1674, 1683), False, 'import pytest\n'), ((1731, 1738), 'nengo_loihi.builder.builder.Model', 'Model', ([], {}), '()\n', (1736, 1738), False, 'from nengo_loihi.builder.builder import Model\n'), ((1751, 1764), 'nengo_loihi.block.LoihiBlock', 'LoihiBlock', (['(1)'], {}), '(1)\n', (1761, 1764), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((1845, 1855), 'nengo_loihi.block.Synapse', 'Synapse', (['(1)'], {}), '(1)\n', (1852, 1855), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((1948, 1971), 'nengo_loihi.builder.discretize.discretize_model', 'discretize_model', (['model'], {}), '(model)\n', (1964, 1971), False, 'from nengo_loihi.builder.discretize import discretize_model\n'), ((1989, 1997), 'nengo_loihi.hardware.allocators.Greedy', 'Greedy', ([], {}), '()\n', (1995, 1997), False, 'from nengo_loihi.hardware.allocators import Greedy\n'), ((2202, 2209), 'nengo_loihi.builder.builder.Model', 'Model', ([], {}), '()\n', (2207, 2209), False, 'from nengo_loihi.builder.builder import Model\n'), ((2223, 2236), 'nengo_loihi.block.LoihiBlock', 'LoihiBlock', (['(1)'], {}), '(1)\n', (2233, 2236), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((2317, 2330), 'nengo_loihi.block.LoihiBlock', 'LoihiBlock', (['(1)'], {}), '(1)\n', (2327, 2330), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((2410, 2417), 'nengo_loihi.block.Axon', 'Axon', (['(1)'], {}), '(1)\n', (2414, 2417), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((2459, 2469), 'nengo_loihi.block.Synapse', 'Synapse', (['(1)'], {}), '(1)\n', (2466, 2469), False, 'from nengo_loihi.block import Axon, LoihiBlock, Synapse\n'), ((2589, 2612), 'nengo_loihi.builder.discretize.discretize_model', 'discretize_model', (['model'], {}), '(model)\n', (2605, 2612), False, 'from nengo_loihi.builder.discretize import discretize_model\n'), ((2800, 2833), 'nengo_loihi.hardware.interface.HostSnip', 'hardware_interface.HostSnip', (['None'], {}), '(None)\n', (2827, 2833), True, 'from nengo_loihi.hardware import interface as hardware_interface\n'), ((3259, 3312), 'numpy.random.randint', 'np.random.randint', (['(0)', '(8192)'], {'size': '(1100)', 'dtype': 'np.int32'}), '(0, 8192, size=1100, dtype=np.int32)\n', (3276, 3312), True, 'import numpy as np\n'), ((3518, 3549), 'numpy.all', 'np.all', (['(received == data[:1024])'], {}), '(received == data[:1024])\n', (3524, 3549), True, 'import numpy as np\n'), ((3630, 3662), 'numpy.all', 'np.all', (['(received == data[-rest:])'], {}), '(received == data[-rest:])\n', (3636, 3662), True, 'import numpy as np\n'), ((737, 778), 'pytest.raises', 'pytest.raises', (['ImportError'], {'match': '"""nxsdk"""'}), "(ImportError, match='nxsdk')\n", (750, 778), False, 'import pytest\n'), ((788, 846), 'nengo_loihi.hardware.interface.HardwareInterface.check_nxsdk_version', 'hardware_interface.HardwareInterface.check_nxsdk_version', ([], {}), '()\n', (844, 846), True, 'from nengo_loihi.hardware import interface as hardware_interface\n'), ((1148, 1166), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (1160, 1166), False, 'import pytest\n'), ((1186, 1244), 'nengo_loihi.hardware.interface.HardwareInterface.check_nxsdk_version', 
'hardware_interface.HardwareInterface.check_nxsdk_version', ([], {}), '()\n', (1242, 1244), True, 'from nengo_loihi.hardware import interface as hardware_interface\n'), ((1520, 1545), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (1532, 1545), False, 'import pytest\n'), ((1555, 1613), 'nengo_loihi.hardware.interface.HardwareInterface.check_nxsdk_version', 'hardware_interface.HardwareInterface.check_nxsdk_version', ([], {}), '()\n', (1611, 1613), True, 'from nengo_loihi.hardware import interface as hardware_interface\n'), ((2073, 2129), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unrecognized pop_type"""'}), "(ValueError, match='unrecognized pop_type')\n", (2086, 2129), False, 'import pytest\n'), ((2139, 2157), 'nengo_loihi.hardware.builder.build_board', 'build_board', (['board'], {}), '(board)\n', (2150, 2157), False, 'from nengo_loihi.hardware.builder import build_board\n'), ((2664, 2720), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unrecognized pop_type"""'}), "(ValueError, match='unrecognized pop_type')\n", (2677, 2720), False, 'import pytest\n'), ((2730, 2748), 'nengo_loihi.hardware.builder.build_board', 'build_board', (['board'], {}), '(board)\n', (2741, 2748), False, 'from nengo_loihi.hardware.builder import build_board\n'), ((3729, 3784), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""less than expected"""'}), "(RuntimeError, match='less than expected')\n", (3742, 3784), False, 'import pytest\n'), ((3920, 3982), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""shutdown signal from chip"""'}), "(RuntimeError, match='shutdown signal from chip')\n", (3933, 3982), False, 'import pytest\n'), ((4134, 4189), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""less than expected"""'}), "(RuntimeError, match='less than expected')\n", (4147, 4189), False, 'import pytest\n'), ((4329, 4344), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (4342, 4344), False, 'import nengo\n'), ((4361, 4381), 'nengo.Ensemble', 'nengo.Ensemble', (['(2)', '(1)'], {}), '(2, 1)\n', (4375, 4381), False, 'import nengo\n'), ((4515, 4573), 'pytest.raises', 'pytest.raises', (['SimulationError'], {'match': '"""cannot be reopened"""'}), "(SimulationError, match='cannot be reopened')\n", (4528, 4573), False, 'import pytest\n'), ((4814, 4877), 'pytest.raises', 'pytest.raises', (['SimulationError'], {'match': '"""Mock failure to connect"""'}), "(SimulationError, match='Mock failure to connect')\n", (4827, 4877), False, 'import pytest\n'), ((5073, 5097), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (5086, 5097), False, 'import nengo\n'), ((5120, 5142), 'nengo.Ensemble', 'nengo.Ensemble', (['(100)', '(1)'], {}), '(100, 1)\n', (5134, 5142), False, 'import nengo\n'), ((5190, 5205), 'nengo.Node', 'nengo.Node', (['(0.5)'], {}), '(0.5)\n', (5200, 5205), False, 'import nengo\n'), ((5218, 5257), 'nengo.Connection', 'nengo.Connection', (['stim', 'a'], {'synapse': 'None'}), '(stim, a, synapse=None)\n', (5234, 5257), False, 'import nengo\n'), ((5323, 5373), 'pytest.warns', 'pytest.warns', (['UserWarning'], {'match': '"""Too many spikes"""'}), "(UserWarning, match='Too many spikes')\n", (5335, 5373), False, 'import pytest\n')]
|
import os
import numpy as np
# from skimage.io import imread
import cv2
import copy
from skimage.transform import resize
def load_data_siamese(x_size,y_size,data_path,label_path,image_s_path,uncentain_path,validation_name,test_name):
    tmp = np.loadtxt(label_path, dtype=str, delimiter=",")
    # drop one entry because its jpg image is missing; 8252 is the row position and the +1 skips the header row
tmp = np.delete(tmp,8252+1, axis = 0)
ran = tmp[:,0]
lr = tmp[:,1]
tracking = tmp[:,2]
tmp1=tmp[:,3]
ran = ran[1:len(ran)]
lr = lr[1:len(lr)]
tracking = tracking[1:len(tracking)]
tmp1=tmp1[1:len(tmp1)]
    # generate ran and tracking numbers for images whose names end with '-s'
    tmp_s = np.loadtxt(image_s_path, dtype=str, delimiter=",")
ran_s = tmp_s[:,1]
tracking_s = tmp_s[:,2]
ran_s = ran_s[1:len(ran_s)]
tracking_s = tracking_s[1:len(tracking_s)]
    # generate ran and tracking numbers for images with uncertain labels
    tmp_un = np.loadtxt(uncentain_path, dtype=str, delimiter=",")
ran_un = tmp_un[:,0]
tracking_un = tmp_un[:,1]
ran_un = ran_un[1:len(ran_un)]
tracking_un = tracking_un[1:len(tracking_un)]
# x_size = 331
# y_size = 331
val_images1 = np.ndarray((len(validation_name)*20, x_size, y_size,3))
val_images2 = np.ndarray((len(validation_name)*20, x_size, y_size,3))
# val_images = []
val_labels = []
le = 0
for i in range(len(validation_name)):
ind = np.argwhere(ran==validation_name[i][0])
kk = 0
for j in range(len(ind)):
if lr[int(ind[j])] == validation_name[i][1]:
data_paths = os.path.join(data_path, (ran[int(ind[j])] + '-'+ tracking[int(ind[j])] + '.jpg'))
IM = cv2.imread(data_paths)
if kk == 0:
val_images_base = cv2.resize(IM, (x_size, y_size))
gt = tmp1[int(ind[j])]
kk =1
else:
val_images1[le] = val_images_base
val_images2[le] = cv2.resize(IM, (x_size, y_size))
le += 1
if gt == tmp1[int(ind[j])]:
val_labels = np.append(val_labels,1)
else:
val_labels = np.append(val_labels,0)
# # take the second image as the ground truth
# val_labels = np.append(val_labels,tmp1[int(ind[j])])
# continue
val_images1 = val_images1[0:le,:,:,:]
val_images2 = val_images2[0:le,:,:,:]
val_images = [val_images1,val_images2]
test_images1 = np.ndarray((len(test_name)*20, x_size, y_size,3))
test_images2 = np.ndarray((len(test_name)*20, x_size, y_size,3))
#test_images = []
test_labels = []
le = 0
ind_start = []
ll_index = 0
for i in range(len(test_name)):
ind = np.argwhere(ran==test_name[i][0])
kk = 0
for j in range(len(ind)):
if lr[int(ind[j])] == test_name[i][1]:
data_paths = os.path.join(data_path, (ran[int(ind[j])] + '-'+ tracking[int(ind[j])] + '.jpg'))
IM = cv2.imread(data_paths)
if kk ==0:
test_images_base = cv2.resize(IM, (x_size, y_size))
gt = tmp1[int(ind[j])]
kk = 1
ind_start = np.append(ind_start,ll_index)
else:
test_images1[le] = test_images_base
test_images2[le] = cv2.resize(IM, (x_size, y_size))
le += 1
if gt == tmp1[int(ind[j])]:
test_labels = np.append(test_labels,1)
else:
test_labels = np.append(test_labels,0)
ll_index += 1
# # take the second image as the ground truth
# test_labels = np.append(test_labels,tmp1[int(ind[j])])
# continue
test_images1 = test_images1[0:le,:,:,:]
test_images2 = test_images2[0:le,:,:,:]
test_images =[test_images1,test_images2]
# test_images_s = np.ndarray((len(test_name)*10, x_size, y_size,3))
# #test_images = []
# test_labels_s = []
# le = 0
# for i in range(len(test_name)):
# ind = np.argwhere(ran==test_name[i][0])
# ind_s = np.argwhere(ran_s==test_name[i][0])
# for j in range(len(ind)):
# if lr[int(ind[j])] == test_name[i][1] and len(np.argwhere(tracking_s[ind_s]==tracking[int(ind[j])])) != 0:
# data_paths = os.path.join(data_path, (ran[int(ind[j])] + '-'+ tracking[int(ind[j])] + '.jpg'))
# IM = cv2.imread(data_paths)
# test_images_s[le] = cv2.resize(IM, (x_size, y_size))
# # test_images_s[le] = resize(IM, (x_size, y_size, 3))
# # test_images_s[le] = IM
# #test_images = np.append(test_images,IM)
# le += 1
# test_labels_s = np.append(test_labels_s,tmp1[int(ind[j])])
# # continue
# test_images_s = test_images_s[0:le,:,:,:]
# test_images_un = np.ndarray((len(test_name)*10, x_size, y_size,3))
# #test_images = []
# test_labels_un = []
# le = 0
# for i in range(len(test_name)):
# ind = np.argwhere(ran==test_name[i][0])
# ind_un = np.argwhere(ran_un==test_name[i][0])
# for j in range(len(ind)):
# if lr[int(ind[j])] == test_name[i][1] and len(np.argwhere(tracking_un[ind_un]==tracking[int(ind[j])])) != 0:
# data_paths = os.path.join(data_path, (ran[int(ind[j])] + '-'+ tracking[int(ind[j])] + '.jpg'))
# IM = cv2.imread(data_paths)
# test_images_un[le] = cv2.resize(IM, (x_size, y_size))
# # test_images_un[le] = resize(IM, (x_size, y_size, 3))
# # test_images_un[le] = IM
# #test_images = np.append(test_images,IM)
# le += 1
# test_labels_un = np.append(test_labels_un,tmp1[int(ind[j])])
# # continue
# test_images_un = test_images_un[0:le,:,:,:]
# return val_labels, test_labels
# return val_images,val_labels, test_images,test_labels, test_images_s, test_labels_s, test_images_un, test_labels_un
return val_images,val_labels, test_images,test_labels,ind_start
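# Illustrative call sketch (added; the sizes, file paths and id/side tuples below are
# placeholders, not values from the original project):
#
#   val_images, val_labels, test_images, test_labels, ind_start = load_data_siamese(
#       x_size=331, y_size=331,
#       data_path='images/', label_path='labels.csv',
#       image_s_path='images_s.csv', uncentain_path='uncertain.csv',
#       validation_name=[('ran01', 'L')], test_name=[('ran02', 'R')])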
|
[
"cv2.imread",
"numpy.append",
"numpy.loadtxt",
"numpy.argwhere",
"numpy.delete",
"cv2.resize"
] |
[((245, 296), 'numpy.loadtxt', 'np.loadtxt', (['label_path'], {'dtype': 'np.str', 'delimiter': '""","""'}), "(label_path, dtype=np.str, delimiter=',')\n", (255, 296), True, 'import numpy as np\n'), ((431, 463), 'numpy.delete', 'np.delete', (['tmp', '(8252 + 1)'], {'axis': '(0)'}), '(tmp, 8252 + 1, axis=0)\n', (440, 463), True, 'import numpy as np\n'), ((739, 792), 'numpy.loadtxt', 'np.loadtxt', (['image_s_path'], {'dtype': 'np.str', 'delimiter': '""","""'}), "(image_s_path, dtype=np.str, delimiter=',')\n", (749, 792), True, 'import numpy as np\n'), ((1010, 1065), 'numpy.loadtxt', 'np.loadtxt', (['uncentain_path'], {'dtype': 'np.str', 'delimiter': '""","""'}), "(uncentain_path, dtype=np.str, delimiter=',')\n", (1020, 1065), True, 'import numpy as np\n'), ((1533, 1574), 'numpy.argwhere', 'np.argwhere', (['(ran == validation_name[i][0])'], {}), '(ran == validation_name[i][0])\n', (1544, 1574), True, 'import numpy as np\n'), ((2990, 3025), 'numpy.argwhere', 'np.argwhere', (['(ran == test_name[i][0])'], {}), '(ran == test_name[i][0])\n', (3001, 3025), True, 'import numpy as np\n'), ((1811, 1833), 'cv2.imread', 'cv2.imread', (['data_paths'], {}), '(data_paths)\n', (1821, 1833), False, 'import cv2\n'), ((3256, 3278), 'cv2.imread', 'cv2.imread', (['data_paths'], {}), '(data_paths)\n', (3266, 3278), False, 'import cv2\n'), ((1900, 1932), 'cv2.resize', 'cv2.resize', (['IM', '(x_size, y_size)'], {}), '(IM, (x_size, y_size))\n', (1910, 1932), False, 'import cv2\n'), ((2116, 2148), 'cv2.resize', 'cv2.resize', (['IM', '(x_size, y_size)'], {}), '(IM, (x_size, y_size))\n', (2126, 2148), False, 'import cv2\n'), ((3345, 3377), 'cv2.resize', 'cv2.resize', (['IM', '(x_size, y_size)'], {}), '(IM, (x_size, y_size))\n', (3355, 3377), False, 'import cv2\n'), ((3480, 3510), 'numpy.append', 'np.append', (['ind_start', 'll_index'], {}), '(ind_start, ll_index)\n', (3489, 3510), True, 'import numpy as np\n'), ((3627, 3659), 'cv2.resize', 'cv2.resize', (['IM', '(x_size, y_size)'], {}), '(IM, (x_size, y_size))\n', (3637, 3659), False, 'import cv2\n'), ((2262, 2286), 'numpy.append', 'np.append', (['val_labels', '(1)'], {}), '(val_labels, 1)\n', (2271, 2286), True, 'import numpy as np\n'), ((2349, 2373), 'numpy.append', 'np.append', (['val_labels', '(0)'], {}), '(val_labels, 0)\n', (2358, 2373), True, 'import numpy as np\n'), ((3774, 3799), 'numpy.append', 'np.append', (['test_labels', '(1)'], {}), '(test_labels, 1)\n', (3783, 3799), True, 'import numpy as np\n'), ((3863, 3888), 'numpy.append', 'np.append', (['test_labels', '(0)'], {}), '(test_labels, 0)\n', (3872, 3888), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
import os,sys
import cv2, re
import numpy as np
try:
from pyutil import PyLogger
except ImportError:
from .. import PyLogger
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
SRC_TYPE_NAME = ["WebCam","Video","IPCam"]
OUTPUT_VIDEO_NAME = "source{}.avi"
SAVE_FORMAT = 'XVID'
DEFAULT_FPS = 20
class VideoController():
def __init__(self, video_src, video_ratio=1, record_prefix="", record_name="", isRecord=False, log=False, debug=False):
# init logger
self.__logger = PyLogger(log=log,debug=debug)
self.__vid_caps = list()
self.__vid_writers = list()
self.__record_path = os.path.join(record_prefix,record_name) if record_name != "" else os.path.join(record_prefix,OUTPUT_VIDEO_NAME)
self.__video_ratio = video_ratio
self.fps = DEFAULT_FPS
# create a VideoCapture for each src
for src in video_src:
self.__initVideoSource(src)
# init writer parameters
self.__fourcc = cv2.VideoWriter_fourcc(*SAVE_FORMAT)
        self.isRecord = isRecord  # keep the flag so setIsRecord() can compare against the previous state
        if isRecord:
            self.__initVideoWriter()
def __initVideoSource(self, src, camId=-1):
"""
Initialise video input source
Args:
src (object): video source used by Opencv, could be int or String
camId (int): if any cameraId was given
"""
if src is None or src == "":
return
sourceType = -1
# usb cam/web cam
if type(src) is int:
sourceType = 0
# search for ipcams
        elif re.search(r'(http)|(rtsp)|(https) & *', src, re.M|re.I):
sourceType = 2
# videos
else:
sourceType = 1
cap = cv2.VideoCapture(src)
if cap.isOpened():
if camId == -1:
camId = len(self.__vid_caps)
if len(self.__vid_caps) > 0:
cams = np.array(self.__vid_caps)[:,0]
if camId in cams:
camId = np.amax(cams) + 1
fps = int(cap.get(cv2.CAP_PROP_FPS))
self.__vid_caps.append([camId, sourceType, cap, src,fps])
self.__logger.info("Video Input Connected to {}".format(src))
else:
self.__logger.error("No {} Source Found From {}".format(SRC_TYPE_NAME[sourceType], src))
def __initVideoWriter(self):
"""
Initialise video writer
"""
for cap_info in self.__vid_caps:
cap = cap_info[2] # get cv2.cap object
fps = cap_info[4]
if fps == 0 or self.fps < fps:
fps = self.fps
self.__vid_writers.append([cap_info[0],cv2.VideoWriter(self.__record_path.format(cap_info[0]),
self.__fourcc,
fps,
(int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)/self.__video_ratio),int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)/self.__video_ratio)))])
def writeVideo(self, camId, frame):
"""
Write video to output
Args:
camId (int): if any cameraId was given
frame (np.array): video frame to be written
"""
if len(self.__vid_writers) > 0:
ids = np.array(self.__vid_writers)[:,0]
if frame is not None:
self.__vid_writers[np.where(ids == camId)[0][0]][1].write(frame)
def getFrame(self, camId):
"""
Return frame from video source
Args:
camId (int): camera ID
Returns:
**frame** (np.array) - current frame
"""
# Capture frame-by-frame
frame = None
try:
cap = self.__vid_caps[np.where(np.array(self.__vid_caps)[:,0]==camId)[0][0]][2]
if cap is not None:
ret, frame = cap.read()
frame = cv2.resize(frame, (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)/self.__video_ratio),int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)/self.__video_ratio)))
#frame = cv2.resize(frame, (420,240))
except cv2.error:
return None
return frame
def showFrame(self, frame, title="Video"):
"""
Using OpenCV to display the current frame
Title is important if need to display multi window
Args:
frame (np.array): frame given to be shown
title (string): display window title, associate frame and display window
"""
# Display the resulting frame
cv2.imshow(title,frame)
# This line is important to keep the video showing
if cv2.waitKey(1) & 0xFF == ord('q'):
            self.onClose()
def onClose(self):
for cap in self.__vid_caps:
cap[2].release()
for writer in self.__vid_writers:
writer[1].release()
cv2.destroyAllWindows()
def printVideoSrcInfo(self):
# header
self.__logger.info("{:5}|{:10}".format("CamID","Source"))
# body
for cap in self.__vid_caps:
src = cap[3]
if type(src) is int:
src = SRC_TYPE_NAME[0]+ " {}".format(src)
self.__logger.info("{:5}|{}".format(cap[0],src))
def getVideoSrcInfo(self):
"""
Return Camera Information
Returns:
* **cam_info** (numpy.array) - camera information (camId, src)
"""
if len(self.__vid_caps) <= 0:
return None
return np.array(self.__vid_caps)[:,[0,3]]
def drawInfo(self, frame, fps, color=(255,255,255), num_people=-1):
"""
Draw frame info
Args:
frame (numpy.array): input frame
fps (int): Frame per second
color (tuple): BGR color code
num_people (int): number of people detected
Returns:
* **frame** (numpy.array) - modified frame
"""
frame_size = frame.shape
cv2.putText(frame, "FPS:{}".format(fps), (20,frame_size[0]-20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
if num_people >= 0:
cv2.putText(frame, "Num.Person:{}".format(num_people), (frame_size[1]-150,frame_size[0]-20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
return frame
def setIsRecord(self, isRecord):
"""
Set is recorded video or not
Args:
isRecord (boolean): record video or not
"""
if isRecord and not self.isRecord:
self.__initVideoWriter()
self.isRecord = isRecord
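if __name__ == "__main__":
    # Illustrative usage sketch added for clarity; it is not part of the original
    # module and assumes a webcam is available at index 0.
    vc = VideoController(video_src=[0], isRecord=False)
    vc.printVideoSrcInfo()
    frame = vc.getFrame(0)
    if frame is not None:
        vc.showFrame(frame, title="Video")
    vc.onClose()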
|
[
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.amax",
"numpy.where",
"numpy.array",
"re.search",
"cv2.destroyAllWindows",
"os.path.join",
"pyutil.PyLogger"
] |
[((566, 596), 'pyutil.PyLogger', 'PyLogger', ([], {'log': 'log', 'debug': 'debug'}), '(log=log, debug=debug)\n', (574, 596), False, 'from pyutil import PyLogger\n'), ((987, 1023), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*SAVE_FORMAT'], {}), '(*SAVE_FORMAT)\n', (1009, 1023), False, 'import cv2, re\n'), ((1547, 1568), 'cv2.VideoCapture', 'cv2.VideoCapture', (['src'], {}), '(src)\n', (1563, 1568), False, 'import cv2, re\n'), ((3795, 3819), 'cv2.imshow', 'cv2.imshow', (['title', 'frame'], {}), '(title, frame)\n', (3805, 3819), False, 'import cv2, re\n'), ((4088, 4111), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4109, 4111), False, 'import cv2, re\n'), ((676, 716), 'os.path.join', 'os.path.join', (['record_prefix', 'record_name'], {}), '(record_prefix, record_name)\n', (688, 716), False, 'import os, sys\n'), ((742, 788), 'os.path.join', 'os.path.join', (['record_prefix', 'OUTPUT_VIDEO_NAME'], {}), '(record_prefix, OUTPUT_VIDEO_NAME)\n', (754, 788), False, 'import os, sys\n'), ((1426, 1482), 're.search', 're.search', (['"""(http)|(rstp)|(https) & *"""', 'src', '(re.M | re.I)'], {}), "('(http)|(rstp)|(https) & *', src, re.M | re.I)\n", (1435, 1482), False, 'import cv2, re\n'), ((3932, 3955), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3953, 3955), False, 'import cv2, re\n'), ((4594, 4619), 'numpy.array', 'np.array', (['self.__vid_caps'], {}), '(self.__vid_caps)\n', (4602, 4619), True, 'import numpy as np\n'), ((2763, 2791), 'numpy.array', 'np.array', (['self.__vid_writers'], {}), '(self.__vid_writers)\n', (2771, 2791), True, 'import numpy as np\n'), ((3877, 3891), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3888, 3891), False, 'import cv2, re\n'), ((1685, 1710), 'numpy.array', 'np.array', (['self.__vid_caps'], {}), '(self.__vid_caps)\n', (1693, 1710), True, 'import numpy as np\n'), ((1751, 1764), 'numpy.amax', 'np.amax', (['cams'], {}), '(cams)\n', (1758, 1764), True, 'import numpy as np\n'), ((2845, 2867), 'numpy.where', 'np.where', (['(ids == camId)'], {}), '(ids == camId)\n', (2853, 2867), True, 'import numpy as np\n'), ((3134, 3159), 'numpy.array', 'np.array', (['self.__vid_caps'], {}), '(self.__vid_caps)\n', (3142, 3159), True, 'import numpy as np\n')]
|
"""Example of count data sampled from negative-binomial distribution
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
from sklearn.model_selection import train_test_split
from xgboost_distribution import XGBDistribution
def generate_count_data(n_samples=10_000):
X = np.random.uniform(-2, 0, n_samples)
n = 66 * np.abs(np.cos(X))
p = 0.5 * np.abs(np.cos(X / 3))
y = np.random.negative_binomial(n=n, p=p, size=n_samples)
return X[..., np.newaxis], y
def predict_distribution(model, X, y):
"""Predict a distribution for a given X, and evaluate over y"""
distribution_func = {
"normal": getattr(stats, "norm").pdf,
"laplace": getattr(stats, "laplace").pdf,
"poisson": getattr(stats, "poisson").pmf,
"negative-binomial": getattr(stats, "nbinom").pmf,
}
preds = model.predict(X[..., np.newaxis])
dists = np.zeros(shape=(len(X), len(y)))
for ii, x in enumerate(X):
params = {field: param[ii] for (field, param) in zip(preds._fields, preds)}
dists[ii] = distribution_func[model.distribution](y, **params)
return dists
def create_distribution_heatmap(
model, x_range=(-2, 0), x_steps=100, y_range=(0, 100), normalize=True
):
xx = np.linspace(x_range[0], x_range[1], x_steps)
yy = np.linspace(y_range[0], y_range[1], y_range[1] - y_range[0] + 1)
ym, xm = np.meshgrid(xx, yy)
z = predict_distribution(model, xx, yy)
if normalize:
z = z / z.max(axis=0)
return ym, xm, z.transpose()
def main():
random_state = 10
np.random.seed(random_state)
X, y = generate_count_data(n_samples=10_000)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state)
model = XGBDistribution(
distribution="negative-binomial", # try changing the distribution here
natural_gradient=True,
max_depth=3,
n_estimators=500,
)
model.fit(
X_train,
y_train,
eval_set=[(X_test, y_test)],
early_stopping_rounds=10,
verbose=False,
)
xm, ym, z = create_distribution_heatmap(model)
fig, ax = plt.subplots(figsize=(9, 6))
ax.pcolormesh(
xm, ym, z, cmap="Oranges", vmin=0, vmax=1.608, alpha=1.0, shading="auto"
)
ax.scatter(X_test, y_test, s=0.75, alpha=0.25, c="k", label="data")
plt.show()
if __name__ == "__main__":
main()
|
[
"numpy.random.uniform",
"numpy.meshgrid",
"numpy.random.seed",
"matplotlib.pyplot.show",
"xgboost_distribution.XGBDistribution",
"numpy.random.negative_binomial",
"sklearn.model_selection.train_test_split",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.subplots"
] |
[((309, 344), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(0)', 'n_samples'], {}), '(-2, 0, n_samples)\n', (326, 344), True, 'import numpy as np\n'), ((421, 474), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': 'n', 'p': 'p', 'size': 'n_samples'}), '(n=n, p=p, size=n_samples)\n', (448, 474), True, 'import numpy as np\n'), ((1272, 1316), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', 'x_steps'], {}), '(x_range[0], x_range[1], x_steps)\n', (1283, 1316), True, 'import numpy as np\n'), ((1326, 1390), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]', '(y_range[1] - y_range[0] + 1)'], {}), '(y_range[0], y_range[1], y_range[1] - y_range[0] + 1)\n', (1337, 1390), True, 'import numpy as np\n'), ((1404, 1423), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (1415, 1423), True, 'import numpy as np\n'), ((1592, 1620), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (1606, 1620), True, 'import numpy as np\n'), ((1710, 1759), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': 'random_state'}), '(X, y, random_state=random_state)\n', (1726, 1759), False, 'from sklearn.model_selection import train_test_split\n'), ((1773, 1880), 'xgboost_distribution.XGBDistribution', 'XGBDistribution', ([], {'distribution': '"""negative-binomial"""', 'natural_gradient': '(True)', 'max_depth': '(3)', 'n_estimators': '(500)'}), "(distribution='negative-binomial', natural_gradient=True,\n max_depth=3, n_estimators=500)\n", (1788, 1880), False, 'from xgboost_distribution import XGBDistribution\n'), ((2170, 2198), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (2182, 2198), True, 'from matplotlib import pyplot as plt\n'), ((2381, 2391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2389, 2391), True, 'from matplotlib import pyplot as plt\n'), ((365, 374), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (371, 374), True, 'import numpy as np\n'), ((397, 410), 'numpy.cos', 'np.cos', (['(X / 3)'], {}), '(X / 3)\n', (403, 410), True, 'import numpy as np\n')]
|
import os
import numpy as np
import tensorflow as tf
from utils.recorder import RecorderTf2 as Recorder
class Base(tf.keras.Model):
def __init__(self, a_dim_or_list, action_type, base_dir):
super().__init__()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
self.device = "/gpu:0"
tf.config.experimental.set_memory_growth(physical_devices[0], True)
else:
self.device = "/cpu:0"
tf.keras.backend.set_floatx('float64')
self.cp_dir, self.log_dir, self.excel_dir = [os.path.join(base_dir, i) for i in ['model', 'log', 'excel']]
self.action_type = action_type
self.a_counts = int(np.array(a_dim_or_list).prod())
self.global_step = tf.Variable(0, name="global_step", trainable=False, dtype=tf.int64) # in TF 2.x must be tf.int64, because function set_step need args to be tf.int64.
self.episode = 0
def get_init_episode(self):
"""
        Get the initial training step; used to continue training from the last saved step.
"""
if os.path.exists(os.path.join(self.cp_dir, 'checkpoint')):
return int(tf.train.latest_checkpoint(self.cp_dir).split('-')[-1])
else:
return 0
def generate_recorder(self, logger2file, model=None):
"""
        Create the model/log/data directories and define the writer used to record training data.
"""
self.check_or_create(self.cp_dir, 'checkpoints')
self.check_or_create(self.log_dir, 'logs(summaries)')
self.check_or_create(self.excel_dir, 'excel')
self.recorder = Recorder(
cp_dir=self.cp_dir,
log_dir=self.log_dir,
excel_dir=self.excel_dir,
logger2file=logger2file,
model=model
)
def init_or_restore(self, base_dir):
"""
        Check whether a checkpoint and model exist in cp_dir; if so, restore them, otherwise initialize randomly.
"""
cp_dir = os.path.join(base_dir, 'model')
if os.path.exists(os.path.join(cp_dir, 'checkpoint')):
try:
self.recorder.checkpoint.restore(self.recorder.saver.latest_checkpoint)
except:
self.recorder.logger.error('restore model from checkpoint FAILED.')
else:
                self.recorder.logger.info('restore model from checkpoint SUCCESS.')
        else:
            self.recorder.logger.info('initialize model SUCCESS.')
def save_checkpoint(self, global_step):
"""
save the training model
"""
self.recorder.saver.save(checkpoint_number=global_step)
def writer_summary(self, global_step, **kargs):
"""
        Record the data to be shown in TensorBoard.
"""
tf.summary.experimental.set_step(global_step)
for i in [{'tag': 'MAIN/' + key, 'value': kargs[key]} for key in kargs]:
tf.summary.scalar(i['tag'], i['value'])
self.recorder.writer.flush()
def check_or_create(self, dicpath, name=''):
"""
        Check whether the directory exists; if not, create it.
"""
if not os.path.exists(dicpath):
os.makedirs(dicpath)
            print(f'create {name} directory:', dicpath)
def close(self):
"""
end training, and export the training model
"""
pass
def get_global_step(self):
"""
        Get the current training step.
"""
return self.global_step
def set_global_step(self, num):
"""
set the start training step.
"""
self.global_step = num
def update_target_net_weights(self, tge, src, ployak=None):
if ployak is None:
tf.group([r.assign(v) for r, v in zip(tge, src)])
else:
            tf.group([r.assign(ployak * v + (1 - ployak) * r) for r, v in zip(tge, src)])
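    # Note added for clarity: with ployak=None this is a hard copy (target <- source);
    # with a float rho in (0, 1) it is the soft/Polyak update
    # target <- rho * source + (1 - rho) * target,
    # e.g. self.update_target_net_weights(target_vars, online_vars, ployak=0.995).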
|
[
"os.path.join",
"tensorflow.summary.scalar",
"os.makedirs",
"utils.recorder.RecorderTf2",
"tensorflow.config.experimental.set_memory_growth",
"os.path.exists",
"tensorflow.summary.experimental.set_step",
"tensorflow.Variable",
"numpy.array",
"tensorflow.train.latest_checkpoint",
"tensorflow.keras.backend.set_floatx",
"tensorflow.config.experimental.list_physical_devices"
] |
[((252, 303), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (296, 303), True, 'import tensorflow as tf\n'), ((514, 552), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), "('float64')\n", (541, 552), True, 'import tensorflow as tf\n'), ((794, 861), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)', 'dtype': 'tf.int64'}), "(0, name='global_step', trainable=False, dtype=tf.int64)\n", (805, 861), True, 'import tensorflow as tf\n'), ((1661, 1779), 'utils.recorder.RecorderTf2', 'Recorder', ([], {'cp_dir': 'self.cp_dir', 'log_dir': 'self.log_dir', 'excel_dir': 'self.excel_dir', 'logger2file': 'logger2file', 'model': 'model'}), '(cp_dir=self.cp_dir, log_dir=self.log_dir, excel_dir=self.excel_dir,\n logger2file=logger2file, model=model)\n', (1669, 1779), True, 'from utils.recorder import RecorderTf2 as Recorder\n'), ((2038, 2069), 'os.path.join', 'os.path.join', (['base_dir', '"""model"""'], {}), "(base_dir, 'model')\n", (2050, 2069), False, 'import os\n'), ((2834, 2879), 'tensorflow.summary.experimental.set_step', 'tf.summary.experimental.set_step', (['global_step'], {}), '(global_step)\n', (2866, 2879), True, 'import tensorflow as tf\n'), ((389, 456), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (429, 456), True, 'import tensorflow as tf\n'), ((606, 631), 'os.path.join', 'os.path.join', (['base_dir', 'i'], {}), '(base_dir, i)\n', (618, 631), False, 'import os\n'), ((1140, 1179), 'os.path.join', 'os.path.join', (['self.cp_dir', '"""checkpoint"""'], {}), "(self.cp_dir, 'checkpoint')\n", (1152, 1179), False, 'import os\n'), ((2096, 2130), 'os.path.join', 'os.path.join', (['cp_dir', '"""checkpoint"""'], {}), "(cp_dir, 'checkpoint')\n", (2108, 2130), False, 'import os\n'), ((2973, 3012), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["i['tag']", "i['value']"], {}), "(i['tag'], i['value'])\n", (2990, 3012), True, 'import tensorflow as tf\n'), ((3205, 3228), 'os.path.exists', 'os.path.exists', (['dicpath'], {}), '(dicpath)\n', (3219, 3228), False, 'import os\n'), ((3242, 3262), 'os.makedirs', 'os.makedirs', (['dicpath'], {}), '(dicpath)\n', (3253, 3262), False, 'import os\n'), ((735, 758), 'numpy.array', 'np.array', (['a_dim_or_list'], {}), '(a_dim_or_list)\n', (743, 758), True, 'import numpy as np\n'), ((1205, 1244), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.cp_dir'], {}), '(self.cp_dir)\n', (1231, 1244), True, 'import tensorflow as tf\n')]
|
# Copyright 2019 the ProGraML authors.
#
# Contact <NAME> <<EMAIL>>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains TODO: one line summary.
TODO: Detailed explanation of the file.
"""
from typing import Any
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
import numpy as np
import sklearn.metrics
from labm8.py import app
FLAGS = app.FLAGS
app.DEFINE_string(
"batch_scores_averaging_method",
"weighted",
"Selects the averaging method to use when computing recall/precision/F1 "
"scores. See <https://scikit-learn.org/stable/modules/generated/sklearn"
".metrics.f1_score.html>",
)
class Data(NamedTuple):
"""The model data for a batch."""
graph_ids: List[int]
data: Any
# A flag used to mark that this batch is the end of an iterable sequences of
# batches.
end_of_batches: bool = False
@property
def graph_count(self) -> int:
return len(self.graph_ids)
def EmptyBatch() -> Data:
"""Construct an empty batch."""
return Data(graph_ids=[], data=None)
def EndOfBatches() -> Data:
"""Construct a 'end of batches' marker."""
return Data(graph_ids=[], data=None, end_of_batches=True)
class BatchIterator(NamedTuple):
"""A batch iterator"""
batches: Iterable[Data]
# The total number of graphs in all of the batches.
graph_count: int
class Results(NamedTuple):
"""The results of running a batch through a model.
Don't instantiate this tuple directly, use Results.Create().
"""
targets: np.array
predictions: np.array
# The number of model iterations to compute the final results. This is used
# by iterative models such as message passing networks.
iteration_count: int
# For iterative models, this indicates whether the state of the model at
# iteration_count had converged on a solution.
model_converged: bool
# The learning rate and loss of models, if applicable.
learning_rate: Optional[float]
loss: Optional[float]
# Batch-level average performance metrics.
accuracy: float
precision: float
recall: float
f1: float
@property
def has_learning_rate(self) -> bool:
return self.learning_rate is not None
@property
def has_loss(self) -> bool:
return self.loss is not None
@property
def target_count(self) -> int:
"""Get the number of targets in the batch.
For graph-level classifiers, this will be equal to Data.graph_count, else
it's equal to the batch node count.
"""
return self.targets.shape[1]
def __repr__(self) -> str:
return (
f"accuracy={self.accuracy:.2%}%, "
f"precision={self.precision:.3f}, "
f"recall={self.recall:.3f}, "
f"f1={self.f1:.3f}"
)
def __eq__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy == rhs.accuracy
def __gt__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy > rhs.accuracy
@classmethod
def Create(
cls,
targets: np.array,
predictions: np.array,
iteration_count: int = 1,
model_converged: bool = True,
learning_rate: Optional[float] = None,
loss: Optional[float] = None,
):
"""Construct a results instance from 1-hot targets and predictions.
This is the preferred means of construct a Results instance, which takes
care of evaluating all of the metrics for you. The behavior of metrics
calculation is dependent on the --batch_scores_averaging_method flag.
Args:
targets: An array of 1-hot target vectors with
shape (y_count, y_dimensionality), dtype int32.
predictions: An array of 1-hot prediction vectors with
shape (y_count, y_dimensionality), dtype int32.
iteration_count: For iterative models, the number of model iterations to
compute the final result.
model_converged: For iterative models, whether model converged.
learning_rate: The model learning rate, if applicable.
loss: The model loss, if applicable.
Returns:
A Results instance.
"""
if targets.shape != predictions.shape:
raise TypeError(
f"Expected model to produce targets with shape {targets.shape} but "
f"instead received predictions with shape {predictions.shape}"
)
y_dimensionality = targets.shape[1]
if y_dimensionality < 2:
raise TypeError(
f"Expected label dimensionality > 1, received {y_dimensionality}"
)
# Create dense arrays of shape (target_count).
true_y = np.argmax(targets, axis=1)
pred_y = np.argmax(predictions, axis=1)
# NOTE(github.com/ChrisCummins/ProGraML/issues/22): This assumes that
# labels use the values [0,...n).
labels = np.arange(y_dimensionality, dtype=np.int64)
return cls(
targets=targets,
predictions=predictions,
iteration_count=iteration_count,
model_converged=model_converged,
learning_rate=learning_rate,
loss=loss,
accuracy=sklearn.metrics.accuracy_score(true_y, pred_y),
precision=sklearn.metrics.precision_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
recall=sklearn.metrics.recall_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
f1=sklearn.metrics.f1_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
)
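# Illustrative usage sketch (added; assumes the labm8 FLAGS have been parsed so that
# --batch_scores_averaging_method holds its default value):
#
#   targets = np.array([[1, 0], [0, 1], [0, 1]], dtype=np.int32)
#   predictions = np.array([[1, 0], [0, 1], [1, 0]], dtype=np.int32)
#   results = Results.Create(targets=targets, predictions=predictions)
#   print(results)  # batch accuracy, precision, recall and f1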
class RollingResults:
"""Maintain weighted rolling averages across batches."""
def __init__(self):
self.weight_sum = 0
self.batch_count = 0
self.graph_count = 0
self.target_count = 0
self.weighted_iteration_count_sum = 0
self.weighted_model_converged_sum = 0
self.has_learning_rate = False
self.weighted_learning_rate_sum = 0
self.has_loss = False
self.weighted_loss_sum = 0
self.weighted_accuracy_sum = 0
self.weighted_precision_sum = 0
self.weighted_recall_sum = 0
self.weighted_f1_sum = 0
def Update(
self, data: Data, results: Results, weight: Optional[float] = None
) -> None:
"""Update the rolling results with a new batch.
Args:
data: The batch data used to produce the results.
results: The batch results to update the current state with.
weight: A weight to assign to weighted sums. E.g. to weight results
across all targets, use weight=results.target_count. To weight across
targets, use weight=batch.target_count. To weight across
graphs, use weight=batch.graph_count. By default, weight by target
count.
"""
if weight is None:
weight = results.target_count
self.weight_sum += weight
self.batch_count += 1
self.graph_count += data.graph_count
self.target_count += results.target_count
self.weighted_iteration_count_sum += results.iteration_count * weight
self.weighted_model_converged_sum += (
weight if results.model_converged else 0
)
if results.has_learning_rate:
self.has_learning_rate = True
self.weighted_learning_rate_sum += results.learning_rate * weight
if results.has_loss:
self.has_loss = True
self.weighted_loss_sum += results.loss * weight
self.weighted_accuracy_sum += results.accuracy * weight
self.weighted_precision_sum += results.precision * weight
self.weighted_recall_sum += results.recall * weight
self.weighted_f1_sum += results.f1 * weight
@property
def iteration_count(self) -> float:
return self.weighted_iteration_count_sum / max(self.weight_sum, 1)
@property
def model_converged(self) -> float:
return self.weighted_model_converged_sum / max(self.weight_sum, 1)
@property
def learning_rate(self) -> Optional[float]:
if self.has_learning_rate:
return self.weighted_learning_rate_sum / max(self.weight_sum, 1)
@property
def loss(self) -> Optional[float]:
if self.has_loss:
return self.weighted_loss_sum / max(self.weight_sum, 1)
@property
def accuracy(self) -> float:
return self.weighted_accuracy_sum / max(self.weight_sum, 1)
@property
def precision(self) -> float:
return self.weighted_precision_sum / max(self.weight_sum, 1)
@property
def recall(self) -> float:
return self.weighted_recall_sum / max(self.weight_sum, 1)
@property
def f1(self) -> float:
return self.weighted_f1_sum / max(self.weight_sum, 1)
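# Illustrative usage sketch (added): accumulate per-batch results into weighted
# rolling averages, weighting by target count (the default):
#
#   rolling = RollingResults()
#   for data, results in batch_results:  # hypothetical iterable of (Data, Results)
#     rolling.Update(data, results)
#   print(rolling.accuracy, rolling.f1)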
|
[
"numpy.arange",
"labm8.py.app.DEFINE_string",
"numpy.argmax"
] |
[((928, 1167), 'labm8.py.app.DEFINE_string', 'app.DEFINE_string', (['"""batch_scores_averaging_method"""', '"""weighted"""', '"""Selects the averaging method to use when computing recall/precision/F1 scores. See <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html>"""'], {}), "('batch_scores_averaging_method', 'weighted',\n 'Selects the averaging method to use when computing recall/precision/F1 scores. See <https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html>'\n )\n", (945, 1167), False, 'from labm8.py import app\n'), ((5005, 5031), 'numpy.argmax', 'np.argmax', (['targets'], {'axis': '(1)'}), '(targets, axis=1)\n', (5014, 5031), True, 'import numpy as np\n'), ((5045, 5075), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (5054, 5075), True, 'import numpy as np\n'), ((5202, 5245), 'numpy.arange', 'np.arange', (['y_dimensionality'], {'dtype': 'np.int64'}), '(y_dimensionality, dtype=np.int64)\n', (5211, 5245), True, 'import numpy as np\n')]
|
import collections
import os
import numpy as np
import tensorflow as tf
from pysc2.lib import actions
from tensorflow.contrib import layers
from tensorflow.contrib.layers.python.layers.optimizers import OPTIMIZER_SUMMARIES
from actorcritic.policy import FullyConvPolicy
from common.preprocess import ObsProcesser, FEATURE_KEYS, AgentInputTuple
from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs
def _get_placeholders(spatial_dim):
sd = spatial_dim
feature_list = [
(FEATURE_KEYS.minimap_numeric, tf.float32, [None, sd, sd, ObsProcesser.N_MINIMAP_CHANNELS]),
(FEATURE_KEYS.screen_numeric, tf.float32, [None, sd, sd, ObsProcesser.N_SCREEN_CHANNELS]),
(FEATURE_KEYS.screen_unit_type, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.is_spatial_action_available, tf.float32, [None]),
(FEATURE_KEYS.available_action_ids, tf.float32, [None, len(actions.FUNCTIONS)]),
(FEATURE_KEYS.selected_spatial_action, tf.int32, [None, 2]),
(FEATURE_KEYS.selected_action_id, tf.int32, [None]),
(FEATURE_KEYS.value_target, tf.float32, [None]),
(FEATURE_KEYS.player_relative_screen, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.player_relative_minimap, tf.int32, [None, sd, sd]),
(FEATURE_KEYS.advantage, tf.float32, [None])
]
return AgentInputTuple(
**{name: tf.placeholder(dtype, shape, name) for name, dtype, shape in feature_list}
)
class ACMode:
A2C = "a2c"
PPO = "ppo"
SelectedLogProbs = collections.namedtuple("SelectedLogProbs", ["action_id", "spatial", "total"])
class ActorCriticAgent:
_scalar_summary_key = "scalar_summaries"
def __init__(self,
sess: tf.Session,
summary_path: str,
all_summary_freq: int,
scalar_summary_freq: int,
spatial_dim: int,
mode: str,
clip_epsilon=0.2,
unit_type_emb_dim=4,
loss_value_weight=1.0,
entropy_weight_spatial=1e-6,
entropy_weight_action_id=1e-5,
max_gradient_norm=None,
optimiser="adam",
optimiser_pars: dict = None,
policy=FullyConvPolicy
):
"""
Actor-Critic Agent for learning pysc2-minigames
https://arxiv.org/pdf/1708.04782.pdf
https://github.com/deepmind/pysc2
Can use
- A2C https://blog.openai.com/baselines-acktr-a2c/ (synchronous version of A3C)
or
- PPO https://arxiv.org/pdf/1707.06347.pdf
:param summary_path: tensorflow summaries will be created here
        :param all_summary_freq: how often to save all summaries
        :param scalar_summary_freq: int, how often to save scalar summaries
:param spatial_dim: dimension for both minimap and screen
:param mode: a2c or ppo
:param clip_epsilon: epsilon for clipping the ratio in PPO (no effect in A2C)
:param loss_value_weight: value weight for a2c update
:param entropy_weight_spatial: spatial entropy weight for a2c update
:param entropy_weight_action_id: action selection entropy weight for a2c update
:param max_gradient_norm: global max norm for gradients, if None then not limited
:param optimiser: see valid choices below
:param optimiser_pars: optional parameters to pass in optimiser
:param policy: Policy class
"""
assert optimiser in ["adam", "rmsprop"]
assert mode in [ACMode.A2C, ACMode.PPO]
self.mode = mode
self.sess = sess
self.spatial_dim = spatial_dim
self.loss_value_weight = loss_value_weight
self.entropy_weight_spatial = entropy_weight_spatial
self.entropy_weight_action_id = entropy_weight_action_id
self.unit_type_emb_dim = unit_type_emb_dim
self.summary_path = summary_path
os.makedirs(summary_path, exist_ok=True)
self.summary_writer = tf.summary.FileWriter(summary_path)
self.all_summary_freq = all_summary_freq
self.scalar_summary_freq = scalar_summary_freq
self.train_step = 0
self.max_gradient_norm = max_gradient_norm
self.clip_epsilon = clip_epsilon
self.policy = policy
opt_class = tf.train.AdamOptimizer if optimiser == "adam" else tf.train.RMSPropOptimizer
if optimiser_pars is None:
pars = {
"adam": {
"learning_rate": 1e-4,
"epsilon": 5e-7
},
"rmsprop": {
"learning_rate": 2e-4
}
}[optimiser]
else:
pars = optimiser_pars
self.optimiser = opt_class(**pars)
def init(self):
self.sess.run(self.init_op)
if self.mode == ACMode.PPO:
self.update_theta()
def _get_select_action_probs(self, pi, selected_spatial_action_flat):
action_id = select_from_each_row(
pi.action_id_log_probs, self.placeholders.selected_action_id
)
spatial = select_from_each_row(
pi.spatial_action_log_probs, selected_spatial_action_flat
)
total = spatial + action_id
return SelectedLogProbs(action_id, spatial, total)
def _scalar_summary(self, name, tensor):
tf.summary.scalar(name, tensor,
collections=[tf.GraphKeys.SUMMARIES, self._scalar_summary_key])
def build_model(self):
self.placeholders = _get_placeholders(self.spatial_dim)
with tf.variable_scope("theta"):
theta = self.policy(self, trainable=True).build()
selected_spatial_action_flat = ravel_index_pairs(
self.placeholders.selected_spatial_action, self.spatial_dim
)
selected_log_probs = self._get_select_action_probs(theta, selected_spatial_action_flat)
        # the maximum avoids a 0 / 0 division, since this sum is used to normalise some means
sum_spatial_action_available = tf.maximum(
1e-10, tf.reduce_sum(self.placeholders.is_spatial_action_available)
)
neg_entropy_spatial = tf.reduce_sum(
theta.spatial_action_probs * theta.spatial_action_log_probs
) / sum_spatial_action_available
neg_entropy_action_id = tf.reduce_mean(tf.reduce_sum(
theta.action_id_probs * theta.action_id_log_probs, axis=1
))
if self.mode == ACMode.PPO:
# could also use stop_gradient and forget about the trainable
with tf.variable_scope("theta_old"):
theta_old = self.policy(self, trainable=False).build()
new_theta_var = tf.global_variables("theta/")
old_theta_var = tf.global_variables("theta_old/")
assert len(tf.trainable_variables("theta/")) == len(new_theta_var)
assert not tf.trainable_variables("theta_old/")
assert len(old_theta_var) == len(new_theta_var)
self.update_theta_op = [
tf.assign(t_old, t_new) for t_new, t_old in zip(new_theta_var, old_theta_var)
]
selected_log_probs_old = self._get_select_action_probs(
theta_old, selected_spatial_action_flat
)
ratio = tf.exp(selected_log_probs.total - selected_log_probs_old.total)
clipped_ratio = tf.clip_by_value(
ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon
)
l_clip = tf.minimum(
ratio * self.placeholders.advantage,
clipped_ratio * self.placeholders.advantage
)
self.sampled_action_id = weighted_random_sample(theta_old.action_id_probs)
self.sampled_spatial_action = weighted_random_sample(theta_old.spatial_action_probs)
self.value_estimate = theta_old.value_estimate
self._scalar_summary("action/ratio", tf.reduce_mean(clipped_ratio))
self._scalar_summary("action/ratio_is_clipped",
tf.reduce_mean(tf.to_float(tf.equal(ratio, clipped_ratio))))
policy_loss = -tf.reduce_mean(l_clip)
else:
self.sampled_action_id = weighted_random_sample(theta.action_id_probs)
self.sampled_spatial_action = weighted_random_sample(theta.spatial_action_probs)
self.value_estimate = theta.value_estimate
policy_loss = -tf.reduce_mean(selected_log_probs.total * self.placeholders.advantage)
value_loss = tf.losses.mean_squared_error(
self.placeholders.value_target, theta.value_estimate)
loss = (
policy_loss
+ value_loss * self.loss_value_weight
+ neg_entropy_spatial * self.entropy_weight_spatial
+ neg_entropy_action_id * self.entropy_weight_action_id
)
self.train_op = layers.optimize_loss(
loss=loss,
global_step=tf.train.get_global_step(),
optimizer=self.optimiser,
clip_gradients=self.max_gradient_norm,
summaries=OPTIMIZER_SUMMARIES,
learning_rate=None,
name="train_op"
)
self._scalar_summary("value/estimate", tf.reduce_mean(self.value_estimate))
self._scalar_summary("value/target", tf.reduce_mean(self.placeholders.value_target))
self._scalar_summary("action/is_spatial_action_available",
tf.reduce_mean(self.placeholders.is_spatial_action_available))
self._scalar_summary("action/selected_id_log_prob",
tf.reduce_mean(selected_log_probs.action_id))
self._scalar_summary("loss/policy", policy_loss)
self._scalar_summary("loss/value", value_loss)
self._scalar_summary("loss/neg_entropy_spatial", neg_entropy_spatial)
self._scalar_summary("loss/neg_entropy_action_id", neg_entropy_action_id)
self._scalar_summary("loss/total", loss)
self._scalar_summary("value/advantage", tf.reduce_mean(self.placeholders.advantage))
self._scalar_summary("action/selected_total_log_prob",
tf.reduce_mean(selected_log_probs.total))
self._scalar_summary("action/selected_spatial_log_prob",
tf.reduce_sum(selected_log_probs.spatial) / sum_spatial_action_available)
self.init_op = tf.global_variables_initializer()
self.saver = tf.train.Saver(max_to_keep=2)
self.all_summary_op = tf.summary.merge_all(tf.GraphKeys.SUMMARIES)
self.scalar_summary_op = tf.summary.merge(tf.get_collection(self._scalar_summary_key))
def _input_to_feed_dict(self, input_dict):
        # placeholders are created with explicit names, so "<key>:0" is the name of
        # the corresponding tensor and can be used directly as a feed_dict key
        return {k + ":0": v for k, v in input_dict.items()}
def step(self, obs):
feed_dict = self._input_to_feed_dict(obs)
action_id, spatial_action, value_estimate = self.sess.run(
[self.sampled_action_id, self.sampled_spatial_action, self.value_estimate],
feed_dict=feed_dict
)
spatial_action_2d = np.array(
np.unravel_index(spatial_action, (self.spatial_dim,) * 2)
).transpose()
return action_id, spatial_action_2d, value_estimate
def train(self, input_dict):
feed_dict = self._input_to_feed_dict(input_dict)
ops = [self.train_op]
write_all_summaries = (
(self.train_step % self.all_summary_freq == 0) and
self.summary_path is not None
)
write_scalar_summaries = (
(self.train_step % self.scalar_summary_freq == 0) and
self.summary_path is not None
)
if write_all_summaries:
ops.append(self.all_summary_op)
elif write_scalar_summaries:
ops.append(self.scalar_summary_op)
r = self.sess.run(ops, feed_dict)
if write_all_summaries or write_scalar_summaries:
self.summary_writer.add_summary(r[-1], global_step=self.train_step)
self.train_step += 1
def get_value(self, obs):
feed_dict = self._input_to_feed_dict(obs)
return self.sess.run(self.value_estimate, feed_dict=feed_dict)
def flush_summaries(self):
self.summary_writer.flush()
def save(self, path, step=None):
os.makedirs(path, exist_ok=True)
step = step or self.train_step
print("saving model to %s, step %d" % (path, step))
self.saver.save(self.sess, path + '/model.ckpt', global_step=step)
def load(self, path):
ckpt = tf.train.get_checkpoint_state(path)
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
self.train_step = int(ckpt.model_checkpoint_path.split('-')[-1])
print("loaded old model with train_step %d" % self.train_step)
self.train_step += 1
def update_theta(self):
if self.mode == ACMode.PPO:
self.sess.run(self.update_theta_op)
|
[
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"tensorflow.global_variables",
"tensorflow.assign",
"common.util.ravel_index_pairs",
"tensorflow.variable_scope",
"tensorflow.minimum",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.exp",
"tensorflow.summary.merge_all",
"tensorflow.train.get_checkpoint_state",
"tensorflow.train.get_global_step",
"tensorflow.equal",
"tensorflow.summary.scalar",
"tensorflow.losses.mean_squared_error",
"tensorflow.global_variables_initializer",
"tensorflow.train.Saver",
"common.util.weighted_random_sample",
"tensorflow.reduce_mean",
"os.makedirs",
"common.util.select_from_each_row",
"numpy.unravel_index",
"collections.namedtuple"
] |
[((1528, 1605), 'collections.namedtuple', 'collections.namedtuple', (['"""SelectedLogProbs"""', "['action_id', 'spatial', 'total']"], {}), "('SelectedLogProbs', ['action_id', 'spatial', 'total'])\n", (1550, 1605), False, 'import collections\n'), ((3882, 3922), 'os.makedirs', 'os.makedirs', (['summary_path'], {'exist_ok': '(True)'}), '(summary_path, exist_ok=True)\n', (3893, 3922), False, 'import os\n'), ((3953, 3988), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['summary_path'], {}), '(summary_path)\n', (3974, 3988), True, 'import tensorflow as tf\n'), ((4945, 5032), 'common.util.select_from_each_row', 'select_from_each_row', (['pi.action_id_log_probs', 'self.placeholders.selected_action_id'], {}), '(pi.action_id_log_probs, self.placeholders.\n selected_action_id)\n', (4965, 5032), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((5068, 5147), 'common.util.select_from_each_row', 'select_from_each_row', (['pi.spatial_action_log_probs', 'selected_spatial_action_flat'], {}), '(pi.spatial_action_log_probs, selected_spatial_action_flat)\n', (5088, 5147), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((5320, 5420), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['name', 'tensor'], {'collections': '[tf.GraphKeys.SUMMARIES, self._scalar_summary_key]'}), '(name, tensor, collections=[tf.GraphKeys.SUMMARIES, self.\n _scalar_summary_key])\n', (5337, 5420), True, 'import tensorflow as tf\n'), ((5664, 5742), 'common.util.ravel_index_pairs', 'ravel_index_pairs', (['self.placeholders.selected_spatial_action', 'self.spatial_dim'], {}), '(self.placeholders.selected_spatial_action, self.spatial_dim)\n', (5681, 5742), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((8475, 8562), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['self.placeholders.value_target', 'theta.value_estimate'], {}), '(self.placeholders.value_target, theta.\n value_estimate)\n', (8503, 8562), True, 'import tensorflow as tf\n'), ((10273, 10306), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10304, 10306), True, 'import tensorflow as tf\n'), ((10328, 10357), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(2)'}), '(max_to_keep=2)\n', (10342, 10357), True, 'import tensorflow as tf\n'), ((10388, 10432), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', (['tf.GraphKeys.SUMMARIES'], {}), '(tf.GraphKeys.SUMMARIES)\n', (10408, 10432), True, 'import tensorflow as tf\n'), ((12163, 12195), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (12174, 12195), False, 'import os\n'), ((12412, 12447), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['path'], {}), '(path)\n', (12441, 12447), True, 'import tensorflow as tf\n'), ((5534, 5560), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta"""'], {}), "('theta')\n", (5551, 5560), True, 'import tensorflow as tf\n'), ((6014, 6074), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.placeholders.is_spatial_action_available'], {}), '(self.placeholders.is_spatial_action_available)\n', (6027, 6074), True, 'import tensorflow as tf\n'), ((6116, 6190), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(theta.spatial_action_probs * theta.spatial_action_log_probs)'], {}), '(theta.spatial_action_probs * theta.spatial_action_log_probs)\n', (6129, 6190), True, 'import 
tensorflow as tf\n'), ((6291, 6363), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(theta.action_id_probs * theta.action_id_log_probs)'], {'axis': '(1)'}), '(theta.action_id_probs * theta.action_id_log_probs, axis=1)\n', (6304, 6363), True, 'import tensorflow as tf\n'), ((6647, 6676), 'tensorflow.global_variables', 'tf.global_variables', (['"""theta/"""'], {}), "('theta/')\n", (6666, 6676), True, 'import tensorflow as tf\n'), ((6705, 6738), 'tensorflow.global_variables', 'tf.global_variables', (['"""theta_old/"""'], {}), "('theta_old/')\n", (6724, 6738), True, 'import tensorflow as tf\n'), ((7244, 7307), 'tensorflow.exp', 'tf.exp', (['(selected_log_probs.total - selected_log_probs_old.total)'], {}), '(selected_log_probs.total - selected_log_probs_old.total)\n', (7250, 7307), True, 'import tensorflow as tf\n'), ((7336, 7409), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['ratio', '(1.0 - self.clip_epsilon)', '(1.0 + self.clip_epsilon)'], {}), '(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon)\n', (7352, 7409), True, 'import tensorflow as tf\n'), ((7461, 7558), 'tensorflow.minimum', 'tf.minimum', (['(ratio * self.placeholders.advantage)', '(clipped_ratio * self.placeholders.advantage)'], {}), '(ratio * self.placeholders.advantage, clipped_ratio * self.\n placeholders.advantage)\n', (7471, 7558), True, 'import tensorflow as tf\n'), ((7637, 7686), 'common.util.weighted_random_sample', 'weighted_random_sample', (['theta_old.action_id_probs'], {}), '(theta_old.action_id_probs)\n', (7659, 7686), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((7729, 7783), 'common.util.weighted_random_sample', 'weighted_random_sample', (['theta_old.spatial_action_probs'], {}), '(theta_old.spatial_action_probs)\n', (7751, 7783), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((8161, 8206), 'common.util.weighted_random_sample', 'weighted_random_sample', (['theta.action_id_probs'], {}), '(theta.action_id_probs)\n', (8183, 8206), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((8249, 8299), 'common.util.weighted_random_sample', 'weighted_random_sample', (['theta.spatial_action_probs'], {}), '(theta.spatial_action_probs)\n', (8271, 8299), False, 'from common.util import weighted_random_sample, select_from_each_row, ravel_index_pairs\n'), ((9177, 9212), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.value_estimate'], {}), '(self.value_estimate)\n', (9191, 9212), True, 'import tensorflow as tf\n'), ((9259, 9305), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.placeholders.value_target'], {}), '(self.placeholders.value_target)\n', (9273, 9305), True, 'import tensorflow as tf\n'), ((9386, 9447), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.placeholders.is_spatial_action_available'], {}), '(self.placeholders.is_spatial_action_available)\n', (9400, 9447), True, 'import tensorflow as tf\n'), ((9521, 9565), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['selected_log_probs.action_id'], {}), '(selected_log_probs.action_id)\n', (9535, 9565), True, 'import tensorflow as tf\n'), ((9936, 9979), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.placeholders.advantage'], {}), '(self.placeholders.advantage)\n', (9950, 9979), True, 'import tensorflow as tf\n'), ((10056, 10096), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['selected_log_probs.total'], {}), '(selected_log_probs.total)\n', (10070, 10096), True, 'import tensorflow as tf\n'), ((10483, 
10526), 'tensorflow.get_collection', 'tf.get_collection', (['self._scalar_summary_key'], {}), '(self._scalar_summary_key)\n', (10500, 10526), True, 'import tensorflow as tf\n'), ((1378, 1412), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', 'shape', 'name'], {}), '(dtype, shape, name)\n', (1392, 1412), True, 'import tensorflow as tf\n'), ((6515, 6545), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""theta_old"""'], {}), "('theta_old')\n", (6532, 6545), True, 'import tensorflow as tf\n'), ((6842, 6878), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""theta_old/"""'], {}), "('theta_old/')\n", (6864, 6878), True, 'import tensorflow as tf\n'), ((6993, 7016), 'tensorflow.assign', 'tf.assign', (['t_old', 't_new'], {}), '(t_old, t_new)\n', (7002, 7016), True, 'import tensorflow as tf\n'), ((7892, 7921), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['clipped_ratio'], {}), '(clipped_ratio)\n', (7906, 7921), True, 'import tensorflow as tf\n'), ((8087, 8109), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['l_clip'], {}), '(l_clip)\n', (8101, 8109), True, 'import tensorflow as tf\n'), ((8382, 8452), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(selected_log_probs.total * self.placeholders.advantage)'], {}), '(selected_log_probs.total * self.placeholders.advantage)\n', (8396, 8452), True, 'import tensorflow as tf\n'), ((8899, 8925), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (8923, 8925), True, 'import tensorflow as tf\n'), ((10175, 10216), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['selected_log_probs.spatial'], {}), '(selected_log_probs.spatial)\n', (10188, 10216), True, 'import tensorflow as tf\n'), ((6763, 6795), 'tensorflow.trainable_variables', 'tf.trainable_variables', (['"""theta/"""'], {}), "('theta/')\n", (6785, 6795), True, 'import tensorflow as tf\n'), ((10961, 11018), 'numpy.unravel_index', 'np.unravel_index', (['spatial_action', '((self.spatial_dim,) * 2)'], {}), '(spatial_action, (self.spatial_dim,) * 2)\n', (10977, 11018), True, 'import numpy as np\n'), ((8026, 8056), 'tensorflow.equal', 'tf.equal', (['ratio', 'clipped_ratio'], {}), '(ratio, clipped_ratio)\n', (8034, 8056), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
# coding:utf-8
from __future__ import print_function
#import sys
import re
import glob
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
all_files = glob.glob('../*/dat_L*_tau_inf')
list_L = []
list_N = []
list_mx = []
list_mz0mz1 = []
list_ene = []
for file_name in all_files:
# N = file_name.replace("dat_L","")
L = re.sub(".*dat_L","",file_name)
L = int(L.replace("_tau_inf",""))
N = L**2
list_L.append(L)
list_N.append(N)
print(file_name,L,N)
# file = open(sys.argv[1])
# file = open('dat_L3_tau_inf')
file = open(file_name)
lines = file.readlines()
file.close()
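    # each data file stores arrays on single lines such as "mx [v1, v2, ...]";
    # the prefix and brackets are stripped and the values parsed with np.fromstring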
for line in lines:
if line.startswith("mx ["):
line_mx = line[:-1]
line_mx = line_mx.replace("mx [","")
line_mx = line_mx.replace("]","")
# list_mx = np.fromstring(line_mx,dtype=np.float,sep=',')
list_mx.append(np.fromstring(line_mx,dtype=np.float,sep=','))
if line.startswith("mz0mz1 ["):
line_mz0mz1 = line[:-1]
line_mz0mz1 = line_mz0mz1.replace("mz0mz1 [","")
line_mz0mz1 = line_mz0mz1.replace("]","")
list_mz0mz1.append(np.fromstring(line_mz0mz1,dtype=np.float,sep=','))
if line.startswith("ene ["):
line_ene = line[:-1]
line_ene = line_ene.replace("ene [","")
line_ene = line_ene.replace("]","")
list_ene.append(np.fromstring(line_ene,dtype=np.float,sep=','))
if line.startswith("field_steps: h(t)= ["):
line_h = line[:-1]
line_h = line_h.replace("field_steps: h(t)= [","")
line_h = line_h.replace("]","")
list_h = np.fromstring(line_h,dtype=np.float,sep=',')
list_enedens = []
for i in range(len(list_N)):
list_enedens.append(np.array([x/list_N[i] for x in list_ene[i]],dtype=np.float))
print("h",list_h)
for i in range(len(list_L)):
print("L mx",list_L[i],list_mx[i])
print("L mz0mz1",list_L[i],list_mz0mz1[i])
print("L enedens",list_L[i],list_enedens[i])
fig0 = plt.figure()
fig0.suptitle("mx")
for i in range(len(list_L)):
plt.plot(list_h,list_mx[i],label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1,1),loc='upper right',borderaxespad=1)
plt.gca().invert_xaxis()
fig0.savefig("fig_mx.png")
fig1 = plt.figure()
fig1.suptitle("mz0mz1")
for i in range(len(list_L)):
plt.plot(list_h,list_mz0mz1[i],label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1,0),loc='lower right',borderaxespad=1)
plt.gca().invert_xaxis()
fig1.savefig("fig_mz0mz1.png")
fig2 = plt.figure()
fig2.suptitle("enedens")
for i in range(len(list_L)):
plt.plot(list_h,list_enedens[i],label=list_L[i])
plt.xlabel("field")
plt.legend(bbox_to_anchor=(1,0),loc='lower right',borderaxespad=1)
plt.gca().invert_xaxis()
fig2.savefig("fig_enedens.png")
|
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.array",
"glob.glob",
"matplotlib.pyplot.xlabel",
"re.sub",
"numpy.fromstring"
] |
[((147, 168), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (161, 168), False, 'import matplotlib\n'), ((214, 246), 'glob.glob', 'glob.glob', (['"""../*/dat_L*_tau_inf"""'], {}), "('../*/dat_L*_tau_inf')\n", (223, 246), False, 'import glob\n'), ((2106, 2118), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2116, 2118), True, 'import matplotlib.pyplot as plt\n'), ((2216, 2235), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""field"""'], {}), "('field')\n", (2226, 2235), True, 'import matplotlib.pyplot as plt\n'), ((2236, 2305), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 1)', 'loc': '"""upper right"""', 'borderaxespad': '(1)'}), "(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=1)\n", (2246, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2375), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2373, 2375), True, 'import matplotlib.pyplot as plt\n'), ((2481, 2500), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""field"""'], {}), "('field')\n", (2491, 2500), True, 'import matplotlib.pyplot as plt\n'), ((2501, 2570), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 0)', 'loc': '"""lower right"""', 'borderaxespad': '(1)'}), "(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1)\n", (2511, 2570), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2644), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2642, 2644), True, 'import matplotlib.pyplot as plt\n'), ((2752, 2771), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""field"""'], {}), "('field')\n", (2762, 2771), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2841), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1, 0)', 'loc': '"""lower right"""', 'borderaxespad': '(1)'}), "(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1)\n", (2782, 2841), True, 'import matplotlib.pyplot as plt\n'), ((391, 423), 're.sub', 're.sub', (['""".*dat_L"""', '""""""', 'file_name'], {}), "('.*dat_L', '', file_name)\n", (397, 423), False, 'import re\n'), ((2172, 2217), 'matplotlib.pyplot.plot', 'plt.plot', (['list_h', 'list_mx[i]'], {'label': 'list_L[i]'}), '(list_h, list_mx[i], label=list_L[i])\n', (2180, 2217), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2482), 'matplotlib.pyplot.plot', 'plt.plot', (['list_h', 'list_mz0mz1[i]'], {'label': 'list_L[i]'}), '(list_h, list_mz0mz1[i], label=list_L[i])\n', (2441, 2482), True, 'import matplotlib.pyplot as plt\n'), ((2703, 2753), 'matplotlib.pyplot.plot', 'plt.plot', (['list_h', 'list_enedens[i]'], {'label': 'list_L[i]'}), '(list_h, list_enedens[i], label=list_L[i])\n', (2711, 2753), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1918), 'numpy.array', 'np.array', (['[(x / list_N[i]) for x in list_ene[i]]'], {'dtype': 'np.float'}), '([(x / list_N[i]) for x in list_ene[i]], dtype=np.float)\n', (1862, 1918), True, 'import numpy as np\n'), ((2303, 2312), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2310, 2312), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2577), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2575, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2848), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2846, 2848), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1783), 'numpy.fromstring', 'np.fromstring', (['line_h'], {'dtype': 'np.float', 'sep': '""","""'}), "(line_h, dtype=np.float, sep=',')\n", (1750, 1783), True, 'import numpy as np\n'), ((960, 1007), 'numpy.fromstring', 
'np.fromstring', (['line_mx'], {'dtype': 'np.float', 'sep': '""","""'}), "(line_mx, dtype=np.float, sep=',')\n", (973, 1007), True, 'import numpy as np\n'), ((1229, 1280), 'numpy.fromstring', 'np.fromstring', (['line_mz0mz1'], {'dtype': 'np.float', 'sep': '""","""'}), "(line_mz0mz1, dtype=np.float, sep=',')\n", (1242, 1280), True, 'import numpy as np\n'), ((1478, 1526), 'numpy.fromstring', 'np.fromstring', (['line_ene'], {'dtype': 'np.float', 'sep': '""","""'}), "(line_ene, dtype=np.float, sep=',')\n", (1491, 1526), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# encoding: utf-8
import argparse
import prody
import os
import shutil
import subprocess
import numpy
from os.path import join
GMX_PATH = '/usr/local/gromacs/bin/'
mdp_string = '''
define = -DPOSRES
integrator = {integrator}
nsteps = 1000
emtol = 1
nstlist = 1
coulombtype = Cut-off
vdwtype = Cut-off
ns_type = simple
rlist = 1.8
rcoulomb = 1.8
rvdw = 1.8
pbc = xyz
implicit_solvent = GBSA
gb_algorithm = OBC
sa_algorithm = ACE-approximation
rgbradii = 1.8
;nstxout = 1
'''
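# The {integrator} placeholder in the template above is filled per minimization
# round: 'steep' (steepest descent) for round 1 and 'l-bfgs' for round 2.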
def parse_args():
parser = argparse.ArgumentParser(description='Generate trajectory with gaussian flucutations.')
parser.add_argument('pdb_file', metavar='INPUT_PDB_FILE', help='path to input pdb file')
parser.add_argument('trajectory', metavar='TRAJECTORY', help='path to input trajectory')
    parser.add_argument('out_file', metavar='OUTPUT_PDB_FILE', help='path to output pdb file')
parser.add_argument('--pos_res_k', type=float, default=1000.)
args = parser.parse_args()
return (args.pdb_file, args.trajectory, args.out_file, args.pos_res_k)
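# Command-line usage, as defined by parse_args above:
#   python <script.py> INPUT_PDB_FILE TRAJECTORY OUTPUT_PDB_FILE [--pos_res_k 1000.0]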
class Minimizer(object):
def __init__(self, input_pdb_filename, trajectory_filename):
self.input_pdb = self._load_pdb(input_pdb_filename)
self.trajectory = self._load_pdb(trajectory_filename)
def _load_pdb(self, in_file):
protein = prody.parsePDB(in_file)
return protein
def _get_closest_frame(self):
output = prody.AtomGroup('Cartesian average coordinates')
output.setCoords( self.trajectory.getCoords() )
output.setNames( self.trajectory.getNames() )
output.setResnums( self.trajectory.getResnums() )
output.setResnames( self.trajectory.getResnames() )
ensemble = prody.PDBEnsemble(self.trajectory)
ensemble.setCoords(self.input_pdb)
ensemble.superpose()
rmsds = ensemble.getRMSDs()
min_index = numpy.argmin(rmsds)
output.setCoords( ensemble.getCoordsets(min_index) )
return output
def _create_no_h_file(self, output_stream):
# make the index file
cmd = join(GMX_PATH, 'make_ndx')
cmd += ' -f min_round_2.gro -o no_h.ndx'
p1 = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=output_stream, stderr=output_stream)
p1.communicate('q\n')
# run editconf
edit_cmd = join(GMX_PATH, 'editconf')
edit_cmd += ' -f min_round_2.gro -o no_h.gro -n no_h.ndx'
p2 = subprocess.Popen(edit_cmd, shell=True, stdin=subprocess.PIPE,
stdout=output_stream, stderr=output_stream)
p2.communicate('2\n')
def _re_order(self, output_stream):
# create a new index file
lines = open('index.ndx').read().splitlines()
header = lines[0]
indices = []
for line in lines[1:]:
cols = line.split()
for col in cols:
indices.append( int(col) )
resorted = [ indices.index(val)+1 for val in range( 1, max(indices)+1 ) ]
with open('resort.ndx', 'w') as out:
print >>out, header
for val in resorted:
print >>out, val
# resort
edit_cmd = join(GMX_PATH, 'editconf')
edit_cmd += ' -f no_h.gro -o min.pdb -n resort.ndx'
subprocess.check_call(edit_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
def run_minimization(self, posres_force_const=1000., output_stream=None):
start = self._get_closest_frame()
# create temp dir
if os.path.isdir('Temp'):
pass
else:
os.mkdir('Temp')
os.chdir('Temp')
# write the average file
prody.writePDB('average.pdb', self.input_pdb)
pdb_cmd = join(GMX_PATH, 'pdb2gmx')
pdb_cmd += ' -f average.pdb -ff amber99sb-ildn -water none -n index.ndx -posrefc {} -o ref.gro -his'.format(
posres_force_const)
p = subprocess.Popen(pdb_cmd, shell=True, stdin=subprocess.PIPE,
stdout=output_stream, stderr=output_stream)
p.communicate('0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n')
# put it in a bigger box
box_cmd = join(GMX_PATH, 'editconf')
box_cmd += ' -f ref.gro -o ref_box.gro -c -box 999 999 999'
subprocess.check_call(box_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
# write pdb file
prody.writePDB('start.pdb', start)
# pdb2gmx
pdb_cmd = join(GMX_PATH, 'pdb2gmx')
pdb_cmd += ' -f start.pdb -ff amber99sb-ildn -water none -n index.ndx -posrefc {} -his'.format(
posres_force_const)
p = subprocess.Popen(pdb_cmd, shell=True, stdin=subprocess.PIPE,
stdout=output_stream, stderr=output_stream)
p.communicate('0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n')
# put it in a bigger box
box_cmd = join(GMX_PATH, 'editconf')
box_cmd += ' -f conf.gro -o box.gro -c -box 999 999 999'
subprocess.check_call(box_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
#
# Round 1
#
# write mdp file
with open('min_round_1.mdp', 'w') as min_file:
min_file.write( mdp_string.format(integrator='steep') )
# run grompp
grompp_cmd = join(GMX_PATH, 'grompp')
grompp_cmd += ' -f min_round_1.mdp -c box.gro -p topol.top -o min_round_1 -r ref_box.gro'
subprocess.check_call(grompp_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
# run mdrun
md_cmd = join(GMX_PATH, 'mdrun')
md_cmd += ' -deffnm min_round_1 -v -nt 1'
subprocess.check_call(md_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
#
# Round 2
#
# write mdp file
with open('min_round_2.mdp', 'w') as min_file:
min_file.write( mdp_string.format(integrator='l-bfgs') )
# run grompp
grompp_cmd = join(GMX_PATH, 'grompp')
grompp_cmd += ' -f min_round_2.mdp -c min_round_1.gro -p topol.top -o min_round_2 -maxwarn 1 -r ref_box.gro'
subprocess.check_call(grompp_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
# run mdrun
md_cmd = join(GMX_PATH, 'mdrun')
md_cmd += ' -deffnm min_round_2 -v -nt 1'
subprocess.check_call(md_cmd, shell=True, stdout=output_stream,
stderr=output_stream)
#
# gather results
#
self._create_no_h_file(output_stream)
self._re_order(output_stream)
# load the pdb
protein = prody.parsePDB('min.pdb').select('not hydrogen')
# clean up
os.chdir('..')
shutil.rmtree('Temp')
return protein
def main():
r = parse_args()
input_pdb_filename = r[0]
trajectory_filename = r[1]
output_pdb_filename = r[2]
force_const = r[3]
m = Minimizer(input_pdb_filename, trajectory_filename)
minimized_protein = m.run_minimization(force_const)
prody.writePDB(output_pdb_filename, minimized_protein)
if __name__ == '__main__':
main()
|
[
"os.mkdir",
"subprocess.Popen",
"argparse.ArgumentParser",
"prody.PDBEnsemble",
"os.path.join",
"os.path.isdir",
"prody.AtomGroup",
"numpy.argmin",
"prody.parsePDB",
"shutil.rmtree",
"prody.writePDB",
"os.chdir",
"subprocess.check_call"
] |
[((534, 625), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate trajectory with gaussian flucutations."""'}), "(description=\n 'Generate trajectory with gaussian flucutations.')\n", (557, 625), False, 'import argparse\n'), ((7413, 7467), 'prody.writePDB', 'prody.writePDB', (['output_pdb_filename', 'minimized_protein'], {}), '(output_pdb_filename, minimized_protein)\n', (7427, 7467), False, 'import prody\n'), ((1341, 1364), 'prody.parsePDB', 'prody.parsePDB', (['in_file'], {}), '(in_file)\n', (1355, 1364), False, 'import prody\n'), ((1440, 1488), 'prody.AtomGroup', 'prody.AtomGroup', (['"""Cartesian average coordinates"""'], {}), "('Cartesian average coordinates')\n", (1455, 1488), False, 'import prody\n'), ((1737, 1771), 'prody.PDBEnsemble', 'prody.PDBEnsemble', (['self.trajectory'], {}), '(self.trajectory)\n', (1754, 1771), False, 'import prody\n'), ((1900, 1919), 'numpy.argmin', 'numpy.argmin', (['rmsds'], {}), '(rmsds)\n', (1912, 1919), False, 'import numpy\n'), ((2097, 2123), 'os.path.join', 'join', (['GMX_PATH', '"""make_ndx"""'], {}), "(GMX_PATH, 'make_ndx')\n", (2101, 2123), False, 'from os.path import join\n'), ((2186, 2291), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(cmd, shell=True, stdin=subprocess.PIPE, stdout=\n output_stream, stderr=output_stream)\n', (2202, 2291), False, 'import subprocess\n'), ((2390, 2416), 'os.path.join', 'join', (['GMX_PATH', '"""editconf"""'], {}), "(GMX_PATH, 'editconf')\n", (2394, 2416), False, 'from os.path import join\n'), ((2496, 2606), 'subprocess.Popen', 'subprocess.Popen', (['edit_cmd'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(edit_cmd, shell=True, stdin=subprocess.PIPE, stdout=\n output_stream, stderr=output_stream)\n', (2512, 2606), False, 'import subprocess\n'), ((3235, 3261), 'os.path.join', 'join', (['GMX_PATH', '"""editconf"""'], {}), "(GMX_PATH, 'editconf')\n", (3239, 3261), False, 'from os.path import join\n'), ((3330, 3422), 'subprocess.check_call', 'subprocess.check_call', (['edit_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(edit_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (3351, 3422), False, 'import subprocess\n'), ((3607, 3628), 'os.path.isdir', 'os.path.isdir', (['"""Temp"""'], {}), "('Temp')\n", (3620, 3628), False, 'import os\n'), ((3698, 3714), 'os.chdir', 'os.chdir', (['"""Temp"""'], {}), "('Temp')\n", (3706, 3714), False, 'import os\n'), ((3757, 3802), 'prody.writePDB', 'prody.writePDB', (['"""average.pdb"""', 'self.input_pdb'], {}), "('average.pdb', self.input_pdb)\n", (3771, 3802), False, 'import prody\n'), ((3821, 3846), 'os.path.join', 'join', (['GMX_PATH', '"""pdb2gmx"""'], {}), "(GMX_PATH, 'pdb2gmx')\n", (3825, 3846), False, 'from os.path import join\n'), ((4008, 4117), 'subprocess.Popen', 'subprocess.Popen', (['pdb_cmd'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(pdb_cmd, shell=True, stdin=subprocess.PIPE, stdout=\n output_stream, stderr=output_stream)\n', (4024, 4117), False, 'import subprocess\n'), ((4330, 4356), 'os.path.join', 'join', (['GMX_PATH', '"""editconf"""'], {}), "(GMX_PATH, 'editconf')\n", (4334, 4356), False, 'from os.path import join\n'), ((4433, 4524), 'subprocess.check_call', 'subprocess.check_call', (['box_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 
'stderr': 'output_stream'}), '(box_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (4454, 4524), False, 'import subprocess\n'), ((4584, 4618), 'prody.writePDB', 'prody.writePDB', (['"""start.pdb"""', 'start'], {}), "('start.pdb', start)\n", (4598, 4618), False, 'import prody\n'), ((4656, 4681), 'os.path.join', 'join', (['GMX_PATH', '"""pdb2gmx"""'], {}), "(GMX_PATH, 'pdb2gmx')\n", (4660, 4681), False, 'from os.path import join\n'), ((4830, 4939), 'subprocess.Popen', 'subprocess.Popen', (['pdb_cmd'], {'shell': '(True)', 'stdin': 'subprocess.PIPE', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(pdb_cmd, shell=True, stdin=subprocess.PIPE, stdout=\n output_stream, stderr=output_stream)\n', (4846, 4939), False, 'import subprocess\n'), ((5153, 5179), 'os.path.join', 'join', (['GMX_PATH', '"""editconf"""'], {}), "(GMX_PATH, 'editconf')\n", (5157, 5179), False, 'from os.path import join\n'), ((5253, 5344), 'subprocess.check_call', 'subprocess.check_call', (['box_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(box_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (5274, 5344), False, 'import subprocess\n'), ((5601, 5625), 'os.path.join', 'join', (['GMX_PATH', '"""grompp"""'], {}), "(GMX_PATH, 'grompp')\n", (5605, 5625), False, 'from os.path import join\n'), ((5732, 5826), 'subprocess.check_call', 'subprocess.check_call', (['grompp_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(grompp_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (5753, 5826), False, 'import subprocess\n'), ((5890, 5913), 'os.path.join', 'join', (['GMX_PATH', '"""mdrun"""'], {}), "(GMX_PATH, 'mdrun')\n", (5894, 5913), False, 'from os.path import join\n'), ((5972, 6062), 'subprocess.check_call', 'subprocess.check_call', (['md_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(md_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (5993, 6062), False, 'import subprocess\n'), ((6320, 6344), 'os.path.join', 'join', (['GMX_PATH', '"""grompp"""'], {}), "(GMX_PATH, 'grompp')\n", (6324, 6344), False, 'from os.path import join\n'), ((6470, 6564), 'subprocess.check_call', 'subprocess.check_call', (['grompp_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(grompp_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (6491, 6564), False, 'import subprocess\n'), ((6628, 6651), 'os.path.join', 'join', (['GMX_PATH', '"""mdrun"""'], {}), "(GMX_PATH, 'mdrun')\n", (6632, 6651), False, 'from os.path import join\n'), ((6710, 6800), 'subprocess.check_call', 'subprocess.check_call', (['md_cmd'], {'shell': '(True)', 'stdout': 'output_stream', 'stderr': 'output_stream'}), '(md_cmd, shell=True, stdout=output_stream, stderr=\n output_stream)\n', (6731, 6800), False, 'import subprocess\n'), ((7075, 7089), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (7083, 7089), False, 'import os\n'), ((7098, 7119), 'shutil.rmtree', 'shutil.rmtree', (['"""Temp"""'], {}), "('Temp')\n", (7111, 7119), False, 'import shutil\n'), ((3673, 3689), 'os.mkdir', 'os.mkdir', (['"""Temp"""'], {}), "('Temp')\n", (3681, 3689), False, 'import os\n'), ((6998, 7023), 'prody.parsePDB', 'prody.parsePDB', (['"""min.pdb"""'], {}), "('min.pdb')\n", (7012, 7023), False, 'import prody\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This module defines filters for Cell instances
"""
from __future__ import print_function
import copy
import numpy as np
from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND
from tunacell.base.datatools import multiplicative_increments
from tunacell.base.observable import Observable
class FilterCell(FilterGeneral):
"General class for filtering cell objects (reader.Cell instances)"
_type = 'CELL'
class FilterCellAny(FilterCell):
"Class that does not filter anything."
def __init__(self):
self.label = 'Always True' # short description for human readers
return
def func(self, cell):
return True
class FilterData(FilterCell):
"""Default filter test only if cell exists and cell.data non empty."""
def __init__(self):
self.label = 'Cell Has Data'
return
def func(self, cell):
boo = False
if cell is not None:
boo = cell.data is not None and len(cell.data) > 0
return boo
class FilterCellIDparity(FilterCell):
"""Test whether identifier is odd or even"""
def __init__(self, parity='even'):
self.parity = parity
self.label = 'Cell identifier is {}'.format(parity)
return
def func(self, cell):
# test if even
try:
even = int(cell.identifier) % 2 == 0
if self.parity == 'even':
return even
elif self.parity == 'odd':
return not even
else:
raise ValueError("Parity must be 'even' or 'odd'")
except ValueError as ve:
print(ve)
return False
class FilterCellIDbound(FilterCell):
"""Test class"""
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.label = '{} <= cellID < {}'.format(lower_bound, upper_bound)
return
def func(self, cell):
return bounded(int(cell.identifier),
self.lower_bound, self.upper_bound)
class FilterHasParent(FilterCell):
"""Test whether a cell has an identified parent cell"""
def __init__(self):
self.label = 'Cell Has Parent'
return
def func(self, cell):
boo = False
if cell.parent:
boo = True
return boo
class FilterDaughters(FilterCell):
"Test whether a given cell as at least one daughter cell"
def __init__(self, daughter_min=1, daughter_max=2):
label = 'Number of daughter cell(s): '
label += '{0} <= n_daughters <= {1}'.format(daughter_min, daughter_max)
self.label = label
self.lower_bound = daughter_min
self.upper_bound = daughter_max + 1 # lower <= x < upper
return
def func(self, cell):
return bounded(len(cell.childs),
lower_bound=self.lower_bound,
upper_bound=self.upper_bound)
class FilterCompleteCycle(FilterCell):
"Test whether a cell has a given parent and at least one daughter."
def __init__(self, daughter_min=1):
label = 'Cell cycle complete'
label += ' (with at least {} daughter cell(s)'.format(daughter_min)
self.daughter_min = daughter_min
self.label = label
return
def func(self, cell):
filt_parent = FilterHasParent()
filt_daughter = FilterDaughters(daughter_min=self.daughter_min)
return filt_parent(cell) and filt_daughter(cell)
class FilterCycleFrames(FilterCell):
"""Check whether cell has got a minimal number of datapoints."""
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
label = 'Number of registered frames:'
label += '{0} <= n_frames <= {1}'.format(self.lower_bound,
self.upper_bound)
self.label = label
return
def func(self, cell):
# check whether data exists
boo = False
filtData = FilterData()
if filtData.func(cell):
boo = bounded(len(cell.data),
lower_bound=self.lower_bound,
upper_bound=self.upper_bound
)
return boo
class FilterCycleSpanIncluded(FilterCell):
"""Check that cell cycle time interval is within valid bounds."""
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
label = '{} <= Initial frame and Final frame < {}'.format(lower_bound,
upper_bound)
self.label = label
return
def func(self, cell):
boo = False
filtData = FilterData()
if filtData(cell):
boo = included(cell.data['time'],
lower_bound=self.lower_bound,
upper_bound=self.upper_bound)
return boo
class FilterTimeInCycle(FilterCell):
"""Check that tref is within cell birth and division time"""
def __init__(self, tref=0.):
self.tref = tref
label = 'birth/first time <= {} < division/last time'.format(tref)
self.label = label
return
def func(self, cell):
boo = False
filtData = FilterData()
if filtData(cell):
if cell.birth_time is not None:
lower = cell.birth_time
else:
lower = cell.data['time'][0]
if cell.division_time is not None:
upper = cell.division_time
else:
upper = cell.data['time'][-1]
boo = lower <= self.tref < upper
return boo
class FilterObservableBound(FilterCell):
"""Check that a given observable is bounded.
Parameters
----------
obs : Observable instance
observable that will be tested for bounds
works only for continuous observable (mode='dynamics')
tref : float (default None)
Time of reference at which to test dynamics observable value
lower_bound : float (default None)
upper_bound : float (default None)
"""
def __init__(self, obs=Observable(name='undefined'), tref=None,
lower_bound=None, upper_bound=None):
self.obs_to_test = obs # observable to be tested
self._obs = [obs, ] # hidden to be computed at for filtering purpose
self.tref = tref
# code below is commented because func is able to deal with arrays
# if obs.mode == 'dynamics' and tref is None:
# msg = 'For dynamics mode, this filter needs a valid tref (float)'
# raise ValueError(msg)
self.lower_bound = lower_bound
self.upper_bound = upper_bound
label = '{} <= {}'.format(lower_bound, obs.name)
if tref is not None:
label += ' (t={})'.format(tref)
label += ' < {}'.format(upper_bound)
self.label = label
return
def func(self, cell):
import collections
boo = False
if self.tref is not None:
filt = FilterAND(FilterData(),
FilterTimeInCycle(tref=self.tref))
else:
filt = FilterData()
label = self.obs_to_test.label
if filt(cell):
# retrieve data
array = cell._sdata[label] # two cases: array, or single value
raw_time = cell.data['time']
if len(raw_time) > 1:
dt = np.amin(raw_time[1:] - raw_time[:-1])
else:
dt = cell.container.period
if array is None:
return False
if isinstance(array, collections.Iterable):
if self.tref is None:
# data may be one value (for cycle observables), or array
boo = bounded(array[label], self.lower_bound, self.upper_bound)
else:
# find data closest to tref (-> round to closest time)
# for now return closest time to tref
index = np.argmin(np.abs(array['time'] - self.tref))
# check that it's really close:
if np.abs(array['time'][index] - self.tref) < dt:
value = array[label][index]
boo = bounded(value, self.lower_bound, self.upper_bound)
# otherwise it's a number
else:
boo = bounded(array, self.lower_bound, self.upper_bound)
return boo
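# Illustrative example (not part of the original module): assuming an Observable
# constructed as Observable(name='length', raw='length'), cells whose length at
# t=30 lies in [2., 5.) could be selected with
#   filt = FilterObservableBound(obs=Observable(name='length', raw='length'),
#                                 tref=30., lower_bound=2., upper_bound=5.)
#   keep = filt(cell)   # filters are callable on Cell instances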
# useless ?
class FilterLengthIncrement(FilterCell):
"Check increments are bounded."
def __init__(self, lower_bound=None, upper_bound=None):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
label = 'Length increments between two successive frames: '
label += '{0} <= delta_length <= {1}'.format(self.lower_bound,
self.upper_bound)
        self.label = label  # store the label, as the other filters do
        return
def func(self, cell):
boo = False
filtData = FilterData()
if filtData(cell):
ell = np.array(cell.data['length'])
incr = multiplicative_increments(ell)
lower = bounded(np.amin(incr), lower_bound=self.lower_bound)
upper = bounded(np.amax(incr), upper_bound=self.upper_bound)
boo = lower and upper
return boo
class FilterSymmetricDivision(FilterCell):
"""Check that cell division is (roughly) symmetric.
Parameters
----------
raw : str
column label of raw observable to test for symmetric division
(usually one of 'length', 'area'). This quantity will be approximated
"""
def __init__(self, raw='area', lower_bound=0.4, upper_bound=0.6):
self.raw = raw
# Observable to be computed: raw at birth, raw at division
# hidden _obs because not part of parameters, but should be computed
self._obs = [Observable(raw=raw, scale='log', mode='birth', timing='b'),
Observable(raw=raw, scale='log', mode='division',
timing='d')]
self.lower_bound = lower_bound
self.upper_bound = upper_bound
label = 'Symmetric division filter:'
ratio_str = '(daughter cell {})/(mother cell {})'.format(raw, raw)
label += ' {} <= {} <= {}'.format(self.lower_bound,
ratio_str,
self.upper_bound)
# label += 'OR (in case mother cell data is missing) '
# label += '{0} <= (daughter cell area)/(sister cell area) <= {1}\
# '.format(self.lower_bound/self.upper_bound,
# self.upper_bound/self.lower_bound)
self.label = label
return
def func(self, cell):
boo = False
filtData = FilterData()
if cell.parent is None:
# birth is not reported, impossible to test, cannot exclude from data
boo = True
else:
if filtData(cell):
csize = cell._sdata[self._obs[0].label]
if filtData(cell.parent):
psize = cell.parent._sdata[self._obs[1].label]
boo = bounded(csize/psize,
lower_bound=self.lower_bound,
upper_bound=self.upper_bound
)
else:
# parent exists, but without data.
# this is a weird scenario, that should not exist
# TODO: warn user?
# but we can check with sibling
sibs = copy.deepcopy(cell.parent.childs)
for item in sibs:
if item.identifier == cell.identifier:
sibs.remove(item)
if sibs:
if len(sibs) > 1:
from ..base.cell import CellChildsError
raise CellChildsError('>2 daughters')
sib = sibs[0] # there should be only one cell
if sib.data is not None:
                        sibsize = sib._sdata[self._obs[0].label]  # .label is an attribute, not a method
boo = bounded(csize/sibsize,
lower_bound=self.lower_bound,
upper_bound=self.upper_bound
)
else:
boo = True # sibling cell: no data, accept this cell
else:
boo = True # no sibling, accept this cell
return boo
|
[
"tunacell.filters.main.included",
"copy.deepcopy",
"numpy.abs",
"numpy.amin",
"tunacell.base.datatools.multiplicative_increments",
"tunacell.filters.main.bounded",
"tunacell.base.observable.Observable",
"numpy.amax",
"numpy.array"
] |
[((6363, 6391), 'tunacell.base.observable.Observable', 'Observable', ([], {'name': '"""undefined"""'}), "(name='undefined')\n", (6373, 6391), False, 'from tunacell.base.observable import Observable\n'), ((4970, 5062), 'tunacell.filters.main.included', 'included', (["cell.data['time']"], {'lower_bound': 'self.lower_bound', 'upper_bound': 'self.upper_bound'}), "(cell.data['time'], lower_bound=self.lower_bound, upper_bound=self.\n upper_bound)\n", (4978, 5062), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((9303, 9332), 'numpy.array', 'np.array', (["cell.data['length']"], {}), "(cell.data['length'])\n", (9311, 9332), True, 'import numpy as np\n'), ((9352, 9382), 'tunacell.base.datatools.multiplicative_increments', 'multiplicative_increments', (['ell'], {}), '(ell)\n', (9377, 9382), False, 'from tunacell.base.datatools import multiplicative_increments\n'), ((10143, 10201), 'tunacell.base.observable.Observable', 'Observable', ([], {'raw': 'raw', 'scale': '"""log"""', 'mode': '"""birth"""', 'timing': '"""b"""'}), "(raw=raw, scale='log', mode='birth', timing='b')\n", (10153, 10201), False, 'from tunacell.base.observable import Observable\n'), ((10224, 10285), 'tunacell.base.observable.Observable', 'Observable', ([], {'raw': 'raw', 'scale': '"""log"""', 'mode': '"""division"""', 'timing': '"""d"""'}), "(raw=raw, scale='log', mode='division', timing='d')\n", (10234, 10285), False, 'from tunacell.base.observable import Observable\n'), ((7679, 7716), 'numpy.amin', 'np.amin', (['(raw_time[1:] - raw_time[:-1])'], {}), '(raw_time[1:] - raw_time[:-1])\n', (7686, 7716), True, 'import numpy as np\n'), ((8654, 8704), 'tunacell.filters.main.bounded', 'bounded', (['array', 'self.lower_bound', 'self.upper_bound'], {}), '(array, self.lower_bound, self.upper_bound)\n', (8661, 8704), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((9411, 9424), 'numpy.amin', 'np.amin', (['incr'], {}), '(incr)\n', (9418, 9424), True, 'import numpy as np\n'), ((9484, 9497), 'numpy.amax', 'np.amax', (['incr'], {}), '(incr)\n', (9491, 9497), True, 'import numpy as np\n'), ((8035, 8092), 'tunacell.filters.main.bounded', 'bounded', (['array[label]', 'self.lower_bound', 'self.upper_bound'], {}), '(array[label], self.lower_bound, self.upper_bound)\n', (8042, 8092), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((11446, 11533), 'tunacell.filters.main.bounded', 'bounded', (['(csize / psize)'], {'lower_bound': 'self.lower_bound', 'upper_bound': 'self.upper_bound'}), '(csize / psize, lower_bound=self.lower_bound, upper_bound=self.\n upper_bound)\n', (11453, 11533), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((11895, 11928), 'copy.deepcopy', 'copy.deepcopy', (['cell.parent.childs'], {}), '(cell.parent.childs)\n', (11908, 11928), False, 'import copy\n'), ((8286, 8319), 'numpy.abs', 'np.abs', (["(array['time'] - self.tref)"], {}), "(array['time'] - self.tref)\n", (8292, 8319), True, 'import numpy as np\n'), ((8396, 8436), 'numpy.abs', 'np.abs', (["(array['time'][index] - self.tref)"], {}), "(array['time'][index] - self.tref)\n", (8402, 8436), True, 'import numpy as np\n'), ((8525, 8575), 'tunacell.filters.main.bounded', 'bounded', (['value', 'self.lower_bound', 'self.upper_bound'], {}), '(value, self.lower_bound, self.upper_bound)\n', (8532, 8575), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n'), ((12506, 12595), 
'tunacell.filters.main.bounded', 'bounded', (['(csize / sibsize)'], {'lower_bound': 'self.lower_bound', 'upper_bound': 'self.upper_bound'}), '(csize / sibsize, lower_bound=self.lower_bound, upper_bound=self.\n upper_bound)\n', (12513, 12595), False, 'from tunacell.filters.main import FilterGeneral, bounded, included, FilterAND\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 18:04:26 2020
@author: hp
"""
import pandas as pd
import numpy as np
ratings= pd.read_csv('ratings.csv')
movies= pd.read_csv(r'movies.csv' )
ts = ratings['timestamp']
ts = pd.to_datetime(ts, unit = 's').dt.hour
movies['hours'] = ts
merged = ratings.merge(movies, left_on = 'movieId' , right_on = 'movieId', suffixes = ['_user',''])
merged = merged[['userId', 'movieId','genres','hours']]
merged = pd.concat([merged,merged['genres'].str.get_dummies(sep = '|')], axis = 1)
del merged['genres']
del merged['(no genres listed)']
def activateuserprofile(userId):
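    """Return the user's hourly genre profile: for each hour of the day, sum the
    genre indicator columns and min-max normalize them within that hour."""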
userprofile = merged.loc[merged['userId'] == userId]
del userprofile ['userId']
del userprofile['movieId']
userprofile = userprofile.groupby(['hours'], as_index = False, sort =True).sum()
userprofile.iloc[:,1:20] = userprofile.iloc[:,1:20].apply(lambda x:(x - np.min(x))/(np.max(x)-np.min(x)),axis = 1)
return(userprofile)
activeuser = activateuserprofile(30)
recommend = pd.read_csv(r'recommend.csv')
del merged['userId']
# note: 'rating' was already dropped when the columns were selected above,
# so there is no 'rating' column left to delete here
merged = merged.drop_duplicates()
user_pref = recommend.merge(merged, left_on = 'movieId' , right_on = 'movieId', suffixes = ['_user',''])
product = np.dot(user_pref.iloc[:,2:21].as_matrix(), activeuser.iloc[21,2:21].as_matrix())#IndexError: single positional indexer is out-of-bounds
preferences = np.stack((user_pref['movieId'], product), axis =-1)
df = pd.DataFrame(preferences, columns = ['movieId', 'preferences'])
result = df.sort_values(['preferences'], ascending = False).iloc[0:10]  # ten movies with the highest predicted preference
|
[
"pandas.DataFrame",
"numpy.stack",
"pandas.read_csv",
"numpy.min",
"numpy.max",
"pandas.to_datetime"
] |
[((141, 167), 'pandas.read_csv', 'pd.read_csv', (['"""ratings.csv"""'], {}), "('ratings.csv')\n", (152, 167), True, 'import pandas as pd\n'), ((177, 202), 'pandas.read_csv', 'pd.read_csv', (['"""movies.csv"""'], {}), "('movies.csv')\n", (188, 202), True, 'import pandas as pd\n'), ((1461, 1489), 'pandas.read_csv', 'pd.read_csv', (['"""recommend.csv"""'], {}), "('recommend.csv')\n", (1472, 1489), True, 'import pandas as pd\n'), ((1845, 1895), 'numpy.stack', 'np.stack', (["(user_pref['movieId'], product)"], {'axis': '(-1)'}), "((user_pref['movieId'], product), axis=-1)\n", (1853, 1895), True, 'import numpy as np\n'), ((1904, 1965), 'pandas.DataFrame', 'pd.DataFrame', (['preferences'], {'columns': "['movieId', 'prefrernces']"}), "(preferences, columns=['movieId', 'prefrernces'])\n", (1916, 1965), True, 'import pandas as pd\n'), ((239, 267), 'pandas.to_datetime', 'pd.to_datetime', (['ts'], {'unit': '"""s"""'}), "(ts, unit='s')\n", (253, 267), True, 'import pandas as pd\n'), ((926, 935), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (932, 935), True, 'import numpy as np\n'), ((938, 947), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (944, 947), True, 'import numpy as np\n'), ((948, 957), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (954, 957), True, 'import numpy as np\n')]
|
# A neural network which approximates linear function y = 2x + 3.
# The network has 1 layer with 1 node, which has 1 input (and a bias).
# As there is no activation effectively this node is a linear function.
# After +/- 10.000 iterations W should be close to 2 and B should be close to 3.
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(formatter={"float": "{: 0.3f}".format}, linewidth=np.inf)
np.random.seed(1)
X = np.array([[0], [1], [2], [3], [4]]) # X = input (here: 5 values)
Y = 2 * X + 3 # Y = output: y = 2x + 3 (as many values as there are X's)
W = np.random.normal(scale=0.1, size=(1, 1)) # layer: (1, 1) = 1 node with 1 input
B = np.random.normal(scale=0.1, size=(1, 1)) # bias: (1, 1) = for 1 node (and by definition only 1 bias value per node)
learning_rate = 0.001
iterations = 10000
error = []
print("initial :", "W =", W, "B =", B, "(random initialization)")
m = X.shape[0]
for _ in range(iterations):
# forward pass
a = W.dot(X.T) + B
    # back propagation: gradients of the (halved) mean squared error
    da = a - Y.T  # error signal: prediction minus target
    dz = da  # identity activation, so dL/dz equals the error signal
    dw = dz.dot(X) / m  # dL/dW = (1/m) * sum((a - y) * x)
    db = np.sum(dz, axis=1, keepdims=True) / m  # dL/dB = (1/m) * sum(a - y)
W -= learning_rate * dw
B -= learning_rate * db
error.append(np.average(da ** 2))
print("result :", "W =", W, "B =", B, "(after {} iterations)".format(iterations))
print("expected: W = 2, B = 3")
plt.plot(range(iterations), error)
plt.title("MSE (mean squared error)")
plt.xlabel("training iterations")
plt.ylabel("mse")
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.set_printoptions",
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.average",
"numpy.array",
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((343, 420), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{: 0.3f}'.format}", 'linewidth': 'np.inf'}), "(formatter={'float': '{: 0.3f}'.format}, linewidth=np.inf)\n", (362, 420), True, 'import numpy as np\n'), ((421, 438), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (435, 438), True, 'import numpy as np\n'), ((444, 479), 'numpy.array', 'np.array', (['[[0], [1], [2], [3], [4]]'], {}), '([[0], [1], [2], [3], [4]])\n', (452, 479), True, 'import numpy as np\n'), ((589, 629), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.1)', 'size': '(1, 1)'}), '(scale=0.1, size=(1, 1))\n', (605, 629), True, 'import numpy as np\n'), ((673, 713), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.1)', 'size': '(1, 1)'}), '(scale=0.1, size=(1, 1))\n', (689, 713), True, 'import numpy as np\n'), ((1399, 1436), 'matplotlib.pyplot.title', 'plt.title', (['"""MSE (mean squared error)"""'], {}), "('MSE (mean squared error)')\n", (1408, 1436), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1470), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""training iterations"""'], {}), "('training iterations')\n", (1447, 1470), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1488), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mse"""'], {}), "('mse')\n", (1481, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1499), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1497, 1499), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1146), 'numpy.sum', 'np.sum', (['dz'], {'axis': '(1)', 'keepdims': '(True)'}), '(dz, axis=1, keepdims=True)\n', (1119, 1146), True, 'import numpy as np\n'), ((1226, 1245), 'numpy.average', 'np.average', (['(da ** 2)'], {}), '(da ** 2)\n', (1236, 1245), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from numpy import log10 as lg
from numpy import pi as pi
from scipy.interpolate import interp1d as sp_interp1d
from scipy.integrate import odeint
from scipy.integrate import ode
import warnings
import timeit
import scipy.optimize as opt
from matplotlib import cm
from astropy import constants as const
from astropy import units as u
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
G=const.G.cgs.value
c=const.c.cgs.value
Ms=const.M_sun.cgs.value
hbar=const.hbar.cgs.value
m_n=const.m_n.cgs.value
km=10**5
import matplotlib.font_manager as font_manager
plt.rcParams['xtick.labelsize'] = 25
plt.rcParams['ytick.labelsize'] = 25
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.major.size'] = 8
plt.rcParams['ytick.major.size'] = 8
plt.rcParams['xtick.minor.size'] = 4
plt.rcParams['ytick.minor.size'] = 4
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.right'] = True
plt.rcParams['axes.labelpad'] = 8.0
plt.rcParams['figure.constrained_layout.h_pad'] = 0
plt.rcParams['text.usetex'] = True
plt.rc('text', usetex=True)
plt.rcParams['font.sans-serif'] = ['Times New Roman']
plt.tick_params(axis='both', which='minor', labelsize=18)
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
names1= ['m14','m14_5_001','m14_5_1', 'm14_10_001','m14_10_1']
names2=['m20','m20_5_001', 'm20_10_001','m20_10_1']
colors=['black', 'c', 'g', 'orange', 'red', 'black', 'c','orange','red']
linestyle=['-', ':', '-.', '-', '--' ,'-' ,'--' , '-.' ,':']
labels=[r'\rm GR',r'$\xi=5,\,\, a=0.01$', r'$\xi=5,\,\, a=1$',r'$\xi=10,\,\, a=0.01$',r'$\xi=10,\,\, a=1$',r'\rm GR',r'$\xi=5,\,\, a=0.01$',
r'$\xi=10,\,\, a=0.01$',r'$\xi=10,\,\, a=1$']
fig, axs = plt.subplots(2, 2,figsize=(15,12),sharex=True, sharey='row')
plt.subplots_adjust(hspace=0.0)
plt.subplots_adjust(wspace=0)
axs[0,0].yaxis.set_minor_locator(MultipleLocator(0.25/5))
axs[1,0].yaxis.set_minor_locator(MultipleLocator(0.2/5))
axs[0,0].xaxis.set_minor_locator(MultipleLocator(10/5))
for i in range(len(names1)):
data1 = np.genfromtxt('data/'+'sol_'+ 'ap4_'+names1[i]+'.txt')
R, gtt, grr= data1[:,0]/10**5, data1[:,1], data1[:, 2]
axs[1,0].plot(R,gtt,linewidth=2, color=colors[i],linestyle=linestyle[i])
axs[1,0].grid(alpha=0.6)
axs[1,0].set_ylabel(r'$ -g_{tt}$', fontsize=30)
axs[0,0].plot(R,grr,linewidth=2, color=colors[i],linestyle=linestyle[i],label=labels[i])
axs[0,0].grid(alpha=0.6)
axs[0,0].set_ylabel(r'$ g_{rr}$', fontsize=30)
axs[0,0].legend(fontsize=25, frameon=False,loc=(0.37,0.27))
sub_axes = plt.axes([.3, .18, .20, .18])
sub_axes.plot(R,gtt,linewidth=2, color=colors[i],linestyle=linestyle[i])
sub_axes.set_ylim(0.67,0.725)
sub_axes.set_xlim(13.4,14.6)
# sub_axes.set_xticks([10,11,12])
# sub_axes.grid(alpha=0.8)
sub_axes.yaxis.set_minor_locator(MultipleLocator(0.02/5))
sub_axes.xaxis.set_minor_locator(MultipleLocator(0.5/5))
for j in range(len(names2)):
data2 = np.genfromtxt('data/'+'sol_'+ 'ap4_'+names2[j]+'.txt')
R, gtt, grr= data2[:,0]/10**5, data2[:,1], data2[:, 2]
axs[1,1].plot(R,gtt,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5])
axs[1,1].grid(alpha=0.6)
axs[0,1].plot(R,grr,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5],label=labels[j+5])
axs[0,1].grid(alpha=0.6)
axs[0,1].legend(fontsize=25, frameon=False,loc=(0.37,0.4))
sub_axes = plt.axes([.69, .18, .19, .16])
sub_axes.plot(R,gtt,linewidth=2, color=colors[j+5],linestyle=linestyle[j+5])
sub_axes.set_xlim(13.4,14.6)
sub_axes.set_ylim(0.53,0.59)
# sub_axes.set_yticks([6,8,10])
sub_axes.set_yticks([0.54,0.56,0.58])
# sub_axes.grid(alpha=0.8)
sub_axes.yaxis.set_minor_locator(MultipleLocator(0.02/5))
sub_axes.xaxis.set_minor_locator(MultipleLocator(0.5/5))
fig.text(0.48, 0.04, r'$r\,[\rm km]$' ,fontsize=30)
# fig.text(0.7, 0.04, r'$r\,[\rm km]$' ,fontsize=30)
axs[1,0].set_ylim(0.14,0.95)
axs[0,0].set_ylim(0.97,2.35)
axs[0,0].set_xlim(-1,43)
fig.text(0.28, 0.84, r'$M=1.4M_{\odot}$' ,fontsize=25)
fig.text(0.66, 0.84, r'$M=2M_{\odot}$' ,fontsize=25)
plt.savefig("ap41.pdf", format='pdf', bbox_inches="tight")
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.axes",
"numpy.genfromtxt",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.tick_params",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots_adjust"
] |
[((1228, 1255), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1234, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1367), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""minor"""', 'labelsize': '(18)'}), "(axis='both', which='minor', labelsize=18)\n", (1325, 1367), True, 'import matplotlib.pyplot as plt\n'), ((1943, 2006), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(15, 12)', 'sharex': '(True)', 'sharey': '"""row"""'}), "(2, 2, figsize=(15, 12), sharex=True, sharey='row')\n", (1955, 2006), True, 'import matplotlib.pyplot as plt\n'), ((2004, 2035), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.0)'}), '(hspace=0.0)\n', (2023, 2035), True, 'import matplotlib.pyplot as plt\n'), ((2036, 2065), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)'}), '(wspace=0)\n', (2055, 2065), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4468), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ap41.pdf"""'], {'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('ap41.pdf', format='pdf', bbox_inches='tight')\n", (4421, 4468), True, 'import matplotlib.pyplot as plt\n'), ((4469, 4479), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4477, 4479), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2124), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.25 / 5)'], {}), '(0.25 / 5)\n', (2114, 2124), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((2157, 2181), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.2 / 5)'], {}), '(0.2 / 5)\n', (2172, 2181), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((2214, 2237), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(10 / 5)'], {}), '(10 / 5)\n', (2229, 2237), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((2293, 2354), 'numpy.genfromtxt', 'np.genfromtxt', (["('data/' + 'sol_' + 'ap4_' + names1[i] + '.txt')"], {}), "('data/' + 'sol_' + 'ap4_' + names1[i] + '.txt')\n", (2306, 2354), True, 'import numpy as np\n'), ((2832, 2864), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.3, 0.18, 0.2, 0.18]'], {}), '([0.3, 0.18, 0.2, 0.18])\n', (2840, 2864), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3322), 'numpy.genfromtxt', 'np.genfromtxt', (["('data/' + 'sol_' + 'ap4_' + names2[j] + '.txt')"], {}), "('data/' + 'sol_' + 'ap4_' + names2[j] + '.txt')\n", (3274, 3322), True, 'import numpy as np\n'), ((3696, 3730), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.69, 0.18, 0.19, 0.16]'], {}), '([0.69, 0.18, 0.19, 0.16])\n', (3704, 3730), True, 'import matplotlib.pyplot as plt\n'), ((3114, 3139), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.02 / 5)'], {}), '(0.02 / 5)\n', (3129, 3139), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((3176, 3200), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.5 / 5)'], {}), '(0.5 / 5)\n', (3191, 3200), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((4022, 4047), 'matplotlib.ticker.MultipleLocator', 'MultipleLocator', (['(0.02 / 5)'], {}), '(0.02 / 5)\n', (4037, 4047), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n'), ((4084, 4108), 'matplotlib.ticker.MultipleLocator', 
'MultipleLocator', (['(0.5 / 5)'], {}), '(0.5 / 5)\n', (4099, 4108), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split miniImageNET 100 experiment.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import random
import datetime
import collections
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import construct_split_miniImagenet
from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check
from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'RESNET-S', 'RESNET-B', 'VGG']
ARCH = 'RESNET-S'
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'M-EWC', 'S-GEM', 'A-GEM', 'FTR_EXT', 'PNN', 'ER-Reservoir', 'ER-Ringbuffer', 'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP', 'ER-SUBSPACE-GP'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 13
IMG_HEIGHT = 84
IMG_WIDTH = 84
IMG_CHANNELS = 3
TOTAL_CLASSES = 100 # Total number of classes in the dataset
VISUALIZE_IMPORTANCE_MEASURE = False
MEASURE_CONVERGENCE_AFTER = 0.9
EPS_MEM_BATCH_SIZE = 256
DEBUG_EPISODIC_MEMORY = False
K_FOR_CROSS_VAL = 3
TIME_MY_METHOD = False
COUNT_VIOLATONS = False
MEASURE_PERF_ON_EPS_MEMORY = False
## Logging, saving and testing options
LOG_DIR = './split_miniImagenet_results'
RESNET18_miniImageNET10_CHECKPOINT = './resnet-18-pretrained-miniImagenet10/model.ckpt-19999'
DATA_FILE = 'miniImageNet_Dataset/miniImageNet_full.pickle'
## Evaluation options
## Task split
NUM_TASKS = 10
MULTI_TASK = False
PROJECTION_RANK = 50
GRAD_CHECK = False
QR = False
SVB = False
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split miniImagenet experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--maintain-orthogonality", action="store_true",
help="If option is chosen then weights will be projected to Steifel manifold.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Total size of episodic memory.")
parser.add_argument("--eps-mem-batch", type=int, default=EPS_MEM_BATCH_SIZE,
help="Number of samples per class from previous tasks.")
parser.add_argument("--num-tasks", type=int, default=NUM_TASKS,
help="Number of tasks.")
parser.add_argument("--subspace-share-dims", type=int, default=0,
help="Number of dimensions to share across tasks.")
parser.add_argument("--data-file", type=str, default=DATA_FILE,
help="miniImageNet data file.")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
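# A typical invocation (the script file name here is illustrative, but all flags exist in the parser above):
#   python train_split_miniimagenet.py --imp-method ER-Ringbuffer --arch RESNET-S \
#          --train-iters 2000 --batch-size 16 --learning-rate 0.1 \
#          --mem-size 13 --eps-mem-batch 256 --num-tasks 10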
def train_task_sequence(model, sess, datasets, args):
"""
    Train and evaluate the LLL system such that we only see an example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
if model.imp_method in {'A-GEM', 'ER-Ringbuffer', 'ER-Reservoir', 'ER-SUBSPACE', 'ER-SUBSPACE-GP'}:
use_episodic_memory = True
else:
use_episodic_memory = False
batch_size = args.batch_size
# Loop over number of runs to average over
for runid in range(args.num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(args.random_seed+runid)
random.seed(args.random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
classes_per_task = TOTAL_CLASSES// args.num_tasks
total_classes = classes_per_task * model.num_tasks
if args.online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = args.mem_size * total_classes
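        # With the defaults above (mem_size = 13 samples per class, 10 tasks of 10 classes each, and
        # K_FOR_CROSS_VAL = 3 tasks held out), total_classes = 10 * 7 = 70, so the buffer holds
        # 13 * 70 = 910 images.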
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if use_episodic_memory:
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
count_cls = np.zeros(TOTAL_CLASSES, dtype=np.int32)
episodic_filled_counter = 0
examples_seen_so_far = 0
# Mask for softmax
logit_mask = np.zeros(TOTAL_CLASSES)
nd_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
if COUNT_VIOLATONS:
violation_count = np.zeros(model.num_tasks)
vc = 0
proj_matrices = generate_projection_matrix(model.num_tasks, feature_dim=model.subspace_proj.get_shape()[0], share_dims=args.subspace_share_dims, qr=QR)
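        # Each proj_matrices[t] is expected to be a feature_dim x feature_dim projector onto the
        # subspace assigned to task t, with `subspace_share_dims` of those dimensions shared across
        # all tasks; it is fed into model.subspace_proj whenever task t is trained or evaluated.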
# Check the sanity of the generated matrices
unit_test_projection_matrices(proj_matrices)
# TODO: Temp for gradients check
prev_task_grads = []
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0 and model.imp_method != 'PNN'):
model.restore(sess)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
pnn_train_phase[task] = True
pnn_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
            # Load the training images/labels restricted to the current task's classes
task_train_images, task_train_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
print('Unique labels in the task: {}'.format(np.unique(np.nonzero(task_train_labels)[1])))
# Test for the tasks that we've seen so far
test_labels += task_labels[task]
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
logit_mask[:] = 0
# Train a task observing sequence of data
if args.train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if args.cross_validate_mode:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = args.train_iters
# Set the mask only once before starting the training for the task
logit_mask[task_labels[task]] = 1.0
if MULTI_TASK:
logit_mask[:] = 1.0
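            # logit_mask is fed as model.output_mask; the intent is that only the logits of the
            # active task's classes contribute to the loss while all other outputs are masked out.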
            # Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
# Number of iterations after which convergence is checked
convergence_iters = int(num_iters * MEASURE_CONVERGENCE_AFTER)
# Training loop for task T
for iters in range(num_iters):
if args.train_single_epoch and not args.cross_validate_mode and not MULTI_TASK:
if (iters <= 20) or (iters > 20 and iters % 50 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
ftask.append(fbatch)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
elif model.imp_method in {'A-GEM', 'ER-Ringbuffer'}:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
else:
# Set the output labels over which the model needs to be trained
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
if args.train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+residual], model.y_[task]: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5, model.learning_rate: args.learning_rate}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True, model.learning_rate: args.learning_rate}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
residual = batch_size
if model.imp_method == 'PNN':
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_[task]: train_y[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5, model.learning_rate: args.learning_rate}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, pnn_logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
else:
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True, model.learning_rate: args.learning_rate}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PROJ-SUBSPACE-GP':
if task == 0:
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
# Compute gradient in \perp space
logit_mask[:] = 0
for tt in range(task):
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.train_phase] = False
feed_dict[model.subspace_proj] = np.eye(proj_matrices[task].shape[0]) - proj_matrices[task]
sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute gradient in P space and train
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.train_phase] = True
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train_gp, model.gp_total_loss], feed_dict=feed_dict)
reg = 0.0
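                    # Sketch of the PROJ-SUBSPACE-GP step for task > 0, as coded above: reference gradients
                    # are first accumulated with the complementary projector (I - P_task) while evaluating the
                    # heads of the old tasks, and the actual update is then taken inside the current task's
                    # subspace P_task (train_gp / gp_total_loss presumably combine the two gradients).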
elif model.imp_method == 'SUBSPACE-PROJ':
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
if args.maintain_orthogonality:
_, loss = sess.run([model.train_stiefel, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PNN':
_, loss = sess.run([model.train[task], model.unweighted_entropy[task]], feed_dict=feed_dict)
elif model.imp_method == 'FTR_EXT':
feed_dict[model.output_mask] = logit_mask
if task == 0:
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_classifier, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC' or model.imp_method == 'M-EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
if (iters >= convergence_iters) and (model.imp_method == 'M-EWC'):
_, _, _, _, loss = sess.run([model.weights_old_ops_grouped, model.set_tmp_fisher, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
else:
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
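                    # A-GEM constrains each update so that the average loss on a batch drawn from episodic
                    # memory does not increase: if the current-task gradient g satisfies g . g_ref < 0 for the
                    # reference gradient g_ref, it is replaced by the projection
                    #   g - (g . g_ref / g_ref . g_ref) * g_ref
                    # (presumably implemented inside model.train_subseq_tasks below).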
if task == 0:
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
nd_logit_mask[:] = 0
for tt in range(task):
nd_logit_mask[tt][task_labels[tt]] = 1.0
if episodic_filled_counter <= args.eps_mem_batch:
mem_sample_mask = np.arange(episodic_filled_counter)
else:
# Sample a random subset from episodic memory buffer
mem_sample_mask = np.random.choice(episodic_filled_counter, args.eps_mem_batch, replace=False) # Sample without replacement so that we don't sample an example more than once
# Store the reference gradient
ref_feed_dict = {model.x: episodic_images[mem_sample_mask], model.y_: episodic_labels[mem_sample_mask],
model.keep_prob: 1.0, model.train_phase: True, model.learning_rate: args.learning_rate}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
ref_feed_dict.update(logit_mask_dict)
ref_feed_dict[model.mem_batch_size] = float(len(mem_sample_mask))
sess.run(model.store_ref_grads, feed_dict=ref_feed_dict)
# Compute the gradient for current task and project if need be
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
if COUNT_VIOLATONS:
vc, _, loss = sess.run([model.violation_count, model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
else:
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
# Put the batch in the ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
elif model.imp_method == 'RWALK':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Update fisher and importance score after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
# Update the importance score using distance in riemannian manifold
sess.run(model.update_big_omega_riemann)
# Now that the score is updated, compute the new value for running Fisher
sess.run(model.set_running_fisher)
# Store the current value of the weights
sess.run(model.weights_delta_old_grouped)
# Reset the delta_L
sess.run([model.reset_small_omega])
_, _, _, _, loss = sess.run([model.set_tmp_fisher, model.weights_old_ops_grouped,
model.train, model.update_small_omega, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'ER-Reservoir':
mem_filled_so_far = examples_seen_so_far if (examples_seen_so_far < episodic_mem_size) else episodic_mem_size
if mem_filled_so_far < args.eps_mem_batch:
er_mem_indices = np.arange(mem_filled_so_far)
else:
er_mem_indices = np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0)
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
labels_in_the_batch = np.unique(np.nonzero(er_train_y_batch)[1])
logit_mask[:] = 0
for tt in range(task+1):
if any(c_lab == t_lab for t_lab in task_labels[tt] for c_lab in labels_in_the_batch):
logit_mask[task_labels[tt]] = 1.0
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.learning_rate: args.learning_rate}
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Reservoir update
for er_x, er_y_ in zip(train_x[offset:offset+residual], train_y[offset:offset+residual]):
update_reservior(er_x, er_y_, episodic_images, episodic_labels, episodic_mem_size, examples_seen_so_far)
examples_seen_so_far += 1
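                    # Reservoir sampling keeps the buffer an (approximately) uniform sample of everything
                    # seen so far. Assuming update_reservior follows the usual rule, once the buffer is full
                    # it behaves roughly like:
                    #   j = np.random.randint(0, examples_seen_so_far)
                    #   if j < episodic_mem_size:
                    #       episodic_images[j], episodic_labels[j] = er_x, er_y_
                    # while earlier examples are written directly into the still-free slots.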
elif model.imp_method == 'ER-Ringbuffer':
# Sample Bn U Bm
mem_filled_so_far = episodic_filled_counter if (episodic_filled_counter <= episodic_mem_size) else episodic_mem_size
er_mem_indices = np.arange(mem_filled_so_far) if (mem_filled_so_far <= args.eps_mem_batch) else np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0) # TODO: Check if for task 0 the first arg is empty
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
# Set the logit masks
nd_logit_mask[:] = 0
for tt in range(task+1):
nd_logit_mask[tt][task_labels[tt]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.learning_rate: args.learning_rate}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = float(er_train_x_batch.shape[0])
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
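                    # Unlike ER-Reservoir above, the ring buffer keeps a fixed FIFO slice of args.mem_size
                    # samples per class (maintained by update_fifo_buffer), so every class seen so far stays
                    # represented in memory.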
elif model.imp_method == 'ER-SUBSPACE':
# Zero out all the grads
sess.run([model.reset_er_subspace_grads])
if task > 0:
# Randomly pick a task to replay
tt = np.squeeze(np.random.choice(np.arange(task), 1, replace=False))
mem_offset = tt*args.mem_size*classes_per_task
er_mem_indices = np.arange(mem_offset, mem_offset+args.mem_size*classes_per_task)
np.random.shuffle(er_mem_indices)
er_train_x_batch = episodic_images[er_mem_indices]
er_train_y_batch = episodic_labels[er_mem_indices]
feed_dict = {model.x: er_train_x_batch, model.y_: er_train_y_batch,
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.task_id: task+1, model.learning_rate: args.learning_rate}
logit_mask[:] = 0
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[tt]
sess.run(model.accum_er_subspace_grads, feed_dict=feed_dict)
# Train on the current task
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.train_phase: True, model.task_id: task+1, model.learning_rate: args.learning_rate}
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.subspace_proj] = proj_matrices[task]
if args.maintain_orthogonality:
if SVB:
_, _, loss = sess.run([model.train_er_subspace, model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
# Every few iterations bound the singular values
if iters % 20 == 0:
sess.run(model.update_weights_svb)
else:
_, loss = sess.run([model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
sess.run(model.train_stiefel, feed_dict={model.learning_rate: args.learning_rate})
else:
_, _, loss = sess.run([model.train_er_subspace, model.accum_er_subspace_grads, model.reg_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
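                    # ER-SUBSPACE, roughly: gradients are accumulated twice per step, once on a replayed batch
                    # from one randomly chosen past task using that task's projector, and once on the current
                    # batch using the current task's projector, before a single optimizer update is applied.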
elif model.imp_method == 'ER-SUBSPACE-GP':
# Zero out all the grads
sess.run([model.reset_er_subspace_gp_grads])
feed_dict = {model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 1.0,
model.task_id: task+1, model.learning_rate: args.learning_rate, model.train_phase: True}
if task > 0:
# Randomly pick a task to replay
tt = np.squeeze(np.random.choice(np.arange(task), 1, replace=False))
mem_offset = tt*args.mem_size*classes_per_task
er_mem_indices = np.arange(mem_offset, mem_offset+args.mem_size*classes_per_task)
np.random.shuffle(er_mem_indices)
er_train_x_batch = episodic_images[er_mem_indices]
er_train_y_batch = episodic_labels[er_mem_indices]
logit_mask[:] = 0
logit_mask[task_labels[tt]] = 1.0
feed_dict[model.output_mask] = logit_mask
feed_dict[model.x] = er_train_x_batch
feed_dict[model.y_] = er_train_y_batch
# Compute the gradient in the \perp space
#feed_dict[model.subspace_proj] = np.eye(proj_matrices[tt].shape[0]) - proj_matrices[tt]
#sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute the gradient in P space and store the gradient
feed_dict[model.subspace_proj] = proj_matrices[tt]
sess.run(model.accum_er_subspace_grads, feed_dict=feed_dict)
# Train on the current task
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
if task == 0:
feed_dict[model.x] = train_x[offset:offset+residual]
feed_dict[model.y_] = train_y[offset:offset+residual]
else:
# Sample Bn U Bm
mem_filled_so_far = episodic_filled_counter if (episodic_filled_counter <= episodic_mem_size) else episodic_mem_size
er_mem_indices = np.arange(mem_filled_so_far) if (mem_filled_so_far <= args.eps_mem_batch) else np.random.choice(mem_filled_so_far, args.eps_mem_batch, replace=False)
np.random.shuffle(er_mem_indices)
er_train_x_batch = np.concatenate((episodic_images[er_mem_indices], train_x[offset:offset+residual]), axis=0) # TODO: Check if for task 0 the first arg is empty
er_train_y_batch = np.concatenate((episodic_labels[er_mem_indices], train_y[offset:offset+residual]), axis=0)
feed_dict[model.x] = er_train_x_batch
feed_dict[model.y_] = er_train_y_batch
# Compute the gradient in the \perp space
feed_dict[model.subspace_proj] = np.eye(proj_matrices[tt].shape[0]) - proj_matrices[task]
sess.run(model.store_ref_grads, feed_dict=feed_dict)
# Compute the gradient in P space and store the gradient
feed_dict[model.x] = train_x[offset:offset+residual]
feed_dict[model.y_] = train_y[offset:offset+residual]
feed_dict[model.subspace_proj] = proj_matrices[task]
_, loss = sess.run([model.train_er_gp, model.gp_total_loss], feed_dict=feed_dict)
# Put the batch in the FIFO ring buffer
update_fifo_buffer(train_x[offset:offset+residual], train_y[offset:offset+residual], episodic_images, episodic_labels,
task_labels[task], args.mem_size, count_cls, episodic_filled_counter)
if (iters % 100 == 0):
print('Step {:d} {:.3f}'.format(iters, loss))
#print('Step {:d}\t CE: {:.3f}\t Reg: {:.9f}\t TL: {:.3f}'.format(iters, entropy, reg, loss))
#print('Step {:d}\t Reg: {:.9f}\t TL: {:.3f}'.format(iters, reg, loss))
if (math.isnan(loss)):
print('ERROR: NaNs NaNs NaNs!!!')
sys.exit(0)
print('\t\t\t\tTraining for Task%d done!'%(task))
if model.imp_method == 'SUBSPACE-PROJ' and GRAD_CHECK:
# TODO: Compute the average gradient of the task at \theta^*: Could be done as running average (no need for extra passes?)
bbatch_size = 100
grad_sum = []
for iiters in range(train_x.shape[0] // bbatch_size):
offset = iiters * bbatch_size
feed_dict = {model.x: train_x[offset:offset+bbatch_size], model.y_: train_y[offset:offset+bbatch_size],
model.keep_prob: 1.0, model.train_phase: False, model.learning_rate: args.learning_rate}
nd_logit_mask[:] = 0
nd_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, nd_logit_mask)}
feed_dict.update(logit_mask_dict)
#feed_dict[model.subspace_proj] = proj_matrices[task]
projection_dict = {proj: proj_matrices[proj.get_shape()[0]][task] for proj in model.subspace_proj}
feed_dict.update(projection_dict)
feed_dict[model.mem_batch_size] = residual
grad_vars, train_vars = sess.run([model.reg_gradients_vars, model.trainable_vars], feed_dict=feed_dict)
for v in range(len(train_vars)):
if iiters == 0:
grad_sum.append(grad_vars[v][0])
else:
                        grad_sum[v] += (grad_vars[v][0] - grad_sum[v]) / (iiters + 1)  # incremental mean over iiters+1 mini-batches
prev_task_grads.append(grad_sum)
if use_episodic_memory:
episodic_filled_counter += args.mem_size * classes_per_task
if model.imp_method == 'A-GEM':
if COUNT_VIOLATONS:
violation_count[task] = vc
print('Task {}: Violation Count: {}'.format(task, violation_count))
sess.run(model.reset_violation_count, feed_dict=feed_dict)
# Compute the inter-task updates, Fisher/ importance scores etc
# Don't calculate the task updates for the last task
if (task < (len(task_labels) - 1)) or MEASURE_PERF_ON_EPS_MEMORY:
model.task_updates(sess, task, task_train_images, task_labels[task]) # TODO: For MAS, should the gradients be for current task or all the previous tasks
print('\t\t\t\tTask updates after Task%d done!'%(task))
if args.train_single_epoch and not args.cross_validate_mode:
fbatch = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
print('Task: {}, Acc: {}'.format(task, fbatch))
ftask.append(fbatch)
ftask = np.array(ftask)
if model.imp_method == 'PNN':
pnn_train_phase[:] = False
pnn_train_phase[task] = True
pnn_logit_mask[:] = 0
pnn_logit_mask[task][task_labels[task]] = 1.0
else:
if MEASURE_PERF_ON_EPS_MEMORY:
eps_mem = {
'images': episodic_images,
'labels': episodic_labels,
}
# Measure perf on episodic memory
ftask = test_task_sequence(model, sess, eps_mem, task_labels, task, classes_per_task=classes_per_task, projection_matrices=proj_matrices)
else:
# List to store accuracy for all the tasks for the current trained model
ftask = test_task_sequence(model, sess, datasets[0]['test'], task_labels, task, projection_matrices=proj_matrices)
print('Task: {}, Acc: {}'.format(task, ftask))
# Store the accuracies computed at task T in a list
evals.append(ftask)
# Reset the optimizer
model.reset_optimizer(sess)
#-> End for loop task
runs.append(np.array(evals))
# End for loop runid
runs = np.array(runs)
return runs, task_labels_dataset
def test_task_sequence(model, sess, test_data, test_tasks, task, classes_per_task=0, projection_matrices=None):
"""
Snapshot the current performance
"""
if TIME_MY_METHOD:
# Only compute the training time
return np.zeros(model.num_tasks)
final_acc = np.zeros(model.num_tasks)
if model.imp_method in {'PNN', 'A-GEM', 'ER-Ringbuffer'}:
logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
else:
logit_mask = np.zeros(TOTAL_CLASSES)
if MEASURE_PERF_ON_EPS_MEMORY:
for tt, labels in enumerate(test_tasks):
# Multi-head evaluation setting
logit_mask[:] = 0
logit_mask[labels] = 1.0
mem_offset = tt*SAMPLES_PER_CLASS*classes_per_task
feed_dict = {model.x: test_data['images'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task],
model.y_: test_data['labels'][mem_offset:mem_offset+SAMPLES_PER_CLASS*classes_per_task], model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
for tt, labels in enumerate(test_tasks):
if not MULTI_TASK:
if tt > task:
return final_acc
task_test_images, task_test_labels = load_task_specific_data(test_data, labels)
if model.imp_method == 'PNN':
pnn_train_phase = np.array(np.zeros(model.num_tasks), dtype=np.bool)
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_[tt]: task_test_labels, model.keep_prob: 1.0}
train_phase_dict = {m_t: i_t for (m_t, i_t) in zip(model.train_phase, pnn_train_phase)}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(train_phase_dict)
feed_dict.update(logit_mask_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
elif model.imp_method in {'A-GEM', 'ER-Ringbuffer'}:
logit_mask[:] = 0
logit_mask[tt][labels] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False}
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, logit_mask)}
feed_dict.update(logit_mask_dict)
#if model.imp_method in {'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP'}:
if False:
feed_dict[model.subspace_proj] = projection_matrices[tt]
#feed_dict[model.subspace_proj] = np.eye(projection_matrices[tt].shape[0])
#projection_dict = {proj: projection_matrices[proj.get_shape()[0]][tt] for proj in model.subspace_proj}
#feed_dict.update(projection_dict)
acc = model.accuracy[tt].eval(feed_dict = feed_dict)
else:
logit_mask[:] = 0
logit_mask[labels] = 1.0
#for ttt in range(task+1):
# logit_mask[test_tasks[ttt]] = 1.0
feed_dict = {model.x: task_test_images,
model.y_: task_test_labels, model.keep_prob: 1.0, model.train_phase: False, model.output_mask: logit_mask}
if model.imp_method in {'SUBSPACE-PROJ', 'ER-SUBSPACE', 'PROJ-SUBSPACE-GP', 'ER-SUBSPACE-GP'}:
feed_dict[model.subspace_proj] = projection_matrices[tt]
acc = model.accuracy.eval(feed_dict = feed_dict)
final_acc[tt] = acc
return final_acc
def main():
"""
Create the model and start the training
"""
# Get the CL arguments
args = get_arguments()
# Check if the network architecture is valid
if args.arch not in VALID_ARCHS:
raise ValueError("Network architecture %s is not supported!"%(args.arch))
# Check if the method to compute importance is valid
if args.imp_method not in MODELS:
raise ValueError("Importance measure %s is undefined!"%(args.imp_method))
# Check if the optimizer is valid
if args.optim not in VALID_OPTIMS:
raise ValueError("Optimizer %s is undefined!"%(args.optim))
# Create log directories to store the results
if not os.path.exists(args.log_dir):
print('Log directory %s created!'%(args.log_dir))
os.makedirs(args.log_dir)
# Generate the experiment key and store the meta data in a file
exper_meta_data = {'ARCH': args.arch,
'DATASET': 'SPLIT_miniImageNET',
'NUM_RUNS': args.num_runs,
'TRAIN_SINGLE_EPOCH': args.train_single_epoch,
'IMP_METHOD': args.imp_method,
'SYNAP_STGTH': args.synap_stgth,
'FISHER_EMA_DECAY': args.fisher_ema_decay,
'FISHER_UPDATE_AFTER': args.fisher_update_after,
'OPTIM': args.optim,
'LR': args.learning_rate,
'BATCH_SIZE': args.batch_size,
'MEM_SIZE': args.mem_size}
experiment_id = "SPLIT_miniImageNET_META_%s_%s_%r_%s-"%(args.imp_method, str(args.synap_stgth).replace('.', '_'),
str(args.batch_size), str(args.mem_size)) + datetime.datetime.now().strftime("%y-%m-%d-%H-%M")
snapshot_experiment_meta_data(args.log_dir, experiment_id, exper_meta_data)
# Get the task labels from the total number of tasks and full label space
if args.online_cross_val:
num_tasks = K_FOR_CROSS_VAL
else:
num_tasks = args.num_tasks - K_FOR_CROSS_VAL
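    # The first K_FOR_CROSS_VAL task slots are reserved for hyper-parameter tuning; in a normal run
    # their classes are skipped via class_label_offset inside train_task_sequence().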
# Load the split miniImagenet dataset
data_labs = [np.arange(TOTAL_CLASSES)]
datasets = construct_split_miniImagenet(data_labs, args.data_file)
# Variables to store the accuracies and standard deviations of the experiment
acc_mean = dict()
acc_std = dict()
# Reset the default graph
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
# Set the random seed
tf.set_random_seed(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
# Define Input and Output of the model
x = tf.placeholder(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
learning_rate = tf.placeholder(dtype=tf.float32, shape=())
if args.imp_method == 'PNN':
y_ = []
for i in range(num_tasks):
y_.append(tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES]))
else:
y_ = tf.placeholder(tf.float32, shape=[None, TOTAL_CLASSES])
# Define the optimizer
if args.optim == 'ADAM':
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif args.optim == 'SGD':
opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif args.optim == 'MOMENTUM':
#base_lr = tf.constant(args.learning_rate)
#learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - train_step / training_iters), OPT_POWER))
opt = tf.train.MomentumOptimizer(learning_rate, OPT_MOMENTUM)
        # Create the Model / construct the graph
model = Model(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.fisher_update_after,
args.fisher_ema_decay, learning_rate, network_arch=args.arch)
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
time_start = time.time()
with tf.Session(config=config, graph=graph) as sess:
runs, task_labels_dataset = train_task_sequence(model, sess, datasets, args)
# Close the session
sess.close()
time_end = time.time()
time_spent = time_end - time_start
# Store all the results in one dictionary to process later
exper_acc = dict(mean=runs)
exper_labels = dict(labels=task_labels_dataset)
# If cross-validation flag is enabled, store the stuff in a text file
if args.cross_validate_mode:
acc_mean, acc_std = average_acc_stats_across_runs(runs, model.imp_method)
fgt_mean, fgt_std = average_fgt_stats_across_runs(runs, model.imp_method)
cross_validate_dump_file = args.log_dir + '/' + 'SPLIT_miniImageNET_%s_%s'%(args.imp_method, args.optim) + '.txt'
with open(cross_validate_dump_file, 'a') as f:
if MULTI_TASK:
                f.write('ARCH: {} \t LR:{} \t LAMBDA: {} \t ACC: {}\n'.format(args.arch, args.learning_rate, args.synap_stgth, acc_mean[-1,:].mean()))
else:
f.write('ORTHO:{} \t SVB:{}\t NUM_TASKS: {} \t MEM_SIZE: {} \t ARCH: {} \t LR:{} \t LAMBDA: {} \t SHARED_SUBSPACE:{}, \t ACC: {} (+-{})\t Fgt: {} (+-{})\t QR:{}\t Time: {}\n'.format(args.maintain_orthogonality, SVB, args.num_tasks, args.mem_size, args.arch, args.learning_rate,
args.synap_stgth, args.subspace_share_dims, acc_mean, acc_std, fgt_mean, fgt_std, QR, str(time_spent)))
# Store the experiment output to a file
snapshot_experiment_eval(args.log_dir, experiment_id, exper_acc)
snapshot_task_labels(args.log_dir, experiment_id, exper_labels)
if __name__ == '__main__':
main()
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.utils.average_acc_stats_across_runs",
"tensorflow.reset_default_graph",
"numpy.ones",
"utils.utils.average_fgt_stats_across_runs",
"tensorflow.ConfigProto",
"numpy.arange",
"utils.vis_utils.snapshot_experiment_meta_data",
"utils.data_utils.construct_split_miniImagenet",
"utils.vis_utils.snapshot_experiment_eval",
"os.path.join",
"os.path.exists",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"random.seed",
"numpy.random.choice",
"datetime.datetime.now",
"numpy.random.shuffle",
"utils.utils.load_task_specific_data",
"math.isnan",
"utils.utils.update_reservior",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.MomentumOptimizer",
"tensorflow.Graph",
"tensorflow.train.GradientDescentOptimizer",
"sys.exit",
"numpy.concatenate",
"utils.utils.unit_test_projection_matrices",
"os.makedirs",
"utils.vis_utils.snapshot_task_labels",
"model.Model",
"numpy.zeros",
"time.time",
"numpy.nonzero",
"utils.utils.update_fifo_buffer",
"numpy.array",
"numpy.eye",
"tensorflow.train.AdamOptimizer"
] |
[((3393, 3425), 'os.path.join', 'os.path.join', (['logdir', 'model_name'], {}), '(logdir, model_name)\n', (3405, 3425), False, 'import os\n'), ((4050, 4135), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script for split miniImagenet experiment."""'}), "(description='Script for split miniImagenet experiment.'\n )\n", (4073, 4135), False, 'import argparse\n'), ((42224, 42238), 'numpy.array', 'np.array', (['runs'], {}), '(runs)\n', (42232, 42238), True, 'import numpy as np\n'), ((42565, 42590), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (42573, 42590), True, 'import numpy as np\n'), ((47575, 47650), 'utils.vis_utils.snapshot_experiment_meta_data', 'snapshot_experiment_meta_data', (['args.log_dir', 'experiment_id', 'exper_meta_data'], {}), '(args.log_dir, experiment_id, exper_meta_data)\n', (47604, 47650), False, 'from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels\n'), ((47960, 48015), 'utils.data_utils.construct_split_miniImagenet', 'construct_split_miniImagenet', (['data_labs', 'args.data_file'], {}), '(data_labs, args.data_file)\n', (47988, 48015), False, 'from utils.data_utils import construct_split_miniImagenet\n'), ((48177, 48201), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (48199, 48201), True, 'import tensorflow as tf\n'), ((48215, 48225), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (48223, 48225), True, 'import tensorflow as tf\n'), ((51366, 51430), 'utils.vis_utils.snapshot_experiment_eval', 'snapshot_experiment_eval', (['args.log_dir', 'experiment_id', 'exper_acc'], {}), '(args.log_dir, experiment_id, exper_acc)\n', (51390, 51430), False, 'from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels\n'), ((51435, 51498), 'utils.vis_utils.snapshot_task_labels', 'snapshot_task_labels', (['args.log_dir', 'experiment_id', 'exper_labels'], {}), '(args.log_dir, experiment_id, exper_labels)\n', (51455, 51498), False, 'from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels\n'), ((3437, 3459), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (3451, 3459), False, 'import os\n'), ((3467, 3486), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (3478, 3486), False, 'import os\n'), ((8308, 8348), 'numpy.random.seed', 'np.random.seed', (['(args.random_seed + runid)'], {}), '(args.random_seed + runid)\n', (8322, 8348), True, 'import numpy as np\n'), ((8355, 8392), 'random.seed', 'random.seed', (['(args.random_seed + runid)'], {}), '(args.random_seed + runid)\n', (8366, 8392), False, 'import random\n'), ((8882, 8912), 'numpy.random.shuffle', 'np.random.shuffle', (['label_array'], {}), '(label_array)\n', (8899, 8912), True, 'import numpy as np\n'), ((10110, 10133), 'numpy.zeros', 'np.zeros', (['TOTAL_CLASSES'], {}), '(TOTAL_CLASSES)\n', (10118, 10133), True, 'import numpy as np\n'), ((10158, 10200), 'numpy.zeros', 'np.zeros', (['[model.num_tasks, TOTAL_CLASSES]'], {}), '([model.num_tasks, TOTAL_CLASSES])\n', (10166, 10200), True, 'import numpy as np\n'), ((10527, 10571), 'utils.utils.unit_test_projection_matrices', 'unit_test_projection_matrices', (['proj_matrices'], {}), '(proj_matrices)\n', (10556, 10571), False, 'from utils.utils import average_acc_stats_across_runs, 
average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((42522, 42547), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (42530, 42547), True, 'import numpy as np\n'), ((42674, 42716), 'numpy.zeros', 'np.zeros', (['[model.num_tasks, TOTAL_CLASSES]'], {}), '([model.num_tasks, TOTAL_CLASSES])\n', (42682, 42716), True, 'import numpy as np\n'), ((42748, 42771), 'numpy.zeros', 'np.zeros', (['TOTAL_CLASSES'], {}), '(TOTAL_CLASSES)\n', (42756, 42771), True, 'import numpy as np\n'), ((43630, 43672), 'utils.utils.load_task_specific_data', 'load_task_specific_data', (['test_data', 'labels'], {}), '(test_data, labels)\n', (43653, 43672), False, 'from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check\n'), ((46608, 46636), 'os.path.exists', 'os.path.exists', (['args.log_dir'], {}), '(args.log_dir)\n', (46622, 46636), False, 'import os\n'), ((46704, 46729), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (46715, 46729), False, 'import os\n'), ((47919, 47943), 'numpy.arange', 'np.arange', (['TOTAL_CLASSES'], {}), '(TOTAL_CLASSES)\n', (47928, 47943), True, 'import numpy as np\n'), ((48294, 48330), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (48312, 48330), True, 'import tensorflow as tf\n'), ((48339, 48371), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (48353, 48371), True, 'import numpy as np\n'), ((48380, 48409), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (48391, 48409), False, 'import random\n'), ((48470, 48547), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS]'}), '(tf.float32, shape=[None, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])\n', (48484, 48547), True, 'import tensorflow as tf\n'), ((48572, 48614), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '()'}), '(dtype=tf.float32, shape=())\n', (48586, 48614), True, 'import tensorflow as tf\n'), ((49471, 49632), 'model.Model', 'Model', (['x', 'y_', 'num_tasks', 'opt', 'args.imp_method', 'args.synap_stgth', 'args.fisher_update_after', 'args.fisher_ema_decay', 'learning_rate'], {'network_arch': 'args.arch'}), '(x, y_, num_tasks, opt, args.imp_method, args.synap_stgth, args.\n fisher_update_after, args.fisher_ema_decay, learning_rate, network_arch\n =args.arch)\n', (49476, 49632), False, 'from model import Model\n'), ((49712, 49728), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (49726, 49728), True, 'import tensorflow as tf\n'), ((49798, 49809), 'time.time', 'time.time', ([], {}), '()\n', (49807, 49809), False, 'import time\n'), ((50036, 50047), 'time.time', 'time.time', ([], {}), '()\n', (50045, 50047), False, 'import time\n'), ((50375, 50428), 'utils.utils.average_acc_stats_across_runs', 'average_acc_stats_across_runs', (['runs', 'model.imp_method'], {}), '(runs, model.imp_method)\n', (50404, 50428), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((50457, 50510), 'utils.utils.average_fgt_stats_across_runs', 'average_fgt_stats_across_runs', (['runs', 'model.imp_method'], {}), 
'(runs, model.imp_method)\n', (50486, 50510), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((8676, 8700), 'numpy.arange', 'np.arange', (['total_classes'], {}), '(total_classes)\n', (8685, 8700), True, 'import numpy as np\n'), ((8809, 8874), 'numpy.arange', 'np.arange', (['class_label_offset', '(total_classes + class_label_offset)'], {}), '(class_label_offset, total_classes + class_label_offset)\n', (8818, 8874), True, 'import numpy as np\n'), ((9401, 9434), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9432, 9434), True, 'import tensorflow as tf\n'), ((9777, 9843), 'numpy.zeros', 'np.zeros', (['[episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS]'], {}), '([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])\n', (9785, 9843), True, 'import numpy as np\n'), ((9874, 9918), 'numpy.zeros', 'np.zeros', (['[episodic_mem_size, TOTAL_CLASSES]'], {}), '([episodic_mem_size, TOTAL_CLASSES])\n', (9882, 9918), True, 'import numpy as np\n'), ((9943, 9982), 'numpy.zeros', 'np.zeros', (['TOTAL_CLASSES'], {'dtype': 'np.int32'}), '(TOTAL_CLASSES, dtype=np.int32)\n', (9951, 9982), True, 'import numpy as np\n'), ((10260, 10285), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (10268, 10285), True, 'import numpy as np\n'), ((11346, 11410), 'utils.utils.load_task_specific_data', 'load_task_specific_data', (["datasets[0]['train']", 'task_labels[task]'], {}), "(datasets[0]['train'], task_labels[task])\n", (11369, 11410), False, 'from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check\n'), ((12470, 12525), 'numpy.ones', 'np.ones', (['[task_train_labels.shape[0]]'], {'dtype': 'np.float32'}), '([task_train_labels.shape[0]], dtype=np.float32)\n', (12477, 12525), True, 'import numpy as np\n'), ((13263, 13292), 'numpy.arange', 'np.arange', (['num_train_examples'], {}), '(num_train_examples)\n', (13272, 13292), True, 'import numpy as np\n'), ((13305, 13328), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (13322, 13328), True, 'import numpy as np\n'), ((42166, 42181), 'numpy.array', 'np.array', (['evals'], {}), '(evals)\n', (42174, 42181), True, 'import numpy as np\n'), ((48825, 48880), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, TOTAL_CLASSES]'}), '(tf.float32, shape=[None, TOTAL_CLASSES])\n', (48839, 48880), True, 'import tensorflow as tf\n'), ((48964, 49015), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (48986, 49015), True, 'import tensorflow as tf\n'), ((49823, 49861), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config', 'graph': 'graph'}), '(config=config, graph=graph)\n', (49833, 49861), True, 'import tensorflow as tf\n'), ((11154, 11196), 'numpy.zeros', 'np.zeros', (['[model.num_tasks, TOTAL_CLASSES]'], {}), '([model.num_tasks, TOTAL_CLASSES])\n', (11162, 11196), True, 'import numpy as np\n'), ((37859, 37875), 'math.isnan', 'math.isnan', (['loss'], {}), '(loss)\n', (37869, 37875), False, 'import math\n'), ((40894, 40909), 'numpy.array', 'np.array', (['ftask'], {}), '(ftask)\n', (40902, 40909), True, 'import numpy as np\n'), ((43750, 43775), 'numpy.zeros', 'np.zeros', 
(['model.num_tasks'], {}), '(model.num_tasks)\n', (43758, 43775), True, 'import numpy as np\n'), ((47520, 47543), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (47541, 47543), False, 'import datetime\n'), ((49069, 49131), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (49102, 49131), True, 'import tensorflow as tf\n'), ((11034, 11059), 'numpy.zeros', 'np.zeros', (['model.num_tasks'], {}), '(model.num_tasks)\n', (11042, 11059), True, 'import numpy as np\n'), ((37952, 37963), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (37960, 37963), False, 'import sys\n'), ((48737, 48792), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, TOTAL_CLASSES]'}), '(tf.float32, shape=[None, TOTAL_CLASSES])\n', (48751, 48792), True, 'import tensorflow as tf\n'), ((49351, 49406), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate', 'OPT_MOMENTUM'], {}), '(learning_rate, OPT_MOMENTUM)\n', (49377, 49406), True, 'import tensorflow as tf\n'), ((11670, 11732), 'utils.utils.load_task_specific_data', 'load_task_specific_data', (["datasets[0]['train']", 'task_labels[t_]'], {}), "(datasets[0]['train'], task_labels[t_])\n", (11693, 11732), False, 'from utils.utils import get_sample_weights, sample_from_dataset, update_episodic_memory, concatenate_datasets, samples_for_each_class, sample_from_dataset_icarl, compute_fgt, load_task_specific_data, grad_check\n'), ((11777, 11836), 'numpy.concatenate', 'np.concatenate', (['(task_train_images, task_tr_images)'], {'axis': '(0)'}), '((task_train_images, task_tr_images), axis=0)\n', (11791, 11836), True, 'import numpy as np\n'), ((11881, 11940), 'numpy.concatenate', 'np.concatenate', (['(task_train_labels, task_tr_labels)'], {'axis': '(0)'}), '((task_train_labels, task_tr_labels), axis=0)\n', (11895, 11940), True, 'import numpy as np\n'), ((12242, 12271), 'numpy.nonzero', 'np.nonzero', (['task_train_labels'], {}), '(task_train_labels)\n', (12252, 12271), True, 'import numpy as np\n'), ((18695, 18731), 'numpy.eye', 'np.eye', (['proj_matrices[task].shape[0]'], {}), '(proj_matrices[task].shape[0])\n', (18701, 18731), True, 'import numpy as np\n'), ((24848, 25049), 'utils.utils.update_fifo_buffer', 'update_fifo_buffer', (['train_x[offset:offset + residual]', 'train_y[offset:offset + residual]', 'episodic_images', 'episodic_labels', 'task_labels[task]', 'args.mem_size', 'count_cls', 'episodic_filled_counter'], {}), '(train_x[offset:offset + residual], train_y[offset:offset +\n residual], episodic_images, episodic_labels, task_labels[task], args.\n mem_size, count_cls, episodic_filled_counter)\n', (24866, 25049), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((23005, 23039), 'numpy.arange', 'np.arange', (['episodic_filled_counter'], {}), '(episodic_filled_counter)\n', (23014, 23039), True, 'import numpy as np\n'), ((23197, 23273), 'numpy.random.choice', 'np.random.choice', (['episodic_filled_counter', 'args.eps_mem_batch'], {'replace': '(False)'}), '(episodic_filled_counter, args.eps_mem_batch, replace=False)\n', (23213, 23273), True, 'import numpy as np\n'), ((26956, 26989), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (26973, 26989), True, 'import numpy as np\n'), ((27029, 27125), 
'numpy.concatenate', 'np.concatenate', (['(episodic_images[er_mem_indices], train_x[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_images[er_mem_indices], train_x[offset:offset +\n residual]), axis=0)\n', (27043, 27125), True, 'import numpy as np\n'), ((27159, 27255), 'numpy.concatenate', 'np.concatenate', (['(episodic_labels[er_mem_indices], train_y[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_labels[er_mem_indices], train_y[offset:offset +\n residual]), axis=0)\n', (27173, 27255), True, 'import numpy as np\n'), ((26769, 26797), 'numpy.arange', 'np.arange', (['mem_filled_so_far'], {}), '(mem_filled_so_far)\n', (26778, 26797), True, 'import numpy as np\n'), ((26865, 26935), 'numpy.random.choice', 'np.random.choice', (['mem_filled_so_far', 'args.eps_mem_batch'], {'replace': '(False)'}), '(mem_filled_so_far, args.eps_mem_batch, replace=False)\n', (26881, 26935), True, 'import numpy as np\n'), ((28208, 28316), 'utils.utils.update_reservior', 'update_reservior', (['er_x', 'er_y_', 'episodic_images', 'episodic_labels', 'episodic_mem_size', 'examples_seen_so_far'], {}), '(er_x, er_y_, episodic_images, episodic_labels,\n episodic_mem_size, examples_seen_so_far)\n', (28224, 28316), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((28803, 28836), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (28820, 28836), True, 'import numpy as np\n'), ((28876, 28972), 'numpy.concatenate', 'np.concatenate', (['(episodic_images[er_mem_indices], train_x[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_images[er_mem_indices], train_x[offset:offset +\n residual]), axis=0)\n', (28890, 28972), True, 'import numpy as np\n'), ((29057, 29153), 'numpy.concatenate', 'np.concatenate', (['(episodic_labels[er_mem_indices], train_y[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_labels[er_mem_indices], train_y[offset:offset +\n residual]), axis=0)\n', (29071, 29153), True, 'import numpy as np\n'), ((30050, 30251), 'utils.utils.update_fifo_buffer', 'update_fifo_buffer', (['train_x[offset:offset + residual]', 'train_y[offset:offset + residual]', 'episodic_images', 'episodic_labels', 'task_labels[task]', 'args.mem_size', 'count_cls', 'episodic_filled_counter'], {}), '(train_x[offset:offset + residual], train_y[offset:offset +\n residual], episodic_images, episodic_labels, task_labels[task], args.\n mem_size, count_cls, episodic_filled_counter)\n', (30068, 30251), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((27302, 27330), 'numpy.nonzero', 'np.nonzero', (['er_train_y_batch'], {}), '(er_train_y_batch)\n', (27312, 27330), True, 'import numpy as np\n'), ((28633, 28661), 'numpy.arange', 'np.arange', (['mem_filled_so_far'], {}), '(mem_filled_so_far)\n', (28642, 28661), True, 'import numpy as np\n'), ((28712, 28782), 'numpy.random.choice', 'np.random.choice', (['mem_filled_so_far', 'args.eps_mem_batch'], {'replace': '(False)'}), '(mem_filled_so_far, args.eps_mem_batch, replace=False)\n', (28728, 28782), True, 'import numpy as np\n'), ((33252, 33453), 'utils.utils.update_fifo_buffer', 'update_fifo_buffer', (['train_x[offset:offset + residual]', 'train_y[offset:offset + residual]', 'episodic_images', 'episodic_labels', 'task_labels[task]', 
'args.mem_size', 'count_cls', 'episodic_filled_counter'], {}), '(train_x[offset:offset + residual], train_y[offset:offset +\n residual], episodic_images, episodic_labels, task_labels[task], args.\n mem_size, count_cls, episodic_filled_counter)\n', (33270, 33453), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((30738, 30806), 'numpy.arange', 'np.arange', (['mem_offset', '(mem_offset + args.mem_size * classes_per_task)'], {}), '(mem_offset, mem_offset + args.mem_size * classes_per_task)\n', (30747, 30806), True, 'import numpy as np\n'), ((30827, 30860), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (30844, 30860), True, 'import numpy as np\n'), ((37298, 37499), 'utils.utils.update_fifo_buffer', 'update_fifo_buffer', (['train_x[offset:offset + residual]', 'train_y[offset:offset + residual]', 'episodic_images', 'episodic_labels', 'task_labels[task]', 'args.mem_size', 'count_cls', 'episodic_filled_counter'], {}), '(train_x[offset:offset + residual], train_y[offset:offset +\n residual], episodic_images, episodic_labels, task_labels[task], args.\n mem_size, count_cls, episodic_filled_counter)\n', (37316, 37499), False, 'from utils.utils import average_acc_stats_across_runs, average_fgt_stats_across_runs, update_reservior, update_fifo_buffer, generate_projection_matrix, unit_test_projection_matrices\n'), ((34195, 34263), 'numpy.arange', 'np.arange', (['mem_offset', '(mem_offset + args.mem_size * classes_per_task)'], {}), '(mem_offset, mem_offset + args.mem_size * classes_per_task)\n', (34204, 34263), True, 'import numpy as np\n'), ((34284, 34317), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (34301, 34317), True, 'import numpy as np\n'), ((36074, 36107), 'numpy.random.shuffle', 'np.random.shuffle', (['er_mem_indices'], {}), '(er_mem_indices)\n', (36091, 36107), True, 'import numpy as np\n'), ((36151, 36247), 'numpy.concatenate', 'np.concatenate', (['(episodic_images[er_mem_indices], train_x[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_images[er_mem_indices], train_x[offset:offset +\n residual]), axis=0)\n', (36165, 36247), True, 'import numpy as np\n'), ((36336, 36432), 'numpy.concatenate', 'np.concatenate', (['(episodic_labels[er_mem_indices], train_y[offset:offset + residual])'], {'axis': '(0)'}), '((episodic_labels[er_mem_indices], train_y[offset:offset +\n residual]), axis=0)\n', (36350, 36432), True, 'import numpy as np\n'), ((36668, 36702), 'numpy.eye', 'np.eye', (['proj_matrices[tt].shape[0]'], {}), '(proj_matrices[tt].shape[0])\n', (36674, 36702), True, 'import numpy as np\n'), ((30589, 30604), 'numpy.arange', 'np.arange', (['task'], {}), '(task)\n', (30598, 30604), True, 'import numpy as np\n'), ((35900, 35928), 'numpy.arange', 'np.arange', (['mem_filled_so_far'], {}), '(mem_filled_so_far)\n', (35909, 35928), True, 'import numpy as np\n'), ((35979, 36049), 'numpy.random.choice', 'np.random.choice', (['mem_filled_so_far', 'args.eps_mem_batch'], {'replace': '(False)'}), '(mem_filled_so_far, args.eps_mem_batch, replace=False)\n', (35995, 36049), True, 'import numpy as np\n'), ((34046, 34061), 'numpy.arange', 'np.arange', (['task'], {}), '(task)\n', (34055, 34061), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from tilec.fg import dBnudT,get_mix
"""
compute various conversion factors for LFI bandpasses
"""
TCMB = 2.726 # Kelvin
TCMB_uK = 2.726e6 # micro-Kelvin
hplanck = 6.626068e-34 # MKS
kboltz = 1.3806503e-23 # MKS
clight = 299792458.0 # MKS
clight_cmpersec = 2.99792458*1.e10 #speed of light in cm/s
N_freqs = 3
LFI_freqs = []
LFI_freqs.append('030')
LFI_freqs.append('044')
LFI_freqs.append('070')
LFI_freqs_GHz = np.array([30.0, 44.0, 70.0])
LFI_files = []
for i in range(N_freqs):
print("----------")
print(LFI_freqs[i])
LFI_files.append('../data/LFI_BANDPASS_F'+LFI_freqs[i]+'_reformat.txt')
LFI_loc = np.loadtxt(LFI_files[i])
# check norm, i.e., make sure response is unity for CMB
LFI_loc_GHz = LFI_loc[:,0]
LFI_loc_trans = LFI_loc[:,1]
print("CMB norm = ", np.trapz(LFI_loc_trans, LFI_loc_GHz))
# compute K_CMB -> y_SZ conversion
print("K_CMB -> y_SZ conversion: ", np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6, LFI_loc_GHz) / np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6*get_mix(LFI_loc_GHz,'tSZ')/TCMB_uK, LFI_loc_GHz) / TCMB)
# compute K_CMB -> MJy/sr conversion [IRAS convention, alpha=-1 power-law SED]
print("K_CMB -> MJy/sr conversion [IRAS convention, alpha=-1 power-law SED]: ", np.trapz(LFI_loc_trans*dBnudT(LFI_loc_GHz)*1.e6, LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) * 1.e20)
# compute color correction from IRAS to "dust" (power-law with alpha=4)
print("MJy/sr color correction (power-law, alpha=-1 to alpha=4): ", np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_loc_GHz/LFI_freqs_GHz[i])**4.0, LFI_loc_GHz))
# compute color correction from IRAS to modified blackbody with T=13.6 K, beta=1.4 (to compare to results at https://wiki.cosmos.esa.int/planckpla2015/index.php/UC_CC_Tables )
print("MJy/sr color correction (power-law alpha=-1 to MBB T=13.6 K/beta=1.4): ", np.trapz(LFI_loc_trans*(LFI_freqs_GHz[i]/LFI_loc_GHz), LFI_loc_GHz) / np.trapz(LFI_loc_trans*(LFI_loc_GHz/LFI_freqs_GHz[i])**(1.4+3.) * (np.exp(hplanck*LFI_freqs_GHz[i]*1.e9/(kboltz*13.6))-1.)/(np.exp(hplanck*LFI_loc_GHz*1.e9/(kboltz*13.6))-1.), LFI_loc_GHz))
print("----------")
|
[
"numpy.trapz",
"tilec.fg.dBnudT",
"numpy.array",
"numpy.exp",
"numpy.loadtxt",
"tilec.fg.get_mix"
] |
[((506, 534), 'numpy.array', 'np.array', (['[30.0, 44.0, 70.0]'], {}), '([30.0, 44.0, 70.0])\n', (514, 534), True, 'import numpy as np\n'), ((714, 738), 'numpy.loadtxt', 'np.loadtxt', (['LFI_files[i]'], {}), '(LFI_files[i])\n', (724, 738), True, 'import numpy as np\n'), ((888, 924), 'numpy.trapz', 'np.trapz', (['LFI_loc_trans', 'LFI_loc_GHz'], {}), '(LFI_loc_trans, LFI_loc_GHz)\n', (896, 924), True, 'import numpy as np\n'), ((1630, 1701), 'numpy.trapz', 'np.trapz', (['(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz))', 'LFI_loc_GHz'], {}), '(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz), LFI_loc_GHz)\n', (1638, 1701), True, 'import numpy as np\n'), ((1700, 1778), 'numpy.trapz', 'np.trapz', (['(LFI_loc_trans * (LFI_loc_GHz / LFI_freqs_GHz[i]) ** 4.0)', 'LFI_loc_GHz'], {}), '(LFI_loc_trans * (LFI_loc_GHz / LFI_freqs_GHz[i]) ** 4.0, LFI_loc_GHz)\n', (1708, 1778), True, 'import numpy as np\n'), ((2039, 2110), 'numpy.trapz', 'np.trapz', (['(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz))', 'LFI_loc_GHz'], {}), '(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz), LFI_loc_GHz)\n', (2047, 2110), True, 'import numpy as np\n'), ((1405, 1476), 'numpy.trapz', 'np.trapz', (['(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz))', 'LFI_loc_GHz'], {}), '(LFI_loc_trans * (LFI_freqs_GHz[i] / LFI_loc_GHz), LFI_loc_GHz)\n', (1413, 1476), True, 'import numpy as np\n'), ((2233, 2295), 'numpy.exp', 'np.exp', (['(hplanck * LFI_loc_GHz * 1000000000.0 / (kboltz * 13.6))'], {}), '(hplanck * LFI_loc_GHz * 1000000000.0 / (kboltz * 13.6))\n', (2239, 2295), True, 'import numpy as np\n'), ((1028, 1047), 'tilec.fg.dBnudT', 'dBnudT', (['LFI_loc_GHz'], {}), '(LFI_loc_GHz)\n', (1034, 1047), False, 'from tilec.fg import dBnudT, get_mix\n'), ((1117, 1144), 'tilec.fg.get_mix', 'get_mix', (['LFI_loc_GHz', '"""tSZ"""'], {}), "(LFI_loc_GHz, 'tSZ')\n", (1124, 1144), False, 'from tilec.fg import dBnudT, get_mix\n'), ((1364, 1383), 'tilec.fg.dBnudT', 'dBnudT', (['LFI_loc_GHz'], {}), '(LFI_loc_GHz)\n', (1370, 1383), False, 'from tilec.fg import dBnudT, get_mix\n'), ((2176, 2243), 'numpy.exp', 'np.exp', (['(hplanck * LFI_freqs_GHz[i] * 1000000000.0 / (kboltz * 13.6))'], {}), '(hplanck * LFI_freqs_GHz[i] * 1000000000.0 / (kboltz * 13.6))\n', (2182, 2243), True, 'import numpy as np\n'), ((1092, 1111), 'tilec.fg.dBnudT', 'dBnudT', (['LFI_loc_GHz'], {}), '(LFI_loc_GHz)\n', (1098, 1111), False, 'from tilec.fg import dBnudT, get_mix\n')]
|
'''
An Elman Network is implemented, taking the output of the last time step of the time series as the
prediction and also using it to compute the training loss. This is done because this output is thought of as the most informed one.
'''
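# Both the training loss and the prediction use the output at the final time step, since the
# hidden state has by then seen the whole sequence. When configuration['decision criteria'] is
# 'majority vote', the last fraction of per-step outputs (set by 'calibration rate') is voted
# over instead; see the fit() and predict() methods below.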
import torch
from torch import nn
from sklearn.preprocessing import MaxAbsScaler
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
import random
import numpy as np
import copy
import importlib
import os
import einops
from experiment_config import experiment_path, chosen_experiment
spec = importlib.util.spec_from_file_location(chosen_experiment, experiment_path)
config = importlib.util.module_from_spec(spec)
spec.loader.exec_module(config)
configuration = config.learning_config
def choose_best(models_and_losses):
index_best = [i[1] for i in models_and_losses].index(min([i[1] for i in models_and_losses]))
epoch = index_best+1
return models_and_losses[index_best], epoch
def save_model(model, epoch, loss):
path = os.path.join(config.models_folder, configuration['classifier'])
if not os.path.exists(path):
os.makedirs(path)
try:
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': model.optimizer.state_dict(),
'loss': loss,
}, os.path.join(path, 'model.pth'))
except TypeError:
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict,
'optimizer_state_dict': model.optimizer.state_dict(),
'loss': loss,
}, os.path.join(path, 'model.pth'))
class RNN(nn.Module):
def __init__(self, input_size, output_size, hidden_dim, n_layers):
super(RNN, self).__init__()
self.hidden_dim = hidden_dim
self.n_layers = n_layers
self.output_size = output_size
self.input_size = input_size
self._device = self.choose_device()
self._rnn = nn.RNN(input_size, hidden_dim, n_layers, nonlinearity=configuration["activation function"]).to(self._device)
self._fc = nn.Linear(hidden_dim, output_size).to(self._device)
self.optimizer = self.choose_optimizer(alpha=configuration["learning rate"] * configuration["mini batch size"]) # linear scaling of LR
self._estimator_type = 'classifier'
def forward(self, x):
seq_length = len(x[0])
# Initializing hidden state for first input using method defined below
hidden = self.init_hidden(seq_length).to(self._device)
if x.dim() == 2:
x = x.view(-1,seq_length, 1)
# Passing in the input and hidden state into the model and obtaining outputs
if x.device == torch.device("cpu"):
self._rnn = self._rnn.to(torch.device("cpu"))
self._fc = self._fc.to(torch.device("cpu"))
out, hidden = self._rnn(x, hidden)
out = self._fc(out)
else:
self._rnn = self._rnn.to(self.choose_device())
self._fc = self._fc.to(self.choose_device())
out, hidden = self._rnn(x, hidden)
# feed output into the fully connected layer
out = self._fc(out)
return out, hidden
def init_hidden(self, seq_length):
device = self._device
# This method generates the first hidden state of zeros which we'll use in the forward pass
hidden = torch.zeros(self.n_layers, seq_length, self.hidden_dim).to(device)
# We'll send the tensor holding the hidden state to the device we specified earlier as well
return hidden
def fit(self, train_loader=None, test_loader=None, X_train=None, y_train=None, X_test=None, y_test=None, early_stopping=True, control_lr=None, prev_epoch=1, prev_loss=1, grid_search_parameter=None):
torch.cuda.empty_cache()
self.early_stopping = early_stopping
self.control_lr = control_lr
if X_train and y_train:
X = X_train
y = y_train
mini_batch_size = configuration["mini batch size"]
criterion = nn.CrossEntropyLoss()
nominal_lr = configuration["learning rate"] * mini_batch_size # linear scaling of LR
lr = nominal_lr
loss = 10000000000 #set initial dummy loss
lrs = []
training_losses = []
models_and_val_losses = []
pause = 0 # for early stopping
if prev_epoch is None or grid_search_parameter:
prev_epoch = 1
if grid_search_parameter is not None:
configuration[configuration["grid search"][0]] = grid_search_parameter
for epoch in range(prev_epoch, configuration["number of epochs"] + 1):
if configuration["optimizer"] == 'SGD' and not epoch == prev_epoch: #ADAM optimizer has internal states and should therefore not be reinitialized every epoch; only for SGD bc here changing the learning rate makes sense
self.optimizer, lr = self.control_learning_rate(lr=lr, loss=loss, losses=training_losses, nominal_lr=nominal_lr, epoch=epoch)
lrs.append(lr)
if X_train and y_train:
zipped_X_y = list(zip(X, y))
random.shuffle(zipped_X_y) #randomly shuffle samples to have different mini batches between epochs
X, y = zip(*zipped_X_y)
X = np.array(X)
y = list(y)
if len(X) % mini_batch_size > 0: #drop some samples if necessary to fit with batch size
samples_to_drop = len(X) % mini_batch_size
X = X[:-samples_to_drop]
y = y[:-samples_to_drop]
mini_batches = X.reshape((int(len(X) / mini_batch_size), mini_batch_size, len(X[0])))
mini_batch_targets = np.array(y).reshape(int(len(y) / mini_batch_size), mini_batch_size)
input_seq = [torch.Tensor(i).view(len(i), -1, 1) for i in mini_batches]
target_seq = [torch.Tensor([i]).view(-1).long() for i in mini_batch_targets]
inout_seq = list(zip(input_seq, target_seq))
#optimizer.zero_grad() # Clears existing gradients from previous epoch
for sequences, labels in inout_seq:
labels = labels.to(self._device)
sequences = sequences.to(self._device)
self.optimizer.zero_grad() # Clears existing gradients from previous batch so as not to backprop through entire dataset
output, hidden = self(sequences)
if configuration['decision criteria'] == 'majority vote':
start_voting_outputs = int((configuration['calibration rate']) * output.size()[1])
voting_outputs = torch.stack([i[start_voting_outputs:] for i in
output]) # choose last n outputs of timeseries to do majority vote
relevant_outputs = voting_outputs.to(self._device)
labels = torch.stack([i[-1] for i in labels]).long()
labels = einops.repeat(labels, 'b -> (b copy)', copy=relevant_outputs.size()[1])
labels = torch.stack(torch.split(labels, relevant_outputs.size()[1]), dim=0)
loss = sum([criterion(relevant_outputs[i], labels[i]) for i in list(range(labels.size()[0]))]) / \
labels.size()[0]
else:
last_outputs = torch.stack(
[i[-1] for i in output]) # choose last output of timeseries (most informed output)
relevant_outputs = last_outputs.to(self._device)
labels = torch.stack([i[-1] for i in labels]).long()
loss = criterion(relevant_outputs, labels)
                    loss.backward() # Does backpropagation and calculates gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), configuration["gradient clipping"]) # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
self.optimizer.step() # Updates the weights accordingly
                    self.detach([relevant_outputs, sequences, labels, hidden]) #detach tensors from GPU to free memory
elif train_loader and test_loader:
import sys
toolbar_width = len(train_loader)
# setup toolbar
print('Epoch {}/{} completed:'.format(epoch, configuration["number of epochs"]))
sys.stdout.write("[%s]" % (" " * toolbar_width))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width+1)) # return to start of line, after '['
sys.stdout.flush()
for i, (sequences, labels, raw_seq) in enumerate(train_loader):
labels = labels.to(self._device)
sequences = sequences.to(self._device)
self.optimizer.zero_grad() # Clears existing gradients from previous batch so as not to backprop through entire dataset
output, hidden = self(sequences)
if configuration['decision criteria'] == 'majority vote':
if configuration['calibration rate'] == 1:
start_voting_outputs = int((configuration['calibration rate']) * output.size()[
1]) - 1 # equal to only using the last output
else:
start_voting_outputs = int((configuration['calibration rate']) * output.size()[1])
voting_outputs = torch.stack([i[start_voting_outputs:] for i in output]) #choose last n outputs of timeseries to do majority vote
relevant_outputs = voting_outputs.to(self._device)
labels = torch.stack([i[-1] for i in labels]).long()
labels = einops.repeat(labels, 'b -> (b copy)', copy=relevant_outputs.size()[1])
labels = torch.stack(torch.split(labels, relevant_outputs.size()[1]), dim=0)
loss = sum([criterion(relevant_outputs[i], labels[i]) for i in list(range(labels.size()[0]))])/labels.size()[0]
else:
last_outputs = torch.stack([i[-1] for i in output]) #choose last output of timeseries (most informed output)
relevant_outputs = last_outputs.to(self._device)
labels = torch.stack([i[-1] for i in labels]).long()
loss = criterion(relevant_outputs, labels)
loss.backward() # Does backpropagation and calculates gradients
torch.nn.utils.clip_grad_norm_(self.parameters(), configuration["gradient clipping"]) # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
self.optimizer.step() # Updates the weights accordingly
self.detach([relevant_outputs, sequences, labels, hidden]) #detach tensors from GPU to free memory
progress = (i+1) / len(train_loader)
sys.stdout.write("- %.1f%% " %(progress*100))
sys.stdout.flush()
if config.dev_mode:
break
sys.stdout.write("]\n") # this ends the progress bar
sys.stdout.flush()
else:
print('Either provide X and y or dataloaders!')
if X_train and y_train:
training_losses.append(loss)
                val_outputs = torch.stack([i[-1].view(-1) for i in self.predict(X=X_test)[1]]).to(self._device)
val_loss = criterion(val_outputs, torch.Tensor([np.array(y_test)]).view(-1).long().to(self._device))
else:
training_losses.append(loss)
pred, val_outputs, y_test = self.predict(test_loader=test_loader)
val_outputs = torch.stack([i[-1] for i in val_outputs]).to(self._device)
y_test = y_test.view(-1).long().to(self._device)
val_loss = criterion(val_outputs, y_test).to(self._device)
self.detach([val_outputs])
models_and_val_losses.append((copy.deepcopy(self.state_dict), val_loss.item()))
if configuration["save_model"]:
clf, ep = choose_best(models_and_val_losses)
if ep == epoch:
save_model(self, epoch, val_loss.item())
if self.early_stopping:
try:
if abs(models_and_val_losses[-1][1] - models_and_val_losses[-2][1]) < 1*10**-6:
pause += 1
if pause == 5:
print('Validation loss has not changed for {0} epochs! Early stopping of training after {1} epochs!'.format(pause, epoch))
return models_and_val_losses, training_losses, lrs
except IndexError:
pass
if not configuration["cross_validation"] and epoch % 10 == 0:
print('Epoch: {}/{}.............'.format(epoch, configuration["number of epochs"]), end=' ')
print("Loss: {:.4f}".format(loss.item()))
return models_and_val_losses, training_losses, lrs
def predict(self, test_loader=None, X=None):
if X is not None:
input_sequences = torch.stack([torch.Tensor(i).view(len(i), -1) for i in X])
input_sequences = input_sequences.to(self._device)
outputs, hidden = self(input_sequences)
last_outputs = torch.stack([i[-1] for i in outputs]).to(self._device)
probs = nn.Softmax(dim=-1)(last_outputs)
pred = torch.argmax(probs, dim=-1) # chose class that has highest probability
self.detach([input_sequences, hidden, outputs])
return [i.item() for i in pred], outputs
elif test_loader:
pred = torch.Tensor()
y_test = torch.Tensor()
outputs_cumm = torch.Tensor()
for i, (input_sequences, labels, raw_seq) in enumerate(test_loader):
input_sequences = input_sequences.to(self._device)
outputs, hidden = self(input_sequences)
if configuration['decision criteria'] == 'majority vote':
if configuration['calibration rate'] == 1:
start_voting_outputs = int((configuration['calibration rate']) * outputs.size()[
1]) - 1 # equal to only using the last output
else:
start_voting_outputs = int((configuration['calibration rate']) * outputs.size()[1])
voting_outputs = torch.stack([i[start_voting_outputs:] for i in outputs]) #choose last n outputs of timeseries to do majority vote
relevant_outputs = voting_outputs.to(self._device)
most_likely_outputs = torch.argmax(nn.Softmax(dim=-1)(relevant_outputs), dim=-1)
majority_vote_result = torch.mode(most_likely_outputs, dim=-1)[0]
pred_new = majority_vote_result.float()
else:
last_outputs = torch.stack([i[-1] for i in outputs]).to(self._device)
probs = nn.Softmax(dim=-1)(last_outputs)
pred_new = torch.argmax(probs, dim=-1).float()
outputs_cumm = torch.cat((outputs_cumm.to(self._device), outputs.float()), 0)
pred = torch.cat((pred.to(self._device), pred_new), 0) # chose class that has highest probability
y_test = torch.cat((y_test, labels.float()), 0) # chose class that has highest probability
self.detach([input_sequences, hidden, outputs])
if configuration["train test split"] <= 1:
share_of_test_set = len(test_loader)*configuration["train test split"]*labels.size()[0]
else:
share_of_test_set = configuration["train test split"]
if y_test.size()[0] >= share_of_test_set: #to choose the test set size (memory issues!!)
break
return [i.item() for i in pred], outputs_cumm, y_test
else:
print('Either provide X or a dataloader!')
def choose_optimizer(self, alpha=configuration["learning rate"]):
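        # Note: the default value of alpha is evaluated once, when the class body is executed,
        # so later changes to configuration["learning rate"] do not update this default;
        # the callers in this file always pass alpha explicitly.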
if configuration["optimizer"] == 'Adam':
optimizer = torch.optim.Adam(self.parameters(), lr=alpha)
else:
optimizer = torch.optim.SGD(self.parameters(), lr=alpha)
return optimizer
def control_learning_rate(self, lr=None, loss=None, losses=None, epoch=None, nominal_lr=None):
warm_up_share = configuration["percentage of epochs for warm up"] / 100
if self.control_lr == 'warm up' and epoch < int(warm_up_share * configuration["number of epochs"]):
lr = nominal_lr * epoch / int((warm_up_share * configuration["number of epochs"]))
optimizer = self.choose_optimizer(alpha=lr)
elif self.control_lr == 'warm up' and epoch >= int(warm_up_share * configuration["number of epochs"]):
lr = nominal_lr * (configuration["number of epochs"] - epoch) / int((1-warm_up_share) * configuration["number of epochs"])
optimizer = self.choose_optimizer(alpha=lr)
elif self.control_lr == 'LR controlled':
if losses[-1] > loss:
lr = lr * 1.1
optimizer = self.choose_optimizer(alpha=lr)
elif losses[-1] <= loss:
lr = lr * 0.90
optimizer = self.choose_optimizer(alpha=lr)
else:
lr = lr
optimizer = self.choose_optimizer(alpha=lr)
return optimizer, lr
def preprocess(self, X_train, X_test):
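        # The scaler is fitted on the training data only and then applied to both sets,
        # which avoids leaking test-set statistics into the preprocessing.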
scaler = self.fit_scaler(X_train)
X_train = self.preprocessing(X_train, scaler)
X_test = self.preprocessing(X_test, scaler)
return X_train, X_test
def fit_scaler(self, X):
X_zeromean = np.array([x - x.mean() for x in X]) # deduct it's own mean from every sample
maxabs_scaler = MaxAbsScaler().fit(X_zeromean) # fit scaler as to scale training data between -1 and 1
return maxabs_scaler
def preprocessing(self, X, scaler):
X_zeromean = np.array([x - x.mean() for x in X])
X = scaler.transform(X_zeromean)
return X
def score(self, y_test, y_pred):
metrics = precision_recall_fscore_support(y_test, y_pred, average='macro')
accuracy = accuracy_score(y_test, y_pred)
return [accuracy, metrics]
def get_params(self, deep=True):
return {"hidden_dim": self.hidden_dim, "n_layers": self.n_layers, "output_size": self.output_size, "input_size" : self.input_size}
def choose_device(self):
is_cuda = torch.cuda.is_available()
if is_cuda:
device = torch.device("cuda")
else:
device = torch.device("cpu")
return device
def detach(self, inputs=[]):
for i in inputs:
torch.detach(i)
torch.cuda.empty_cache()
return
|
[
"sys.stdout.write",
"torch.argmax",
"sklearn.metrics.accuracy_score",
"random.shuffle",
"sklearn.preprocessing.MaxAbsScaler",
"torch.nn.Softmax",
"sys.stdout.flush",
"torch.device",
"os.path.join",
"sklearn.metrics.precision_recall_fscore_support",
"importlib.util.module_from_spec",
"os.path.exists",
"importlib.util.spec_from_file_location",
"torch.Tensor",
"torch.nn.Linear",
"torch.zeros",
"copy.deepcopy",
"torch.cuda.is_available",
"torch.nn.RNN",
"torch.mode",
"torch.detach",
"os.makedirs",
"torch.stack",
"torch.nn.CrossEntropyLoss",
"numpy.array",
"torch.cuda.empty_cache"
] |
[((572, 646), 'importlib.util.spec_from_file_location', 'importlib.util.spec_from_file_location', (['chosen_experiment', 'experiment_path'], {}), '(chosen_experiment, experiment_path)\n', (610, 646), False, 'import importlib\n'), ((656, 693), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', (['spec'], {}), '(spec)\n', (687, 693), False, 'import importlib\n'), ((1022, 1085), 'os.path.join', 'os.path.join', (['config.models_folder', "configuration['classifier']"], {}), "(config.models_folder, configuration['classifier'])\n", (1034, 1085), False, 'import os\n'), ((1098, 1118), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1112, 1118), False, 'import os\n'), ((1128, 1145), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1139, 1145), False, 'import os\n'), ((3837, 3861), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3859, 3861), False, 'import torch\n'), ((4105, 4126), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4124, 4126), False, 'from torch import nn\n'), ((18994, 19058), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""macro"""'}), "(y_test, y_pred, average='macro')\n", (19025, 19058), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((19078, 19108), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (19092, 19108), False, 'from sklearn.metrics import accuracy_score\n'), ((19369, 19394), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19392, 19394), False, 'import torch\n'), ((19629, 19653), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (19651, 19653), False, 'import torch\n'), ((1360, 1391), 'os.path.join', 'os.path.join', (['path', '"""model.pth"""'], {}), "(path, 'model.pth')\n", (1372, 1391), False, 'import os\n'), ((2740, 2759), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2752, 2759), False, 'import torch\n'), ((14169, 14196), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(-1)'}), '(probs, dim=-1)\n', (14181, 14196), False, 'import torch\n'), ((19436, 19456), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (19448, 19456), False, 'import torch\n'), ((19492, 19511), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (19504, 19511), False, 'import torch\n'), ((19605, 19620), 'torch.detach', 'torch.detach', (['i'], {}), '(i)\n', (19617, 19620), False, 'import torch\n'), ((1617, 1648), 'os.path.join', 'os.path.join', (['path', '"""model.pth"""'], {}), "(path, 'model.pth')\n", (1629, 1648), False, 'import os\n'), ((1992, 2088), 'torch.nn.RNN', 'nn.RNN', (['input_size', 'hidden_dim', 'n_layers'], {'nonlinearity': "configuration['activation function']"}), "(input_size, hidden_dim, n_layers, nonlinearity=configuration[\n 'activation function'])\n", (1998, 2088), False, 'from torch import nn\n'), ((2120, 2154), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'output_size'], {}), '(hidden_dim, output_size)\n', (2129, 2154), False, 'from torch import nn\n'), ((2798, 2817), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2810, 2817), False, 'import torch\n'), ((2854, 2873), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2866, 2873), False, 'import torch\n'), ((3435, 3490), 'torch.zeros', 'torch.zeros', (['self.n_layers', 'seq_length', 'self.hidden_dim'], {}), '(self.n_layers, seq_length, 
self.hidden_dim)\n', (3446, 3490), False, 'import torch\n'), ((5317, 5343), 'random.shuffle', 'random.shuffle', (['zipped_X_y'], {}), '(zipped_X_y)\n', (5331, 5343), False, 'import random\n'), ((5489, 5500), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5497, 5500), True, 'import numpy as np\n'), ((14116, 14134), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (14126, 14134), False, 'from torch import nn\n'), ((14400, 14414), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (14412, 14414), False, 'import torch\n'), ((14436, 14450), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (14448, 14450), False, 'import torch\n'), ((14478, 14492), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (14490, 14492), False, 'import torch\n'), ((18641, 18655), 'sklearn.preprocessing.MaxAbsScaler', 'MaxAbsScaler', ([], {}), '()\n', (18653, 18655), False, 'from sklearn.preprocessing import MaxAbsScaler\n'), ((8869, 8917), 'sys.stdout.write', 'sys.stdout.write', (["('[%s]' % (' ' * toolbar_width))"], {}), "('[%s]' % (' ' * toolbar_width))\n", (8885, 8917), False, 'import sys\n'), ((8934, 8952), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8950, 8952), False, 'import sys\n'), ((8969, 9015), 'sys.stdout.write', 'sys.stdout.write', (["('\\x08' * (toolbar_width + 1))"], {}), "('\\x08' * (toolbar_width + 1))\n", (8985, 9015), False, 'import sys\n'), ((9065, 9083), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9081, 9083), False, 'import sys\n'), ((11718, 11741), 'sys.stdout.write', 'sys.stdout.write', (['"""]\n"""'], {}), "(']\\n')\n", (11734, 11741), False, 'import sys\n'), ((11787, 11805), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11803, 11805), False, 'import sys\n'), ((12658, 12688), 'copy.deepcopy', 'copy.deepcopy', (['self.state_dict'], {}), '(self.state_dict)\n', (12671, 12688), False, 'import copy\n'), ((14041, 14078), 'torch.stack', 'torch.stack', (['[i[-1] for i in outputs]'], {}), '([i[-1] for i in outputs])\n', (14052, 14078), False, 'import torch\n'), ((5941, 5952), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5949, 5952), True, 'import numpy as np\n'), ((6923, 6978), 'torch.stack', 'torch.stack', (['[i[start_voting_outputs:] for i in output]'], {}), '([i[start_voting_outputs:] for i in output])\n', (6934, 6978), False, 'import torch\n'), ((7689, 7725), 'torch.stack', 'torch.stack', (['[i[-1] for i in output]'], {}), '([i[-1] for i in output])\n', (7700, 7725), False, 'import torch\n'), ((11545, 11593), 'sys.stdout.write', 'sys.stdout.write', (["('- %.1f%% ' % (progress * 100))"], {}), "('- %.1f%% ' % (progress * 100))\n", (11561, 11593), False, 'import sys\n'), ((11611, 11629), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11627, 11629), False, 'import sys\n'), ((12373, 12414), 'torch.stack', 'torch.stack', (['[i[-1] for i in val_outputs]'], {}), '([i[-1] for i in val_outputs])\n', (12384, 12414), False, 'import torch\n'), ((15186, 15242), 'torch.stack', 'torch.stack', (['[i[start_voting_outputs:] for i in outputs]'], {}), '([i[start_voting_outputs:] for i in outputs])\n', (15197, 15242), False, 'import torch\n'), ((6039, 6054), 'torch.Tensor', 'torch.Tensor', (['i'], {}), '(i)\n', (6051, 6054), False, 'import torch\n'), ((9986, 10041), 'torch.stack', 'torch.stack', (['[i[start_voting_outputs:] for i in output]'], {}), '([i[start_voting_outputs:] for i in output])\n', (9997, 10041), False, 'import torch\n'), ((10661, 10697), 'torch.stack', 'torch.stack', (['[i[-1] for i in output]'], {}), '([i[-1] for i in 
output])\n', (10672, 10697), False, 'import torch\n'), ((13851, 13866), 'torch.Tensor', 'torch.Tensor', (['i'], {}), '(i)\n', (13863, 13866), False, 'import torch\n'), ((15516, 15555), 'torch.mode', 'torch.mode', (['most_likely_outputs'], {'dim': '(-1)'}), '(most_likely_outputs, dim=-1)\n', (15526, 15555), False, 'import torch\n'), ((15760, 15778), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (15770, 15778), False, 'from torch import nn\n'), ((7201, 7237), 'torch.stack', 'torch.stack', (['[i[-1] for i in labels]'], {}), '([i[-1] for i in labels])\n', (7212, 7237), False, 'import torch\n'), ((7920, 7956), 'torch.stack', 'torch.stack', (['[i[-1] for i in labels]'], {}), '([i[-1] for i in labels])\n', (7931, 7956), False, 'import torch\n'), ((15427, 15445), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (15437, 15445), False, 'from torch import nn\n'), ((15677, 15714), 'torch.stack', 'torch.stack', (['[i[-1] for i in outputs]'], {}), '([i[-1] for i in outputs])\n', (15688, 15714), False, 'import torch\n'), ((15824, 15851), 'torch.argmax', 'torch.argmax', (['probs'], {'dim': '(-1)'}), '(probs, dim=-1)\n', (15836, 15851), False, 'import torch\n'), ((6128, 6145), 'torch.Tensor', 'torch.Tensor', (['[i]'], {}), '([i])\n', (6140, 6145), False, 'import torch\n'), ((10208, 10244), 'torch.stack', 'torch.stack', (['[i[-1] for i in labels]'], {}), '([i[-1] for i in labels])\n', (10219, 10244), False, 'import torch\n'), ((10869, 10905), 'torch.stack', 'torch.stack', (['[i[-1] for i in labels]'], {}), '([i[-1] for i in labels])\n', (10880, 10905), False, 'import torch\n'), ((12145, 12161), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (12153, 12161), True, 'import numpy as np\n')]
|
import numpy as np
import pymc3 as pm
import theano
import theano.tensor as tt
# for reproducibility here's some version info for modules used in this notebook
import platform
import IPython
import matplotlib
import matplotlib.pyplot as plt
import emcee
import corner
import os
from autograd import grad
from files.myIOlib import show_seaborn_plot
print("Python version: {}".format(platform.python_version()))
print("IPython version: {}".format(IPython.__version__))
print("Numpy version: {}".format(np.__version__))
print("Theano version: {}".format(theano.__version__))
print("PyMC3 version: {}".format(pm.__version__))
print("Matplotlib version: {}".format(matplotlib.__version__))
print("emcee version: {}".format(emcee.__version__))
print("corner version: {}".format(corner.__version__))
import numpy as np
import pymc3 as pm
import arviz as az
#Ordering imports
from myIOlib import *
from myModel import *
from myFUQ import *
#Ordering tools
import numpy as np
import arviz as az
from scipy import stats
import matplotlib as mpl
from theano import as_op
import theano.tensor as tt
import scipy.special as sc
import math
import time
# Start timing code execution
t0 = time.time()
################# User settings ###################
# Define the amount of samples
N = 1
# Define time of simulation
timestep = 30
endtime = 10320
t1steps = round(endtime / timestep)
Nt = 2*t1steps+1
x = timestep * np.linspace(0, 2 * t1steps, Nt)
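# Nt uniformly spaced time points from 0 to 2*endtime; the factor of two presumably covers
# both the drawdown and the build-up phase of the transient well test analysed below.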
# Units
MPA = 1e6
# Forward/Bayesian Inference calculation
performInference = True
useFEA = False
# Location to store output
outfile = 'output/output_%d.png' % N
#################### Core #########################
# Generate text file for parameters
generate_txt( "parameters.txt" )
# Import parameters.txt to variables
print("Reading model parameters...")
params_aquifer, params_well = read_from_txt( "parameters.txt" )
# Construct the objects for the doublet model
print("Constructing the doublet model...")
aquifer = Aquifer(params_aquifer)
# doublet = DoubletGenerator(aquifer)
from myUQ import *
from files.myUQlib import *
######## Forward Uncertainty Quantification #########
if not performInference:
# Run Bayesian FUQ (input parameters not np. but pm. -> random values, as pdf not work in FEA -> output array of values -> mean, stdv -> pm. )
import pymc3 as pm
from pymc3.distributions import Interpolated
print('Running on PyMC3 v{}'.format(pm.__version__))
# Run Forward Uncertainty Quantification
print("\r\nSolving Forward Uncertainty Quantification...")
    # Set input from stochastic parameters
    print("\r\nSetting input from stochastic parameters...")
    parametersRVS = generateRVSfromPDF(N)
    print("Stochastic parameters", parametersRVS)
if useFEA:
# Run Finite Element Analysis (Forward)
print("\r\nRunning FEA...")
sol = performFEA(parametersRVS, aquifer, N, timestep, endtime)
else:
# # Run Analytical Analysis (Forward)
print("\r\nRunning Analytical Analysis...")
sol = performAA(parametersRVS, x)
###########################
# Post processing #
###########################
# Output pressure/temperature matrix and plot for single point in time
fig, ax = plt.subplots(1, 1, figsize=(10, 7), tight_layout=True)
ax.set(xlabel='Wellbore pressure [Pa]', ylabel='Probability')
ax.hist(sol[0][:, t1steps], density=True, histtype='stepfilled', alpha=0.2, bins=20)
# plt.show()
# Evaluate the doublet model
print("\r\nEvaluating numerical solution for the doublet model...")
doublet = DoubletGenerator(aquifer, sol)
pnodelist, Tnodelist = evaluateDoublet(doublet)
######## Inverse Uncertainty Quantification #########
else:
# Run Bayesian Inference
import pymc3 as pm
from pymc3.distributions import Interpolated
from pymc3.distributions.timeseries import EulerMaruyama
print('Running on PyMC3 v{}'.format(pm.__version__))
# Set distribution settings
chains = 4
ndraws = 15 # number of draws from the distribution
nburn = 5 # number of "burn-in points" (which we'll discard)
# Library functions
def get_𝜇_K(porosity, size):
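        # Appears to be a Kozeny-Carman-type porosity-permeability relation,
        # K ~ constant * tau**2 * phi**tothepower / SSA**2, with the coefficients drawn from
        # broad uniform ranges; only the mean of the sampled permeabilities is returned.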
constant = np.random.uniform(low=10, high=100, size=size) # np.random.uniform(low=3.5, high=5.8, size=size)
tau = np.random.uniform(low=0.3, high=0.5, size=size)
tothepower = np.random.uniform(low=3, high=5, size=size)
rc = np.random.uniform(low=10e-6, high=30e-6, size=size)
SSA = 3 / rc
permeability = constant * tau ** 2 * (porosity.random(size=N) ** tothepower / SSA ** 2)
𝜇_K = np.mean(permeability)
# constant = np.random.uniform(low=3.5, high=5.8, size=N)
# tothepower = np.random.uniform(low=3, high=5, size=N)
# Tau = (2) ** (1 / 2)
# S0_sand = np.random.uniform(low=1.5e2, high=2.2e2, size=N) # specific surface area [1/cm]
# K_samples = constant * (φpdf.random(size=N) ** tothepower / S0_sand ** 2)
# Kpdf = pm.Lognormal('K', mu=math.log(np.mean(K_samples)), sd=1) #joined distribution
return 𝜇_K
###########################
# Synthetic data #
###########################
# Set up our data
Nt = Nt # number of data points
CV = 0.001 # coefficient of variation noise
# True data
K_true = 1e-12 # 2.2730989084434785e-08
φ_true = 0.163
H_true = 70
ct_true = 1e-10
Q_true = 0.07
cs_true = 2650
# Lognormal priors for true parameters
Hpdf = stats.lognorm(scale=H_true, s=0.01)
φpdf = stats.lognorm(scale=φ_true, s=0.01)
Kpdf = stats.lognorm(scale=K_true, s=0.01)
ctpdf = stats.lognorm(scale=ct_true, s=0.01)
Qpdf = stats.lognorm(scale=Q_true, s=0.01)
cspdf = stats.lognorm(scale=cs_true, s=0.01)
theta = parametersRVS = [Hpdf.rvs(size=1), φpdf.rvs(size=1), Kpdf.rvs(size=1), ctpdf.rvs(size=1),
Qpdf.rvs(size=1), cspdf.rvs(size=1)]
# parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
# theta = parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
truemodel = my_model(theta, x)
print("truemodel", truemodel)
# Make data
np.random.seed(716742) # set random seed, so the data is reproducible each time
sigma = CV * np.mean(truemodel)
# data = sigma * np.random.randn(Nt) + truemodel
# Use real data
data = get_welldata('PBH')
# plot transient test
parameters = {'axes.labelsize': 14,
'axes.titlesize': 18}
plt.rcParams.update(parameters)
plt.figure(figsize=(10, 3))
# plt.subplot(121)
plt.plot(truemodel/MPA, 'k', label='$p_{true}$', alpha=0.5), plt.plot(data/MPA, 'r', label='$σ_{noise} = 1.0e-2$', alpha=0.5),\
plt.ylabel("p(t) [MPa]"), plt.xlabel("t [min]"), #plt.legend()
plt.tight_layout()
plt.show()
# Create our Op
logl = LogLikeWithGrad(my_loglike, data, x, sigma)
print(logl)
###########################
# Synthetic data #
###########################
# with pm.Model() as SyntheticModel:
#
# # True data (what actually drives the true pressure)
# K_true = 1e-12 # 2.2730989084434785e-08
# φ_true = 0.163
# H_true = 70
# ct_true = 1e-10
# Q_true = 0.07
# cs_true = 2650
#
# # Lognormal priors for true parameters
# Hpdf = pm.Lognormal('H', mu=np.log(H_true), sd=0.01)
# φpdf = pm.Lognormal('φ', mu=np.log(φ_true), sd=0.01)
# Kpdf = pm.Lognormal('K', mu=np.log(K_true), sd=0.01)
# ctpdf = pm.Lognormal('ct', mu=np.log(ct_true), sd=0.01)
# Qpdf = pm.Lognormal('Q', mu=np.log(Q_true), sd=0.01)
# cspdf = pm.Lognormal('cs', mu=np.log(cs_true), sd=0.01)
# parametersRVS = [Hpdf.random(size=Nt), φpdf.random(size=Nt), Kpdf.random(size=Nt), ctpdf.random(size=Nt),
# Qpdf.random(size=Nt), cspdf.random(size=Nt)]
#
# # parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
# solAA = performAA(parametersRVS, aquifer, N, timestep, endtime)
# p_true = np.mean(solAA[0].T, axis=1)
# print(p_true)
#
# # Z_t observed data
# np.random.seed(716742) # set random seed, so the data is reproducible each time
# σnoise = 0.1
# sd_p = σnoise * np.var(p_true) ** 0.5
# z_t = p_true + np.random.randn(Nt) * sd_p
    # use PyMC3 to sample from the log-likelihood
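    # The custom log-likelihood (LogLikeWithGrad, presumably provided by one of the
    # wildcard-imported my* modules) is a Theano Op wrapping my_loglike and its gradient.
    # It is attached to the model through pm.DensityDist below, with the vector of prior RVs
    # passed as the "observed" argument; this is the usual black-box-likelihood pattern for
    # external forward models in PyMC3.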
with pm.Model() as opmodel:
###########################
# Prior information #
###########################
# Mean of expert variables (the specific informative prior)
𝜇_H = aquifer.H # lower_H = 35, upper_H = 105 (COV = 50%)
𝜇_φ = aquifer.φ # lower_φ = 0.1, upper_φ = 0.3 (COV = 50%)
𝜇_ct = aquifer.ct # lower_ct = 0.5e-10, upper_ct = 1.5e-10 (COV = 50%)
        𝜇_Q = aquifer.Q # lower_Q = 0.035, upper_Q = 0.105   (COV = 50%)
𝜇_cs = aquifer.cps # lower_cs = 1325 upper_cs = 3975 (COV = 50%)
# Standard deviation of variables (CV=50%)
sd_H = 0.3
sd_φ = 0.3
sd_K = 0.3
sd_ct = 0.3
sd_Q = 0.3
sd_cs = 0.001
# Lognormal priors for unknown model parameters
Hpdf = pm.Uniform('H', lower=35, upper=105)
φpdf = pm.Uniform('φ', lower=0.1, upper=0.3)
Kpdf = pm.Uniform('K', lower=0.5e-13, upper=1.5e-13)
ctpdf = pm.Uniform('ct', lower=0.5e-10, upper=1.5e-10)
Qpdf = pm.Uniform('Q', lower=0.035, upper=0.105)
cspdf = pm.Uniform('cs', lower=1325, upper=3975)
# Hpdf = pm.Lognormal('H', mu=np.log(𝜇_H), sd=sd_H)
# φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
# Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
# ctpdf = pm.Lognormal('ct', mu=np.log(𝜇_ct), sd=sd_ct)
# Qpdf = pm.Lognormal('Q', mu=np.log(𝜇_Q), sd=sd_Q)
# cspdf = pm.Lognormal('cs', mu=np.log(𝜇_cs), sd=sd_cs)
thetaprior = [Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf]
# convert thetaprior to a tensor vector
theta = tt.as_tensor_variable([Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf])
# use a DensityDist
pm.DensityDist(
'likelihood',
lambda v: logl(v),
observed={'v': theta}
# random=my_model_random
)
with opmodel:
# Inference
trace = pm.sample(ndraws, cores=1, chains=chains, tune=nburn, discard_tuned_samples=True)
# plot the traces
print(az.summary(trace, round_to=2))
_ = pm.traceplot(trace, lines=(('K', {}, [K_true ]), ('φ', {}, [φ_true]), ('H', {}, [H_true]), ('ct', {}, [ct_true])
, ('Q', {}, [Q_true]), ('cs', {}, [cs_true])))
# put the chains in an array (for later!)
# samples_pymc3_2 = np.vstack((trace['K'], trace['φ'], trace['H'], trace['ct'], trace['Q'], trace['cs'])).T
# just because we can, let's draw posterior predictive samples of the model
# ppc = pm.sample_posterior_predictive(trace, samples=250, model=opmodel)
# _, ax = plt.subplots()
#
# for vals in ppc['likelihood']:
# plt.plot(x, vals, color='b', alpha=0.05, lw=3)
# ax.plot(x, my_model([H_true, φ_true, K_true, ct_true, Q_true, cs_true], x), 'k--', lw=2)
#
# ax.set_xlabel("Predictor (stdz)")
# ax.set_ylabel("Outcome (stdz)")
# ax.set_title("Posterior predictive checks");
###########################
# Post processing #
###########################
# print('Posterior distributions.')
# cmap = mpl.cm.autumn
# for param in ['K', 'φ', 'H', 'ct', 'Q', 'cs']:
# plt.figure(figsize=(8, 2))
# samples = trace[param]
# smin, smax = np.min(samples), np.max(samples)
# x = np.linspace(smin, smax, 100)
# y = stats.gaussian_kde(samples)(x)
# plt.axvline({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true, 'cs': cs_true}[param], c='k')
# plt.ylabel('Probability density')
# plt.title(param)
#
# plt.tight_layout();
data_spp = az.from_pymc3(trace=trace)
trace_K = az.plot_posterior(data_spp, var_names=['K'], kind='hist')
trace_φ = az.plot_posterior(data_spp, var_names=['φ'], kind='hist')
trace_H = az.plot_posterior(data_spp, var_names=['H'], kind='hist')
trace_Q = az.plot_posterior(data_spp, var_names=['Q'], kind='hist')
trace_ct = az.plot_posterior(data_spp, var_names=['ct'], kind='hist')
trace_cs = az.plot_posterior(data_spp, var_names=['cs'], kind='hist')
joint_plt = az.plot_joint(data_spp, var_names=['K', 'φ'], kind='kde', fill_last=False);
# trace_fig = az.plot_trace(trace, var_names=[ 'H', 'φ', 'K', 'ct', 'Q', 'cs'], compact=True);
plt.show()
# a = np.random.uniform(0.1, 0.3)
# b = np.random.uniform(0.5e-12, 1.5e-12)
# _, ax = plt.subplots(1, 2, figsize=(10, 4))
# az.plot_dist(a, color="C1", label="Prior", ax=ax[0])
# az.plot_posterior(data_spp, color="C2", var_names=['φ'], ax=ax[1], kind='hist')
# az.plot_dist(b, color="C1", label="Prior", ax=ax[1])
# az.plot_posterior(data_spp, color="C2", var_names=['K'], label="Posterior", ax=ax[0], kind='hist')
plt.show()
with pm.Model() as PriorModel:
###########################
# Prior information #
###########################
# Mean of expert variables (the specific informative prior)
𝜇_H = aquifer.H # lower_H = 35, upper_H = 105 (COV = 50%)
𝜇_φ = aquifer.φ # lower_φ = 0.1, upper_φ = 0.3 (COV = 50%)
𝜇_ct = aquifer.ct # lower_ct = 0.5e-10, upper_ct = 1.5e-10 (COV = 50%)
    𝜇_Q = aquifer.Q # lower_Q = 0.035, upper_Q = 0.105 (COV = 50%)
𝜇_cs = aquifer.cps # lower_cs = 1325 upper_cs = 3975 (COV = 50%)
# Standard deviation of variables (CV=50%)
sd_H = 0.3
sd_φ = 0.3
sd_K = 0.3
sd_ct = 0.3
sd_Q = 0.3
sd_cs = 0.001
# Lognormal priors for unknown model parameters
Hpdf = pm.Lognormal('H', mu=np.log(𝜇_H), sd=sd_H)
φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
ctpdf = pm.Lognormal('ct', mu=np.log(𝜇_ct), sd=sd_ct)
Qpdf = pm.Lognormal('Q', mu=np.log(𝜇_Q), sd=sd_Q)
cspdf = pm.Lognormal('cs', mu=np.log(𝜇_cs), sd=sd_cs)
# Uniform priors for unknown model parameters
# Hpdf = pm.Uniform('H', lower=35, upper=105)
# φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
#φpdf = pm.Uniform('φ', lower=0.1, upper=0.3)
# Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
# ctpdf = pm.Uniform('ct', lower=0.5e-10, upper=1.5e-10)
# Qpdf = pm.Uniform('Q', lower=0.035, upper=0.105)
# cspdf = pm.Uniform('cs', lower=1325, upper=3975)
theta = [Hpdf.random(size=1), φpdf.random(size=1), Kpdf.random(size=1), ctpdf.random(size=1), Qpdf.random(size=1), cspdf.random(size=1)]
# Run Analytical Analysis (Backward)
print("\r\nRunning Analytical Analysis... (Prior, pymc3)")
# p_t = my_model(theta, x) # draw single sample multiple points in time
# p_t = np.mean(solAA[0].T, axis=1) # draw single sample multiple points in time
# Likelihood (sampling distribution) of observations
# z_h = pm.Lognormal('z_h', mu=np.log(p_t), sd=sigma, observed=np.log(data))
# plot 95% CI with seaborn
# with open('pprior.npy', 'wb') as pprior:
# np.save(pprior, p)
# show_seaborn_plot('pprior.npy', "pwell")
# plt.show()
# mu_p = np.mean(p_t)
# sd_p = np.var(p_t) ** 0.5
# p = pm.Lognormal('p', mu=np.log(mu_p), sd=sd_p)
# # Likelihood (predicted distribution) of observations
# y = pm.Normal('y', mu=p, sd=1e4, observed=z_t)
# with PriorModel:
# # Inference
# start = pm.find_MAP() # Find starting value by optimization
# step = pm.NUTS(scaling=start) # Instantiate MCMC sampling algoritm #HamiltonianMC
#
# trace = pm.sample(10000, start=start, step=step, cores=1, chains=chains)
#
# print(az.summary(trace, round_to=2))
# chain_count = trace.get_values('K').shape[0]
# T_pred = pm.sample_posterior_predictive(trace, samples=chain_count, model=PriorModel)
# data_spp = az.from_pymc3(trace=trace)
# joint_plt = az.plot_joint(data_spp, var_names=['K', 'φ'], kind='kde', fill_last=False);
# trace_fig = az.plot_trace(trace, var_names=[ 'H', 'φ', 'K', 'ct', 'Q', 'cs'], figsize=(12, 8));
# az.plot_trace(trace, var_names=['H', 'φ', 'K', 'ct', 'Q'], compact=True);
# fig, axes = az.plot_forest(trace, var_names=['H', 'φ', 'K', 'ct', 'Q'], combined=True) #94% confidence interval with only lines (must normalize the means!)
# axes[0].grid();
# trace_H = az.plot_posterior(data_spp, var_names=['φ'], kind='hist')
# trace_p = az.plot_posterior(data_spp, var_names=['p'], kind='hist')
# pm.traceplot(trace)
# plt.show()
traces = [trace]
for _ in range(2):
with pm.Model() as InferenceModel:
# Priors are posteriors from previous iteration
H = from_posterior('H', trace['H'])
φ = from_posterior('φ', trace['φ'])
K = from_posterior('K', trace['K'])
ct = from_posterior('ct', trace['ct'])
Q = from_posterior('Q', trace['Q'])
cs = from_posterior('cs', trace['cs'])
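        # from_posterior (imported via the my* UQ modules) presumably turns the previous trace
        # samples into an Interpolated prior, as in the PyMC3 "updating priors" example, so each
        # iteration starts from the posterior of the previous one.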
# Random sample method
# parametersRVS = [H.random(size=Nt), φ.random(size=Nt), K.random(size=Nt), ct.random(size=Nt), Q.random(size=Nt), cs.random(size=Nt)]
print("\r\nRunning Analytical Analysis... (Backward, pymc3)")
# solAA = performAA(parametersRVS, aquifer, N, timestep, endtime)
# p_t = np.mean(solAA[0].T, axis=1) # draw single sample multiple points in time
# Likelihood (sampling distribution) of observations
# z_h = pm.Lognormal('z_h', mu=np.log(p_t), sd=sd_p, observed=np.log(z_t))
# Inference
# start = pm.find_MAP()
# step = pm.NUTS(scaling=start)
# trace = pm.sample(ndraws, start=start, step=step, cores=1, chains=chains)
thetaprior = [H, φ, K, ct, Q, cs]
# convert thetaprior to a tensor vector
theta = tt.as_tensor_variable([H, φ, K, ct, Q, cs])
# use a DensityDist
pm.DensityDist(
'likelihood',
lambda v: logl(v),
observed={'v': theta}
# random=my_model_random
)
trace = pm.sample(ndraws, cores=1, chains=chains)
traces.append(trace)
# plt.figure(figsize=(10, 3))
# plt.subplot(121)
# plt.plot(np.percentile(trace[ph], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$')
# plt.plot(p_t, 'r', label='$p(t)$')
# plt.legend()
#
# plt.subplot(122)
# plt.hist(trace[lam], 30, label='$\hat{\lambda}$', alpha=0.5)
# plt.axvline(porosity_true, color='r', label='$\lambda$', alpha=0.5)
# plt.legend();
#
# plt.figure(figsize=(10, 6))
# plt.subplot(211)
# plt.plot(np.percentile(trace[ph][..., 0], [2.5, 97.5], axis=0).T, 'k', label='$\hat{p}_{95\%}(t)$')
# plt.plot(ps, 'r', label='$p(t)$')
# plt.legend(loc=0)
# plt.subplot(234), plt.hist(trace['Kh']), plt.axvline(K), plt.xlim([1e-13, 1e-11]), plt.title('K')
# plt.subplot(235), plt.hist(trace['φh']), plt.axvline(φ), plt.xlim([0, 1.0]), plt.title('φ')
# plt.subplot(236), plt.hist(trace['Hh']), plt.axvline(m), plt.xlim([50, 100]), plt.title('H')
# plt.tight_layout()
#
# plt.show()
###########################
# Post processing #
###########################
print('Posterior distributions after ' + str(len(traces)) + ' iterations.')
cmap = mpl.cm.autumn
for param in ['K', 'φ', 'H', 'ct', 'Q']:
plt.figure(figsize=(8, 2))
for update_i, trace in enumerate(traces):
samples = trace[param]
smin, smax = np.min(samples), np.max(samples)
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
plt.plot(x, y, color=cmap(1 - update_i / len(traces)))
plt.axvline({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true}[param], c='k')
plt.ylabel('Frequency')
plt.title(param)
plt.tight_layout();
plt.show()
# Stop timing code execution
t2 = time.time()
print("CPU time [s] : ", t2 - t0)
# Stop timing code execution
print("\r\nDone. Post-processing...")
#################### Postprocessing #########################
print('Post processing. Plot 95% CI with seaborn')
cmap = mpl.cm.autumn
plt.figure(figsize=(8, 2))
for node in range(len(pnodelist)):
with open('pnode' + str(node+2) + '.npy', 'wb') as f:
np.save(f, pnodelist[node])
show_seaborn_plot('pnode' + str(node+2) + '.npy', str(node+2))
# plt.legend(str(node+2))
plt.xlabel("t [min]", size=14)
plt.ylabel("p(t) [MPa]", size=14)
plt.tight_layout();
plt.figure(figsize=(8, 2))
for node in range(len(Tnodelist)):
with open('Tnode' + str(node+2) + '.npy', 'wb') as f:
np.save(f, Tnodelist[node])
show_seaborn_plot('Tnode' + str(node+2) + '.npy', str(node+2))
plt.legend(str(node+2))
plt.xlabel("t [min]", size=14)
plt.ylabel("T(t) [K]", size=14)
plt.tight_layout();
# plt.figure(figsize=(8, 2))
# with open('power.npy', 'wb') as f:
# np.save(f, doublet.Phe/1e6)
# show_seaborn_plot('power.npy', 'power output')
# plt.xlabel("t [min]", size=14)
# plt.ylabel("P(t) [MW]", size=14)
plt.show()
# plot 95% CI with seaborn
# with open('pprior.npy', 'wb') as pprior:
# np.save(pprior, sol[0])
#
# show_seaborn_plot('pprior.npy', "p9")
# plt.show()
# with open('pmatrix.npy', 'rb') as f:
# a = np.load(f)
# print("saved solution matrix", a)
# plot 95% CI with seaborn
# with open('pnode9.npy', 'wb') as f9:
# np.save(f9, doublet.pnode9)
#
# with open('pnode8.npy', 'wb') as f8:
# np.save(f8, doublet.pnode8)
# plot_solution(sol, outfile)
# plt.show()
|
[
"pymc3.sample",
"matplotlib.pyplot.title",
"platform.python_version",
"numpy.random.seed",
"arviz.plot_joint",
"arviz.from_pymc3",
"matplotlib.pyplot.figure",
"numpy.mean",
"pymc3.Uniform",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axvline",
"theano.tensor.as_tensor_variable",
"arviz.plot_posterior",
"numpy.max",
"matplotlib.pyplot.rcParams.update",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"pymc3.Model",
"matplotlib.pyplot.show",
"numpy.save",
"scipy.stats.gaussian_kde",
"numpy.min",
"pymc3.traceplot",
"matplotlib.pyplot.ylabel",
"arviz.summary",
"numpy.random.uniform",
"numpy.log",
"matplotlib.pyplot.plot",
"scipy.stats.lognorm",
"time.time",
"matplotlib.pyplot.xlabel"
] |
[((1211, 1222), 'time.time', 'time.time', ([], {}), '()\n', (1220, 1222), False, 'import time\n'), ((21085, 21096), 'time.time', 'time.time', ([], {}), '()\n', (21094, 21096), False, 'import time\n'), ((21351, 21377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 2)'}), '(figsize=(8, 2))\n', (21361, 21377), True, 'import matplotlib.pyplot as plt\n'), ((21604, 21634), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [min]"""'], {'size': '(14)'}), "('t [min]', size=14)\n", (21614, 21634), True, 'import matplotlib.pyplot as plt\n'), ((21635, 21668), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(t) [MPa]"""'], {'size': '(14)'}), "('p(t) [MPa]', size=14)\n", (21645, 21668), True, 'import matplotlib.pyplot as plt\n'), ((21669, 21687), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21685, 21687), True, 'import matplotlib.pyplot as plt\n'), ((21690, 21716), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 2)'}), '(figsize=(8, 2))\n', (21700, 21716), True, 'import matplotlib.pyplot as plt\n'), ((21941, 21971), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [min]"""'], {'size': '(14)'}), "('t [min]', size=14)\n", (21951, 21971), True, 'import matplotlib.pyplot as plt\n'), ((21972, 22003), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""T(t) [K]"""'], {'size': '(14)'}), "('T(t) [K]', size=14)\n", (21982, 22003), True, 'import matplotlib.pyplot as plt\n'), ((22004, 22022), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22020, 22022), True, 'import matplotlib.pyplot as plt\n'), ((22243, 22253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22251, 22253), True, 'import matplotlib.pyplot as plt\n'), ((1440, 1471), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * t1steps)', 'Nt'], {}), '(0, 2 * t1steps, Nt)\n', (1451, 1471), True, 'import numpy as np\n'), ((3282, 3336), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 7)', 'tight_layout': '(True)'}), '(1, 1, figsize=(10, 7), tight_layout=True)\n', (3294, 3336), True, 'import matplotlib.pyplot as plt\n'), ((5563, 5598), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'H_true', 's': '(0.01)'}), '(scale=H_true, s=0.01)\n', (5576, 5598), False, 'from scipy import stats\n'), ((5611, 5646), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'φ_true', 's': '(0.01)'}), '(scale=φ_true, s=0.01)\n', (5624, 5646), False, 'from scipy import stats\n'), ((5657, 5692), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'K_true', 's': '(0.01)'}), '(scale=K_true, s=0.01)\n', (5670, 5692), False, 'from scipy import stats\n'), ((5705, 5741), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'ct_true', 's': '(0.01)'}), '(scale=ct_true, s=0.01)\n', (5718, 5741), False, 'from scipy import stats\n'), ((5753, 5788), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'Q_true', 's': '(0.01)'}), '(scale=Q_true, s=0.01)\n', (5766, 5788), False, 'from scipy import stats\n'), ((5801, 5837), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'cs_true', 's': '(0.01)'}), '(scale=cs_true, s=0.01)\n', (5814, 5837), False, 'from scipy import stats\n'), ((6244, 6266), 'numpy.random.seed', 'np.random.seed', (['(716742)'], {}), '(716742)\n', (6258, 6266), True, 'import numpy as np\n'), ((6578, 6609), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['parameters'], {}), '(parameters)\n', (6597, 6609), True, 'import matplotlib.pyplot as plt\n'), ((6614, 6641), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 
3)'}), '(figsize=(10, 3))\n', (6624, 6641), True, 'import matplotlib.pyplot as plt\n'), ((6868, 6886), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6884, 6886), True, 'import matplotlib.pyplot as plt\n'), ((6892, 6902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6900, 6902), True, 'import matplotlib.pyplot as plt\n'), ((10744, 10910), 'pymc3.traceplot', 'pm.traceplot', (['trace'], {'lines': "(('K', {}, [K_true]), ('φ', {}, [φ_true]), ('H', {}, [H_true]), ('ct', {},\n [ct_true]), ('Q', {}, [Q_true]), ('cs', {}, [cs_true]))"}), "(trace, lines=(('K', {}, [K_true]), ('φ', {}, [φ_true]), ('H',\n {}, [H_true]), ('ct', {}, [ct_true]), ('Q', {}, [Q_true]), ('cs', {}, [\n cs_true])))\n", (10756, 10910), True, 'import pymc3 as pm\n'), ((12287, 12313), 'arviz.from_pymc3', 'az.from_pymc3', ([], {'trace': 'trace'}), '(trace=trace)\n', (12300, 12313), True, 'import arviz as az\n'), ((12328, 12385), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['K']", 'kind': '"""hist"""'}), "(data_spp, var_names=['K'], kind='hist')\n", (12345, 12385), True, 'import arviz as az\n'), ((12401, 12458), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['φ']", 'kind': '"""hist"""'}), "(data_spp, var_names=['φ'], kind='hist')\n", (12418, 12458), True, 'import arviz as az\n'), ((12472, 12529), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['H']", 'kind': '"""hist"""'}), "(data_spp, var_names=['H'], kind='hist')\n", (12489, 12529), True, 'import arviz as az\n'), ((12544, 12601), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['Q']", 'kind': '"""hist"""'}), "(data_spp, var_names=['Q'], kind='hist')\n", (12561, 12601), True, 'import arviz as az\n'), ((12617, 12675), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['ct']", 'kind': '"""hist"""'}), "(data_spp, var_names=['ct'], kind='hist')\n", (12634, 12675), True, 'import arviz as az\n'), ((12691, 12749), 'arviz.plot_posterior', 'az.plot_posterior', (['data_spp'], {'var_names': "['cs']", 'kind': '"""hist"""'}), "(data_spp, var_names=['cs'], kind='hist')\n", (12708, 12749), True, 'import arviz as az\n'), ((12766, 12840), 'arviz.plot_joint', 'az.plot_joint', (['data_spp'], {'var_names': "['K', 'φ']", 'kind': '"""kde"""', 'fill_last': '(False)'}), "(data_spp, var_names=['K', 'φ'], kind='kde', fill_last=False)\n", (12779, 12840), True, 'import arviz as az\n'), ((12946, 12956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12954, 12956), True, 'import matplotlib.pyplot as plt\n'), ((13407, 13417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13415, 13417), True, 'import matplotlib.pyplot as plt\n'), ((21014, 21032), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21030, 21032), True, 'import matplotlib.pyplot as plt\n'), ((21039, 21049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21047, 21049), True, 'import matplotlib.pyplot as plt\n'), ((389, 414), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (412, 414), False, 'import platform\n'), ((4242, 4288), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(10)', 'high': '(100)', 'size': 'size'}), '(low=10, high=100, size=size)\n', (4259, 4288), True, 'import numpy as np\n'), ((4354, 4401), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.3)', 'high': '(0.5)', 'size': 'size'}), '(low=0.3, high=0.5, size=size)\n', (4371, 4401), True, 'import numpy as np\n'), 
((4423, 4466), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(3)', 'high': '(5)', 'size': 'size'}), '(low=3, high=5, size=size)\n', (4440, 4466), True, 'import numpy as np\n'), ((4480, 4531), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1e-05)', 'high': '(3e-05)', 'size': 'size'}), '(low=1e-05, high=3e-05, size=size)\n', (4497, 4531), True, 'import numpy as np\n'), ((4666, 4687), 'numpy.mean', 'np.mean', (['permeability'], {}), '(permeability)\n', (4673, 4687), True, 'import numpy as np\n'), ((6342, 6360), 'numpy.mean', 'np.mean', (['truemodel'], {}), '(truemodel)\n', (6349, 6360), True, 'import numpy as np\n'), ((6669, 6730), 'matplotlib.pyplot.plot', 'plt.plot', (['(truemodel / MPA)', '"""k"""'], {'label': '"""$p_{true}$"""', 'alpha': '(0.5)'}), "(truemodel / MPA, 'k', label='$p_{true}$', alpha=0.5)\n", (6677, 6730), True, 'import matplotlib.pyplot as plt\n'), ((6730, 6796), 'matplotlib.pyplot.plot', 'plt.plot', (['(data / MPA)', '"""r"""'], {'label': '"""$σ_{noise} = 1.0e-2$"""', 'alpha': '(0.5)'}), "(data / MPA, 'r', label='$σ_{noise} = 1.0e-2$', alpha=0.5)\n", (6738, 6796), True, 'import matplotlib.pyplot as plt\n'), ((6801, 6825), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p(t) [MPa]"""'], {}), "('p(t) [MPa]')\n", (6811, 6825), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [min]"""'], {}), "('t [min]')\n", (6837, 6848), True, 'import matplotlib.pyplot as plt\n'), ((8540, 8550), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (8548, 8550), True, 'import pymc3 as pm\n'), ((9437, 9473), 'pymc3.Uniform', 'pm.Uniform', (['"""H"""'], {'lower': '(35)', 'upper': '(105)'}), "('H', lower=35, upper=105)\n", (9447, 9473), True, 'import pymc3 as pm\n'), ((9490, 9527), 'pymc3.Uniform', 'pm.Uniform', (['"""φ"""'], {'lower': '(0.1)', 'upper': '(0.3)'}), "('φ', lower=0.1, upper=0.3)\n", (9500, 9527), True, 'import pymc3 as pm\n'), ((9542, 9585), 'pymc3.Uniform', 'pm.Uniform', (['"""K"""'], {'lower': '(5e-14)', 'upper': '(1.5e-13)'}), "('K', lower=5e-14, upper=1.5e-13)\n", (9552, 9585), True, 'import pymc3 as pm\n'), ((9604, 9648), 'pymc3.Uniform', 'pm.Uniform', (['"""ct"""'], {'lower': '(5e-11)', 'upper': '(1.5e-10)'}), "('ct', lower=5e-11, upper=1.5e-10)\n", (9614, 9648), True, 'import pymc3 as pm\n'), ((9666, 9707), 'pymc3.Uniform', 'pm.Uniform', (['"""Q"""'], {'lower': '(0.035)', 'upper': '(0.105)'}), "('Q', lower=0.035, upper=0.105)\n", (9676, 9707), True, 'import pymc3 as pm\n'), ((9724, 9764), 'pymc3.Uniform', 'pm.Uniform', (['"""cs"""'], {'lower': '(1325)', 'upper': '(3975)'}), "('cs', lower=1325, upper=3975)\n", (9734, 9764), True, 'import pymc3 as pm\n'), ((10272, 10333), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['[Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf]'], {}), '([Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf])\n', (10293, 10333), True, 'import theano.tensor as tt\n'), ((10581, 10667), 'pymc3.sample', 'pm.sample', (['ndraws'], {'cores': '(1)', 'chains': 'chains', 'tune': 'nburn', 'discard_tuned_samples': '(True)'}), '(ndraws, cores=1, chains=chains, tune=nburn, discard_tuned_samples\n =True)\n', (10590, 10667), True, 'import pymc3 as pm\n'), ((13428, 13438), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (13436, 13438), True, 'import pymc3 as pm\n'), ((20520, 20546), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 2)'}), '(figsize=(8, 2))\n', (20530, 20546), True, 'import matplotlib.pyplot as plt\n'), ((20857, 20955), 'matplotlib.pyplot.axvline', 'plt.axvline', (["{'K': 
K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true}[param]"], {'c': '"""k"""'}), "({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q':\n Q_true}[param], c='k')\n", (20868, 20955), True, 'import matplotlib.pyplot as plt\n'), ((20960, 20983), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (20970, 20983), True, 'import matplotlib.pyplot as plt\n'), ((20992, 21008), 'matplotlib.pyplot.title', 'plt.title', (['param'], {}), '(param)\n', (21001, 21008), True, 'import matplotlib.pyplot as plt\n'), ((21479, 21506), 'numpy.save', 'np.save', (['f', 'pnodelist[node]'], {}), '(f, pnodelist[node])\n', (21486, 21506), True, 'import numpy as np\n'), ((21818, 21845), 'numpy.save', 'np.save', (['f', 'Tnodelist[node]'], {}), '(f, Tnodelist[node])\n', (21825, 21845), True, 'import numpy as np\n'), ((10704, 10733), 'arviz.summary', 'az.summary', (['trace'], {'round_to': '(2)'}), '(trace, round_to=2)\n', (10714, 10733), True, 'import arviz as az\n'), ((17463, 17473), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (17471, 17473), True, 'import pymc3 as pm\n'), ((18742, 18785), 'theano.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['[H, φ, K, ct, Q, cs]'], {}), '([H, φ, K, ct, Q, cs])\n', (18763, 18785), True, 'import theano.tensor as tt\n'), ((19025, 19066), 'pymc3.sample', 'pm.sample', (['ndraws'], {'cores': '(1)', 'chains': 'chains'}), '(ndraws, cores=1, chains=chains)\n', (19034, 19066), True, 'import pymc3 as pm\n'), ((20706, 20734), 'numpy.linspace', 'np.linspace', (['smin', 'smax', '(100)'], {}), '(smin, smax, 100)\n', (20717, 20734), True, 'import numpy as np\n'), ((14349, 14360), 'numpy.log', 'np.log', (['μ_H'], {}), '(μ_H)\n', (14355, 14360), True, 'import numpy as np\n'), ((14409, 14420), 'numpy.log', 'np.log', (['μ_φ'], {}), '(μ_φ)\n', (14415, 14420), True, 'import numpy as np\n'), ((14538, 14550), 'numpy.log', 'np.log', (['μ_ct'], {}), '(μ_ct)\n', (14544, 14550), True, 'import numpy as np\n'), ((14598, 14609), 'numpy.log', 'np.log', (['μ_Q'], {}), '(μ_Q)\n', (14604, 14609), True, 'import numpy as np\n'), ((14658, 14670), 'numpy.log', 'np.log', (['μ_cs'], {}), '(μ_cs)\n', (14664, 14670), True, 'import numpy as np\n'), ((20657, 20672), 'numpy.min', 'np.min', (['samples'], {}), '(samples)\n', (20663, 20672), True, 'import numpy as np\n'), ((20674, 20689), 'numpy.max', 'np.max', (['samples'], {}), '(samples)\n', (20680, 20689), True, 'import numpy as np\n'), ((20751, 20778), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['samples'], {}), '(samples)\n', (20769, 20778), False, 'from scipy import stats\n')]
|
# Some implementation details regarding PyBullet:
#
# PyBullet's IK solver uses damped least squares (DLS) optimization. This is commonly
# known as Levenberg-Marquardt (LM) optimization.
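#
# The `joint_damping` field below is the DLS damping factor; it regularizes the
# pseudo-inverse of the Jacobian, trading some accuracy for stability near
# singular configurations.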
from __future__ import annotations
import dataclasses
from typing import NamedTuple, Optional
import numpy as np
import pybullet as p
from dm_robotics.geometry.geometry import Pose
from dm_robotics.transformations import transformations as tr
from coffee.client import BulletClient, ClientConfig, ConnectionMode
from coffee.joints import Joints
from coffee.structs import LinkState
from coffee.utils import geometry_utils
class IKSolution(NamedTuple):
"""An IK solution returned by the IKSolver.
Attributes:
qpos: The joint configuration.
linear_err: The linear error between the solved pose and the target pose.
angular_err: The angular error between the solved pose and the target pose.
"""
qpos: np.ndarray
linear_err: float
angular_err: float
@dataclasses.dataclass
class IKSolver:
"""Inverse kinematics solver.
Computes a joint configuration that brings an element (in a kinematic chain) to a
desired pose.
"""
pb_client: BulletClient
joints: Joints
ik_point_joint_id: int
joint_damping: float = 0.0
nullspace_reference: Optional[np.ndarray] = None
def __post_init__(self) -> None:
if self.nullspace_reference is None:
self.nullspace_reference = 0.5 * np.sum(self.joints.joints_range, axis=1)
# Dirty hack to get around pybullet's lack of support for computing FK given a
# joint configuration as an argument.
# See: https://github.com/bulletphysics/bullet3/issues/2603
self._shadow_client = BulletClient.create(
mode=ConnectionMode.DIRECT,
config=ClientConfig(),
)
manipulator_kwargs = self.pb_client._body_cache[self.joints.body_id]
shadow_body_id = self._shadow_client.load_urdf(**manipulator_kwargs)
# Make sure the shadow robot is in the same world pose as the actual one.
pos, quat = self.pb_client.getBasePositionAndOrientation(self.joints.body_id)
self._shadow_client.resetBasePositionAndOrientation(
bodyUniqueId=shadow_body_id,
posObj=pos,
ornObj=quat,
)
self._shadow_joints = Joints.from_body_id(shadow_body_id, self._shadow_client)
def solve(
self,
ref_pose: Pose,
linear_tol: float = 1e-3,
angular_tol: float = 1e-3,
max_steps: int = 100,
num_attempts: int = 50,
stop_on_first_successful_attempt: bool = False,
inital_joint_configuration: Optional[np.ndarray] = None,
nullspace_reference: Optional[np.ndarray] = None,
verbose: bool = False,
) -> Optional[np.ndarray]:
"""Attempts to solve the inverse kinematics problem.
This method computes a joint configuration that solves the IK problem. It
returns None if no solution is found. If multiple solutions are
        found, it returns the one whose joints are closest to the
        `nullspace_reference`. If no `nullspace_reference` is provided, the
        center of the joint ranges is used as reference.
Args:
ref_pose: Target pose of the controlled element in Cartesian world frame.
linear_tol: The linear tolerance, in meters, that determines if the solution
found is valid.
angular_tol: The angular tolerance, in radians, that determines if the
solution found is valid.
            max_steps: The maximum number of iterations the underlying IK solver
                may perform per attempt.
            num_attempts: The number of attempts the solver should make. For a
                given target pose there can be infinitely many valid solutions, so
                making several attempts allows comparing different joint
                configurations. The solver returns the solution whose joints are
                closest to the `nullspace_reference`. Note that not every attempt
                succeeds, so more attempts give a better chance of finding a
                correct solution.
stop_on_first_successful_attempt: If true, the method will return the
first solution that meets the tolerance criteria. If false, returns the
solution where the joints are closer to `nullspace_reference`.
inital_joint_configuration: A joint configuration that will be used for
                the first attempt. This can be useful for a complex pose, where a
                user can provide an initial guess that is close to the desired
solution. If None, all the joints will be set to 0 for the first
attempt.
nullspace_reference: The desired joint configuration that is set as the
nullspace goal. When the controlled element is in the desired pose, the
solver will try and bring the joint configuration closer to the
nullspace reference without moving the element. If no nullspace
reference is provided, the center of the joint ranges is used as
                reference. This overrides the `nullspace_reference` provided when
                the solver was constructed.
            verbose: If True, prints per-attempt progress information.
Returns:
The corresponding joint configuration if a solution is found, else None.
Raises:
ValueError: If the `nullspace_reference` does not have the correct length.
ValueError: If the `inital_joint_configuration` does not have the correct
length.
"""
if nullspace_reference is None:
nullspace_reference = self.nullspace_reference
else:
if len(nullspace_reference) != self.joints.dof:
raise ValueError("nullspace_reference has an invalid length.")
if inital_joint_configuration is None:
inital_joint_configuration = self.joints.zeros_array()
else:
inital_joint_configuration = np.array(inital_joint_configuration)
if len(inital_joint_configuration) != self.joints.dof:
raise ValueError("inital_joint_configuration has an invalid length.")
nullspace_jnt_qpos_min_err = np.inf
sol_qpos = None
success = False
# Each iteration of this loop attempts to solve the inverse kinematics.
# If a solution is found, it is compared to previous solutions.
for attempt in range(num_attempts):
# Use the user provided joint configuration for the first attempt.
if attempt == 0:
qpos_new = inital_joint_configuration
else:
# Randomize the initial joint configuration so that the IK can find
# a different solution.
qpos_new = np.random.uniform(
low=self.joints.joints_lower_limit,
high=self.joints.joints_upper_limit,
)
# Reset the joints to this configuration.
for i, joint_id in enumerate(self._shadow_joints.controllable_joints):
self._shadow_client.resetJointState(
self._shadow_joints.body_id,
joint_id,
qpos_new[i],
)
# Solve the IK.
joint_qpos, linear_err, angular_err = self._solve_ik(
ref_pose,
max_steps,
verbose,
)
# Check if the attempt was successful. The solution is saved if the
# joints are closer to the nullspace reference.
if linear_err <= linear_tol and angular_err <= angular_tol:
success = True
nullspace_jnt_qpos_err = float(
np.linalg.norm(joint_qpos - nullspace_reference)
)
if nullspace_jnt_qpos_err < nullspace_jnt_qpos_min_err:
nullspace_jnt_qpos_min_err = nullspace_jnt_qpos_err
sol_qpos = joint_qpos
if verbose:
print(
f"attempt: {attempt} "
f"- nullspace_jnt_qpos_min_err: {nullspace_jnt_qpos_min_err:.4f} "
f"- success: {success}"
)
if success and stop_on_first_successful_attempt:
break
if not success:
print(f"Unable to solve inverse kinematics for ref_pose: {ref_pose}")
else:
if verbose:
                print(f"Found a solution in {attempt + 1} attempts.")
return sol_qpos
def _solve_ik(
self,
ref_pose: Pose,
max_steps: int,
verbose: bool,
) -> IKSolution:
"""Finds a joint configuration that brings element pose to target pose."""
try:
qpos = self._shadow_client.calculateInverseKinematics(
bodyUniqueId=self._shadow_joints.body_id,
endEffectorLinkIndex=self.ik_point_joint_id,
targetPosition=ref_pose.position,
targetOrientation=geometry_utils.as_quaternion_xyzw(
ref_pose.quaternion
),
residualThreshold=1e-5,
maxNumIterations=max_steps,
jointDamping=self._shadow_joints.const_array(
self.joint_damping
).tolist(),
)
if np.isnan(np.sum(qpos)):
qpos = None
else:
# Clip to joint limits.
qpos = np.clip(
a=qpos,
a_min=self._shadow_joints.joints_lower_limit,
a_max=self._shadow_joints.joints_upper_limit,
)
except p.error as e:
if verbose:
print(f"IK failed with error message: {e}")
qpos = None
# If we were unable to find a solution, exit early.
if qpos is None:
return IKSolution(np.empty(self._shadow_joints.dof), np.inf, np.inf)
# If we found a solution, we compute its associated pose and compare with the
# target pose. We do this by first using forward kinematics to compute the
# pose of the controlled element associated with the solution and then computing
# linear and angular errors.
# Forward kinematics.
for i, joint_id in enumerate(self._shadow_joints.controllable_joints):
self._shadow_client.resetJointState(
self._shadow_joints.body_id,
joint_id,
qpos[i],
)
cur_pose = self.forward_kinematics(shadow=True)
# Error computation.
linear_err = float(np.linalg.norm(ref_pose.position - cur_pose.position))
err_quat = tr.quat_diff_active(ref_pose.quaternion, cur_pose.quaternion)
err_axis_angle = tr.quat_to_axisangle(err_quat)
angular_err = float(np.linalg.norm(err_axis_angle))
return IKSolution(np.array(qpos), linear_err, angular_err)
def forward_kinematics(self, shadow: bool = False) -> Pose:
if shadow:
eef_link_state = LinkState(
*self._shadow_client.getLinkState(
bodyUniqueId=self._shadow_joints.body_id,
linkIndex=self.ik_point_joint_id,
computeLinkVelocity=0,
computeForwardKinematics=True,
)
)
else:
eef_link_state = LinkState(
*self.pb_client.getLinkState(
bodyUniqueId=self.joints.body_id,
linkIndex=self.ik_point_joint_id,
computeLinkVelocity=0,
computeForwardKinematics=True,
)
)
return Pose(
position=eef_link_state.link_world_position,
quaternion=geometry_utils.as_quaternion_wxyz(
eef_link_state.link_world_orientation
),
)
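# Minimal usage sketch (assumes `pb_client`, `joints`, and a target `ref_pose`
# have already been constructed for a loaded manipulator; `ik_point_joint_id=6`
# is illustrative only):
#
#   solver = IKSolver(pb_client=pb_client, joints=joints, ik_point_joint_id=6)
#   qpos = solver.solve(ref_pose, num_attempts=20, verbose=True)
#   if qpos is not None:
#       print("IK solution:", qpos)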
|
[
"numpy.random.uniform",
"coffee.client.ClientConfig",
"numpy.sum",
"coffee.joints.Joints.from_body_id",
"numpy.empty",
"dm_robotics.transformations.transformations.quat_diff_active",
"numpy.clip",
"coffee.utils.geometry_utils.as_quaternion_wxyz",
"coffee.utils.geometry_utils.as_quaternion_xyzw",
"numpy.array",
"numpy.linalg.norm",
"dm_robotics.transformations.transformations.quat_to_axisangle"
] |
[((2359, 2415), 'coffee.joints.Joints.from_body_id', 'Joints.from_body_id', (['shadow_body_id', 'self._shadow_client'], {}), '(shadow_body_id, self._shadow_client)\n', (2378, 2415), False, 'from coffee.joints import Joints\n'), ((10821, 10882), 'dm_robotics.transformations.transformations.quat_diff_active', 'tr.quat_diff_active', (['ref_pose.quaternion', 'cur_pose.quaternion'], {}), '(ref_pose.quaternion, cur_pose.quaternion)\n', (10840, 10882), True, 'from dm_robotics.transformations import transformations as tr\n'), ((10908, 10938), 'dm_robotics.transformations.transformations.quat_to_axisangle', 'tr.quat_to_axisangle', (['err_quat'], {}), '(err_quat)\n', (10928, 10938), True, 'from dm_robotics.transformations import transformations as tr\n'), ((6023, 6059), 'numpy.array', 'np.array', (['inital_joint_configuration'], {}), '(inital_joint_configuration)\n', (6031, 6059), True, 'import numpy as np\n'), ((10747, 10800), 'numpy.linalg.norm', 'np.linalg.norm', (['(ref_pose.position - cur_pose.position)'], {}), '(ref_pose.position - cur_pose.position)\n', (10761, 10800), True, 'import numpy as np\n'), ((10967, 10997), 'numpy.linalg.norm', 'np.linalg.norm', (['err_axis_angle'], {}), '(err_axis_angle)\n', (10981, 10997), True, 'import numpy as np\n'), ((11026, 11040), 'numpy.array', 'np.array', (['qpos'], {}), '(qpos)\n', (11034, 11040), True, 'import numpy as np\n'), ((1466, 1506), 'numpy.sum', 'np.sum', (['self.joints.joints_range'], {'axis': '(1)'}), '(self.joints.joints_range, axis=1)\n', (1472, 1506), True, 'import numpy as np\n'), ((1819, 1833), 'coffee.client.ClientConfig', 'ClientConfig', ([], {}), '()\n', (1831, 1833), False, 'from coffee.client import BulletClient, ClientConfig, ConnectionMode\n'), ((6834, 6929), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.joints.joints_lower_limit', 'high': 'self.joints.joints_upper_limit'}), '(low=self.joints.joints_lower_limit, high=self.joints.\n joints_upper_limit)\n', (6851, 6929), True, 'import numpy as np\n'), ((9454, 9466), 'numpy.sum', 'np.sum', (['qpos'], {}), '(qpos)\n', (9460, 9466), True, 'import numpy as np\n'), ((9578, 9690), 'numpy.clip', 'np.clip', ([], {'a': 'qpos', 'a_min': 'self._shadow_joints.joints_lower_limit', 'a_max': 'self._shadow_joints.joints_upper_limit'}), '(a=qpos, a_min=self._shadow_joints.joints_lower_limit, a_max=self.\n _shadow_joints.joints_upper_limit)\n', (9585, 9690), True, 'import numpy as np\n'), ((10018, 10051), 'numpy.empty', 'np.empty', (['self._shadow_joints.dof'], {}), '(self._shadow_joints.dof)\n', (10026, 10051), True, 'import numpy as np\n'), ((11920, 11992), 'coffee.utils.geometry_utils.as_quaternion_wxyz', 'geometry_utils.as_quaternion_wxyz', (['eef_link_state.link_world_orientation'], {}), '(eef_link_state.link_world_orientation)\n', (11953, 11992), False, 'from coffee.utils import geometry_utils\n'), ((7804, 7852), 'numpy.linalg.norm', 'np.linalg.norm', (['(joint_qpos - nullspace_reference)'], {}), '(joint_qpos - nullspace_reference)\n', (7818, 7852), True, 'import numpy as np\n'), ((9109, 9163), 'coffee.utils.geometry_utils.as_quaternion_xyzw', 'geometry_utils.as_quaternion_xyzw', (['ref_pose.quaternion'], {}), '(ref_pose.quaternion)\n', (9142, 9163), False, 'from coffee.utils import geometry_utils\n')]
|
import os
import json
import shutil
import argparse
import numpy as np
from PIL import Image
def getSeqInfo(dataset_dir, seq):
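    # Inspect the annotation masks of one sequence and return its number of
    # frames, its image size as [width, height], and its number of objects.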
ann_dir = os.path.join(dataset_dir, 'Annotations', '480p')
seq_path = os.path.join(ann_dir, seq)
frame_list = os.listdir(seq_path)
frame_num = len(frame_list)
frames = os.listdir(os.path.join(ann_dir, seq))
masks = np.stack([np.array(Image.open(os.path.join(ann_dir, seq, f)).convert('P'), dtype=np.uint8) for f in frames])
img_size = [masks.shape[1], masks.shape[0]]
obj_ids = np.delete(np.unique(masks), 0)
return frame_num, img_size, len(obj_ids)
def create_json(root_dir):
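    # Build the global DAVIS-style json ('scb_ytbvos.json') that describes every
    # validation sequence (frame count, image size, number of objects).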
val_txt_dst = os.path.join(root_dir, 'ImageSets', '2017', 'val.txt')
with open(val_txt_dst, 'r') as f:
val_seqs = f.readlines()
f.close()
val_seqs = list(map(lambda elem: elem.strip(), val_seqs))
    # Generate the global DAVIS-style json describing all validation sequences.
json_dict = dict()
json_dict['attributes'] = []
json_dict['sets'] = ["train", "val"]
json_dict['years'] = [2018]
json_dict['sequences'] = dict()
for idx, seq in enumerate(val_seqs):
seq = seq.strip()
seq_dict = {'attributes': [], 'eval_t': True, 'name': seq, 'set': 'val', 'year': 2018, 'num_scribbles': 3}
seq_dict['num_frames'], seq_dict['image_size'], seq_dict['num_objects'] = getSeqInfo(root_dir, seq)
json_dict['sequences'][seq] = seq_dict
print(f'valid: {idx+1}')
global_json_path = os.path.join(root_dir, 'scb_ytbvos.json')
with open(global_json_path, 'wt') as f:
json.dump(json_dict, f, indent=2, separators=(',', ': '))
def create_dataset(src_ytbvos_path, dst_ytbvos_path, scb_ytbvos_path):
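    # Re-arrange a YouTube-VOS training set into a DAVIS-like layout
    # (ImageSets / JPEGImages / Annotations / Scribbles), keeping only the
    # sequences listed in the scribble validation split.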
if os.path.exists(src_ytbvos_path):
os.makedirs(dst_ytbvos_path, exist_ok=True)
# set youtube original path
src_dir_JPEGImages = os.path.join(src_ytbvos_path, 'train', 'JPEGImages')
src_dir_Annotations = os.path.join(src_ytbvos_path, 'train', 'CleanedAnnotations')
# set youtube davis-like path
dst_dir_ImageSets = os.path.join(dst_ytbvos_path, 'ImageSets', '2017')
dst_dir_JPEGImages = os.path.join(dst_ytbvos_path, 'JPEGImages', '480p')
dst_dir_Annotations = os.path.join(dst_ytbvos_path, 'Annotations', '480p')
dst_dir_Scribbles = os.path.join(dst_ytbvos_path, 'Scribbles')
if os.path.isdir(src_dir_JPEGImages) and os.path.isdir(src_dir_Annotations) and os.path.isdir(scb_ytbvos_path):
# load sequence list
assert len(os.listdir(src_dir_JPEGImages)) == len(os.listdir(src_dir_Annotations))
with open(os.path.join(scb_ytbvos_path, 'val.txt'), 'r') as f:
seqs_list = f.readlines()
f.close()
seqs_list = list(map(lambda elem: elem.strip(), seqs_list))
else:
if not os.path.isdir(src_dir_JPEGImages): print(f"{src_dir_JPEGImages} is not found in {src_ytbvos_path}")
if not os.path.isdir(src_dir_Annotations): print(f"{src_dir_Annotations} is not found in {src_ytbvos_path}")
if not os.path.isdir(scb_ytbvos_path): print(f"{scb_ytbvos_path} is not found")
return
# create dist dirs
os.makedirs(dst_dir_ImageSets, exist_ok=True)
os.makedirs(dst_dir_JPEGImages, exist_ok=True)
os.makedirs(dst_dir_Annotations, exist_ok=True)
os.makedirs(dst_dir_Scribbles, exist_ok=True)
# --- copy files ---
# ImageSets
shutil.copyfile(os.path.join(scb_ytbvos_path, 'val.txt'), os.path.join(dst_dir_ImageSets, 'val.txt'))
len_seq = []
for i, seq in enumerate(seqs_list):
print(f"validation set {i+1}")
# JPEGImages
src_dir_JPEGImages_seq = os.path.join(src_dir_JPEGImages, seq)
dst_dir_JPEGImages_seq = os.path.join(dst_dir_JPEGImages, seq)
os.makedirs(dst_dir_JPEGImages_seq, exist_ok=True)
file_name = np.sort(os.listdir(src_dir_JPEGImages_seq))
for j, file in enumerate(file_name):
src_path = os.path.join(src_dir_JPEGImages_seq, file)
dst_path = os.path.join(dst_dir_JPEGImages_seq, f"{str(j).zfill(5)}.jpg")
if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
# if not os.path.exists(dst_path): os.symlink(src_path, dst_path)
# Annotations
src_dir_Annotations_seq = os.path.join(src_dir_Annotations, seq)
dst_dir_Annotations_seq = os.path.join(dst_dir_Annotations, seq)
os.makedirs(dst_dir_Annotations_seq, exist_ok=True)
file_name = np.sort(os.listdir(src_dir_Annotations_seq))
for j, file in enumerate(file_name):
src_path = os.path.join(src_dir_Annotations_seq, file)
dst_path = os.path.join(dst_dir_Annotations_seq, f"{str(j).zfill(5)}.png")
if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
# if not os.path.exists(dst_path): os.symlink(src_path, dst_path)
# Scribbles
src_dir_Scribbles_seq = os.path.join(scb_ytbvos_path, seq)
dst_dir_Scribbles_seq = os.path.join(dst_dir_Scribbles, seq)
os.makedirs(dst_dir_Scribbles_seq, exist_ok=True)
file_name = np.sort(os.listdir(src_dir_Scribbles_seq))
for j, file in enumerate(file_name):
src_path = os.path.join(src_dir_Scribbles_seq, file)
dst_path = os.path.join(dst_dir_Scribbles_seq, file)
if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
# statistic
file_name = np.sort(os.listdir(src_dir_JPEGImages_seq))
len_seq.append(len(file_name))
# create sequences information
create_json(dst_ytbvos_path)
print(f"done")
else:
print(f"{src_ytbvos_path} not existed")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src', type=str, required=True)
parser.add_argument('--scb', type=str, required=True)
parser.add_argument('--dst', type=str, default='data/Scribble_Youtube_VOS')
args = parser.parse_args()
src_ytbvos_path = args.src
dst_ytbvos_path = args.dst
scb_ytbvos_path = args.scb
create_dataset(src_ytbvos_path, dst_ytbvos_path, scb_ytbvos_path)
if __name__ == '__main__':
main()
|
[
"json.dump",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"shutil.copyfile",
"os.path.join",
"os.listdir",
"numpy.unique"
] |
[((143, 191), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Annotations"""', '"""480p"""'], {}), "(dataset_dir, 'Annotations', '480p')\n", (155, 191), False, 'import os\n'), ((207, 233), 'os.path.join', 'os.path.join', (['ann_dir', 'seq'], {}), '(ann_dir, seq)\n', (219, 233), False, 'import os\n'), ((251, 271), 'os.listdir', 'os.listdir', (['seq_path'], {}), '(seq_path)\n', (261, 271), False, 'import os\n'), ((664, 718), 'os.path.join', 'os.path.join', (['root_dir', '"""ImageSets"""', '"""2017"""', '"""val.txt"""'], {}), "(root_dir, 'ImageSets', '2017', 'val.txt')\n", (676, 718), False, 'import os\n'), ((1486, 1527), 'os.path.join', 'os.path.join', (['root_dir', '"""scb_ytbvos.json"""'], {}), "(root_dir, 'scb_ytbvos.json')\n", (1498, 1527), False, 'import os\n'), ((1719, 1750), 'os.path.exists', 'os.path.exists', (['src_ytbvos_path'], {}), '(src_ytbvos_path)\n', (1733, 1750), False, 'import os\n'), ((5977, 6002), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6000, 6002), False, 'import argparse\n'), ((329, 355), 'os.path.join', 'os.path.join', (['ann_dir', 'seq'], {}), '(ann_dir, seq)\n', (341, 355), False, 'import os\n'), ((550, 566), 'numpy.unique', 'np.unique', (['masks'], {}), '(masks)\n', (559, 566), True, 'import numpy as np\n'), ((1580, 1637), 'json.dump', 'json.dump', (['json_dict', 'f'], {'indent': '(2)', 'separators': "(',', ': ')"}), "(json_dict, f, indent=2, separators=(',', ': '))\n", (1589, 1637), False, 'import json\n'), ((1760, 1803), 'os.makedirs', 'os.makedirs', (['dst_ytbvos_path'], {'exist_ok': '(True)'}), '(dst_ytbvos_path, exist_ok=True)\n', (1771, 1803), False, 'import os\n'), ((1870, 1922), 'os.path.join', 'os.path.join', (['src_ytbvos_path', '"""train"""', '"""JPEGImages"""'], {}), "(src_ytbvos_path, 'train', 'JPEGImages')\n", (1882, 1922), False, 'import os\n'), ((1953, 2013), 'os.path.join', 'os.path.join', (['src_ytbvos_path', '"""train"""', '"""CleanedAnnotations"""'], {}), "(src_ytbvos_path, 'train', 'CleanedAnnotations')\n", (1965, 2013), False, 'import os\n'), ((2081, 2131), 'os.path.join', 'os.path.join', (['dst_ytbvos_path', '"""ImageSets"""', '"""2017"""'], {}), "(dst_ytbvos_path, 'ImageSets', '2017')\n", (2093, 2131), False, 'import os\n'), ((2161, 2212), 'os.path.join', 'os.path.join', (['dst_ytbvos_path', '"""JPEGImages"""', '"""480p"""'], {}), "(dst_ytbvos_path, 'JPEGImages', '480p')\n", (2173, 2212), False, 'import os\n'), ((2243, 2295), 'os.path.join', 'os.path.join', (['dst_ytbvos_path', '"""Annotations"""', '"""480p"""'], {}), "(dst_ytbvos_path, 'Annotations', '480p')\n", (2255, 2295), False, 'import os\n'), ((2324, 2366), 'os.path.join', 'os.path.join', (['dst_ytbvos_path', '"""Scribbles"""'], {}), "(dst_ytbvos_path, 'Scribbles')\n", (2336, 2366), False, 'import os\n'), ((3229, 3274), 'os.makedirs', 'os.makedirs', (['dst_dir_ImageSets'], {'exist_ok': '(True)'}), '(dst_dir_ImageSets, exist_ok=True)\n', (3240, 3274), False, 'import os\n'), ((3283, 3329), 'os.makedirs', 'os.makedirs', (['dst_dir_JPEGImages'], {'exist_ok': '(True)'}), '(dst_dir_JPEGImages, exist_ok=True)\n', (3294, 3329), False, 'import os\n'), ((3338, 3385), 'os.makedirs', 'os.makedirs', (['dst_dir_Annotations'], {'exist_ok': '(True)'}), '(dst_dir_Annotations, exist_ok=True)\n', (3349, 3385), False, 'import os\n'), ((3394, 3439), 'os.makedirs', 'os.makedirs', (['dst_dir_Scribbles'], {'exist_ok': '(True)'}), '(dst_dir_Scribbles, exist_ok=True)\n', (3405, 3439), False, 'import os\n'), ((2379, 2412), 'os.path.isdir', 'os.path.isdir', 
(['src_dir_JPEGImages'], {}), '(src_dir_JPEGImages)\n', (2392, 2412), False, 'import os\n'), ((2417, 2451), 'os.path.isdir', 'os.path.isdir', (['src_dir_Annotations'], {}), '(src_dir_Annotations)\n', (2430, 2451), False, 'import os\n'), ((2456, 2486), 'os.path.isdir', 'os.path.isdir', (['scb_ytbvos_path'], {}), '(scb_ytbvos_path)\n', (2469, 2486), False, 'import os\n'), ((3514, 3554), 'os.path.join', 'os.path.join', (['scb_ytbvos_path', '"""val.txt"""'], {}), "(scb_ytbvos_path, 'val.txt')\n", (3526, 3554), False, 'import os\n'), ((3556, 3598), 'os.path.join', 'os.path.join', (['dst_dir_ImageSets', '"""val.txt"""'], {}), "(dst_dir_ImageSets, 'val.txt')\n", (3568, 3598), False, 'import os\n'), ((3771, 3808), 'os.path.join', 'os.path.join', (['src_dir_JPEGImages', 'seq'], {}), '(src_dir_JPEGImages, seq)\n', (3783, 3808), False, 'import os\n'), ((3846, 3883), 'os.path.join', 'os.path.join', (['dst_dir_JPEGImages', 'seq'], {}), '(dst_dir_JPEGImages, seq)\n', (3858, 3883), False, 'import os\n'), ((3896, 3946), 'os.makedirs', 'os.makedirs', (['dst_dir_JPEGImages_seq'], {'exist_ok': '(True)'}), '(dst_dir_JPEGImages_seq, exist_ok=True)\n', (3907, 3946), False, 'import os\n'), ((4456, 4494), 'os.path.join', 'os.path.join', (['src_dir_Annotations', 'seq'], {}), '(src_dir_Annotations, seq)\n', (4468, 4494), False, 'import os\n'), ((4533, 4571), 'os.path.join', 'os.path.join', (['dst_dir_Annotations', 'seq'], {}), '(dst_dir_Annotations, seq)\n', (4545, 4571), False, 'import os\n'), ((4584, 4635), 'os.makedirs', 'os.makedirs', (['dst_dir_Annotations_seq'], {'exist_ok': '(True)'}), '(dst_dir_Annotations_seq, exist_ok=True)\n', (4595, 4635), False, 'import os\n'), ((5144, 5178), 'os.path.join', 'os.path.join', (['scb_ytbvos_path', 'seq'], {}), '(scb_ytbvos_path, seq)\n', (5156, 5178), False, 'import os\n'), ((5215, 5251), 'os.path.join', 'os.path.join', (['dst_dir_Scribbles', 'seq'], {}), '(dst_dir_Scribbles, seq)\n', (5227, 5251), False, 'import os\n'), ((5264, 5313), 'os.makedirs', 'os.makedirs', (['dst_dir_Scribbles_seq'], {'exist_ok': '(True)'}), '(dst_dir_Scribbles_seq, exist_ok=True)\n', (5275, 5313), False, 'import os\n'), ((2861, 2894), 'os.path.isdir', 'os.path.isdir', (['src_dir_JPEGImages'], {}), '(src_dir_JPEGImages)\n', (2874, 2894), False, 'import os\n'), ((2980, 3014), 'os.path.isdir', 'os.path.isdir', (['src_dir_Annotations'], {}), '(src_dir_Annotations)\n', (2993, 3014), False, 'import os\n'), ((3101, 3131), 'os.path.isdir', 'os.path.isdir', (['scb_ytbvos_path'], {}), '(scb_ytbvos_path)\n', (3114, 3131), False, 'import os\n'), ((3979, 4013), 'os.listdir', 'os.listdir', (['src_dir_JPEGImages_seq'], {}), '(src_dir_JPEGImages_seq)\n', (3989, 4013), False, 'import os\n'), ((4091, 4133), 'os.path.join', 'os.path.join', (['src_dir_JPEGImages_seq', 'file'], {}), '(src_dir_JPEGImages_seq, file)\n', (4103, 4133), False, 'import os\n'), ((4668, 4703), 'os.listdir', 'os.listdir', (['src_dir_Annotations_seq'], {}), '(src_dir_Annotations_seq)\n', (4678, 4703), False, 'import os\n'), ((4781, 4824), 'os.path.join', 'os.path.join', (['src_dir_Annotations_seq', 'file'], {}), '(src_dir_Annotations_seq, file)\n', (4793, 4824), False, 'import os\n'), ((5346, 5379), 'os.listdir', 'os.listdir', (['src_dir_Scribbles_seq'], {}), '(src_dir_Scribbles_seq)\n', (5356, 5379), False, 'import os\n'), ((5457, 5498), 'os.path.join', 'os.path.join', (['src_dir_Scribbles_seq', 'file'], {}), '(src_dir_Scribbles_seq, file)\n', (5469, 5498), False, 'import os\n'), ((5526, 5567), 'os.path.join', 'os.path.join', 
(['dst_dir_Scribbles_seq', 'file'], {}), '(dst_dir_Scribbles_seq, file)\n', (5538, 5567), False, 'import os\n'), ((5710, 5744), 'os.listdir', 'os.listdir', (['src_dir_JPEGImages_seq'], {}), '(src_dir_JPEGImages_seq)\n', (5720, 5744), False, 'import os\n'), ((2544, 2574), 'os.listdir', 'os.listdir', (['src_dir_JPEGImages'], {}), '(src_dir_JPEGImages)\n', (2554, 2574), False, 'import os\n'), ((2583, 2614), 'os.listdir', 'os.listdir', (['src_dir_Annotations'], {}), '(src_dir_Annotations)\n', (2593, 2614), False, 'import os\n'), ((2638, 2678), 'os.path.join', 'os.path.join', (['scb_ytbvos_path', '"""val.txt"""'], {}), "(scb_ytbvos_path, 'val.txt')\n", (2650, 2678), False, 'import os\n'), ((4247, 4271), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (4261, 4271), False, 'import os\n'), ((4273, 4308), 'shutil.copyfile', 'shutil.copyfile', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (4288, 4308), False, 'import shutil\n'), ((4939, 4963), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (4953, 4963), False, 'import os\n'), ((4965, 5000), 'shutil.copyfile', 'shutil.copyfile', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (4980, 5000), False, 'import shutil\n'), ((5591, 5615), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (5605, 5615), False, 'import os\n'), ((5617, 5652), 'shutil.copyfile', 'shutil.copyfile', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (5632, 5652), False, 'import shutil\n'), ((399, 428), 'os.path.join', 'os.path.join', (['ann_dir', 'seq', 'f'], {}), '(ann_dir, seq, f)\n', (411, 428), False, 'import os\n')]
|
#
# Created by djz on 2022/04/01.
#
import numpy as np
from typing import Dict
from transformers.file_utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import GenericTensor, Pipeline
def sigmoid(_outputs):
return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
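    # Subtract the per-row maximum before exponentiating for numerical stability.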
maxes = np.max(_outputs, axis=-1, keepdims=True)
shifted_exp = np.exp(_outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
SIGMOID = "sigmoid"
SOFTMAX = "softmax"
NONE = "none"
class TextClassificationPipeline(Pipeline):
return_all_scores = False
function_to_apply = ClassificationFunction.NONE
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, **tokenizer_kwargs):
preprocess_params = tokenizer_kwargs
postprocess_params = {}
if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
return_all_scores = self.model.config.return_all_scores
if return_all_scores is not None:
postprocess_params["return_all_scores"] = return_all_scores
if isinstance(function_to_apply, str):
function_to_apply = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
postprocess_params["function_to_apply"] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__(self, *args, **kwargs):
result = super().__call__(*args, **kwargs)
if isinstance(args[0], str):
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
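        # Tokenize the raw text into the PyTorch tensors expected by the model.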
return_tensors = 'pt'
return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
def _forward(self, model_inputs):
return self.model(**model_inputs)
def postprocess(self, model_outputs, function_to_apply=None, return_all_scores=False):
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
function_to_apply = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
function_to_apply = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
function_to_apply = self.model.config.function_to_apply
else:
function_to_apply = ClassificationFunction.NONE
outputs = model_outputs["logits"][0]
outputs = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
scores = sigmoid(outputs)
elif function_to_apply == ClassificationFunction.SOFTMAX:
scores = softmax(outputs)
elif function_to_apply == ClassificationFunction.NONE:
scores = outputs
else:
raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
if return_all_scores:
return [{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)]
else:
return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
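# Minimal usage sketch (assumes a compatible `model` and `tokenizer` have been
# loaded elsewhere; the printed label and score are illustrative only):
#
#   pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer)
#   pipe("This library is great!")
#   # -> [{'label': 'POSITIVE', 'score': 0.99}]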
|
[
"numpy.max",
"numpy.exp"
] |
[((331, 371), 'numpy.max', 'np.max', (['_outputs'], {'axis': '(-1)', 'keepdims': '(True)'}), '(_outputs, axis=-1, keepdims=True)\n', (337, 371), True, 'import numpy as np\n'), ((390, 414), 'numpy.exp', 'np.exp', (['(_outputs - maxes)'], {}), '(_outputs - maxes)\n', (396, 414), True, 'import numpy as np\n'), ((276, 293), 'numpy.exp', 'np.exp', (['(-_outputs)'], {}), '(-_outputs)\n', (282, 293), True, 'import numpy as np\n')]
|
#coding: UTF-8
import sys
import os
import os.path
import glob
import cv2
import numpy as np
CAPTUREDDIR = './captured'
CALIBFLAG = 0 # cv2.CALIB_FIX_K3
def calibFromImages(dirname, chess_shape, chess_block_size):
if not os.path.exists(dirname):
print('Directory \'' + dirname + '\' was not found')
return None
filenames = sorted(glob.glob(dirname + '/*'))
if len(filenames) == 0:
print('No image was found in \'' + dirname + '\'')
return None
print('=== Camera Calibration ===')
objp = np.zeros((chess_shape[0]*chess_shape[1], 3), np.float32)
objp[:, :2] = chess_block_size * \
np.mgrid[0:chess_shape[0], 0:chess_shape[1]].T.reshape(-1, 2)
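    # objp now holds the 3D coordinates of the chessboard corners on the z=0
    # plane, in the same metric unit as chess_block_size.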
print('Finding chess corners in input images ...')
objp_list = []
imgp_list = []
img_shape = None
for f in filenames:
print(' ' + f + ' : ', end='')
img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
if img_shape is None:
img_shape = img.shape
elif img_shape != img.shape:
print('Mismatch size')
continue
ret, imgp = cv2.findChessboardCorners(img, chess_shape, None)
if ret:
print('Found')
objp_list.append(objp)
imgp_list.append(imgp)
else:
print('Not found')
print(' ', len(objp_list), 'images are used')
    # cv2.calibrateCamera expects the image size as (width, height), while
    # img.shape is (rows, cols), so the dimensions are reversed here.
    ret, cam_int, cam_dist, rvecs, tvecs = cv2.calibrateCamera(
        objp_list, imgp_list, img_shape[::-1], None, None, None, None, CALIBFLAG
    )
print('Image size :', img_shape)
print('RMS :', ret)
print('Intrinsic parameters :')
print(cam_int)
print('Distortion parameters :')
print(cam_dist)
print()
rmtxs = list(map(lambda vec: cv2.Rodrigues(vec)[0], rvecs))
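    # Dump the calibration results (intrinsics, distortion, per-view extrinsics)
    # to calibration.xml.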
fs = cv2.FileStorage('calibration.xml', cv2.FILE_STORAGE_WRITE)
    fs.write('img_shape', np.array(img_shape))  # FileStorage cannot write a plain tuple
fs.write('rms', ret)
fs.write('intrinsic', cam_int)
fs.write('distortion', cam_dist)
fs.write('rotation_vectors', np.array(rvecs))
fs.write('rotation_matrixes', np.array(rmtxs))
fs.write('translation_vectors', np.array(tvecs))
fs.release()
return (img_shape, ret, cam_int, cam_dist, rvecs, tvecs)
if __name__ == '__main__':
if len(sys.argv) == 4:
chess_shape = (int(sys.argv[1]), int(sys.argv[2]))
chess_block_size = float(sys.argv[3])
calibFromImages(CAPTUREDDIR, chess_shape, chess_block_size)
else:
print('Usage :')
print(' Save captured images into \'' + CAPTUREDDIR + '\'')
print(
            '    Run \'python3 caliblate_camera_from_images.py <num of chess corners in vert> <num of chess corners in hori> <chess block size (m or mm)>\'')
|
[
"cv2.findChessboardCorners",
"numpy.zeros",
"os.path.exists",
"cv2.imread",
"cv2.FileStorage",
"cv2.Rodrigues",
"numpy.array",
"cv2.calibrateCamera",
"glob.glob"
] |
[((548, 606), 'numpy.zeros', 'np.zeros', (['(chess_shape[0] * chess_shape[1], 3)', 'np.float32'], {}), '((chess_shape[0] * chess_shape[1], 3), np.float32)\n', (556, 606), True, 'import numpy as np\n'), ((1422, 1513), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objp_list', 'imgp_list', 'img_shape', 'None', 'None', 'None', 'None', 'CALIBFLAG'], {}), '(objp_list, imgp_list, img_shape, None, None, None, None,\n CALIBFLAG)\n', (1441, 1513), False, 'import cv2\n'), ((1784, 1842), 'cv2.FileStorage', 'cv2.FileStorage', (['"""calibration.xml"""', 'cv2.FILE_STORAGE_WRITE'], {}), "('calibration.xml', cv2.FILE_STORAGE_WRITE)\n", (1799, 1842), False, 'import cv2\n'), ((231, 254), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (245, 254), False, 'import os\n'), ((361, 386), 'glob.glob', 'glob.glob', (["(dirname + '/*')"], {}), "(dirname + '/*')\n", (370, 386), False, 'import glob\n'), ((907, 942), 'cv2.imread', 'cv2.imread', (['f', 'cv2.IMREAD_GRAYSCALE'], {}), '(f, cv2.IMREAD_GRAYSCALE)\n', (917, 942), False, 'import cv2\n'), ((1120, 1169), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['img', 'chess_shape', 'None'], {}), '(img, chess_shape, None)\n', (1145, 1169), False, 'import cv2\n'), ((2010, 2025), 'numpy.array', 'np.array', (['rvecs'], {}), '(rvecs)\n', (2018, 2025), True, 'import numpy as np\n'), ((2061, 2076), 'numpy.array', 'np.array', (['rmtxs'], {}), '(rmtxs)\n', (2069, 2076), True, 'import numpy as np\n'), ((2114, 2129), 'numpy.array', 'np.array', (['tvecs'], {}), '(tvecs)\n', (2122, 2129), True, 'import numpy as np\n'), ((1743, 1761), 'cv2.Rodrigues', 'cv2.Rodrigues', (['vec'], {}), '(vec)\n', (1756, 1761), False, 'import cv2\n')]
|