code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M)
---|---|---|
import torch
import torch.nn as nn
import numpy as np
class IndexTranslator(object):
def __init__(self, state):
self.state = state
self.px = self.state[:, 0].reshape(-1, 1)
self.py = self.state[:, 1].reshape(-1, 1)
self.vx = self.state[:, 2].reshape(-1, 1)
self.vy = self.state[:, 3].reshape(-1, 1)
self.radius = self.state[:, 4].reshape(-1, 1)
self.pgx = self.state[:, 5].reshape(-1, 1)
self.pgy = self.state[:, 6].reshape(-1, 1)
self.v_pref = self.state[:, 7].reshape(-1, 1)
self.theta = self.state[:, 8].reshape(-1, 1)
self.px1 = self.state[:, 9].reshape(-1, 1)
self.py1 = self.state[:, 10].reshape(-1, 1)
self.vx1 = self.state[:, 11].reshape(-1, 1)
self.vy1 = self.state[:, 12].reshape(-1, 1)
self.radius1 = self.state[:, 13].reshape(-1, 1)
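# IndexTranslator simply exposes the raw joint-state columns as named attributes;
# ValueNetwork below optionally re-parametrizes that state in a frame aligned with
# the goal direction (rotate) before passing it through a small fully connected value head.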
class ValueNetwork(nn.Module):
def __init__(self, state_dim, fc_layers, kinematic, reparametrization=True):
super(ValueNetwork, self).__init__()
self.reparametrization = reparametrization
if reparametrization:
state_dim = 15
self.kinematic = kinematic
self.value_network = nn.Sequential(nn.Linear(state_dim, fc_layers[0]), nn.ReLU(),
nn.Linear(fc_layers[0], fc_layers[1]), nn.ReLU(),
nn.Linear(fc_layers[1], fc_layers[2]), nn.ReLU(),
nn.Linear(fc_layers[2], 1))
def rotate(self, state, device):
# first translate the coordinates, then rotate around the origin
# 'px', 'py', 'vx', 'vy', 'radius', 'pgx', 'pgy', 'v_pref', 'theta', 'px1', 'py1', 'vx1', 'vy1', 'radius1'
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
state = IndexTranslator(state.cpu().numpy())
dx = state.pgx - state.px
dy = state.pgy - state.py
rot = np.arctan2(state.pgy-state.py, state.pgx-state.px)
dg = np.linalg.norm(np.concatenate([dx, dy], axis=1), axis=1, keepdims=True)
v_pref = state.v_pref
vx = state.vx * np.cos(rot) + state.vy * np.sin(rot)
vy = state.vy * np.cos(rot) - state.vx * np.sin(rot)
radius = state.radius
if self.kinematic:
theta = state.theta - rot
else:
theta = state.theta
vx1 = state.vx1 * np.cos(rot) + state.vy1 * np.sin(rot)
vy1 = state.vy1 * np.cos(rot) - state.vx1 * np.sin(rot)
px1 = (state.px1 - state.px) * np.cos(rot) + (state.py1 - state.py) * np.sin(rot)
py1 = (state.py1 - state.py) * np.cos(rot) - (state.px1 - state.px) * np.sin(rot)
radius1 = state.radius1
radius_sum = radius + radius1
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
da = np.linalg.norm(np.concatenate([state.px - state.px1, state.py - state.py1], axis=1), axis=1, keepdims=True)
new_state = np.concatenate([dg, v_pref, vx, vy, radius, theta, vx1, vy1, px1, py1,
radius1, radius_sum, cos_theta, sin_theta, da], axis=1)
return torch.Tensor(new_state).to(device)
def forward(self, state, device):
if self.reparametrization:
state = self.rotate(state, device)
value = self.value_network(state)
return value
|
[
"torch.nn.ReLU",
"torch.Tensor",
"numpy.arctan2",
"numpy.cos",
"torch.nn.Linear",
"numpy.concatenate",
"numpy.sin"
] |
[((1990, 2044), 'numpy.arctan2', 'np.arctan2', (['(state.pgy - state.py)', '(state.pgx - state.px)'], {}), '(state.pgy - state.py, state.pgx - state.px)\n', (2000, 2044), True, 'import numpy as np\n'), ((2818, 2831), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2824, 2831), True, 'import numpy as np\n'), ((2852, 2865), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2858, 2865), True, 'import numpy as np\n'), ((3008, 3138), 'numpy.concatenate', 'np.concatenate', (['[dg, v_pref, vx, vy, radius, theta, vx1, vy1, px1, py1, radius1, radius_sum,\n cos_theta, sin_theta, da]'], {'axis': '(1)'}), '([dg, v_pref, vx, vy, radius, theta, vx1, vy1, px1, py1,\n radius1, radius_sum, cos_theta, sin_theta, da], axis=1)\n', (3022, 3138), True, 'import numpy as np\n'), ((1216, 1250), 'torch.nn.Linear', 'nn.Linear', (['state_dim', 'fc_layers[0]'], {}), '(state_dim, fc_layers[0])\n', (1225, 1250), True, 'import torch.nn as nn\n'), ((1252, 1261), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1259, 1261), True, 'import torch.nn as nn\n'), ((1306, 1343), 'torch.nn.Linear', 'nn.Linear', (['fc_layers[0]', 'fc_layers[1]'], {}), '(fc_layers[0], fc_layers[1])\n', (1315, 1343), True, 'import torch.nn as nn\n'), ((1345, 1354), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1352, 1354), True, 'import torch.nn as nn\n'), ((1399, 1436), 'torch.nn.Linear', 'nn.Linear', (['fc_layers[1]', 'fc_layers[2]'], {}), '(fc_layers[1], fc_layers[2])\n', (1408, 1436), True, 'import torch.nn as nn\n'), ((1438, 1447), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1445, 1447), True, 'import torch.nn as nn\n'), ((1492, 1518), 'torch.nn.Linear', 'nn.Linear', (['fc_layers[2]', '(1)'], {}), '(fc_layers[2], 1)\n', (1501, 1518), True, 'import torch.nn as nn\n'), ((2070, 2102), 'numpy.concatenate', 'np.concatenate', (['[dx, dy]'], {'axis': '(1)'}), '([dx, dy], axis=1)\n', (2084, 2102), True, 'import numpy as np\n'), ((2894, 2962), 'numpy.concatenate', 'np.concatenate', (['[state.px - state.px1, state.py - state.py1]'], {'axis': '(1)'}), '([state.px - state.px1, state.py - state.py1], axis=1)\n', (2908, 2962), True, 'import numpy as np\n'), ((2181, 2192), 'numpy.cos', 'np.cos', (['rot'], {}), '(rot)\n', (2187, 2192), True, 'import numpy as np\n'), ((2206, 2217), 'numpy.sin', 'np.sin', (['rot'], {}), '(rot)\n', (2212, 2217), True, 'import numpy as np\n'), ((2242, 2253), 'numpy.cos', 'np.cos', (['rot'], {}), '(rot)\n', (2248, 2253), True, 'import numpy as np\n'), ((2267, 2278), 'numpy.sin', 'np.sin', (['rot'], {}), '(rot)\n', (2273, 2278), True, 'import numpy as np\n'), ((2446, 2457), 'numpy.cos', 'np.cos', (['rot'], {}), '(rot)\n', (2452, 2457), True, 'import numpy as np\n'), ((2472, 2483), 'numpy.sin', 'np.sin', (['rot'], {}), '(rot)\n', (2478, 2483), True, 'import numpy as np\n'), ((2510, 2521), 'numpy.cos', 'np.cos', (['rot'], {}), '(rot)\n', (2516, 2521), True, 'import numpy as np\n'), ((2536, 2547), 'numpy.sin', 'np.sin', (['rot'], {}), '(rot)\n', (2542, 2547), True, 'import numpy as np\n'), ((2587, 2598), 'numpy.cos', 'np.cos', (['rot'], {}), '(rot)\n', (2593, 2598), True, 'import numpy as np\n'), ((2626, 2637), 'numpy.sin', 'np.sin', (['rot'], {}), '(rot)\n', (2632, 2637), True, 'import numpy as np\n'), ((2677, 2688), 'numpy.cos', 'np.cos', (['rot'], {}), '(rot)\n', (2683, 2688), True, 'import numpy as np\n'), ((2716, 2727), 'numpy.sin', 'np.sin', (['rot'], {}), '(rot)\n', (2722, 2727), True, 'import numpy as np\n'), ((3186, 3209), 'torch.Tensor', 'torch.Tensor', (['new_state'], {}), '(new_state)\n', (3198, 3209), False, 
'import torch\n')]
|
from Data import Data
import itertools
import joblib
import numpy as np
import pandas as pd
import pickle
import re
import statsmodels.api as sm
import sys
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn import svm
from sklearn.preprocessing import OneHotEncoder
import os
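# Predict wires together data loading, outcome/feature generation, train/test split,
# model estimation (OLS, random forest, gradient boosting), out-of-sample R^2 and an
# optional geo-referenced output of the test-set predictions.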
class Predict(object):
"""This class makes predictions of house prices"""
def __init__(self, features = ['tfarea', 'numberrooms', 'propertytype', 'oldnew'], LR=True,
RF=False, GB=False, test_prop=0.2, regions=[], seed = 1704, outcome = ['ln_y'],
hot_load_models=True, save_models=True, model_append = [''], output_geo=True,
merge_lsoa=False, gb_model = GradientBoostingRegressor()):
self.model_dir = os.path.join(os.path.abspath(''), 'models')
self.data_dir = os.path.join(os.path.abspath(''), 'data')
self.features = features
self.LR = LR
self.RF = RF
self.GB = GB
self.gb_model = gb_model
self.test_prop = test_prop
self.regions = regions
self.seed = seed
self.outcome = outcome
self.hot_load_models = hot_load_models
self.save_models = save_models
self.merge_lsoa = merge_lsoa
self.model_append = model_append
self.feature_acronyms = [i[0:3] for i in self.features]
if self.model_append == ['']:
self.model_append = '_'.join(self.regions + self.feature_acronyms)
else:
self.model_append = '_'.join(self.regions + self.feature_acronyms + self.model_append)
self.output_geo = output_geo
self.data = Data(regions=self.regions, merge_lsoa = self.merge_lsoa).data
self.generate_outcome()
self.generate_features()
self.train_test_split()
self.estimate_model(LR = self.LR, RF = self.RF, GB = self.GB)
self.oos_r2()
if self.output_geo:
self.output_geo_df()
def train_test_split(self):
self.X_train, self.X_test, self.y_train, self.y_test =\
train_test_split(self.data[self.features], self.data['outcome'],
test_size = self.test_prop, random_state = self.seed)
print("Training set dimensions: {}".format(self.X_train.shape))
def generate_outcome(self):
self.data['y'] = self.data['price']
self.data['ln_y'] = self.data['price'].apply(np.log)
self.data['rel_y'] = self.data['priceper']
self.data['outcome'] = self.data[self.outcome]
def generate_features(self):
""" Generate features to include into the predictions"""
# identify categorical versus continuous features
self.cat_features =\
list(itertools.compress(self.features, [i == 'object' for i in self.data[self.features].dtypes]))
self.other_features=\
list(itertools.compress(self.features, [i != 'object' for i in self.data[self.features].dtypes]))
print("Categorical features identified: {}".format(self.cat_features))
print("Continous features identified: {}".format(self.other_features))
# one-hot encode all categorical observations
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(self.data[self.cat_features])
self.data[enc.get_feature_names(self.cat_features)] = enc.\
transform(self.data[self.cat_features]).toarray()
# new features
self.features = list(itertools.chain(*[self.other_features,
list(enc.get_feature_names(self.cat_features))]))
def estimate_model(self, LR, RF, GB):
if LR:
self.lr()
else:
self.lr_predictions = np.nan
if RF:
self.rf()
else:
self.rf_predictions = np.nan
if GB:
self.gb(gb_model = self.gb_model)
else:
self.gb_predictions = np.nan
def output_geo_df(self):
assert pd.Series(self.X_test.index).isin(pd.Series(self.data.index)).mean() == 1
assert pd.Series(self.y_test.index).isin(pd.Series(self.data.index)).mean() == 1
geo_output = pd.DataFrame({'true': self.y_test.values,
'lr_pred': self.lr_predictions,
'rf_pred': self.rf_predictions,
'gb_pred': self.gb_predictions,
},
index = self.y_test.index)
geo_df = self.data[['lsoa11', 'msoa11', 'laua', 'lad11nm', 'gor', 'rgn11nm']]
full_geo = pd.merge(geo_output, geo_df, left_index=True, right_index=True)
filename = 'geo_output_' + '_'.join(self.regions) + '.csv'
print("Writing " + filename)
full_geo.to_csv(os.path.join(self.data_dir, 'edit', filename))
def oos_r2(self):
TSS = np.square(self.y_test - self.y_test.mean()).sum()
ESS_lr = np.square(self.y_test - self.lr_predictions).sum()
ESS_rf = np.square(self.y_test - self.rf_predictions).sum()
ESS_gb = np.square(self.y_test - self.gb_predictions).sum()
self.LR_oos_r2 = (TSS - ESS_lr)/TSS
self.RF_oos_r2 = (TSS - ESS_rf)/TSS
self.GB_oos_r2 = (TSS - ESS_gb)/TSS
def lr(self, predict_linreg=True, verbose=True):
"""Run a standard OLS"""
model_path = os.path.join(self.model_dir, 'LR' + self.model_append + '.sav')
# setup model, either hot load or estimate directly
if self.hot_load_models:
print("Hotloading model")
try:
self.reg = pickle.load(open(model_path, 'rb'))
except FileNotFoundError:
print("Could not find saved model for hot loading")
sys.exit(1)
else:
self.reg = LinearRegression()
self.reg.fit(self.X_train, self.y_train) # train
if self.save_models:
print("Saving LR model {}".format(model_path))
pickle.dump(self.reg, open(model_path, 'wb'))
self.lr_coeff = self.reg.coef_
self.lr_predictions = self.reg.predict(self.X_test)
self.lr_rmse = mean_squared_error(self.lr_predictions, self.y_test)
if verbose:
print('LR RMSE: {:3f}'.format(self.lr_rmse))
def rf(self, rf=RandomForestRegressor(n_estimators=1000),
verbose=True):
""" Estimate Random Forest model on the pre-specified feature space,
option to perform cross validation on a pre-defined parameter grid
"""
self.rf = rf
# setup models
model_path = os.path.join(self.model_dir, 'RF' + self.model_append + '.sav')
# setup model, either hot load or estimate directly
if self.hot_load_models:
print("Hotloading model")
try:
self.rf = pickle.load(open(model_path, 'rb'))
except FileNotFoundError:
print("Could not find saved model for hot loading")
sys.exit(1)
else:
self.rf.fit(self.X_train.to_numpy(), self.y_train.ravel())
if self.save_models:
print("Saving RF model {}".format(model_path))
pickle.dump(self.rf, open(model_path, 'wb'))
# estimate RF model on train set and evaluate performance on test set
self.rf_predictions = self.rf.predict(self.X_test)
self.rf_rmse = mean_squared_error(self.rf_predictions, self.y_test)
if verbose:
print('RF RMSE: {}'.format(self.rf_rmse))
def gb(self, verbose=True, gb_model = GradientBoostingRegressor()):
""" Estimate Gradien Boosting model to pre-specified feature space,
option to perform cross validation on pre-defined paramter grid
"""
# estimate GB model on the train set and evaluate predictive accuracy
# setup models
model_path = os.path.join(self.model_dir, 'GB' + self.model_append + '.sav')
self.gb = gb_model
# setup model, either hot load or estimate directly
if self.hot_load_models:
print("Hotloading model")
try:
self.gb = pickle.load(open(model_path, 'rb'))
except FileNotFoundError:
print("Could not find saved model for hot loading")
sys.exit(1)
else:
self.gb.fit(self.X_train.to_numpy(), self.y_train.ravel())
if self.save_models:
print("Saving GB model {}".format(model_path))
pickle.dump(self.gb, open(model_path, 'wb'))
self.gb_predictions = self.gb.predict(self.X_test)
self.gb_rmse = mean_squared_error(self.gb_predictions, self.y_test)
if verbose:
print('GB RMSE: {}'.format(self.gb_rmse))
|
[
"Data.Data",
"pandas.Series",
"sklearn.ensemble.RandomForestRegressor",
"itertools.compress",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"pandas.merge",
"os.path.join",
"sklearn.metrics.mean_squared_error",
"numpy.square",
"sys.exit",
"pandas.DataFrame",
"sklearn.ensemble.GradientBoostingRegressor",
"os.path.abspath",
"sklearn.linear_model.LinearRegression"
] |
[((959, 986), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {}), '()\n', (984, 986), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((2368, 2487), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.data[self.features]', "self.data['outcome']"], {'test_size': 'self.test_prop', 'random_state': 'self.seed'}), "(self.data[self.features], self.data['outcome'], test_size=\n self.test_prop, random_state=self.seed)\n", (2384, 2487), False, 'from sklearn.model_selection import train_test_split\n'), ((3550, 3588), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (3563, 3588), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((4537, 4709), 'pandas.DataFrame', 'pd.DataFrame', (["{'true': self.y_test.values, 'lr_pred': self.lr_predictions, 'rf_pred':\n self.rf_predictions, 'gb_pred': self.gb_predictions}"], {'index': 'self.y_test.index'}), "({'true': self.y_test.values, 'lr_pred': self.lr_predictions,\n 'rf_pred': self.rf_predictions, 'gb_pred': self.gb_predictions}, index=\n self.y_test.index)\n", (4549, 4709), True, 'import pandas as pd\n'), ((4952, 5015), 'pandas.merge', 'pd.merge', (['geo_output', 'geo_df'], {'left_index': '(True)', 'right_index': '(True)'}), '(geo_output, geo_df, left_index=True, right_index=True)\n', (4960, 5015), True, 'import pandas as pd\n'), ((5758, 5821), 'os.path.join', 'os.path.join', (['self.model_dir', "('LR' + self.model_append + '.sav')"], {}), "(self.model_dir, 'LR' + self.model_append + '.sav')\n", (5770, 5821), False, 'import os\n'), ((6586, 6638), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['self.lr_predictions', 'self.y_test'], {}), '(self.lr_predictions, self.y_test)\n', (6604, 6638), False, 'from sklearn.metrics import mean_squared_error\n'), ((6741, 6781), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(1000)'}), '(n_estimators=1000)\n', (6762, 6781), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((7043, 7106), 'os.path.join', 'os.path.join', (['self.model_dir', "('RF' + self.model_append + '.sav')"], {}), "(self.model_dir, 'RF' + self.model_append + '.sav')\n", (7055, 7106), False, 'import os\n'), ((7885, 7937), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['self.rf_predictions', 'self.y_test'], {}), '(self.rf_predictions, self.y_test)\n', (7903, 7937), False, 'from sklearn.metrics import mean_squared_error\n'), ((8061, 8088), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {}), '()\n', (8086, 8088), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((8391, 8454), 'os.path.join', 'os.path.join', (['self.model_dir', "('GB' + self.model_append + '.sav')"], {}), "(self.model_dir, 'GB' + self.model_append + '.sav')\n", (8403, 8454), False, 'import os\n'), ((9180, 9232), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['self.gb_predictions', 'self.y_test'], {}), '(self.gb_predictions, self.y_test)\n', (9198, 9232), False, 'from sklearn.metrics import mean_squared_error\n'), ((1028, 1047), 'os.path.abspath', 'os.path.abspath', (['""""""'], {}), "('')\n", (1043, 1047), False, 'import os\n'), ((1097, 1116), 'os.path.abspath', 'os.path.abspath', (['""""""'], {}), "('')\n", (1112, 1116), False, 'import os\n'), ((1932, 1986), 'Data.Data', 'Data', ([], {'regions': 'self.regions', 
'merge_lsoa': 'self.merge_lsoa'}), '(regions=self.regions, merge_lsoa=self.merge_lsoa)\n', (1936, 1986), False, 'from Data import Data\n'), ((3081, 3179), 'itertools.compress', 'itertools.compress', (['self.features', "[(i == 'object') for i in self.data[self.features].dtypes]"], {}), "(self.features, [(i == 'object') for i in self.data[self.\n features].dtypes])\n", (3099, 3179), False, 'import itertools\n'), ((3223, 3321), 'itertools.compress', 'itertools.compress', (['self.features', "[(i != 'object') for i in self.data[self.features].dtypes]"], {}), "(self.features, [(i != 'object') for i in self.data[self.\n features].dtypes])\n", (3241, 3321), False, 'import itertools\n'), ((5157, 5202), 'os.path.join', 'os.path.join', (['self.data_dir', '"""edit"""', 'filename'], {}), "(self.data_dir, 'edit', filename)\n", (5169, 5202), False, 'import os\n'), ((6216, 6234), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6232, 6234), False, 'from sklearn.linear_model import LinearRegression\n'), ((5316, 5360), 'numpy.square', 'np.square', (['(self.y_test - self.lr_predictions)'], {}), '(self.y_test - self.lr_predictions)\n', (5325, 5360), True, 'import numpy as np\n'), ((5385, 5429), 'numpy.square', 'np.square', (['(self.y_test - self.rf_predictions)'], {}), '(self.y_test - self.rf_predictions)\n', (5394, 5429), True, 'import numpy as np\n'), ((5454, 5498), 'numpy.square', 'np.square', (['(self.y_test - self.gb_predictions)'], {}), '(self.y_test - self.gb_predictions)\n', (5463, 5498), True, 'import numpy as np\n'), ((6165, 6176), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6173, 6176), False, 'import sys\n'), ((7449, 7460), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7457, 7460), False, 'import sys\n'), ((8835, 8846), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8843, 8846), False, 'import sys\n'), ((4385, 4411), 'pandas.Series', 'pd.Series', (['self.data.index'], {}), '(self.data.index)\n', (4394, 4411), True, 'import pandas as pd\n'), ((4475, 4501), 'pandas.Series', 'pd.Series', (['self.data.index'], {}), '(self.data.index)\n', (4484, 4501), True, 'import pandas as pd\n'), ((4351, 4379), 'pandas.Series', 'pd.Series', (['self.X_test.index'], {}), '(self.X_test.index)\n', (4360, 4379), True, 'import pandas as pd\n'), ((4441, 4469), 'pandas.Series', 'pd.Series', (['self.y_test.index'], {}), '(self.y_test.index)\n', (4450, 4469), True, 'import pandas as pd\n')]
|
import cv2
import dlib
import numpy as np
import pyautogui
import imutils
import time
from imutils import face_utils
WHITE_COLOR = (255, 255, 255)
YELLOW_COLOR = (0, 255, 255)
RED_COLOR = (0, 0, 255)
GREEN_COLOR = (0, 255, 0)
BLUE_COLOR = (255, 0, 0)
BLACK_COLOR = (0, 0, 0)
MOUTH_AR_THRESH = 0.6
shape_predictor = "./shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor)
(l_eye_start, l_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS['left_eye']
(r_eye_start, r_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS['right_eye']
(mouth_start, mouth_end) = face_utils.FACIAL_LANDMARKS_IDXS['mouth']
(nose_start, nose_end) = face_utils.FACIAL_LANDMARKS_IDXS['nose']
# webcam
vid = cv2.VideoCapture(0)
resolution_w = 1366
resolution_h = 768
cam_w = 640
cam_h = 480
mouse_x = 0
mouse_y = 0
unit_w = resolution_w / cam_w
unit_h = resolution_h / cam_h
padding_x, padding_y = 50, 50
control_padding = 20
#set guide rect
rect_start = (cam_w//2-100, cam_h//2-100)
rect_end = (cam_w//2+100, cam_h//2+100)
process = False
counter = 0
cursor_coordinates = ()
pyautogui.FAILSAFE = False
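# Disable PyAutoGUI's fail-safe so programmatic cursor moves into a screen corner do not abort the script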
def mouth_aspect_ratio(mouth):
# Compute the euclidean distances between the three sets
# of vertical mouth landmarks (x, y)-coordinates
A = np.linalg.norm(mouth[13] - mouth[19])
B = np.linalg.norm(mouth[14] - mouth[18])
C = np.linalg.norm(mouth[15] - mouth[17])
# Compute the euclidean distance between the horizontal
# mouth landmarks (x, y)-coordinates
D = np.linalg.norm(mouth[12] - mouth[16])
# Compute the mouth aspect ratio
mar = (A + B + C) / (2 * D)
# Return the mouth aspect ratio
return mar
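# Main loop: a calibration phase first (hold the face inside the guide rectangle until
# the on-screen counter exceeds 10) locks the nose position as the cursor reference;
# afterwards nose movement relative to that point drives the cursor and an open mouth
# (MAR above MOUTH_AR_THRESH) triggers a click.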
while True:
_, frame = vid.read()
frame = cv2.flip(frame, 1)
frame = imutils.resize(frame, width=cam_w, height=cam_h)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect faces
rects = detector(gray, 0)
# if face detected
if len(rects) > 0:
rect = rects[0]
else:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
continue
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
if(process == True):
mouth = shape[mouth_start:mouth_end]
nose = shape[nose_start: nose_end]
cv2.circle(frame, (nose[3, 0], nose[3, 1]), 5, BLUE_COLOR, 1)
cv2.rectangle(frame, rect_start, rect_end, RED_COLOR, 2)
cv2.line(frame, (cursor_coordinates[0]-padding_x, cursor_coordinates[1]), (cursor_coordinates[0]+padding_x, cursor_coordinates[1]), YELLOW_COLOR, 2)
cv2.line(frame, (cursor_coordinates[0], cursor_coordinates[1]-padding_y), (cursor_coordinates[0], cursor_coordinates[1]+padding_y), YELLOW_COLOR, 2)
cv2.imshow("Frame", frame)
if nose[3,0] > cursor_coordinates[0]+control_padding:
if mouse_x <= 1910:
mouse_x += 5
elif nose[3,0] < cursor_coordinates[0]-control_padding:
if mouse_x >= 10:
mouse_x -= 5
if nose[3,1] > cursor_coordinates[1]+control_padding:
if mouse_y <= 1080:
mouse_y += 5
elif nose[3,1] < cursor_coordinates[1]-control_padding:
if mouse_y >= 10:
mouse_y -= 5
#if mouth open click
mar = mouth_aspect_ratio(mouth)
if(mar>MOUTH_AR_THRESH):
pyautogui.click(mouse_x, mouse_y)
pyautogui.moveTo(mouse_x, mouse_y)
key = cv2.waitKey(1) & 0xFF
else:
#get eyes
left_eye = shape[l_eye_start:l_eye_end]
right_eye = shape[r_eye_start:r_eye_end]
nose = shape[nose_start: nose_end]
# swap left and right
temp = left_eye
left_eye = right_eye
right_eye = temp
#is face inside of rectangle
if(left_eye[3,0]>rect_start[0] and left_eye[3,0]<rect_end[0]
and right_eye[3,0]>rect_start[0] and right_eye[3,0]<rect_end[0]
and left_eye[3,1]>rect_start[1] and left_eye[3,1]<rect_end[1]
and right_eye[3,1]>rect_start[1] and right_eye[3,1]<rect_end[1]):
cv2.putText(frame, str(counter//10), (cam_w//2-100, cam_h//2+100), cv2.FONT_HERSHEY_SIMPLEX, 1, GREEN_COLOR)
counter += 1
if(counter/10 > 10):
cursor_coordinates = nose[3]
process = True
else:
counter = 0
cv2.rectangle(frame, rect_start, rect_end, WHITE_COLOR, 2)
cv2.putText(frame, "Hold your face inside of rectangle for 10 sec", (cam_w//2-100, cam_h//2+200), cv2.FONT_HERSHEY_PLAIN, 1, GREEN_COLOR)
cv2.imshow("Frame", frame)
key = cv2.waitKey(10) & 0xFF
|
[
"cv2.rectangle",
"cv2.flip",
"pyautogui.moveTo",
"cv2.line",
"dlib.shape_predictor",
"cv2.imshow",
"cv2.putText",
"dlib.get_frontal_face_detector",
"imutils.resize",
"cv2.circle",
"pyautogui.click",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.linalg.norm",
"imutils.face_utils.shape_to_np",
"cv2.waitKey"
] |
[((370, 402), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (400, 402), False, 'import dlib\n'), ((415, 452), 'dlib.shape_predictor', 'dlib.shape_predictor', (['shape_predictor'], {}), '(shape_predictor)\n', (435, 452), False, 'import dlib\n'), ((748, 767), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (764, 767), False, 'import cv2\n'), ((1297, 1334), 'numpy.linalg.norm', 'np.linalg.norm', (['(mouth[13] - mouth[19])'], {}), '(mouth[13] - mouth[19])\n', (1311, 1334), True, 'import numpy as np\n'), ((1343, 1380), 'numpy.linalg.norm', 'np.linalg.norm', (['(mouth[14] - mouth[18])'], {}), '(mouth[14] - mouth[18])\n', (1357, 1380), True, 'import numpy as np\n'), ((1389, 1426), 'numpy.linalg.norm', 'np.linalg.norm', (['(mouth[15] - mouth[17])'], {}), '(mouth[15] - mouth[17])\n', (1403, 1426), True, 'import numpy as np\n'), ((1537, 1574), 'numpy.linalg.norm', 'np.linalg.norm', (['(mouth[12] - mouth[16])'], {}), '(mouth[12] - mouth[16])\n', (1551, 1574), True, 'import numpy as np\n'), ((1748, 1766), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (1756, 1766), False, 'import cv2\n'), ((1779, 1827), 'imutils.resize', 'imutils.resize', (['frame'], {'width': 'cam_w', 'height': 'cam_h'}), '(frame, width=cam_w, height=cam_h)\n', (1793, 1827), False, 'import imutils\n'), ((1839, 1878), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1851, 1878), False, 'import cv2\n'), ((2143, 2172), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (2165, 2172), False, 'from imutils import face_utils\n'), ((2016, 2042), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2026, 2042), False, 'import cv2\n'), ((2296, 2357), 'cv2.circle', 'cv2.circle', (['frame', '(nose[3, 0], nose[3, 1])', '(5)', 'BLUE_COLOR', '(1)'], {}), '(frame, (nose[3, 0], nose[3, 1]), 5, BLUE_COLOR, 1)\n', (2306, 2357), False, 'import cv2\n'), ((2366, 2422), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'rect_start', 'rect_end', 'RED_COLOR', '(2)'], {}), '(frame, rect_start, rect_end, RED_COLOR, 2)\n', (2379, 2422), False, 'import cv2\n'), ((2431, 2592), 'cv2.line', 'cv2.line', (['frame', '(cursor_coordinates[0] - padding_x, cursor_coordinates[1])', '(cursor_coordinates[0] + padding_x, cursor_coordinates[1])', 'YELLOW_COLOR', '(2)'], {}), '(frame, (cursor_coordinates[0] - padding_x, cursor_coordinates[1]),\n (cursor_coordinates[0] + padding_x, cursor_coordinates[1]), YELLOW_COLOR, 2\n )\n', (2439, 2592), False, 'import cv2\n'), ((2588, 2749), 'cv2.line', 'cv2.line', (['frame', '(cursor_coordinates[0], cursor_coordinates[1] - padding_y)', '(cursor_coordinates[0], cursor_coordinates[1] + padding_y)', 'YELLOW_COLOR', '(2)'], {}), '(frame, (cursor_coordinates[0], cursor_coordinates[1] - padding_y),\n (cursor_coordinates[0], cursor_coordinates[1] + padding_y), YELLOW_COLOR, 2\n )\n', (2596, 2749), False, 'import cv2\n'), ((2745, 2771), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (2755, 2771), False, 'import cv2\n'), ((3422, 3456), 'pyautogui.moveTo', 'pyautogui.moveTo', (['mouse_x', 'mouse_y'], {}), '(mouse_x, mouse_y)\n', (3438, 3456), False, 'import pyautogui\n'), ((4406, 4464), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'rect_start', 'rect_end', 'WHITE_COLOR', '(2)'], {}), '(frame, rect_start, rect_end, WHITE_COLOR, 2)\n', (4419, 4464), False, 'import cv2\n'), ((4473, 4622), 'cv2.putText', 'cv2.putText', 
(['frame', '"""Hold your face inside of rectangle for 10 sec"""', '(cam_w // 2 - 100, cam_h // 2 + 200)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', 'GREEN_COLOR'], {}), "(frame, 'Hold your face inside of rectangle for 10 sec', (cam_w //\n 2 - 100, cam_h // 2 + 200), cv2.FONT_HERSHEY_PLAIN, 1, GREEN_COLOR)\n", (4484, 4622), False, 'import cv2\n'), ((4619, 4645), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (4629, 4645), False, 'import cv2\n'), ((2057, 2071), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2068, 2071), False, 'import cv2\n'), ((3379, 3412), 'pyautogui.click', 'pyautogui.click', (['mouse_x', 'mouse_y'], {}), '(mouse_x, mouse_y)\n', (3394, 3412), False, 'import pyautogui\n'), ((3471, 3485), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3482, 3485), False, 'import cv2\n'), ((4661, 4676), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4672, 4676), False, 'import cv2\n')]
|
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import resize_bbox
class TestResizeBbox(unittest.TestCase):
def test_resize_bbox(self):
bbox = np.random.uniform(
low=0., high=32., size=(10, 4))
out = resize_bbox(bbox, in_size=(32, 32), out_size=(64, 128))
bbox_expected = bbox.copy()
bbox_expected[:, 0] = bbox[:, 0] * 2
bbox_expected[:, 1] = bbox[:, 1] * 4
bbox_expected[:, 2] = bbox[:, 2] * 2
bbox_expected[:, 3] = bbox[:, 3] * 4
np.testing.assert_equal(out, bbox_expected)
testing.run_module(__name__, __file__)
|
[
"chainercv.transforms.resize_bbox",
"chainer.testing.run_module",
"numpy.testing.assert_equal",
"numpy.random.uniform"
] |
[((605, 643), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (623, 643), False, 'from chainer import testing\n'), ((201, 252), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(32.0)', 'size': '(10, 4)'}), '(low=0.0, high=32.0, size=(10, 4))\n', (218, 252), True, 'import numpy as np\n'), ((279, 334), 'chainercv.transforms.resize_bbox', 'resize_bbox', (['bbox'], {'in_size': '(32, 32)', 'out_size': '(64, 128)'}), '(bbox, in_size=(32, 32), out_size=(64, 128))\n', (290, 334), False, 'from chainercv.transforms import resize_bbox\n'), ((559, 602), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out', 'bbox_expected'], {}), '(out, bbox_expected)\n', (582, 602), True, 'import numpy as np\n')]
|
# This example shows how to perform collision detection between the end-effector of a robot and a point cloud represented as a height field
# Note: this feature requires Meshcat to be installed, which can be done using
# pip install --user meshcat
import pinocchio as pin
import hppfcl as fcl
import numpy as np
import sys
from os.path import dirname, join, abspath
from pinocchio.visualize import MeshcatVisualizer
# Load the URDF model.
# Conversion with str seems to be necessary when executing this file with ipython
pinocchio_model_dir = join(dirname(dirname(str(abspath(__file__)))),"models")
model_path = join(pinocchio_model_dir,"example-robot-data/robots")
mesh_dir = pinocchio_model_dir
urdf_filename = "panda.urdf"
urdf_model_path = join(join(model_path,"panda_description/urdf"),urdf_filename)
model, collision_model, visual_model = pin.buildModelsFromUrdf(urdf_model_path, mesh_dir)
# Add point clouds
num_points = 5000
points = np.random.rand(3, num_points)
point_cloud_placement = pin.SE3.Identity() # Placement of the point cloud wrt the WORLD frame
point_cloud_placement.translation = np.array([0.2,0.2,-0.5])
X = points[0,:]
Y = points[1,:]
Z = points[2,:]
nx = 20
x_grid = np.linspace(0.,1.,nx)
x_half_pad = 0.5*(x_grid[1] - x_grid[0])
x_bins = np.digitize(X, x_grid + x_half_pad)
x_dim = x_grid[-1] - x_grid[0]
ny = 20
y_grid = np.linspace(0.,1.,ny)
y_half_pad = 0.5*(y_grid[1] - y_grid[0])
y_bins = np.digitize(Y, y_grid + y_half_pad)
y_dim = y_grid[-1] - y_grid[0]
point_bins = y_bins * nx + x_bins
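# Rasterize the point cloud onto the grid: for every cell keep the maximum Z of the
# points falling inside it (np.maximum.at accumulates in place on the flattened array).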
heights = np.zeros((ny, nx))
np.maximum.at(heights.ravel(), point_bins, Z)
point_cloud = fcl.BVHModelOBBRSS()
point_cloud.beginModel(0, num_points)
point_cloud.addVertices(points.T)
height_field = fcl.HeightFieldOBBRSS(x_dim, y_dim, heights, min(Z))
height_field_placement = point_cloud_placement * pin.SE3(np.eye(3), 0.5*np.array([x_grid[0] + x_grid[-1], y_grid[0] + y_grid[-1], 0.]))
go_point_cloud = pin.GeometryObject("point_cloud",0,point_cloud,point_cloud_placement)
go_point_cloud.meshColor = np.ones((4))
collision_model.addGeometryObject(go_point_cloud)
visual_model.addGeometryObject(go_point_cloud)
go_height_field = pin.GeometryObject("height_field",0,height_field,height_field_placement)
go_height_field.meshColor = np.ones((4))
height_field_collision_id = collision_model.addGeometryObject(go_height_field)
visual_model.addGeometryObject(go_height_field)
# Add collision pair between the height field and the panda_hand geometry
panda_hand_collision_id = collision_model.getGeometryId("panda_hand_0")
go_panda_hand = collision_model.geometryObjects[panda_hand_collision_id]
go_panda_hand.geometry.buildConvexRepresentation(False)
go_panda_hand.geometry = go_panda_hand.geometry.convex # We need to work with the convex hull of the real mesh
collision_pair = pin.CollisionPair(height_field_collision_id, panda_hand_collision_id)
collision_model.addCollisionPair(collision_pair)
viz = MeshcatVisualizer(model, collision_model, visual_model)
# Start a new MeshCat server and client.
# Note: the server can also be started separately using the "meshcat-server" command in a terminal:
# this enables the server to remain active after the current script ends.
#
# Option open=True opens the visualizer.
# Note: the visualizer can also be opened separately by visiting the provided URL.
try:
viz.initViewer(open=True)
except ImportError as err:
print("Error while initializing the viewer. It seems you should install Python meshcat")
print(err)
sys.exit(0)
# Load the robot in the viewer.
viz.loadViewerModel()
# Display a robot configuration.
q0 = pin.neutral(model)
viz.display(q0)
is_collision = False
data = model.createData()
collision_data = collision_model.createData()
while not is_collision:
q = pin.randomConfiguration(model)
is_collision = pin.computeCollisions(model, data, collision_model, collision_data, q, True)
print("Found a configuration in collision:",q)
viz.display(q)
|
[
"numpy.random.rand",
"pinocchio.buildModelsFromUrdf",
"numpy.array",
"pinocchio.computeCollisions",
"sys.exit",
"numpy.linspace",
"pinocchio.randomConfiguration",
"pinocchio.SE3.Identity",
"numpy.eye",
"numpy.ones",
"numpy.digitize",
"pinocchio.visualize.MeshcatVisualizer",
"pinocchio.CollisionPair",
"hppfcl.BVHModelOBBRSS",
"pinocchio.GeometryObject",
"os.path.join",
"pinocchio.neutral",
"numpy.zeros",
"os.path.abspath"
] |
[((613, 667), 'os.path.join', 'join', (['pinocchio_model_dir', '"""example-robot-data/robots"""'], {}), "(pinocchio_model_dir, 'example-robot-data/robots')\n", (617, 667), False, 'from os.path import dirname, join, abspath\n'), ((847, 897), 'pinocchio.buildModelsFromUrdf', 'pin.buildModelsFromUrdf', (['urdf_model_path', 'mesh_dir'], {}), '(urdf_model_path, mesh_dir)\n', (870, 897), True, 'import pinocchio as pin\n'), ((945, 974), 'numpy.random.rand', 'np.random.rand', (['(3)', 'num_points'], {}), '(3, num_points)\n', (959, 974), True, 'import numpy as np\n'), ((999, 1017), 'pinocchio.SE3.Identity', 'pin.SE3.Identity', ([], {}), '()\n', (1015, 1017), True, 'import pinocchio as pin\n'), ((1105, 1131), 'numpy.array', 'np.array', (['[0.2, 0.2, -0.5]'], {}), '([0.2, 0.2, -0.5])\n', (1113, 1131), True, 'import numpy as np\n'), ((1197, 1222), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'nx'], {}), '(0.0, 1.0, nx)\n', (1208, 1222), True, 'import numpy as np\n'), ((1269, 1304), 'numpy.digitize', 'np.digitize', (['X', '(x_grid + x_half_pad)'], {}), '(X, x_grid + x_half_pad)\n', (1280, 1304), True, 'import numpy as np\n'), ((1354, 1379), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'ny'], {}), '(0.0, 1.0, ny)\n', (1365, 1379), True, 'import numpy as np\n'), ((1426, 1461), 'numpy.digitize', 'np.digitize', (['Y', '(y_grid + y_half_pad)'], {}), '(Y, y_grid + y_half_pad)\n', (1437, 1461), True, 'import numpy as np\n'), ((1538, 1556), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {}), '((ny, nx))\n', (1546, 1556), True, 'import numpy as np\n'), ((1618, 1638), 'hppfcl.BVHModelOBBRSS', 'fcl.BVHModelOBBRSS', ([], {}), '()\n', (1636, 1638), True, 'import hppfcl as fcl\n'), ((1934, 2006), 'pinocchio.GeometryObject', 'pin.GeometryObject', (['"""point_cloud"""', '(0)', 'point_cloud', 'point_cloud_placement'], {}), "('point_cloud', 0, point_cloud, point_cloud_placement)\n", (1952, 2006), True, 'import pinocchio as pin\n'), ((2031, 2041), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2038, 2041), True, 'import numpy as np\n'), ((2160, 2235), 'pinocchio.GeometryObject', 'pin.GeometryObject', (['"""height_field"""', '(0)', 'height_field', 'height_field_placement'], {}), "('height_field', 0, height_field, height_field_placement)\n", (2178, 2235), True, 'import pinocchio as pin\n'), ((2261, 2271), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2268, 2271), True, 'import numpy as np\n'), ((2807, 2876), 'pinocchio.CollisionPair', 'pin.CollisionPair', (['height_field_collision_id', 'panda_hand_collision_id'], {}), '(height_field_collision_id, panda_hand_collision_id)\n', (2824, 2876), True, 'import pinocchio as pin\n'), ((2933, 2988), 'pinocchio.visualize.MeshcatVisualizer', 'MeshcatVisualizer', (['model', 'collision_model', 'visual_model'], {}), '(model, collision_model, visual_model)\n', (2950, 2988), False, 'from pinocchio.visualize import MeshcatVisualizer\n'), ((3610, 3628), 'pinocchio.neutral', 'pin.neutral', (['model'], {}), '(model)\n', (3621, 3628), True, 'import pinocchio as pin\n'), ((750, 792), 'os.path.join', 'join', (['model_path', '"""panda_description/urdf"""'], {}), "(model_path, 'panda_description/urdf')\n", (754, 792), False, 'from os.path import dirname, join, abspath\n'), ((3771, 3801), 'pinocchio.randomConfiguration', 'pin.randomConfiguration', (['model'], {}), '(model)\n', (3794, 3801), True, 'import pinocchio as pin\n'), ((3822, 3898), 'pinocchio.computeCollisions', 'pin.computeCollisions', (['model', 'data', 'collision_model', 'collision_data', 'q', '(True)'], {}), '(model, 
data, collision_model, collision_data, q, True)\n', (3843, 3898), True, 'import pinocchio as pin\n'), ((1837, 1846), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1843, 1846), True, 'import numpy as np\n'), ((3504, 3515), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3512, 3515), False, 'import sys\n'), ((1852, 1915), 'numpy.array', 'np.array', (['[x_grid[0] + x_grid[-1], y_grid[0] + y_grid[-1], 0.0]'], {}), '([x_grid[0] + x_grid[-1], y_grid[0] + y_grid[-1], 0.0])\n', (1860, 1915), True, 'import numpy as np\n'), ((568, 585), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (575, 585), False, 'from os.path import dirname, join, abspath\n')]
|
#!/usr/bin/env python
#------------------------------------------------------------
# Purpose: Program to fit straight line parameters
# to data with errors in both coordinates. Compare
# the results with SciPy's ODR routine.
# Vog, 27 Nov, 2011
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from numpy.random import normal
from kapteyn import kmpfit
from scipy.odr import Data, Model, ODR, RealData, odr_stop
def model(p, x):
# Model is straight line: y = a + b*x
a, b = p
return a + b*x
def residuals(p, data):
# Residuals function for effective variance
a, b = p
x, y, ex, ey = data
w = ey*ey + b*b*ex*ex
wi = numpy.sqrt(numpy.where(w==0.0, 0.0, 1.0/(w)))
d = wi*(y-model(p,x))
return d
def residuals2(p, data):
# Minimum distance formula with expression for x_model
a, b = p
x, y, ex, ey = data
wx = 1/(ex*ex)
wy = 1/(ey*ey)
df = b
xd = x + (wy*(y-model(p,x))*df)/(wx+wy*df*df)
yd = model(p,xd)
D = numpy.sqrt( wx*(x-xd)**2+wy*(y-yd)**2 )
return D
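# Note: residuals() uses the effective-variance weighting, while residuals2() minimizes
# the weighted shortest distance between each data point and the model line.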
# Create the data
N = 20
a0 = 2; b0 = 1.6
x = numpy.linspace(0.0, 12.0, N)
y = model((a0,b0),x) + normal(0.0, 1.5, N) # Mean 0, sigma 1.5
errx = normal(0.0, 0.4, N)
erry = normal(0.0, 0.5, N)
beta0 = [0,0]
print("\n========== Results SciPy's ODR ============")
linear = Model(model)
mydata = RealData(x, y, sx=errx, sy=erry)
myodr = ODR(mydata, linear, beta0=beta0, maxit=5000)
myoutput = myodr.run()
print("Fitted parameters: ", myoutput.beta)
print("Covariance errors: ", numpy.sqrt(myoutput.cov_beta.diagonal()))
print("Standard errors: ", myoutput.sd_beta)
print("Minimum (reduced)chi^2: ", myoutput.res_var)
beta = myoutput.beta
# Prepare fit routine
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errx, erry))
try:
fitobj.fit(params0=beta0)
except Exception as mes:
print("Something wrong with fit: ", mes)
raise SystemExit
print("\n\n======== Results kmpfit: w1 = ey*ey + b*b*ex*ex =========")
print("Params: ", fitobj.params)
print("Covariance errors: ", fitobj.xerror)
print("Standard errors ", fitobj.stderr)
print("Chi^2 min: ", fitobj.chi2_min)
print("Reduced Chi^2: ", fitobj.rchi2_min)
print("Message: ", fitobj.message)
fitobj2 = kmpfit.Fitter(residuals=residuals2, data=(x, y, errx, erry))
try:
fitobj2.fit(params0=beta0)
except Exception as mes:
print("Something wrong with fit: ", mes)
raise SystemExit
print("\n\n======== Results kmpfit: r = ex*ex/(ey*ey), xd = (x-a*r+y*b*r)/(1+r) =========")
print("Params: ", fitobj2.params)
print("Covariance errors: ", fitobj2.xerror)
print("Standard errors ", fitobj2.stderr)
print("Chi^2 min: ", fitobj2.chi2_min)
print("Reduced Chi^2: ", fitobj2.rchi2_min)
print("Message: ", fitobj2.message)
t = "\nTHE WILLAMSON APPROACH"
print(t, "\n", "="*len(t))
# Step 1: Get initial a and b with a standard weighted least squares calculation
def lingres(xa, ya, w):
# Return a, b for the relation y = a + b*x
# given data in xa, ya and weights in w
sum = w.sum()
sumX = (w*xa).sum()
sumY = (w*ya).sum()
sumX2 = (w*xa*xa).sum()
sumY2 = (w*ya*ya).sum()
sumXY = (w*xa*ya).sum()
delta = sum * sumX2 - sumX * sumX
a = (sumX2*sumY - sumX*sumXY) / delta
b = (sumXY*sum - sumX*sumY) / delta
return a, b
w = numpy.where(erry==0.0, 0.0, 1.0/(erry*erry))
a,b = lingres(x, y, w)
a_y = a; b_y = b # Williamson initial Parameters
ui = errx**2
vi = erry**2
n = 0
cont = True
while cont:
# Step 2: Use this slope to find weighting for each point
wi = (vi+b*b*ui)**-1
# Step 3: Calculate weighted averages
w_sum = wi.sum()
x_av = (wi*x).sum() / w_sum
x_diff = x - x_av
y_av = (wi*y).sum() / w_sum
y_diff = y - y_av
# Step 4: Calculate the 'improvement' vector zi
zi = wi*(vi*x_diff + b*ui*y_diff)
b_will = (wi*zi*y_diff).sum()/ (wi*zi*x_diff).sum()
cont = abs(b-b_will) > 1e-12 and n < 100
n += 1
b = b_will
# Step 5: Repeat steps 2-4 until convergence
# Step 6: Calculate 'a' using the averages of x and y
a_will = y_av - b_will*x_av # Improved parameters
# Step 7: The variances
wi = (vi+b_will*b_will*ui)**-1
w_sum = wi.sum()
z_av = (wi*zi).sum() / w_sum
zi2 = zi - z_av
Q =1.0/(wi*(x_diff*y_diff/b_will + 4*zi2*(zi-x_diff))).sum()
sigb2 = Q*Q * (wi*wi*(x_diff**2*vi+y_diff**2*ui)).sum()
siga2 = 1.0/w_sum + 2*(x_av+2*z_av)*z_av*Q + (x_av+2*z_av)**2*sigb2
siga = numpy.sqrt(siga2)
sigb = numpy.sqrt(sigb2)
print("Williamson Fitted A, B: ", a_will, b_will)
print("Parameter errors: ", siga, sigb)
# Some plotting
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure(1)
frame = fig.add_subplot(1,1,1, aspect=1, adjustable='datalim')
frame.errorbar(x, y, xerr=errx, yerr=erry, fmt='bo')
# Plot first fit
frame.plot(x, model(beta,x), '-y', lw=4, label="ODR", alpha=0.6)
frame.plot(x, model(fitobj.params,x), 'c', ls='--', lw=2, label="kmpfit")
frame.plot(x, model(fitobj2.params,x), '#ffaa00', label="kmpfit correct")
frame.plot(x, model((a_will,b_will),x), 'g', label="Williamson")
frame.plot(x, model((a0,b0),x), '#ab12cc', label="True")
frame.set_xlabel("X")
frame.set_ylabel("Y")
frame.set_title("Weights in both coordinates. Model: $y=a+bx$")
frame.grid(True)
leg = frame.legend(loc=1)
show()
|
[
"numpy.random.normal",
"numpy.sqrt",
"numpy.where",
"scipy.odr.ODR",
"scipy.odr.Model",
"numpy.linspace",
"scipy.odr.RealData",
"matplotlib.pyplot.figure",
"kapteyn.kmpfit.Fitter",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] |
[((1153, 1181), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(12.0)', 'N'], {}), '(0.0, 12.0, N)\n', (1167, 1181), False, 'import numpy\n'), ((1251, 1270), 'numpy.random.normal', 'normal', (['(0.0)', '(0.4)', 'N'], {}), '(0.0, 0.4, N)\n', (1257, 1270), False, 'from numpy.random import normal\n'), ((1279, 1298), 'numpy.random.normal', 'normal', (['(0.0)', '(0.5)', 'N'], {}), '(0.0, 0.5, N)\n', (1285, 1298), False, 'from numpy.random import normal\n'), ((1379, 1391), 'scipy.odr.Model', 'Model', (['model'], {}), '(model)\n', (1384, 1391), False, 'from scipy.odr import Data, Model, ODR, RealData, odr_stop\n'), ((1401, 1433), 'scipy.odr.RealData', 'RealData', (['x', 'y'], {'sx': 'errx', 'sy': 'erry'}), '(x, y, sx=errx, sy=erry)\n', (1409, 1433), False, 'from scipy.odr import Data, Model, ODR, RealData, odr_stop\n'), ((1442, 1486), 'scipy.odr.ODR', 'ODR', (['mydata', 'linear'], {'beta0': 'beta0', 'maxit': '(5000)'}), '(mydata, linear, beta0=beta0, maxit=5000)\n', (1445, 1486), False, 'from scipy.odr import Data, Model, ODR, RealData, odr_stop\n'), ((1792, 1851), 'kapteyn.kmpfit.Fitter', 'kmpfit.Fitter', ([], {'residuals': 'residuals', 'data': '(x, y, errx, erry)'}), '(residuals=residuals, data=(x, y, errx, erry))\n', (1805, 1851), False, 'from kapteyn import kmpfit\n'), ((2358, 2418), 'kapteyn.kmpfit.Fitter', 'kmpfit.Fitter', ([], {'residuals': 'residuals2', 'data': '(x, y, errx, erry)'}), '(residuals=residuals2, data=(x, y, errx, erry))\n', (2371, 2418), False, 'from kapteyn import kmpfit\n'), ((3483, 3533), 'numpy.where', 'numpy.where', (['(erry == 0.0)', '(0.0)', '(1.0 / (erry * erry))'], {}), '(erry == 0.0, 0.0, 1.0 / (erry * erry))\n', (3494, 3533), False, 'import numpy\n'), ((4593, 4610), 'numpy.sqrt', 'numpy.sqrt', (['siga2'], {}), '(siga2)\n', (4603, 4610), False, 'import numpy\n'), ((4618, 4635), 'numpy.sqrt', 'numpy.sqrt', (['sigb2'], {}), '(sigb2)\n', (4628, 4635), False, 'import numpy\n'), ((4744, 4762), 'matplotlib.pyplot.rc', 'rc', (['"""font"""'], {'size': '(9)'}), "('font', size=9)\n", (4746, 4762), False, 'from matplotlib.pyplot import figure, show, rc\n'), ((4763, 4787), 'matplotlib.pyplot.rc', 'rc', (['"""legend"""'], {'fontsize': '(8)'}), "('legend', fontsize=8)\n", (4765, 4787), False, 'from matplotlib.pyplot import figure, show, rc\n'), ((4794, 4803), 'matplotlib.pyplot.figure', 'figure', (['(1)'], {}), '(1)\n', (4800, 4803), False, 'from matplotlib.pyplot import figure, show, rc\n'), ((5424, 5430), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5428, 5430), False, 'from matplotlib.pyplot import figure, show, rc\n'), ((1053, 1104), 'numpy.sqrt', 'numpy.sqrt', (['(wx * (x - xd) ** 2 + wy * (y - yd) ** 2)'], {}), '(wx * (x - xd) ** 2 + wy * (y - yd) ** 2)\n', (1063, 1104), False, 'import numpy\n'), ((1205, 1224), 'numpy.random.normal', 'normal', (['(0.0)', '(1.5)', 'N'], {}), '(0.0, 1.5, N)\n', (1211, 1224), False, 'from numpy.random import normal\n'), ((740, 775), 'numpy.where', 'numpy.where', (['(w == 0.0)', '(0.0)', '(1.0 / w)'], {}), '(w == 0.0, 0.0, 1.0 / w)\n', (751, 775), False, 'import numpy\n')]
|
import os
import KratosMultiphysics
from KratosMultiphysics import Logger
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.DEMApplication.DEM_analysis_stage
import numpy as np
import auxiliary_functions_for_tests
this_working_dir_backup = os.getcwd()
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
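# Each stage below runs the same DEM problem with a different neighbour search
# frequency/tolerance and compares the Y velocity of node 2 against hard-coded
# reference values at selected times.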
class DEM3D_SearchToleranceMain(KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage, KratosUnittest.TestCase):
def Initialize(self):
super().Initialize()
for node in self.spheres_model_part.Nodes:
self.initial_normal_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Z)
@classmethod
def GetMainPath(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
def GetProblemNameWithPath(self):
return os.path.join(self.main_path, self.DEM_parameters["problem_name"].GetString())
def FinalizeSolutionStep(self):
super().FinalizeSolutionStep()
for node in self.spheres_model_part.Nodes:
#reference data with freq=1 searchtolerance=0.0
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.86502139707038
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3859516373258987
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5929799879392164
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
def Finalize(self):
self.procedures.RemoveFoldersWithResults(str(self.main_path), str(self.problem_name), '')
super().Finalize()
class DEM3D_SearchTolerance1(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.8654458179811835
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3861319639727263
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.594495289987086
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class DEM3D_SearchTolerance2(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.865445816566027
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.386128017385994
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5941551772701182
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class DEM3D_SearchTolerance3(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.86502139707038
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3859516373258987
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5929799879392164
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class TestSearchTolerance(KratosUnittest.TestCase):
@classmethod
def test_SearchA(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(0.0)
project_parameters["search_tolerance_against_walls"].SetDouble(0.0)
project_parameters["NeighbourSearchFrequency"].SetInt(1)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchToleranceMain, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchB(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(0.0)
project_parameters["search_tolerance_against_walls"].SetDouble(0.0)
project_parameters["NeighbourSearchFrequency"].SetInt(10)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance1, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchC(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(1e-04)
project_parameters["search_tolerance_against_walls"].SetDouble(1e-04)
project_parameters["NeighbourSearchFrequency"].SetInt(20)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance2, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchD(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(1e-03)
project_parameters["search_tolerance_against_walls"].SetDouble(1e-03)
project_parameters["NeighbourSearchFrequency"].SetInt(20)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance3, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
if __name__ == "__main__":
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
KratosUnittest.main()
|
[
"numpy.isclose",
"KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep",
"KratosMultiphysics.KratosUnittest.main",
"os.path.join",
"os.getcwd",
"os.path.realpath",
"KratosMultiphysics.Logger.GetDefaultOutput",
"KratosMultiphysics.Model",
"auxiliary_functions_for_tests.GetHardcodedNumberOfThreads"
] |
[((340, 351), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (349, 351), False, 'import os\n'), ((9710, 9731), 'KratosMultiphysics.KratosUnittest.main', 'KratosUnittest.main', ([], {}), '()\n', (9729, 9731), True, 'import KratosMultiphysics.KratosUnittest as KratosUnittest\n'), ((74, 99), 'KratosMultiphysics.Logger.GetDefaultOutput', 'Logger.GetDefaultOutput', ([], {}), '()\n', (97, 99), False, 'from KratosMultiphysics import Logger\n'), ((2537, 2638), 'KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep', 'KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep', (['self'], {}), '(\n self)\n', (2627, 2638), False, 'import KratosMultiphysics\n'), ((3818, 3919), 'KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep', 'KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep', (['self'], {}), '(\n self)\n', (3908, 3919), False, 'import KratosMultiphysics\n'), ((5098, 5199), 'KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep', 'KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep', (['self'], {}), '(\n self)\n', (5188, 5199), False, 'import KratosMultiphysics\n'), ((6503, 6550), 'os.path.join', 'os.path.join', (['path', '"""ProjectParametersDEM.json"""'], {}), "(path, 'ProjectParametersDEM.json')\n", (6515, 6550), False, 'import os\n'), ((6919, 6945), 'KratosMultiphysics.Model', 'KratosMultiphysics.Model', ([], {}), '()\n', (6943, 6945), False, 'import KratosMultiphysics\n'), ((7323, 7370), 'os.path.join', 'os.path.join', (['path', '"""ProjectParametersDEM.json"""'], {}), "(path, 'ProjectParametersDEM.json')\n", (7335, 7370), False, 'import os\n'), ((7740, 7766), 'KratosMultiphysics.Model', 'KratosMultiphysics.Model', ([], {}), '()\n', (7764, 7766), False, 'import KratosMultiphysics\n'), ((8141, 8188), 'os.path.join', 'os.path.join', (['path', '"""ProjectParametersDEM.json"""'], {}), "(path, 'ProjectParametersDEM.json')\n", (8153, 8188), False, 'import os\n'), ((8562, 8588), 'KratosMultiphysics.Model', 'KratosMultiphysics.Model', ([], {}), '()\n', (8586, 8588), False, 'import KratosMultiphysics\n'), ((8963, 9010), 'os.path.join', 'os.path.join', (['path', '"""ProjectParametersDEM.json"""'], {}), "(path, 'ProjectParametersDEM.json')\n", (8975, 9010), False, 'import os\n'), ((9384, 9410), 'KratosMultiphysics.Model', 'KratosMultiphysics.Model', ([], {}), '()\n', (9408, 9410), False, 'import KratosMultiphysics\n'), ((420, 446), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (436, 446), False, 'import os\n'), ((7087, 7146), 'auxiliary_functions_for_tests.GetHardcodedNumberOfThreads', 'auxiliary_functions_for_tests.GetHardcodedNumberOfThreads', ([], {}), '()\n', (7144, 7146), False, 'import auxiliary_functions_for_tests\n'), ((7905, 7964), 'auxiliary_functions_for_tests.GetHardcodedNumberOfThreads', 'auxiliary_functions_for_tests.GetHardcodedNumberOfThreads', ([], {}), '()\n', (7962, 7964), False, 'import auxiliary_functions_for_tests\n'), ((8727, 8786), 'auxiliary_functions_for_tests.GetHardcodedNumberOfThreads', 'auxiliary_functions_for_tests.GetHardcodedNumberOfThreads', ([], {}), '()\n', (8784, 8786), False, 'import auxiliary_functions_for_tests\n'), ((9549, 9608), 'auxiliary_functions_for_tests.GetHardcodedNumberOfThreads', 'auxiliary_functions_for_tests.GetHardcodedNumberOfThreads', ([], {}), '()\n', (9606, 9608), False, 'import 
auxiliary_functions_for_tests\n'), ((9643, 9668), 'KratosMultiphysics.Logger.GetDefaultOutput', 'Logger.GetDefaultOutput', ([], {}), '()\n', (9666, 9668), False, 'from KratosMultiphysics import Logger\n'), ((881, 907), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (897, 907), False, 'import os\n'), ((1332, 1381), 'numpy.isclose', 'np.isclose', (['self.time', '(0.02)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.02, rtol=0.0, atol=1e-06)\n', (1342, 1381), True, 'import numpy as np\n'), ((1654, 1704), 'numpy.isclose', 'np.isclose', (['self.time', '(0.115)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.115, rtol=0.0, atol=1e-06)\n', (1664, 1704), True, 'import numpy as np\n'), ((1979, 2028), 'numpy.isclose', 'np.isclose', (['self.time', '(0.22)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.22, rtol=0.0, atol=1e-06)\n', (1989, 2028), True, 'import numpy as np\n'), ((2763, 2812), 'numpy.isclose', 'np.isclose', (['self.time', '(0.02)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.02, rtol=0.0, atol=1e-06)\n', (2773, 2812), True, 'import numpy as np\n'), ((3087, 3137), 'numpy.isclose', 'np.isclose', (['self.time', '(0.115)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.115, rtol=0.0, atol=1e-06)\n', (3097, 3137), True, 'import numpy as np\n'), ((3412, 3461), 'numpy.isclose', 'np.isclose', (['self.time', '(0.22)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.22, rtol=0.0, atol=1e-06)\n', (3422, 3461), True, 'import numpy as np\n'), ((4044, 4093), 'numpy.isclose', 'np.isclose', (['self.time', '(0.02)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.02, rtol=0.0, atol=1e-06)\n', (4054, 4093), True, 'import numpy as np\n'), ((4367, 4417), 'numpy.isclose', 'np.isclose', (['self.time', '(0.115)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.115, rtol=0.0, atol=1e-06)\n', (4377, 4417), True, 'import numpy as np\n'), ((4691, 4740), 'numpy.isclose', 'np.isclose', (['self.time', '(0.22)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.22, rtol=0.0, atol=1e-06)\n', (4701, 4740), True, 'import numpy as np\n'), ((5324, 5373), 'numpy.isclose', 'np.isclose', (['self.time', '(0.02)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.02, rtol=0.0, atol=1e-06)\n', (5334, 5373), True, 'import numpy as np\n'), ((5646, 5696), 'numpy.isclose', 'np.isclose', (['self.time', '(0.115)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.115, rtol=0.0, atol=1e-06)\n', (5656, 5696), True, 'import numpy as np\n'), ((5971, 6020), 'numpy.isclose', 'np.isclose', (['self.time', '(0.22)'], {'rtol': '(0.0)', 'atol': '(1e-06)'}), '(self.time, 0.22, rtol=0.0, atol=1e-06)\n', (5981, 6020), True, 'import numpy as np\n'), ((6418, 6444), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (6434, 6444), False, 'import os\n'), ((7238, 7264), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (7254, 7264), False, 'import os\n'), ((8056, 8082), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (8072, 8082), False, 'import os\n'), ((8878, 8904), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (8894, 8904), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import pywph as pw
import pywph_vanilla as pw2
import numpy as np
import matplotlib.pyplot as plt
M, N = 256, 256
J = 6
L = 4
dn = 0
data_ini = np.load('data/I_1.npy')
data = data_ini[:256,:256]
datab = data_ini[256:,256:]
""" Without normalization """
# Version dev
wph_op = pw.WPHOp(M, N, J, L=L, dn=dn, device='cpu')
stats = wph_op(data, padding = True).cpu().numpy()
print(stats.shape, stats)
# Version vanilla
wph_op2 = pw2.WPHOp(M, N, J, L=L, dn=dn, device='cpu')
stats2 = wph_op2(data, padding = True).cpu().numpy()
print(stats2.shape, stats2)
# Comparison
diff = (stats2-stats)
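# Illustrative check (added for this comparison; not in the original script):
# summarize how far apart the two implementations are.
print("max |stats2 - stats| (no normalization):", np.abs(diff).max())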
""" With normalization """
# Version dev
wph_op = pw.WPHOp(M, N, J, L=L, dn=dn, device='cpu')
stats = wph_op(data, padding = True, norm = 'auto').cpu().numpy()
print(stats.shape, stats)
# Version Vanilla
wph_op2 = pw2.WPHOp(M, N, J, L=L, dn=dn, device='cpu')
stats2 = wph_op2(data, padding = True, norm = 'auto').cpu().numpy()
print(stats2.shape, stats2)
plt.figure()
plt.plot(np.real(stats))
plt.plot(np.real(stats2))
""" Deuxième passage avec normalisation """
# Version dev
statsb = wph_op(datab, padding = True, norm = 'auto').cpu().numpy()
print(statsb.shape, statsb)
# Version Vanilla
statsb2 = wph_op2(datab, padding = True, norm = 'auto').cpu().numpy()
print(statsb2.shape, statsb2)
plt.figure()
plt.plot(np.real(statsb))
plt.plot(np.real(statsb2))
""" With normalization 2d map """
# Version dev
wph_op = pw.WPHOp(M, N, J, L=L, dn=dn, device='cpu')
stats = wph_op(datab, padding = True, norm = 'auto').cpu().numpy()
print(stats.shape, stats)
# Version Vanilla
wph_op2 = pw2.WPHOp(M, N, J, L=L, dn=dn, device='cpu')
stats2 = wph_op2(datab, padding = True, norm = 'auto').cpu().numpy()
print(stats2.shape, stats2)
|
[
"pywph.WPHOp",
"numpy.real",
"matplotlib.pyplot.figure",
"numpy.load",
"pywph_vanilla.WPHOp"
] |
[((171, 194), 'numpy.load', 'np.load', (['"""data/I_1.npy"""'], {}), "('data/I_1.npy')\n", (178, 194), True, 'import numpy as np\n'), ((306, 349), 'pywph.WPHOp', 'pw.WPHOp', (['M', 'N', 'J'], {'L': 'L', 'dn': 'dn', 'device': '"""cpu"""'}), "(M, N, J, L=L, dn=dn, device='cpu')\n", (314, 349), True, 'import pywph as pw\n'), ((457, 501), 'pywph_vanilla.WPHOp', 'pw2.WPHOp', (['M', 'N', 'J'], {'L': 'L', 'dn': 'dn', 'device': '"""cpu"""'}), "(M, N, J, L=L, dn=dn, device='cpu')\n", (466, 501), True, 'import pywph_vanilla as pw2\n'), ((671, 714), 'pywph.WPHOp', 'pw.WPHOp', (['M', 'N', 'J'], {'L': 'L', 'dn': 'dn', 'device': '"""cpu"""'}), "(M, N, J, L=L, dn=dn, device='cpu')\n", (679, 714), True, 'import pywph as pw\n'), ((836, 880), 'pywph_vanilla.WPHOp', 'pw2.WPHOp', (['M', 'N', 'J'], {'L': 'L', 'dn': 'dn', 'device': '"""cpu"""'}), "(M, N, J, L=L, dn=dn, device='cpu')\n", (845, 880), True, 'import pywph_vanilla as pw2\n'), ((978, 990), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (988, 990), True, 'import matplotlib.pyplot as plt\n'), ((1318, 1330), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1328, 1330), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1486), 'pywph.WPHOp', 'pw.WPHOp', (['M', 'N', 'J'], {'L': 'L', 'dn': 'dn', 'device': '"""cpu"""'}), "(M, N, J, L=L, dn=dn, device='cpu')\n", (1451, 1486), True, 'import pywph as pw\n'), ((1609, 1653), 'pywph_vanilla.WPHOp', 'pw2.WPHOp', (['M', 'N', 'J'], {'L': 'L', 'dn': 'dn', 'device': '"""cpu"""'}), "(M, N, J, L=L, dn=dn, device='cpu')\n", (1618, 1653), True, 'import pywph_vanilla as pw2\n'), ((1000, 1014), 'numpy.real', 'np.real', (['stats'], {}), '(stats)\n', (1007, 1014), True, 'import numpy as np\n'), ((1025, 1040), 'numpy.real', 'np.real', (['stats2'], {}), '(stats2)\n', (1032, 1040), True, 'import numpy as np\n'), ((1340, 1355), 'numpy.real', 'np.real', (['statsb'], {}), '(statsb)\n', (1347, 1355), True, 'import numpy as np\n'), ((1366, 1382), 'numpy.real', 'np.real', (['statsb2'], {}), '(statsb2)\n', (1373, 1382), True, 'import numpy as np\n')]
|
import numpy as np
import scipy
import cv2
def get_pixel_neighbors(height, width):
"""
Estimate the 4 neighbors of every pixel in an image
:param height: image height
:param width: image width
:return: pixel index - neighbor index lists
"""
pix_id = []
neighbor_id = []
for i in range(height):
for j in range(width):
n = []
if i == 0:
n = n + [(i + 1) * width + j]
elif i == height - 1:
n = n + [(i - 1) * width + j]
else:
n = n + [(i + 1) * width + j, (i - 1) * width + j]
if j == 0:
n = n + [i * width + j + 1]
elif j == width - 1:
n = n + [i * width + j - 1]
else:
n = n + [i * width + j + 1, i * width + j - 1]
for k in n:
pix_id.append(i*width+j)
neighbor_id.append(k)
return pix_id, neighbor_id
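# Illustrative note: for a 2x2 image every pixel has exactly two 4-neighbours,
# so get_pixel_neighbors(2, 2) returns two index lists of length 8.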
limps = np.array(
[[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 11], [11, 12], [12, 13], [1, 8],
[8, 9], [9, 10], [14, 15], [16, 17], [0, 14], [0, 15], [14, 16], [15, 17]])
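# Assumption: each row of `limps` above is one skeleton bone, given as a pair of
# keypoint indices in the (OpenPose-style) ordering used by the pose estimator.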
def get_instance_skeleton_buffer(h, w, poses):
output = np.zeros((h, w, 3), dtype=np.float32) - 1
for i in range(len(poses)):
keypoints = poses[i]
lbl = i
for k in range(limps.shape[0]):
kp1, kp2 = limps[k, :].astype(int)
bone_start = keypoints[kp1, :]
bone_end = keypoints[kp2, :]
bone_start[0] = np.maximum(np.minimum(bone_start[0], w - 1), 0.)
bone_start[1] = np.maximum(np.minimum(bone_start[1], h - 1), 0.)
bone_end[0] = np.maximum(np.minimum(bone_end[0], w - 1), 0.)
bone_end[1] = np.maximum(np.minimum(bone_end[1], h - 1), 0.)
if bone_start[2] > 0.0:
output[int(bone_start[1]), int(bone_start[0])] = 1
cv2.circle(output, (int(bone_start[0]), int(bone_start[1])), 2, (lbl, 0, 0), -1)
if bone_end[2] > 0.0:
output[int(bone_end[1]), int(bone_end[0])] = 1
cv2.circle(output, (int(bone_end[0]), int(bone_end[1])), 2, (lbl, 0, 0), -1)
if bone_start[2] > 0.0 and bone_end[2] > 0.0:
cv2.line(output, (int(bone_start[0]), int(bone_start[1])), (int(bone_end[0]), int(bone_end[1])), (lbl, 0, 0), 1)
return output[:, :, 0]
def get_poseimg_for_opt(sel_pose, poseimg, init_mask, n_bg=50):
h, w = init_mask.shape[:2]
bg_label = 1
output = np.zeros((h, w, 3), dtype=np.float32) - 1
II, JJ = (poseimg > 0).nonzero()
Isel, J_sel = (poseimg == sel_pose).nonzero()
output[II, JJ] = 0
output[Isel, J_sel] = 2
init_mask[Isel, J_sel] = 1
# Sample also from points in the field
init_mask = cv2.dilate(init_mask, np.ones((25, 25), np.uint8), iterations=1)
I_bg, J_bg = (init_mask == 0).nonzero()
rand_index = np.random.permutation(len(I_bg))[:n_bg]
bg_points = np.array([J_bg[rand_index], I_bg[rand_index]]).T
for k in range(bg_points.shape[0]):
cv2.circle(output, (int(bg_points[k, 0]), int(bg_points[k, 1])), 2, (bg_label, 0, 0), -1)
return output[:, :, 0]
def draw_poses_for_optimization(sel_pose, keypoints_list, init_mask, n_bg=50):
h, w = init_mask.shape[:2]
bg_label = 0
output = np.zeros((h, w, 3), dtype=np.float32)-1
for i in range(len(keypoints_list)):
keypoints = keypoints_list[i]
if i == sel_pose:
lbl = 2
else:
lbl = 1
for k in range(limps.shape[0]):
kp1, kp2 = limps[k, :].astype(int)
bone_start = keypoints[kp1, :]
bone_end = keypoints[kp2, :]
bone_start[0] = np.maximum(np.minimum(bone_start[0], w - 1), 0.)
bone_start[1] = np.maximum(np.minimum(bone_start[1], h - 1), 0.)
bone_end[0] = np.maximum(np.minimum(bone_end[0], w - 1), 0.)
bone_end[1] = np.maximum(np.minimum(bone_end[1], h - 1), 0.)
if bone_start[2] > 0.0:
output[int(bone_start[1]), int(bone_start[0])] = 1
cv2.circle(output, (int(bone_start[0]), int(bone_start[1])), 2, (lbl, 0, 0), -1)
if bone_end[2] > 0.0:
output[int(bone_end[1]), int(bone_end[0])] = 1
cv2.circle(output, (int(bone_end[0]), int(bone_end[1])), 2, (lbl, 0, 0), -1)
if bone_start[2] > 0.0 and bone_end[2] > 0.0:
cv2.line(output, (int(bone_start[0]), int(bone_start[1])), (int(bone_end[0]), int(bone_end[1])), (lbl, 0, 0), 1)
# Draw circles for the bg players keypoints
# for k in range(bg_keypoints.shape[0]):
# cv2.circle(output, (int(bg_keypoints[k, 0]), int(bg_keypoints[k, 1])), 2, (bg_keypoint_lable, 0, 0), -1)
# Sample also from points in the field
init_mask = cv2.dilate(init_mask, np.ones((5, 5), np.uint8), iterations=1)
I_bg, J_bg = (init_mask == 0).nonzero()
rand_index = np.random.permutation(len(I_bg))[:n_bg]
bg_points = np.array([J_bg[rand_index], I_bg[rand_index]]).T
for k in range(bg_points.shape[0]):
cv2.circle(output, (int(bg_points[k, 0]), int(bg_points[k, 1])), 2, (bg_label, 0, 0), -1)
return output[:, :, 0]
def set_U(strokes, h, w, dim):
N = h*w
y = np.zeros((N, dim))
U = scipy.sparse.lil_matrix((N, N))
for p in range(strokes.shape[0]):
i = strokes[p, 1]
j = strokes[p, 0]
index = int(i * w + j)
for ii in range(dim):
y[index, ii] = strokes[p, ii+2]
U[index, index] = 1
return U, y
def set_DW(image, edges=None, sigma1=1000., sigma2=0.01):
image = image.astype(float)
h, w = image.shape[0:2]
N = h * w
pixd, neighborid = get_pixel_neighbors(h, w)
i, j = np.unravel_index(pixd, (h, w))
ii, jj = np.unravel_index(neighborid, (h, w))
pix_diff = np.squeeze((image[i, j, :] - image[ii, jj, :]) ** 2)
if len(pix_diff.shape) == 1:
pix_diff = pix_diff[:, np.newaxis]
weight0 = np.exp(-(np.sum(pix_diff, axis=1)) / sigma1)
weight1 = np.exp(-((edges[i, j]) ** 2) / sigma2)
# neighbor_info = np.vstack((pixd, neighborid, weight0)).T
M = len(pixd)
D = scipy.sparse.lil_matrix((M, N))
W = scipy.sparse.lil_matrix((M, M))
p = np.arange(0, M, 1)
D[p, pixd] = 1
D[p, neighborid] = -1
W[p, p] = weight1
return D, W
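# Interpretation (not used in this module): U/y from set_U encode the unary stroke
# constraints and D/W from set_DW encode edge-aware pairwise differences, the usual
# ingredients of a quadratic propagation objective ||U f - y||^2 + lambda ||W^(1/2) D f||^2.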
|
[
"scipy.sparse.lil_matrix",
"numpy.ones",
"numpy.minimum",
"numpy.squeeze",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.unravel_index",
"numpy.arange"
] |
[((990, 1177), 'numpy.array', 'np.array', (['[[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 11], [11, 12],\n [12, 13], [1, 8], [8, 9], [9, 10], [14, 15], [16, 17], [0, 14], [0, 15],\n [14, 16], [15, 17]]'], {}), '([[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 11],\n [11, 12], [12, 13], [1, 8], [8, 9], [9, 10], [14, 15], [16, 17], [0, 14\n ], [0, 15], [14, 16], [15, 17]])\n', (998, 1177), True, 'import numpy as np\n'), ((5362, 5380), 'numpy.zeros', 'np.zeros', (['(N, dim)'], {}), '((N, dim))\n', (5370, 5380), True, 'import numpy as np\n'), ((5390, 5421), 'scipy.sparse.lil_matrix', 'scipy.sparse.lil_matrix', (['(N, N)'], {}), '((N, N))\n', (5413, 5421), False, 'import scipy\n'), ((5858, 5888), 'numpy.unravel_index', 'np.unravel_index', (['pixd', '(h, w)'], {}), '(pixd, (h, w))\n', (5874, 5888), True, 'import numpy as np\n'), ((5902, 5938), 'numpy.unravel_index', 'np.unravel_index', (['neighborid', '(h, w)'], {}), '(neighborid, (h, w))\n', (5918, 5938), True, 'import numpy as np\n'), ((5955, 6007), 'numpy.squeeze', 'np.squeeze', (['((image[i, j, :] - image[ii, jj, :]) ** 2)'], {}), '((image[i, j, :] - image[ii, jj, :]) ** 2)\n', (5965, 6007), True, 'import numpy as np\n'), ((6157, 6191), 'numpy.exp', 'np.exp', (['(-edges[i, j] ** 2 / sigma2)'], {}), '(-edges[i, j] ** 2 / sigma2)\n', (6163, 6191), True, 'import numpy as np\n'), ((6288, 6319), 'scipy.sparse.lil_matrix', 'scipy.sparse.lil_matrix', (['(M, N)'], {}), '((M, N))\n', (6311, 6319), False, 'import scipy\n'), ((6328, 6359), 'scipy.sparse.lil_matrix', 'scipy.sparse.lil_matrix', (['(M, M)'], {}), '((M, M))\n', (6351, 6359), False, 'import scipy\n'), ((6369, 6387), 'numpy.arange', 'np.arange', (['(0)', 'M', '(1)'], {}), '(0, M, 1)\n', (6378, 6387), True, 'import numpy as np\n'), ((1249, 1286), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'np.float32'}), '((h, w, 3), dtype=np.float32)\n', (1257, 1286), True, 'import numpy as np\n'), ((2577, 2614), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'np.float32'}), '((h, w, 3), dtype=np.float32)\n', (2585, 2614), True, 'import numpy as np\n'), ((2871, 2898), 'numpy.ones', 'np.ones', (['(25, 25)', 'np.uint8'], {}), '((25, 25), np.uint8)\n', (2878, 2898), True, 'import numpy as np\n'), ((3032, 3078), 'numpy.array', 'np.array', (['[J_bg[rand_index], I_bg[rand_index]]'], {}), '([J_bg[rand_index], I_bg[rand_index]])\n', (3040, 3078), True, 'import numpy as np\n'), ((3391, 3428), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'np.float32'}), '((h, w, 3), dtype=np.float32)\n', (3399, 3428), True, 'import numpy as np\n'), ((4934, 4959), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (4941, 4959), True, 'import numpy as np\n'), ((5093, 5139), 'numpy.array', 'np.array', (['[J_bg[rand_index], I_bg[rand_index]]'], {}), '([J_bg[rand_index], I_bg[rand_index]])\n', (5101, 5139), True, 'import numpy as np\n'), ((1579, 1611), 'numpy.minimum', 'np.minimum', (['bone_start[0]', '(w - 1)'], {}), '(bone_start[0], w - 1)\n', (1589, 1611), True, 'import numpy as np\n'), ((1656, 1688), 'numpy.minimum', 'np.minimum', (['bone_start[1]', '(h - 1)'], {}), '(bone_start[1], h - 1)\n', (1666, 1688), True, 'import numpy as np\n'), ((1732, 1762), 'numpy.minimum', 'np.minimum', (['bone_end[0]', '(w - 1)'], {}), '(bone_end[0], w - 1)\n', (1742, 1762), True, 'import numpy as np\n'), ((1805, 1835), 'numpy.minimum', 'np.minimum', (['bone_end[1]', '(h - 1)'], {}), '(bone_end[1], h - 1)\n', (1815, 1835), True, 'import numpy as np\n'), ((3801, 3833), 
'numpy.minimum', 'np.minimum', (['bone_start[0]', '(w - 1)'], {}), '(bone_start[0], w - 1)\n', (3811, 3833), True, 'import numpy as np\n'), ((3878, 3910), 'numpy.minimum', 'np.minimum', (['bone_start[1]', '(h - 1)'], {}), '(bone_start[1], h - 1)\n', (3888, 3910), True, 'import numpy as np\n'), ((3954, 3984), 'numpy.minimum', 'np.minimum', (['bone_end[0]', '(w - 1)'], {}), '(bone_end[0], w - 1)\n', (3964, 3984), True, 'import numpy as np\n'), ((4027, 4057), 'numpy.minimum', 'np.minimum', (['bone_end[1]', '(h - 1)'], {}), '(bone_end[1], h - 1)\n', (4037, 4057), True, 'import numpy as np\n'), ((6107, 6131), 'numpy.sum', 'np.sum', (['pix_diff'], {'axis': '(1)'}), '(pix_diff, axis=1)\n', (6113, 6131), True, 'import numpy as np\n')]
|
import math
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
def normal_pdf(x):
    return torch.exp(-0.5 * x**2) / math.sqrt(2 * math.pi)
def normal_cdf(y, h=0.01, tau=0.5):
# Approximation of Q-function given by López-Benítez & Casadevall (2011)
# based on a second-order exponential function & Q(x) = 1 - Q(-x):
Q_fn = lambda x: torch.exp(-0.4920*x**2 - 0.2887*x - 1.1893)
m = len(y)
y_prime = (tau - y) / h
sum_ = torch.sum(Q_fn(y_prime[y_prime > 0])) \
+ torch.sum(1 - Q_fn(torch.abs(y_prime[y_prime < 0]))) \
+ 0.5 * len(y_prime[y_prime == 0])
return sum_ / m
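# Interpretation: normal_cdf is a kernel-smoothed estimate of Pr(y > tau),
# obtained by averaging Q((tau - y_i) / h) over the m samples.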
def Huber_loss(x, delta):
if abs(x) < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
def Huber_loss_derivative(x, delta):
if x > delta:
return delta
elif x < -delta:
return -delta
return x
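# Note: Huber_loss_derivative(x, delta) is d/dx Huber_loss(x, delta),
# i.e. x clipped to the interval [-delta, delta].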
def get_fairness_metrics(Y, Z, Ytilde, n_classes, n_sensitive_attrs):
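    """Return aggregate demographic-parity (DDP) and equalized-odds (DEO) gaps,
    summed over all classes and sensitive-attribute values."""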
DDP = 0
DEO = 0
for y in range(n_classes):
Pr_Ytilde_y = (Ytilde == y).mean()
Ytilde_y_given_Y_y = np.logical_and(Ytilde==y, Y==y)
for z in range(n_sensitive_attrs):
DDP += abs(np.logical_and(Ytilde==y, Z==z).mean() / (Z==z).mean() - Pr_Ytilde_y)
DEO += abs(np.logical_and(Ytilde_y_given_Y_y, Z==z).mean() / np.logical_and(Y==y, Z==z).mean() - Ytilde_y_given_Y_y.mean() / (Y==y).mean())
return DDP, DEO
class BCELossAccuracy():
def __init__(self):
self.loss_function = nn.BCELoss()
@staticmethod
def accuracy(y_hat, labels):
with torch.no_grad():
y_tilde = (y_hat > 0.5).int()
accuracy = (y_tilde == labels.int()).float().mean().item()
return accuracy
def __call__(self, y_hat, labels):
loss = self.loss_function(y_hat, labels)
accuracy = self.accuracy(y_hat, labels)
return loss, accuracy
#
class CELossAccuracy():
def __init__(self):
self.loss_function = nn.CrossEntropyLoss()
@staticmethod
def accuracy(y_hat, labels):
with torch.no_grad():
y_tilde = y_hat.argmax(axis=1)
accuracy = (y_tilde == labels).float().mean().item()
return accuracy
def __call__(self, y_hat, labels):
loss = self.loss_function(y_hat, labels)
accuracy = self.accuracy(y_hat, labels)
return loss, accuracy
#
class FairnessLoss():
def __init__(self, h, tau, delta, notion, n_classes, n_sensitive_attrs, sensitive_attrs):
self.h = h
self.tau = tau
self.delta = delta
self.fairness_notion = notion
self.n_classes = n_classes
self.n_sensitive_attrs = n_sensitive_attrs
self.sensitive_attrs = sensitive_attrs
if self.n_classes > 2:
self.tau = 0.5
assert self.fairness_notion in ['DP', 'EO']
def DDP_loss(self, y_hat, Z):
m = y_hat.shape[0]
backward_loss = 0
logging_loss = 0
if self.n_classes == 2:
Pr_Ytilde1 = normal_cdf(y_hat.detach(), self.h, self.tau)
for z in self.sensitive_attrs:
Pr_Ytilde1_Z = normal_cdf(y_hat.detach()[Z==z], self.h, self.tau)
m_z = Z[Z==z].shape[0]
Prob_diff_Z = Pr_Ytilde1_Z - Pr_Ytilde1
_dummy = \
torch.dot(
normal_pdf((self.tau - y_hat.detach()[Z==z]) / self.h).view(-1),
y_hat[Z==z].view(-1)
) / (self.h * m_z) -\
torch.dot(
normal_pdf((self.tau - y_hat.detach()) / self.h).view(-1),
y_hat.view(-1)
) / (self.h * m)
_dummy *= Huber_loss_derivative(Prob_diff_Z, self.delta)
backward_loss += _dummy
logging_loss += Huber_loss(Prob_diff_Z, self.delta)
else:
idx_set = list(range(self.n_classes)) if self.n_classes > 2 else [0]
for y in idx_set:
Pr_Ytilde1 = normal_cdf(y_hat[:,y].detach(), self.h, self.tau)
for z in self.sensitive_attrs:
Pr_Ytilde1_Z = normal_cdf(y_hat[:,y].detach()[Z==z], self.h, self.tau)
m_z = Z[Z==z].shape[0]
Prob_diff_Z = Pr_Ytilde1_Z - Pr_Ytilde1
_dummy = Huber_loss_derivative(Prob_diff_Z, self.delta)
_dummy *= \
torch.dot(
normal_pdf((self.tau - y_hat[:,y].detach()[Z==z]) / self.h).view(-1),
y_hat[:,y][Z==z].view(-1)
) / (self.h * m_z) -\
torch.dot(
normal_pdf((self.tau - y_hat[:,y].detach()) / self.h).view(-1),
y_hat[:,y].view(-1)
) / (self.h * m)
backward_loss += _dummy
logging_loss += Huber_loss(Prob_diff_Z, self.delta).item()
return backward_loss, logging_loss
def DEO_loss(self, y_hat, Y, Z):
backward_loss = 0
logging_loss = 0
if self.n_classes == 2:
for y in [0,1]:
Pr_Ytilde1_Y = normal_cdf(y_hat[Y==y].detach(), self.h, self.tau)
m_y = (Y==y).sum().item()
for z in self.sensitive_attrs:
Pr_Ytilde1_YZ = normal_cdf(y_hat[torch.logical_and(Y==y, Z==z)].detach(), self.h, self.tau)
m_zy = torch.logical_and(Y==y, Z==z).sum().item()
Prob_diff_Z = Pr_Ytilde1_YZ - Pr_Ytilde1_Y
_dummy = Huber_loss_derivative(Prob_diff_Z, self.delta)
_dummy *= \
torch.dot(
normal_pdf((self.tau - y_hat[torch.logical_and(Y==y, Z==z)].detach()) / self.h).view(-1),
y_hat[torch.logical_and(Y==y, Z==z)].view(-1)
) / (self.h * m_zy) -\
torch.dot(
normal_pdf((self.tau - y_hat[Y==y].detach()) / self.h).view(-1),
y_hat[Y==y].view(-1)
) / (self.h * m_y)
backward_loss += _dummy
logging_loss += Huber_loss(Prob_diff_Z, self.delta).item()
else:
for y in range(self.n_classes):
Pr_Ytilde1_Y = normal_cdf(y_hat[:,y][Y==y].detach(), self.h, self.tau)
m_y = (Y==y).sum().item()
for z in self.sensitive_attrs:
Pr_Ytilde1_YZ = normal_cdf(y_hat[:,y][torch.logical_and(Y==y, Z==z)].detach(), self.h, self.tau)
m_zy = torch.logical_and(Y==y, Z==z).sum().item()
Prob_diff_Z = Pr_Ytilde1_YZ - Pr_Ytilde1_Y
_dummy = Huber_loss_derivative(Prob_diff_Z, self.delta)
_dummy *= \
torch.dot(
normal_pdf((self.tau - y_hat[:,y][torch.logical_and(Y==y, Z==z)].detach()) / self.h).view(-1),
y_hat[:,y][torch.logical_and(Y==y, Z==z)].view(-1)
) / (self.h * m_zy) -\
torch.dot(
normal_pdf((self.tau - y_hat[:,y][Y==y].detach()) / self.h).view(-1),
y_hat[:,y][Y==y].view(-1)
) / (self.h * m_y)
backward_loss += _dummy
logging_loss += Huber_loss(Prob_diff_Z, self.delta).item()
return backward_loss, logging_loss
def __call__(self, y_hat, Y, Z):
if self.fairness_notion == 'DP':
return self.DDP_loss(y_hat, Z)
else:
return self.DEO_loss(y_hat, Y, Z)
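# Illustrative usage sketch (shapes assumed, not taken from this file): for a
# binary classifier with sigmoid scores y_hat, labels Y and group ids Z, all of
# shape (m,):
#   fairness_loss = FairnessLoss(h=0.1, tau=0.5, delta=1.0, notion='DP',
#                                n_classes=2, n_sensitive_attrs=2,
#                                sensitive_attrs=[0, 1])
#   backward_term, logged_term = fairness_loss(y_hat, Y, Z)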
|
[
"torch.abs",
"torch.nn.CrossEntropyLoss",
"numpy.logical_and",
"math.sqrt",
"torch.exp",
"torch.nn.BCELoss",
"torch.no_grad",
"torch.logical_and"
] |
[((134, 158), 'torch.exp', 'torch.exp', (['(-0.5 * x ** 2)'], {}), '(-0.5 * x ** 2)\n', (143, 158), False, 'import torch\n'), ((159, 181), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (168, 181), False, 'import math\n'), ((388, 436), 'torch.exp', 'torch.exp', (['(-0.492 * x ** 2 - 0.2887 * x - 1.1893)'], {}), '(-0.492 * x ** 2 - 0.2887 * x - 1.1893)\n', (397, 436), False, 'import torch\n'), ((1111, 1146), 'numpy.logical_and', 'np.logical_and', (['(Ytilde == y)', '(Y == y)'], {}), '(Ytilde == y, Y == y)\n', (1125, 1146), True, 'import numpy as np\n'), ((1531, 1543), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1541, 1543), True, 'import torch.nn as nn\n'), ((2018, 2039), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2037, 2039), True, 'import torch.nn as nn\n'), ((1613, 1628), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1626, 1628), False, 'import torch\n'), ((2109, 2124), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2122, 2124), False, 'import torch\n'), ((559, 590), 'torch.abs', 'torch.abs', (['y_prime[y_prime < 0]'], {}), '(y_prime[y_prime < 0])\n', (568, 590), False, 'import torch\n'), ((1209, 1244), 'numpy.logical_and', 'np.logical_and', (['(Ytilde == y)', '(Z == z)'], {}), '(Ytilde == y, Z == z)\n', (1223, 1244), True, 'import numpy as np\n'), ((1302, 1344), 'numpy.logical_and', 'np.logical_and', (['Ytilde_y_given_Y_y', '(Z == z)'], {}), '(Ytilde_y_given_Y_y, Z == z)\n', (1316, 1344), True, 'import numpy as np\n'), ((1352, 1382), 'numpy.logical_and', 'np.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (1366, 1382), True, 'import numpy as np\n'), ((5614, 5647), 'torch.logical_and', 'torch.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (5631, 5647), False, 'import torch\n'), ((5700, 5733), 'torch.logical_and', 'torch.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (5717, 5733), False, 'import torch\n'), ((6827, 6860), 'torch.logical_and', 'torch.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (6844, 6860), False, 'import torch\n'), ((6913, 6946), 'torch.logical_and', 'torch.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (6930, 6946), False, 'import torch\n'), ((6103, 6136), 'torch.logical_and', 'torch.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (6120, 6136), False, 'import torch\n'), ((7326, 7359), 'torch.logical_and', 'torch.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (7343, 7359), False, 'import torch\n'), ((6007, 6040), 'torch.logical_and', 'torch.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (6024, 6040), False, 'import torch\n'), ((7225, 7258), 'torch.logical_and', 'torch.logical_and', (['(Y == y)', '(Z == z)'], {}), '(Y == y, Z == z)\n', (7242, 7258), False, 'import torch\n')]
|
import warnings
import numpy as np
from einsteinpy.integrators import GeodesicIntegrator
from .utils import _P, _kerr, _kerrnewman, _sch
class Geodesic:
"""
Base Class for defining Geodesics
Working in Geometrized Units (M-Units),
with :math:`c = G = M = k_e = 1`
"""
def __init__(
self,
metric,
metric_params,
position,
momentum,
time_like=True,
return_cartesian=True,
**kwargs,
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
time_like : bool, optional
Determines type of Geodesic
``True`` for Time-like geodesics
``False`` for Null-like geodesics
Defaults to ``True``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
This only affects the coordinates. Momenta are dimensionless
quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
# Contravariant Metrics, defined so far
_METRICS = {
"Schwarzschild": _sch,
"Kerr": _kerr,
"KerrNewman": _kerrnewman,
}
if metric not in _METRICS:
raise NotImplementedError(
f"'{metric}' is unsupported. Currently, these metrics are supported:\
\n1. Schwarzschild\n2. Kerr\n3. KerrNewman"
)
self.metric_name = metric
self.metric = _METRICS[metric]
self.metric_params = metric_params
if metric == "Schwarzschild":
self.metric_params = (0.0,)
self.position = np.array([0.0, *position])
self.momentum = _P(
self.metric, metric_params, self.position, momentum, time_like
)
self.time_like = time_like
self.kind = "Time-like" if time_like else "Null-like"
self.coords = "Cartesian" if return_cartesian else "Spherical Polar"
self._trajectory = self.calculate_trajectory(**kwargs)
def __repr__(self):
return f"""Geodesic Object:(\n\
Type : ({self.kind}),\n\
Metric : ({self.metric_name}),\n\
Metric Parameters : ({self.metric_params}),\n\
Initial 4-Position : ({self.position}),\n\
Initial 4-Momentum : ({self.momentum}),\n\
Trajectory = (\n\
{self.trajectory}\n\
),\n\
Output Position Coordinate System = ({self.coords})\n\
))"""
def __str__(self):
return self.__repr__()
@property
def trajectory(self):
"""
Returns the trajectory of the test particle
"""
return self._trajectory
def calculate_trajectory(self, **kwargs):
"""
Calculate trajectory in spacetime
Parameters
----------
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Returns
-------
~numpy.ndarray
N-element numpy array, containing step count
~numpy.ndarray
Shape-(N, 8) numpy array, containing
(4-Position, 4-Momentum) for each step
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
g, g_prms = self.metric, self.metric_params
q0, p0 = self.position, self.momentum
tl = self.time_like
N = kwargs.get("steps", 50)
dl = kwargs.get("delta", 0.5)
rtol = kwargs.get("rtol", 1e-2)
atol = kwargs.get("atol", 1e-2)
order = kwargs.get("order", 2)
omega = kwargs.get("omega", 1.0)
sw = kwargs.get("suppress_warnings", False)
steps = np.arange(N)
geodint = GeodesicIntegrator(
metric=g,
metric_params=g_prms,
q0=q0,
p0=p0,
time_like=tl,
steps=N,
delta=dl,
rtol=rtol,
atol=atol,
order=order,
omega=omega,
suppress_warnings=sw,
)
for i in steps:
geodint.step()
vecs = np.array(geodint.results, dtype=float)
q1 = vecs[:, 0]
p1 = vecs[:, 1]
results = np.hstack((q1, p1))
# Ignoring
# q2 = vecs[:, 2]
# p2 = vecs[:, 3]
if self.coords == "Cartesian":
# Converting to Cartesian from Spherical Polar Coordinates
# Note that momenta cannot be converted this way,
# due to ambiguities in the signs of v_r and v_th (velocities)
t, r, th, ph = q1.T
pt, pr, pth, pph = p1.T
x = r * np.sin(th) * np.cos(ph)
y = r * np.sin(th) * np.sin(ph)
z = r * np.cos(th)
cart_results = np.vstack((t, x, y, z, pt, pr, pth, pph)).T
return steps, cart_results
return steps, results
class Nulllike(Geodesic):
"""
Class for defining Null-like Geodesics
"""
def __init__(
self, metric, metric_params, position, momentum, return_cartesian=True, **kwargs
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
            This only affects the coordinates. The momenta are dimensionless
quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
super().__init__(
metric=metric,
metric_params=metric_params,
position=position,
momentum=momentum,
time_like=False,
return_cartesian=return_cartesian,
**kwargs,
)
class Timelike(Geodesic):
"""
Class for defining Time-like Geodesics
"""
def __init__(
self, metric, metric_params, position, momentum, return_cartesian=True, **kwargs
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
            This only affects the coordinates. The momenta are dimensionless
quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
super().__init__(
metric=metric,
metric_params=metric_params,
position=position,
momentum=momentum,
time_like=True,
return_cartesian=return_cartesian,
**kwargs,
)
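# Illustrative usage (kept as comments, since this module is meant to be imported
# as part of the package; import path and parameter values are assumptions):
# a bound time-like orbit in Schwarzschild spacetime.
#   from einsteinpy.geodesic import Timelike
#   geod = Timelike(
#       metric="Schwarzschild", metric_params=(0.0,),
#       position=[40.0, np.pi / 2, 0.0], momentum=[0.0, 0.0, 3.83],
#       steps=500, delta=0.5,
#   )
#   steps, traj = geod.trajectory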
|
[
"numpy.hstack",
"einsteinpy.integrators.GeodesicIntegrator",
"numpy.array",
"numpy.cos",
"numpy.vstack",
"numpy.sin",
"numpy.arange"
] |
[((3607, 3633), 'numpy.array', 'np.array', (['[0.0, *position]'], {}), '([0.0, *position])\n', (3615, 3633), True, 'import numpy as np\n'), ((6840, 6852), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (6849, 6852), True, 'import numpy as np\n'), ((6872, 7048), 'einsteinpy.integrators.GeodesicIntegrator', 'GeodesicIntegrator', ([], {'metric': 'g', 'metric_params': 'g_prms', 'q0': 'q0', 'p0': 'p0', 'time_like': 'tl', 'steps': 'N', 'delta': 'dl', 'rtol': 'rtol', 'atol': 'atol', 'order': 'order', 'omega': 'omega', 'suppress_warnings': 'sw'}), '(metric=g, metric_params=g_prms, q0=q0, p0=p0, time_like=\n tl, steps=N, delta=dl, rtol=rtol, atol=atol, order=order, omega=omega,\n suppress_warnings=sw)\n', (6890, 7048), False, 'from einsteinpy.integrators import GeodesicIntegrator\n'), ((7263, 7301), 'numpy.array', 'np.array', (['geodint.results'], {'dtype': 'float'}), '(geodint.results, dtype=float)\n', (7271, 7301), True, 'import numpy as np\n'), ((7369, 7388), 'numpy.hstack', 'np.hstack', (['(q1, p1)'], {}), '((q1, p1))\n', (7378, 7388), True, 'import numpy as np\n'), ((7809, 7819), 'numpy.cos', 'np.cos', (['ph'], {}), '(ph)\n', (7815, 7819), True, 'import numpy as np\n'), ((7853, 7863), 'numpy.sin', 'np.sin', (['ph'], {}), '(ph)\n', (7859, 7863), True, 'import numpy as np\n'), ((7884, 7894), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (7890, 7894), True, 'import numpy as np\n'), ((7923, 7964), 'numpy.vstack', 'np.vstack', (['(t, x, y, z, pt, pr, pth, pph)'], {}), '((t, x, y, z, pt, pr, pth, pph))\n', (7932, 7964), True, 'import numpy as np\n'), ((7796, 7806), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (7802, 7806), True, 'import numpy as np\n'), ((7840, 7850), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (7846, 7850), True, 'import numpy as np\n')]
|
"""
This module defines an abstract base formatter.
!!! question "Formats"
Refer to the [Formats documentation](../../formats/index.md)
to learn about the supported output formats.
"""
from abc import ABC, abstractmethod
import numpy as np
from numpy.lib.recfunctions import unstructured_to_structured
class BaseFormatter(ABC):
"""
An abstract base formatter.
Attributes:
colorize (bool): Whether to color the text.
vcolor (Callable): The vectorized implementation of the `color` method.
Note:
The following methods must be overwritten:
- [`color`][picharsso.format.base.BaseFormatter.color]
- [`translate`][picharsso.format.base.BaseFormatter.translate]
- [`unify`][picharsso.format.base.BaseFormatter.unify]
"""
def __init__(self, colorize=False):
"""Initialization method.
Args:
            colorize (Optional[bool]): Whether to color the text.
"""
self.colorize = None
BaseFormatter.set(self, colorize=colorize)
self.vcolor = np.vectorize(self.color)
def __call__(self, text_matrix, image, resample):
"""Applies formatting and colorization on the `text_matrix`
and returns a single string.
Args:
text_matrix (numpy.ndarray): The subject text matrix,
with `shape = (<height>, <width>)`,
and `dtype = str`.
image (PIL.Image.Image): The subject image.
resample (int): The resampling filter.
Returns:
str: The formatted string of text with color (if specified).
"""
text_size = text_matrix.shape
# Apply any translations.
text_matrix = self.translate(text_matrix)
# Colorize if necessary
if self.colorize:
# Pool the colors from the original image by resizing it to the size of the text output.
# Using the vectorized `color` method, color each element in the `text_martix`.
# The vectorized operation takes a `str` from `text_matrix`
# and a `List[int, int, int]` from the pooled colors.
text_matrix = self.vcolor(
text_matrix,
unstructured_to_structured(
np.array(image.resize(text_size[::-1], resample=resample)).astype(
np.uint8
)
).astype("O"),
)
return self.unify(text_matrix)
@staticmethod
@abstractmethod
def color(text, color):
"""Applies `color` to a string of `text`.
Args:
text (str): The subject text.
color (Tuple[int, int, int]): The `RGB` value for the color.
Returns:
str: The colored text.
"""
@staticmethod
@abstractmethod
def translate(text_matrix):
"""Applies translatations to `text_matrix`.
Args:
text_matrix (numpy.ndarray): The subject text matrix,
with `shape = (<height>, <width>)`,
and `dtype = str`.
Returns:
numpy.ndarray: The translated text_matrix.
"""
@staticmethod
@abstractmethod
def unify(text_matrix):
"""Formats a `text_matrix` into a single string.
Args:
text_matrix (numpy.ndarray): The subject text matrix,
with `shape = (<height>, <width>)`,
and `dtype = str`.
Returns:
str: The formatted string of text art.
"""
def set(self, colorize=None):
"""Sets attributes of the formatter instance.
Args:
colorize (Optional[bool]): Sets `colorize`.
"""
if colorize is not None:
self.colorize = colorize
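
# Illustrative sketch (not part of the library): a minimal concrete formatter
# that applies no color and joins rows into newline-separated text.
class PlainTextSketchFormatter(BaseFormatter):
    """Example subclass showing the three methods a formatter must override."""

    @staticmethod
    def color(text, color):
        # Ignore the RGB value and return the text unchanged.
        return text

    @staticmethod
    def translate(text_matrix):
        # No character substitutions.
        return text_matrix

    @staticmethod
    def unify(text_matrix):
        # Join the characters in each row, then join rows with newlines.
        return "\n".join("".join(row) for row in text_matrix)
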
__all__ = ["BaseFormatter"]
|
[
"numpy.vectorize"
] |
[((1067, 1091), 'numpy.vectorize', 'np.vectorize', (['self.color'], {}), '(self.color)\n', (1079, 1091), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pylab as plt
from math import pi,floor,atan2,atan
from scipy.interpolate import splprep, splev
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
#########################################################################################
################################## FCT DEFINITION #######################################
#########################################################################################
def readModel(path,time):
length = 500
curv = np.transpose(np.loadtxt(path))
x = curv[0]
y = curv[1]
theta = curv[2]
time_model = np.linspace(0,length,len(x))
okay = np.where(np.abs(np.diff(x)) + np.abs(np.diff(y)) > 0)
x = x[okay]
y = y[okay]
tck, u = splprep([x, y], s=0)
unew = np.linspace(0,1,length)
data = splev(unew, tck)
x,y = data[0],data[1]
theta = np.interp(time,time_model,theta)
delta_y = np.diff(y)
delta_x = np.diff(x)
th_local = []
for i in range (len(delta_x)):
phi = atan2(delta_y[i],delta_x[i])
th_local.append(phi-theta[i])
return (x,y,th_local,theta)
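# Note: readModel (above) resamples the simulated path to `length` evenly spaced
# points with a spline and returns (x, y, tangent-minus-orientation angle, orientation).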
def distanceBetweenCurvs(x_real,x_sim,y_real,y_sim):
distance = 0
length = len(x_real)
distance_fin = np.sqrt((x_sim[-1]-x_real[-1])**2+(y_sim[-1]-y_real[-1])**2)
okay = np.where(np.abs(np.diff(x_real)) + np.abs(np.diff(y_real)) > 0)
x_real = x_real[okay]
y_real = y_real[okay]
tck, u = splprep([x_real, y_real], s=0)
unew = np.linspace(0,1,length)
data = splev(unew, tck)
x_real,y_real = data[0],data[1]
for i in range (length):
distance += np.sqrt((x_sim[i]-x_real[i])**2+(y_sim[i]-y_real[i])**2)
# if i%25 == 0:
# # print(i, "sim",x_sim[i],y_sim[i])
# # print(np.sqrt((x_sim[i]-x_real[i])**2+(y_sim[i]-y_real[i])**2))
# plt.plot([x_sim[i],x_real[i]], [y_sim[i],y_real[i]], color = 'black', linewidth = 0.5)
# # # print(distance)
# plt.plot(x_sim,y_sim,color='blue')
# plt.plot(x_real,y_real,color='red')
# plt.plot(x_sim, y_sim,color='cyan',linestyle=':', marker='o')
# plt.plot(x_real, y_real,color='orange',linestyle=':', marker='o')
# plt.show()
# # print("dist_i :",distance/500)
return distance/length,distance_fin
def normalizeAngle(angle):
new_angle = angle
while new_angle > pi:
new_angle -= 2*pi
while new_angle < -pi:
new_angle += 2*pi
return new_angle
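# normalizeAngle (above) wraps an angle into the interval [-pi, pi].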
def angularDistanceBetweenCurvs(th_real,th_sim):
distance = 0
distance_fin = abs(pi/2-normalizeAngle(th_sim[-1]))
for i in range (len(th_real)-1):
distance += abs(normalizeAngle(th_real[i])-normalizeAngle(th_sim[i]))
# print(abs(th_real[i]-th_sim[i]))
# plt.plot([time[i],time[i]], [th_real[i],th_sim[i]], color = 'black', linewidth = 0.5)
# plt.plot(time,th_real,linestyle=':', marker='o')
# plt.plot(time,th_sim,linestyle=':', marker='o')
# print("--------",distance/len(th_real))
# plt.show()
return distance/len(th_sim),distance_fin
#########################################################################################
################################## MAIN #################################################
#########################################################################################
direction_list = ['N','E','S','O']
position_list = ['1500','4000','-0615','0615','1515','4015','-0640','0640','1540','4040']
path_human_list = []
for pos in position_list:
for direction in direction_list:
name_file = direction+pos+".dat"
path_human_list.append('data/Human/'+name_file)
init_pos_list = []
start_and_end = np.loadtxt("data/Human/StartAndEnd.dat")
for i in range(len(start_and_end)):
init_pos_list.append([floor(start_and_end[i][0]*1000)/1000,floor(start_and_end[i][1]*1000)/1000])
fin_pos = [0,0,1.57]
orientation_list = [1.57,0.0,-1.58,3.14]
path_clothoid_list,path_ddp_list = [],[]
i = 0
for pos in init_pos_list:
# name_file = 'Clothoid_from_'+str(pos[0])+','+str(pos[1])+','+str(orientation_list[i%4])+\
# '_to_'+str(fin_pos[0])+','+str(fin_pos[1])+','+str(fin_pos[2])+'_0.001_pos.dat'
# path_clothoid_list.append('data/Clothoid/'+name_file)
name_file = 'DdpResult_from_'+str(pos[0])+','+str(pos[1])+','+str(orientation_list[i%4])+\
'_to_'+str(fin_pos[0])+','+str(fin_pos[1])+','+str(fin_pos[2])+'_pos.dat'
path_ddp_list.append('data/DdpResult/'+name_file)
i += 1
init_pos_list = []
start_and_end = np.loadtxt("data/Human/DataIROS/StartAndEnd.dat")
for i in range(len(start_and_end)):
init_pos_list.append([floor(start_and_end[i][0]*1000)/1000,floor(start_and_end[i][1]*1000)/1000])
fin_pos = [0,0,1.57]
orientation_list = [1.57,0.0,-1.58,3.14]
path_clothoid_list= []
i = 0
for pos in init_pos_list:
name_file = 'DdpResult_from_'+str(pos[0])+','+str(pos[1])+','+str(orientation_list[i%4])+\
'_to_'+str(fin_pos[0])+','+str(fin_pos[1])+','+str(fin_pos[2])+'_pos.dat'
path_clothoid_list.append('data/DdpResult/DataIROS/'+name_file)
i += 1
time = np.arange(0,500,1)
fig = plt.figure()
count = 1
dist_clothoid_list, dist_ddp_list,angular_dist_clothoid_list, angular_dist_ddp_list = [],[],[],[]
dist_fin_ddp_list , angular_dist_fin_ddp_list = [],[]
dist_subjects_ddp_list , angular_dist_subjects_ddp_list = [],[]
for i in range (len(path_human_list)):
title = path_human_list[i][11:17]
#print(title)
#ax = plt.subplot(1,4,count)
ax = plt.subplot(4,10,count)
#if title == 'E1540.' or title == 'N-0615' or title == 'S4015.' or title == 'O0640.':
print(title,i,count)
human_data = np.loadtxt(path_human_list[i])
# (x_clothoid,y_clothoid,theta_clothoid) = readModel(path_clothoid_list[i],time)
(x_ddp,y_ddp,theta_local_ddp,theta_global_ddp) = readModel(path_ddp_list[i],time)
plt.plot(x_ddp,y_ddp,label='OC',color='red',linewidth=1.5)
plt.plot(human_data[6],human_data[7],label='Subjects',color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[12],human_data[13],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[18],human_data[19],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[24],human_data[25],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[30],human_data[31],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[36],human_data[37],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[42],human_data[43],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[48],human_data[49],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[54],human_data[55],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[60],human_data[61],color='lime',linewidth=0.75,alpha = 0.4)
plt.plot(human_data[0],human_data[1],label='Human average',color='green',linewidth=1.5)
if np.sum(human_data[5]) != 0:
arrow_len = 0.2
for i in range (len(human_data[0])):
if i%50 == 0:
plt.arrow(human_data[0][i], human_data[1][i], np.cos(human_data[5][i])*arrow_len, np.sin(human_data[5][i])*arrow_len, head_width=.03,color='green')
plt.arrow(x_ddp[i], y_ddp[i], np.cos(theta_global_ddp[i])*arrow_len, np.sin(theta_global_ddp[i])*arrow_len, head_width=.03,color='red')
plt.arrow(x_ddp[-1], y_ddp[-1], np.cos(theta_global_ddp[-1])*arrow_len, np.sin(theta_global_ddp[-1])*arrow_len, head_width=.03,color='red')
# plt.plot(time,v,color='orange')
# plt.plot([time[end]]*len(time),np.linspace(0,6,len(time)),color ='black')
# plt.plot([time[begin]]*len(time),np.linspace(0,6,len(time)),color ='black')
# plt.plot(time,human_data[5],linestyle=':',color ='black')
# plt.plot(time,theta_clothoid,color='red')
# plt.plot(time,theta_ddp,color='blue')
# plt.plot(time,theta_trunc,color='green')
# plt.plot(time,human_data[11],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[17],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[23],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[29],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[35],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[41],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[47],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[53],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[59],color='lime',linewidth=0.75,alpha = 0.4)
# plt.plot(time,human_data[65],color='lime',linewidth=0.75,alpha = 0.4)
# dist_clotho = distanceBetweenCurvs(human_data[0],x_clothoid,human_data[1],y_clothoid)
dist_ddp,dist_fin_ddp = distanceBetweenCurvs(human_data[0],x_ddp,human_data[1],y_ddp)
# dist_clothoid_list.append(dist_clotho)
dist_ddp_list.append(dist_ddp)
dist_fin_ddp_list.append(dist_fin_ddp)
for i in range (10):
dist_subjects_ddp_list.append(distanceBetweenCurvs(human_data[6+i*6],x_ddp,human_data[7+i*6],y_ddp)[0])
if np.sum(human_data[5]) != 0:
print("yes")
# angular_dist_clotho = angularDistanceBetweenCurvs(human_data[5],theta_clothoid)
angular_dist_ddp,angular_dist_fin_ddp = angularDistanceBetweenCurvs(human_data[4],theta_local_ddp)
else:
# angular_dist_clotho = 0
angular_dist_ddp,angular_dist_fin_ddp = 0,0
print("no",title)
# angular_dist_clothoid_list.append(angular_dist_clotho)
angular_dist_ddp_list.append(angular_dist_ddp)
angular_dist_fin_ddp_list.append(angular_dist_fin_ddp)
#print(i,path_human_list[i][62:68],dist_clotho,dist_ddp)
# plt.legend(fontsize = 'xx-large')
# plt.title(title)
plt.title("d_xy = " + str(floor(dist_ddp*10000)/10000) + " & d_eta = "+str(floor(angular_dist_ddp*10000)/10000), fontsize=18)
# plt.title('clotho :'+str(floor(angular_dist_clotho*100)/100) + \
# ' VS ddp :'+str(floor(angular_dist_ddp*100)/100))
# plt.title('Clothoid-Human d_xy='+str(floor(dist_clotho*100)/100) + \
# ' & d_th='+str(floor(angular_dist_clotho*100)/100)+ \
# ', OC-Human d_xy='+str(floor(dist_ddp*100)/100)+ \
# ' & d_th='+str(floor(angular_dist_ddp*100)/100))
# ax.set_xticklabels([])
# ax.set_yticklabels([])
plt.ylabel("y (m)")
plt.xlabel("x (m)")
#if count < 4:
count += 1
plt.show()
# path = "data/dist_clotho.dat"
# np.savetxt(path,dist_clothoid_list)
path = "data/dist_ddp.dat"
np.savetxt(path,dist_ddp_list)
path = "data/dist_fin_ddp.dat"
np.savetxt(path,dist_fin_ddp_list)
# path = "data/angular_dist_clotho.dat"
# np.savetxt(path,angular_dist_clothoid_list)
path = "data/angular_dist_ddp.dat"
np.savetxt(path,angular_dist_ddp_list)
path = "data/angular_dist_fin_ddp.dat"
np.savetxt(path,angular_dist_fin_ddp_list)
path = "data/dist_subjects_ddp.dat"
np.savetxt(path,dist_subjects_ddp_list)
|
[
"numpy.sqrt",
"math.floor",
"matplotlib.pylab.show",
"numpy.sin",
"numpy.arange",
"matplotlib.pylab.figure",
"numpy.diff",
"numpy.linspace",
"scipy.interpolate.splev",
"matplotlib.pylab.plot",
"matplotlib.pylab.xlabel",
"math.atan2",
"numpy.savetxt",
"numpy.interp",
"numpy.cos",
"scipy.interpolate.splprep",
"numpy.sum",
"matplotlib.pylab.subplot",
"numpy.loadtxt",
"matplotlib.pylab.ylabel"
] |
[((3451, 3491), 'numpy.loadtxt', 'np.loadtxt', (['"""data/Human/StartAndEnd.dat"""'], {}), "('data/Human/StartAndEnd.dat')\n", (3461, 3491), True, 'import numpy as np\n'), ((4257, 4306), 'numpy.loadtxt', 'np.loadtxt', (['"""data/Human/DataIROS/StartAndEnd.dat"""'], {}), "('data/Human/DataIROS/StartAndEnd.dat')\n", (4267, 4306), True, 'import numpy as np\n'), ((4808, 4828), 'numpy.arange', 'np.arange', (['(0)', '(500)', '(1)'], {}), '(0, 500, 1)\n', (4817, 4828), True, 'import numpy as np\n'), ((4834, 4846), 'matplotlib.pylab.figure', 'plt.figure', ([], {}), '()\n', (4844, 4846), True, 'import matplotlib.pylab as plt\n'), ((9839, 9849), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (9847, 9849), True, 'import matplotlib.pylab as plt\n'), ((9949, 9980), 'numpy.savetxt', 'np.savetxt', (['path', 'dist_ddp_list'], {}), '(path, dist_ddp_list)\n', (9959, 9980), True, 'import numpy as np\n'), ((10012, 10047), 'numpy.savetxt', 'np.savetxt', (['path', 'dist_fin_ddp_list'], {}), '(path, dist_fin_ddp_list)\n', (10022, 10047), True, 'import numpy as np\n'), ((10170, 10209), 'numpy.savetxt', 'np.savetxt', (['path', 'angular_dist_ddp_list'], {}), '(path, angular_dist_ddp_list)\n', (10180, 10209), True, 'import numpy as np\n'), ((10249, 10292), 'numpy.savetxt', 'np.savetxt', (['path', 'angular_dist_fin_ddp_list'], {}), '(path, angular_dist_fin_ddp_list)\n', (10259, 10292), True, 'import numpy as np\n'), ((10329, 10369), 'numpy.savetxt', 'np.savetxt', (['path', 'dist_subjects_ddp_list'], {}), '(path, dist_subjects_ddp_list)\n', (10339, 10369), True, 'import numpy as np\n'), ((736, 756), 'scipy.interpolate.splprep', 'splprep', (['[x, y]'], {'s': '(0)'}), '([x, y], s=0)\n', (743, 756), False, 'from scipy.interpolate import splprep, splev\n'), ((765, 790), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'length'], {}), '(0, 1, length)\n', (776, 790), True, 'import numpy as np\n'), ((797, 813), 'scipy.interpolate.splev', 'splev', (['unew', 'tck'], {}), '(unew, tck)\n', (802, 813), False, 'from scipy.interpolate import splprep, splev\n'), ((847, 881), 'numpy.interp', 'np.interp', (['time', 'time_model', 'theta'], {}), '(time, time_model, theta)\n', (856, 881), True, 'import numpy as np\n'), ((892, 902), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (899, 902), True, 'import numpy as np\n'), ((914, 924), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (921, 924), True, 'import numpy as np\n'), ((1178, 1248), 'numpy.sqrt', 'np.sqrt', (['((x_sim[-1] - x_real[-1]) ** 2 + (y_sim[-1] - y_real[-1]) ** 2)'], {}), '((x_sim[-1] - x_real[-1]) ** 2 + (y_sim[-1] - y_real[-1]) ** 2)\n', (1185, 1248), True, 'import numpy as np\n'), ((1369, 1399), 'scipy.interpolate.splprep', 'splprep', (['[x_real, y_real]'], {'s': '(0)'}), '([x_real, y_real], s=0)\n', (1376, 1399), False, 'from scipy.interpolate import splprep, splev\n'), ((1408, 1433), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'length'], {}), '(0, 1, length)\n', (1419, 1433), True, 'import numpy as np\n'), ((1440, 1456), 'scipy.interpolate.splev', 'splev', (['unew', 'tck'], {}), '(unew, tck)\n', (1445, 1456), False, 'from scipy.interpolate import splprep, splev\n'), ((5201, 5226), 'matplotlib.pylab.subplot', 'plt.subplot', (['(4)', '(10)', 'count'], {}), '(4, 10, count)\n', (5212, 5226), True, 'import matplotlib.pylab as plt\n'), ((5355, 5385), 'numpy.loadtxt', 'np.loadtxt', (['path_human_list[i]'], {}), '(path_human_list[i])\n', (5365, 5385), True, 'import numpy as np\n'), ((5554, 5616), 'matplotlib.pylab.plot', 'plt.plot', (['x_ddp', 'y_ddp'], {'label': 
'"""OC"""', 'color': '"""red"""', 'linewidth': '(1.5)'}), "(x_ddp, y_ddp, label='OC', color='red', linewidth=1.5)\n", (5562, 5616), True, 'import matplotlib.pylab as plt\n'), ((5617, 5718), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[6]', 'human_data[7]'], {'label': '"""Subjects"""', 'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[6], human_data[7], label='Subjects', color='lime',\n linewidth=0.75, alpha=0.4)\n", (5625, 5718), True, 'import matplotlib.pylab as plt\n'), ((5713, 5798), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[12]', 'human_data[13]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[12], human_data[13], color='lime', linewidth=0.75,\n alpha=0.4)\n", (5721, 5798), True, 'import matplotlib.pylab as plt\n'), ((5794, 5879), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[18]', 'human_data[19]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[18], human_data[19], color='lime', linewidth=0.75,\n alpha=0.4)\n", (5802, 5879), True, 'import matplotlib.pylab as plt\n'), ((5876, 5961), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[24]', 'human_data[25]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[24], human_data[25], color='lime', linewidth=0.75,\n alpha=0.4)\n", (5884, 5961), True, 'import matplotlib.pylab as plt\n'), ((5957, 6042), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[30]', 'human_data[31]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[30], human_data[31], color='lime', linewidth=0.75,\n alpha=0.4)\n", (5965, 6042), True, 'import matplotlib.pylab as plt\n'), ((6038, 6123), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[36]', 'human_data[37]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[36], human_data[37], color='lime', linewidth=0.75,\n alpha=0.4)\n", (6046, 6123), True, 'import matplotlib.pylab as plt\n'), ((6119, 6204), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[42]', 'human_data[43]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[42], human_data[43], color='lime', linewidth=0.75,\n alpha=0.4)\n", (6127, 6204), True, 'import matplotlib.pylab as plt\n'), ((6200, 6285), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[48]', 'human_data[49]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[48], human_data[49], color='lime', linewidth=0.75,\n alpha=0.4)\n", (6208, 6285), True, 'import matplotlib.pylab as plt\n'), ((6281, 6366), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[54]', 'human_data[55]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[54], human_data[55], color='lime', linewidth=0.75,\n alpha=0.4)\n", (6289, 6366), True, 'import matplotlib.pylab as plt\n'), ((6362, 6447), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[60]', 'human_data[61]'], {'color': '"""lime"""', 'linewidth': '(0.75)', 'alpha': '(0.4)'}), "(human_data[60], human_data[61], color='lime', linewidth=0.75,\n alpha=0.4)\n", (6370, 6447), True, 'import matplotlib.pylab as plt\n'), ((6443, 6538), 'matplotlib.pylab.plot', 'plt.plot', (['human_data[0]', 'human_data[1]'], {'label': '"""Human average"""', 'color': '"""green"""', 'linewidth': '(1.5)'}), "(human_data[0], human_data[1], label='Human average', color='green',\n linewidth=1.5)\n", (6451, 6538), True, 'import matplotlib.pylab as plt\n'), ((9769, 9788), 'matplotlib.pylab.ylabel', 'plt.ylabel', 
(['"""y (m)"""'], {}), "('y (m)')\n", (9779, 9788), True, 'import matplotlib.pylab as plt\n'), ((9790, 9809), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (9800, 9809), True, 'import matplotlib.pylab as plt\n'), ((533, 549), 'numpy.loadtxt', 'np.loadtxt', (['path'], {}), '(path)\n', (543, 549), True, 'import numpy as np\n'), ((981, 1010), 'math.atan2', 'atan2', (['delta_y[i]', 'delta_x[i]'], {}), '(delta_y[i], delta_x[i])\n', (986, 1010), False, 'from math import pi, floor, atan2, atan\n'), ((1531, 1597), 'numpy.sqrt', 'np.sqrt', (['((x_sim[i] - x_real[i]) ** 2 + (y_sim[i] - y_real[i]) ** 2)'], {}), '((x_sim[i] - x_real[i]) ** 2 + (y_sim[i] - y_real[i]) ** 2)\n', (1538, 1597), True, 'import numpy as np\n'), ((6536, 6557), 'numpy.sum', 'np.sum', (['human_data[5]'], {}), '(human_data[5])\n', (6542, 6557), True, 'import numpy as np\n'), ((8618, 8639), 'numpy.sum', 'np.sum', (['human_data[5]'], {}), '(human_data[5])\n', (8624, 8639), True, 'import numpy as np\n'), ((6963, 6991), 'numpy.cos', 'np.cos', (['theta_global_ddp[-1]'], {}), '(theta_global_ddp[-1])\n', (6969, 6991), True, 'import numpy as np\n'), ((7003, 7031), 'numpy.sin', 'np.sin', (['theta_global_ddp[-1]'], {}), '(theta_global_ddp[-1])\n', (7009, 7031), True, 'import numpy as np\n'), ((3551, 3584), 'math.floor', 'floor', (['(start_and_end[i][0] * 1000)'], {}), '(start_and_end[i][0] * 1000)\n', (3556, 3584), False, 'from math import pi, floor, atan2, atan\n'), ((3588, 3621), 'math.floor', 'floor', (['(start_and_end[i][1] * 1000)'], {}), '(start_and_end[i][1] * 1000)\n', (3593, 3621), False, 'from math import pi, floor, atan2, atan\n'), ((4366, 4399), 'math.floor', 'floor', (['(start_and_end[i][0] * 1000)'], {}), '(start_and_end[i][0] * 1000)\n', (4371, 4399), False, 'from math import pi, floor, atan2, atan\n'), ((4403, 4436), 'math.floor', 'floor', (['(start_and_end[i][1] * 1000)'], {}), '(start_and_end[i][1] * 1000)\n', (4408, 4436), False, 'from math import pi, floor, atan2, atan\n'), ((662, 672), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (669, 672), True, 'import numpy as np\n'), ((683, 693), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (690, 693), True, 'import numpy as np\n'), ((1265, 1280), 'numpy.diff', 'np.diff', (['x_real'], {}), '(x_real)\n', (1272, 1280), True, 'import numpy as np\n'), ((1291, 1306), 'numpy.diff', 'np.diff', (['y_real'], {}), '(y_real)\n', (1298, 1306), True, 'import numpy as np\n'), ((9307, 9338), 'math.floor', 'floor', (['(angular_dist_ddp * 10000)'], {}), '(angular_dist_ddp * 10000)\n', (9312, 9338), False, 'from math import pi, floor, atan2, atan\n'), ((6688, 6712), 'numpy.cos', 'np.cos', (['human_data[5][i]'], {}), '(human_data[5][i])\n', (6694, 6712), True, 'import numpy as np\n'), ((6724, 6748), 'numpy.sin', 'np.sin', (['human_data[5][i]'], {}), '(human_data[5][i])\n', (6730, 6748), True, 'import numpy as np\n'), ((6824, 6851), 'numpy.cos', 'np.cos', (['theta_global_ddp[i]'], {}), '(theta_global_ddp[i])\n', (6830, 6851), True, 'import numpy as np\n'), ((6863, 6890), 'numpy.sin', 'np.sin', (['theta_global_ddp[i]'], {}), '(theta_global_ddp[i])\n', (6869, 6890), True, 'import numpy as np\n'), ((9258, 9281), 'math.floor', 'floor', (['(dist_ddp * 10000)'], {}), '(dist_ddp * 10000)\n', (9263, 9281), False, 'from math import pi, floor, atan2, atan\n')]
|
import os
import struct
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ply import lex, yacc
class EDR:
"""Reader for experimental data record (EDR) files from the PDS.
This object will ingest and store data from the EDR files, to be
    processed later. It only reads the data into a convenient structure
    and de-compresses the science data; no further processing occurs.
"""
def __init__(self, lbl=None):
"""Create an EDR object.
Create an EDR object, read in an EDR file if given.
Parameters
----------
lbl: str, optional
Path to an EDR label file which must be in the same directory as
the corresponding science and geometry files
Returns
-------
EDR
Notes
-----
"""
if lbl is None:
return
self.load(lbl)
def load(self, lbl):
"""Load a set of EDR files.
Read in the label, science, and geometry EDR files for a given
observation.
Parameters
----------
lbl: str
Path to an EDR label file which must be in the same directory as
the corresponding science and geometry files
Returns
-------
Notes
-----
"""
# Read label file
self.lbld = self.parseLBL(lbl)
# Science and aux file names
sci = lbl.replace(".lbl", "_f.dat")
geo = lbl.replace(".lbl", "_g.dat")
# Read science file
self.anc, self.ost, self.data = self.parseSci(sci, self.lbld)
# Read geometry file
self.geo = self.parseGeo(geo, self.lbld)
def parseGeo(self, file, lbld):
"""Read in a geometry data file.
Parameters
----------
file: str
Path to an geometry data file
lbld: dict
Dictionary containing data parsed from corresponding label
file, created by the parseLBL method
Returns
-------
Notes
-----
"""
# Set up geometry data frame
rec_t = np.dtype(
[
("SCET_FRAME_WHOLE", ">u4"),
("SCET_FRAME_FRAC", ">u2"),
("GEOMETRY_EPHEMERIS_TIME", ">f8"),
("GEOMETRY_EPOCH", "V23"),
("MARS_SOLAR_LONGITUDE", ">f8"),
("MARS_SUN_DISTANCE", ">f8"),
("ORBIT_NUMBER", ">u4"),
("TARGET_NAME", "V6"),
("TARGET_SC_POSITION_VECTOR", ">f8", 3),
("SPACECRAFT_ALTITUDE", ">f8"),
("SUB_SC_LONGITUDE", ">f8"),
("SUB_SC_LATITUDE", ">f8"),
("TARGET_SC_VELOCITY_VECTOR", ">f8", 3),
("TARGET_SC_RADIAL_VELOCITY", ">f8"),
("TARGET_SC_TANG_VELOCITY", ">f8"),
("LOCAL_TRUE_SOLAR_TIME", ">f8"),
("SOLAR_ZENITH_ANGLE", ">f8"),
("DIPOLE_UNIT_VECTOR", ">f8", 3),
("MONOPOLE_UNIT_VECTOR", ">f8", 3),
]
)
geoData = np.fromfile(file, dtype=rec_t)
return geoData
def decompress(self, trace, exp):
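        # Decode 8-bit compressed samples: bit 7 holds the sign, the low 7 bits
        # form the fractional part of a mantissa in [1, 2), and the separately
        # stored exponent is biased by 127.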
sign = (-1) ** (trace >> 7) # Sign bit
mantissa = 1+((trace & 0x7F)/(2.0**7))
#mantissa = trace & 0x7F
trace = sign * mantissa * (2 ** (exp - 127))
return trace
def deagc(self, data, agc):
# Take in an array of marsis data
# and a vector of AGC settings,
# then correct for agc
agc = agc & 0x07 # Only last three bits matter
agc = agc*4 + 2 # Gain in dB, per Orosei
data = data * 10**(agc/20)[np.newaxis, :]
return data
def parseSci(self, file, lbld):
# Set up ancillary data dataframe
rec_t = np.dtype(
[
("SCET_STAR_WHOLE", ">u4"),
("SCET_STAR_FRAC", ">u2"),
("OST_LINE_NUMBER", ">u2"),
("OST_LINE", "V12"),
("FRAME_NUMBER", ">u2"),
("ANCILLARY_DATA_HEADER", "V6"),
("FIRST_PRI_OF_FRAME", ">u4"),
("SCET_FRAME_WHOLE", ">u4"),
("SCET_FRAME_FRAC", ">u2"),
("SCET_PERICENTER_WHOLE", ">u4"),
("SCET_PERICENTER_FRAC", ">u2"),
("SCET_PAR_WHOLE", ">u4"),
("SCET_PAR_FRAC", ">u2"),
("H_SCET_PAR", ">f4"),
("VT_SCET_PAR", ">f4"),
("VR_SCET_PAR", ">f4"),
("N_0", ">u4"),
("DELTA_S_MIN", ">f4"),
("NB_MIN", ">u2"),
("M_OCOG", ">f4", 2),
("INDEX_OCOG", ">u2", 2),
("TRK_THRESHOLD", ">f4", 2),
("INI_IND_TRK_THRESHOLD", ">u2", 2),
("LAST_IND_TRK_THRESHOLD", ">u2", 2),
("INI_IND_FSRM", ">u2", 2),
("LAST_IND_FSRM", ">u2", 2),
("SPARE_4", ">u4", 3),
("DELTA_S_SCET_PAR", ">f4"),
("NB_SCET_PAR", ">u2"),
("NA_SCET_PAR", ">u2", 2),
("A2_INI_CM", ">f4", 2),
("A2_OPT", ">f4", 2),
("REF_CA_OPT", ">f4", 2),
("DELTA_T", ">u2", 2),
("SF", ">f4", 2),
("I_C", ">u2", 2),
("AGC_SA_FOR_NEXT_FRAME", ">f4", 2),
("AGC_SA_LEVELS_CURRENT_FRAME", ">u1", 2),
("RX_TRIG_SA_FOR_NEXT_FRAME", ">u2", 2),
("RX_TRIG_SA_PROGR", ">u2", 2),
("INI_IND_OCOG", ">u2"),
("LAST_IND_OCOG", ">u2"),
("OCOG", ">f4", 2),
("A", ">f4", 2),
("C_LOL", ">i2", 2),
("SPARE_5", ">u2", 3),
("MAX_RE_EXP_MINUS1_F1_DIP", ">u1"),
("MAX_IM_EXP_MINUS1_F1_DIP", ">u1"),
("MAX_RE_EXP_ZERO_F1_DIP", ">u1"),
("MAX_IM_EXP_ZERO_F1_DIP", ">u1"),
("MAX_RE_EXP_PLUS1_F1_DIP", ">u1"),
("MAX_IM_EXP_PLUS1_F1_DIP", ">u1"),
("MAX_RE_EXP_MINUS1_F2_DIP", ">u1"),
("MAX_IM_EXP_MINUS1_F2_DIP", ">u1"),
("MAX_RE_EXP_ZERO_F2_DIP", ">u1"),
("MAX_IM_EXP_ZERO_F2_DIP", ">u1"),
("MAX_RE_EXP_PLUS1_F2_DIP", ">u1"),
("MAX_IM_EXP_PLUS1_F2_DIP", ">u1"),
("SPARE_6", ">u1", 8),
("AGC_PIS_PT_VALUE", ">f4", 2),
("AGC_PIS_LEVELS", ">u1", 2),
("K_PIM", ">u1"),
("PIS_MAX_DATA_EXP", ">u1", 2),
("PROCESSING_PRF", ">f4"),
("SPARE_7", ">u1"),
("REAL_ECHO_MINUS1_F1_DIP", ">u1", 512),
("IMAG_ECHO_MINUS1_F1_DIP", ">u1", 512),
("REAL_ECHO_ZERO_F1_DIP", ">u1", 512),
("IMAG_ECHO_ZERO_F1_DIP", ">u1", 512),
("REAL_ECHO_PLUS1_F1_DIP", ">u1", 512),
("IMAG_ECHO_PLUS1_F1_DIP", ">u1", 512),
("REAL_ECHO_MINUS1_F2_DIP", ">u1", 512),
("IMAG_ECHO_MINUS1_F2_DIP", ">u1", 512),
("REAL_ECHO_ZERO_F2_DIP", ">u1", 512),
("IMAG_ECHO_ZERO_F2_DIP", ">u1", 512),
("REAL_ECHO_PLUS1_F2_DIP", ">u1", 512),
("IMAG_ECHO_PLUS1_F2_DIP", ">u1", 512),
("PIS_F1", ">i2", 128),
("PIS_F2", ">i2", 128),
]
)
telTab = np.fromfile(file, dtype=rec_t)
# Decode OST line bit fields - this is incomplete
df = pd.DataFrame()
ost = telTab["OST_LINE"]
ost = np.array(ost.tolist()) # weird but it works to get from void to bytes_
df["SPARE_0"] = np.vectorize(lambda s: s[0])(ost)
df["MODE_DURATION"] = np.vectorize(
lambda s: np.frombuffer(s[0:4], dtype=">u4") & 0x00FFFFFF
)(ost)
df["SPARE_1"] = np.vectorize(
lambda s: np.frombuffer(s[4:5], dtype=">u1") & 0xC0
)(ost)
df["MODE_SELECTION"] = np.vectorize(
lambda s: np.frombuffer(s[4:5], dtype=">u1") >> 2 & 0x0F
)(ost)
df["DCG_CONFIGURATION_LO"] = np.vectorize(
lambda s: np.frombuffer(s[4:5], dtype=">u1") & 0x03
)(ost)
df["DCG_CONFIGURATION_HI"] = np.vectorize(
lambda s: np.frombuffer(s[5:6], dtype=">u1") >> 6
)(ost)
# Decompress data and make radargrams
moded = {
"SS3_TRK_CMP": [
"MINUS1_F1",
"ZERO_F1",
"PLUS1_F1",
"MINUS1_F2",
"ZERO_F2",
"PLUS1_F2",
]
}
mode = lbld["INSTRUMENT_MODE_ID"].replace('"', "")
if mode not in moded.keys():
print("Unhandled mode, exiting")
print(mode)
sys.exit()
datad = {}
for rg in moded[mode]:
block = np.zeros((512, len(telTab)), dtype=np.complex64)
for i in range(len(telTab)):
expIM = telTab["MAX_IM_EXP_" + rg + "_DIP"][i]
expRE = telTab["MAX_RE_EXP_" + rg + "_DIP"][i]
trIM = telTab["IMAG_ECHO_" + rg + "_DIP"][i]
trRE = telTab["REAL_ECHO_" + rg + "_DIP"][i]
trRE = self.decompress(trRE, expRE)
trIM = self.decompress(trIM, expIM)
trace = trRE + 1j * trIM
band = int(rg.split("_")[1][1])
block[:, i] = trace
if("F1" in rg):
block = self.deagc(block, telTab["AGC_SA_LEVELS_CURRENT_FRAME"][:,0])
elif("F2" in rg):
block = self.deagc(block, telTab["AGC_SA_LEVELS_CURRENT_FRAME"][:,1])
datad[rg] = block
return telTab, df, datad
def buildDict(self, pdata, i):
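        # Recursively convert the flat list of (keyword, value) pairs produced
        # by the label parser into a nested dictionary; repeated OBJECT names
        # get a numeric suffix so they stay unique keys.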
dd = {}
while i < len(pdata):
key, val = pdata[i]
if key == "OBJECT":
c = 0
name = val + str(c)
while name in dd.keys():
c += 1
name = val + str(c)
i, dd[name] = self.buildDict(pdata, i + 1)
continue
if key == "END_OBJECT":
return i + 1, dd
dd[key] = val
i += 1
return dd
def parseLBL(self, lbl):
# Parse the label file with lex and yacc
# Heavily based on https://github.com/mkelley/pds3
# lexer def ###
tokens = [
"DSID",
"WORD",
"STRING",
"COMMENT",
"POINTER",
"DATE",
"INT",
"REAL",
"UNIT",
"END",
]
literals = ["(", ")", ",", "=", "{", "}"]
def t_DSID(t):
r"MEX-M-MARSIS-2-EDR(-EXT[0-9])?-V[0-9].0" # Dataset ID
return t
def t_WORD(t):
r"[A-Z][A-Z0-9:_]+"
if t.value == "END":
t.type = "END"
return t
t_STRING = r'"[^"]+"'
def t_COMMENT(t):
r"/\*.+\*/"
pass
t_POINTER = r"\^[A-Z0-9_]+"
t_DATE = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(.[0-9]{3})?"
t_INT = r"[+-]?[0-9]+"
t_REAL = r"[+-]?[0-9]+\.[0-9]+([Ee][+-]?[0-9]+)?"
t_UNIT = r"<[\w*^\-/]+>"
t_ignore = " \t\r\n"
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
lexer = lex.lex()
# ## parser def ## #
def p_label(p):
""" label : record
| label record
| label END"""
if len(p) == 2:
# record
p[0] = [p[1]]
elif p[2] == "END":
# label END
p[0] = p[1]
else:
# label record
p[0] = p[1] + [p[2]]
def p_record(p):
"""record : WORD '=' value
| POINTER '=' INT
| POINTER '=' STRING
| POINTER '=' '(' STRING ',' INT ')'"""
p[0] = (p[1], p[3])
def p_value(p):
"""value : STRING
| DATE
| WORD
| DSID
| number
| number UNIT
| sequence"""
# Just chuck the units for now
p[0] = p[1]
def p_number(p):
"""number : INT
| REAL"""
p[0] = p[1]
def p_sequence(p):
"""sequence : '(' value ')'
| '(' sequence_values ')'
| '{' value '}'
| '{' sequence_values '}'"""
p[0] = p[2]
def p_sequence_values(p):
"""sequence_values : value ','
| sequence_values value ','
| sequence_values value"""
if p[2] == ",":
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
def p_error(p):
if p:
print("Syntax error at '%s'" % p.value)
else:
print("Syntax error at EOF")
parser = yacc.yacc()
# ## parse the label ## #
fd = open(lbl, "r")
data = fd.read()
fd.close()
result = parser.parse(data, lexer=lexer, debug=False)
return self.buildDict(result, 0)
|
[
"ply.yacc.yacc",
"numpy.fromfile",
"ply.lex.lex",
"sys.exit",
"pandas.DataFrame",
"numpy.frombuffer",
"numpy.dtype",
"numpy.vectorize"
] |
[((2156, 2818), 'numpy.dtype', 'np.dtype', (["[('SCET_FRAME_WHOLE', '>u4'), ('SCET_FRAME_FRAC', '>u2'), (\n 'GEOMETRY_EPHEMERIS_TIME', '>f8'), ('GEOMETRY_EPOCH', 'V23'), (\n 'MARS_SOLAR_LONGITUDE', '>f8'), ('MARS_SUN_DISTANCE', '>f8'), (\n 'ORBIT_NUMBER', '>u4'), ('TARGET_NAME', 'V6'), (\n 'TARGET_SC_POSITION_VECTOR', '>f8', 3), ('SPACECRAFT_ALTITUDE', '>f8'),\n ('SUB_SC_LONGITUDE', '>f8'), ('SUB_SC_LATITUDE', '>f8'), (\n 'TARGET_SC_VELOCITY_VECTOR', '>f8', 3), ('TARGET_SC_RADIAL_VELOCITY',\n '>f8'), ('TARGET_SC_TANG_VELOCITY', '>f8'), ('LOCAL_TRUE_SOLAR_TIME',\n '>f8'), ('SOLAR_ZENITH_ANGLE', '>f8'), ('DIPOLE_UNIT_VECTOR', '>f8', 3),\n ('MONOPOLE_UNIT_VECTOR', '>f8', 3)]"], {}), "([('SCET_FRAME_WHOLE', '>u4'), ('SCET_FRAME_FRAC', '>u2'), (\n 'GEOMETRY_EPHEMERIS_TIME', '>f8'), ('GEOMETRY_EPOCH', 'V23'), (\n 'MARS_SOLAR_LONGITUDE', '>f8'), ('MARS_SUN_DISTANCE', '>f8'), (\n 'ORBIT_NUMBER', '>u4'), ('TARGET_NAME', 'V6'), (\n 'TARGET_SC_POSITION_VECTOR', '>f8', 3), ('SPACECRAFT_ALTITUDE', '>f8'),\n ('SUB_SC_LONGITUDE', '>f8'), ('SUB_SC_LATITUDE', '>f8'), (\n 'TARGET_SC_VELOCITY_VECTOR', '>f8', 3), ('TARGET_SC_RADIAL_VELOCITY',\n '>f8'), ('TARGET_SC_TANG_VELOCITY', '>f8'), ('LOCAL_TRUE_SOLAR_TIME',\n '>f8'), ('SOLAR_ZENITH_ANGLE', '>f8'), ('DIPOLE_UNIT_VECTOR', '>f8', 3),\n ('MONOPOLE_UNIT_VECTOR', '>f8', 3)])\n", (2164, 2818), True, 'import numpy as np\n'), ((3138, 3168), 'numpy.fromfile', 'np.fromfile', (['file'], {'dtype': 'rec_t'}), '(file, dtype=rec_t)\n', (3149, 3168), True, 'import numpy as np\n'), ((3854, 6393), 'numpy.dtype', 'np.dtype', (["[('SCET_STAR_WHOLE', '>u4'), ('SCET_STAR_FRAC', '>u2'), ('OST_LINE_NUMBER',\n '>u2'), ('OST_LINE', 'V12'), ('FRAME_NUMBER', '>u2'), (\n 'ANCILLARY_DATA_HEADER', 'V6'), ('FIRST_PRI_OF_FRAME', '>u4'), (\n 'SCET_FRAME_WHOLE', '>u4'), ('SCET_FRAME_FRAC', '>u2'), (\n 'SCET_PERICENTER_WHOLE', '>u4'), ('SCET_PERICENTER_FRAC', '>u2'), (\n 'SCET_PAR_WHOLE', '>u4'), ('SCET_PAR_FRAC', '>u2'), ('H_SCET_PAR',\n '>f4'), ('VT_SCET_PAR', '>f4'), ('VR_SCET_PAR', '>f4'), ('N_0', '>u4'),\n ('DELTA_S_MIN', '>f4'), ('NB_MIN', '>u2'), ('M_OCOG', '>f4', 2), (\n 'INDEX_OCOG', '>u2', 2), ('TRK_THRESHOLD', '>f4', 2), (\n 'INI_IND_TRK_THRESHOLD', '>u2', 2), ('LAST_IND_TRK_THRESHOLD', '>u2', 2\n ), ('INI_IND_FSRM', '>u2', 2), ('LAST_IND_FSRM', '>u2', 2), ('SPARE_4',\n '>u4', 3), ('DELTA_S_SCET_PAR', '>f4'), ('NB_SCET_PAR', '>u2'), (\n 'NA_SCET_PAR', '>u2', 2), ('A2_INI_CM', '>f4', 2), ('A2_OPT', '>f4', 2),\n ('REF_CA_OPT', '>f4', 2), ('DELTA_T', '>u2', 2), ('SF', '>f4', 2), (\n 'I_C', '>u2', 2), ('AGC_SA_FOR_NEXT_FRAME', '>f4', 2), (\n 'AGC_SA_LEVELS_CURRENT_FRAME', '>u1', 2), ('RX_TRIG_SA_FOR_NEXT_FRAME',\n '>u2', 2), ('RX_TRIG_SA_PROGR', '>u2', 2), ('INI_IND_OCOG', '>u2'), (\n 'LAST_IND_OCOG', '>u2'), ('OCOG', '>f4', 2), ('A', '>f4', 2), ('C_LOL',\n '>i2', 2), ('SPARE_5', '>u2', 3), ('MAX_RE_EXP_MINUS1_F1_DIP', '>u1'),\n ('MAX_IM_EXP_MINUS1_F1_DIP', '>u1'), ('MAX_RE_EXP_ZERO_F1_DIP', '>u1'),\n ('MAX_IM_EXP_ZERO_F1_DIP', '>u1'), ('MAX_RE_EXP_PLUS1_F1_DIP', '>u1'),\n ('MAX_IM_EXP_PLUS1_F1_DIP', '>u1'), ('MAX_RE_EXP_MINUS1_F2_DIP', '>u1'),\n ('MAX_IM_EXP_MINUS1_F2_DIP', '>u1'), ('MAX_RE_EXP_ZERO_F2_DIP', '>u1'),\n ('MAX_IM_EXP_ZERO_F2_DIP', '>u1'), ('MAX_RE_EXP_PLUS1_F2_DIP', '>u1'),\n ('MAX_IM_EXP_PLUS1_F2_DIP', '>u1'), ('SPARE_6', '>u1', 8), (\n 'AGC_PIS_PT_VALUE', '>f4', 2), ('AGC_PIS_LEVELS', '>u1', 2), ('K_PIM',\n '>u1'), ('PIS_MAX_DATA_EXP', '>u1', 2), ('PROCESSING_PRF', '>f4'), (\n 'SPARE_7', '>u1'), ('REAL_ECHO_MINUS1_F1_DIP', '>u1', 512), (\n 'IMAG_ECHO_MINUS1_F1_DIP', 
'>u1', 512), ('REAL_ECHO_ZERO_F1_DIP', '>u1',\n 512), ('IMAG_ECHO_ZERO_F1_DIP', '>u1', 512), ('REAL_ECHO_PLUS1_F1_DIP',\n '>u1', 512), ('IMAG_ECHO_PLUS1_F1_DIP', '>u1', 512), (\n 'REAL_ECHO_MINUS1_F2_DIP', '>u1', 512), ('IMAG_ECHO_MINUS1_F2_DIP',\n '>u1', 512), ('REAL_ECHO_ZERO_F2_DIP', '>u1', 512), (\n 'IMAG_ECHO_ZERO_F2_DIP', '>u1', 512), ('REAL_ECHO_PLUS1_F2_DIP', '>u1',\n 512), ('IMAG_ECHO_PLUS1_F2_DIP', '>u1', 512), ('PIS_F1', '>i2', 128), (\n 'PIS_F2', '>i2', 128)]"], {}), "([('SCET_STAR_WHOLE', '>u4'), ('SCET_STAR_FRAC', '>u2'), (\n 'OST_LINE_NUMBER', '>u2'), ('OST_LINE', 'V12'), ('FRAME_NUMBER', '>u2'),\n ('ANCILLARY_DATA_HEADER', 'V6'), ('FIRST_PRI_OF_FRAME', '>u4'), (\n 'SCET_FRAME_WHOLE', '>u4'), ('SCET_FRAME_FRAC', '>u2'), (\n 'SCET_PERICENTER_WHOLE', '>u4'), ('SCET_PERICENTER_FRAC', '>u2'), (\n 'SCET_PAR_WHOLE', '>u4'), ('SCET_PAR_FRAC', '>u2'), ('H_SCET_PAR',\n '>f4'), ('VT_SCET_PAR', '>f4'), ('VR_SCET_PAR', '>f4'), ('N_0', '>u4'),\n ('DELTA_S_MIN', '>f4'), ('NB_MIN', '>u2'), ('M_OCOG', '>f4', 2), (\n 'INDEX_OCOG', '>u2', 2), ('TRK_THRESHOLD', '>f4', 2), (\n 'INI_IND_TRK_THRESHOLD', '>u2', 2), ('LAST_IND_TRK_THRESHOLD', '>u2', 2\n ), ('INI_IND_FSRM', '>u2', 2), ('LAST_IND_FSRM', '>u2', 2), ('SPARE_4',\n '>u4', 3), ('DELTA_S_SCET_PAR', '>f4'), ('NB_SCET_PAR', '>u2'), (\n 'NA_SCET_PAR', '>u2', 2), ('A2_INI_CM', '>f4', 2), ('A2_OPT', '>f4', 2),\n ('REF_CA_OPT', '>f4', 2), ('DELTA_T', '>u2', 2), ('SF', '>f4', 2), (\n 'I_C', '>u2', 2), ('AGC_SA_FOR_NEXT_FRAME', '>f4', 2), (\n 'AGC_SA_LEVELS_CURRENT_FRAME', '>u1', 2), ('RX_TRIG_SA_FOR_NEXT_FRAME',\n '>u2', 2), ('RX_TRIG_SA_PROGR', '>u2', 2), ('INI_IND_OCOG', '>u2'), (\n 'LAST_IND_OCOG', '>u2'), ('OCOG', '>f4', 2), ('A', '>f4', 2), ('C_LOL',\n '>i2', 2), ('SPARE_5', '>u2', 3), ('MAX_RE_EXP_MINUS1_F1_DIP', '>u1'),\n ('MAX_IM_EXP_MINUS1_F1_DIP', '>u1'), ('MAX_RE_EXP_ZERO_F1_DIP', '>u1'),\n ('MAX_IM_EXP_ZERO_F1_DIP', '>u1'), ('MAX_RE_EXP_PLUS1_F1_DIP', '>u1'),\n ('MAX_IM_EXP_PLUS1_F1_DIP', '>u1'), ('MAX_RE_EXP_MINUS1_F2_DIP', '>u1'),\n ('MAX_IM_EXP_MINUS1_F2_DIP', '>u1'), ('MAX_RE_EXP_ZERO_F2_DIP', '>u1'),\n ('MAX_IM_EXP_ZERO_F2_DIP', '>u1'), ('MAX_RE_EXP_PLUS1_F2_DIP', '>u1'),\n ('MAX_IM_EXP_PLUS1_F2_DIP', '>u1'), ('SPARE_6', '>u1', 8), (\n 'AGC_PIS_PT_VALUE', '>f4', 2), ('AGC_PIS_LEVELS', '>u1', 2), ('K_PIM',\n '>u1'), ('PIS_MAX_DATA_EXP', '>u1', 2), ('PROCESSING_PRF', '>f4'), (\n 'SPARE_7', '>u1'), ('REAL_ECHO_MINUS1_F1_DIP', '>u1', 512), (\n 'IMAG_ECHO_MINUS1_F1_DIP', '>u1', 512), ('REAL_ECHO_ZERO_F1_DIP', '>u1',\n 512), ('IMAG_ECHO_ZERO_F1_DIP', '>u1', 512), ('REAL_ECHO_PLUS1_F1_DIP',\n '>u1', 512), ('IMAG_ECHO_PLUS1_F1_DIP', '>u1', 512), (\n 'REAL_ECHO_MINUS1_F2_DIP', '>u1', 512), ('IMAG_ECHO_MINUS1_F2_DIP',\n '>u1', 512), ('REAL_ECHO_ZERO_F2_DIP', '>u1', 512), (\n 'IMAG_ECHO_ZERO_F2_DIP', '>u1', 512), ('REAL_ECHO_PLUS1_F2_DIP', '>u1',\n 512), ('IMAG_ECHO_PLUS1_F2_DIP', '>u1', 512), ('PIS_F1', '>i2', 128), (\n 'PIS_F2', '>i2', 128)])\n", (3862, 6393), True, 'import numpy as np\n'), ((7556, 7586), 'numpy.fromfile', 'np.fromfile', (['file'], {'dtype': 'rec_t'}), '(file, dtype=rec_t)\n', (7567, 7586), True, 'import numpy as np\n'), ((7659, 7673), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7671, 7673), True, 'import pandas as pd\n'), ((11637, 11646), 'ply.lex.lex', 'lex.lex', ([], {}), '()\n', (11644, 11646), False, 'from ply import lex, yacc\n'), ((13445, 13456), 'ply.yacc.yacc', 'yacc.yacc', ([], {}), '()\n', (13454, 13456), False, 'from ply import lex, yacc\n'), ((7817, 7845), 'numpy.vectorize', 'np.vectorize', 
(['(lambda s: s[0])'], {}), '(lambda s: s[0])\n', (7829, 7845), True, 'import numpy as np\n'), ((8949, 8959), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8957, 8959), False, 'import sys\n'), ((7917, 7951), 'numpy.frombuffer', 'np.frombuffer', (['s[0:4]'], {'dtype': '""">u4"""'}), "(s[0:4], dtype='>u4')\n", (7930, 7951), True, 'import numpy as np\n'), ((8040, 8074), 'numpy.frombuffer', 'np.frombuffer', (['s[4:5]'], {'dtype': '""">u1"""'}), "(s[4:5], dtype='>u1')\n", (8053, 8074), True, 'import numpy as np\n'), ((8299, 8333), 'numpy.frombuffer', 'np.frombuffer', (['s[4:5]'], {'dtype': '""">u1"""'}), "(s[4:5], dtype='>u1')\n", (8312, 8333), True, 'import numpy as np\n'), ((8429, 8463), 'numpy.frombuffer', 'np.frombuffer', (['s[5:6]'], {'dtype': '""">u1"""'}), "(s[5:6], dtype='>u1')\n", (8442, 8463), True, 'import numpy as np\n'), ((8164, 8198), 'numpy.frombuffer', 'np.frombuffer', (['s[4:5]'], {'dtype': '""">u1"""'}), "(s[4:5], dtype='>u1')\n", (8177, 8198), True, 'import numpy as np\n')]
|
from typing import Tuple
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def label_encoder(c: str) -> np.ndarray:
lc = LabelEncoder()
return lc.fit_transform(c)
def load_dataset() -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
path = "../../input/tabular-playground-series-apr-2021/"
train = pd.read_csv(path + "train.csv")
test = pd.read_csv(path + "test.csv")
pseudo_label = pd.read_csv("../../res/AutoWoE_submission_combo.csv")
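    # Pseudo-labelling: assign Survived values from an earlier submission file
    # to the test set so train and test can be feature-engineered together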
test["Survived"] = [x for x in pseudo_label.Survived]
    # Calculate SameFirstName
train["FirstName"] = train["Name"].apply(lambda x: x.split(", ")[0])
train["n"] = 1
gb = train.groupby("FirstName")
df_names = gb["n"].sum()
train["SameFirstName"] = train["FirstName"].apply(lambda x: df_names[x])
test["FirstName"] = test["Name"].apply(lambda x: x.split(", ")[0])
test["n"] = 1
gb = test.groupby("FirstName")
df_names = gb["n"].sum()
test["SameFirstName"] = test["FirstName"].apply(lambda x: df_names[x])
# To preprocess
data = pd.concat([train, test], axis=0)
# Before fill missing
data["AnyMissing"] = np.where(data.isnull().any(axis=1) == 1, 1, 0)
# Family
data["FamilySize"] = data["SibSp"] + data["Parch"] + 1
data["IsAlone"] = np.where(data["FamilySize"] <= 1, 1, 0)
# Cabin
data["Has_Cabin"] = data["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
data["Cabin"] = data["Cabin"].fillna("X").map(lambda x: x[0].strip())
cabin_map = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "T": 1, "X": 8}
data["Cabin"] = data["Cabin"].str[0].fillna("X").replace(cabin_map)
# Embarked
# map_Embarked = train.Embarked.mode().item()
data["Embarked"] = data["Embarked"].fillna("No")
conditions = [
(data["Embarked"] == "S"),
(data["Embarked"] == "Q"),
(data["Embarked"] == "C"),
(data["Embarked"] == "No"),
]
choices = [0, 1, 2, -1]
data["Embarked"] = np.select(conditions, choices)
data["Embarked"] = data["Embarked"].astype(int)
# Name
data["SecondName"] = data.Name.str.split(", ", 1, expand=True)[1] # to try
data["IsFirstNameDublicated"] = np.where(data.FirstName.duplicated(), 1, 0)
# Fare
data["Fare"] = data["Fare"].fillna(train["Fare"].median())
# train['CategoricalFare'] = pd.qcut(train['Fare'], 4)
# [(0.679, 10.04] < (10.04, 24.46] < (24.46, 33.5] < (33.5, 744.66]]
# From original Titanic:
conditions = [
(data["Fare"] <= 7.91),
((data["Fare"] > 7.91) & (data["Fare"] <= 14.454)),
((data["Fare"] > 14.454) & (data["Fare"] <= 31)),
(data["Fare"] > 31),
]
choices = [0, 1, 2, 3]
data["Fare"] = np.select(conditions, choices)
data["Fare"] = data["Fare"].astype(int)
# Fix Ticket
# data['TicketNum'] = data.Ticket.str.extract(r'(\d+)').\
# astype('float64', copy=False) # to_try
data["Ticket"] = (
data.Ticket.str.replace(r"\.", "", regex=True)
.str.replace(r"(\d+)", "", regex=True)
.str.replace(" ", "", regex=True)
.replace(r"^\s*$", "X", regex=True)
.fillna("X")
)
data["Ticket"] = data["Ticket"].astype("category").cat.codes # to_try
# Age
conditions = [
((data.Sex == "female") & (data.Pclass == 1) & (data.Age.isnull())),
((data.Sex == "male") & (data.Pclass == 1) & (data.Age.isnull())),
((data.Sex == "female") & (data.Pclass == 2) & (data.Age.isnull())),
((data.Sex == "male") & (data.Pclass == 2) & (data.Age.isnull())),
((data.Sex == "female") & (data.Pclass == 3) & (data.Age.isnull())),
((data.Sex == "male") & (data.Pclass == 3) & (data.Age.isnull())),
]
choices = (
data[["Age", "Pclass", "Sex"]].dropna().groupby(["Pclass", "Sex"]).mean()["Age"]
)
data["Age"] = np.select(conditions, choices)
conditions = [
(data["Age"].le(16)),
(data["Age"].gt(16) & data["Age"].le(32)),
(data["Age"].gt(32) & data["Age"].le(48)),
(data["Age"].gt(48) & data["Age"].le(64)),
(data["Age"].gt(64)),
]
choices = [0, 1, 2, 3, 4]
data["Age"] = np.select(conditions, choices)
# Sex
data["Sex"] = np.where(data["Sex"] == "male", 1, 0)
# Drop columns
data = data.drop(["Name", "n"], axis=1)
# Splitting into train and test
train = data.iloc[: train.shape[0]]
test = data.iloc[train.shape[0] :].drop(columns=["Survived"])
target = "Survived"
features_selected = [
"Pclass",
"Sex",
"Age",
"Embarked",
"Parch",
"SibSp",
"Fare",
"Cabin",
"Ticket",
"IsAlone",
"SameFirstName",
]
X = data.drop(target, axis=1)
X = X[features_selected]
y = data[target]
test = test[features_selected]
return X, y, test
|
[
"sklearn.preprocessing.LabelEncoder",
"numpy.select",
"pandas.read_csv",
"numpy.where",
"pandas.concat"
] |
[((164, 178), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (176, 178), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((356, 387), 'pandas.read_csv', 'pd.read_csv', (["(path + 'train.csv')"], {}), "(path + 'train.csv')\n", (367, 387), True, 'import pandas as pd\n'), ((399, 429), 'pandas.read_csv', 'pd.read_csv', (["(path + 'test.csv')"], {}), "(path + 'test.csv')\n", (410, 429), True, 'import pandas as pd\n'), ((450, 503), 'pandas.read_csv', 'pd.read_csv', (['"""../../res/AutoWoE_submission_combo.csv"""'], {}), "('../../res/AutoWoE_submission_combo.csv')\n", (461, 503), True, 'import pandas as pd\n'), ((1087, 1119), 'pandas.concat', 'pd.concat', (['[train, test]'], {'axis': '(0)'}), '([train, test], axis=0)\n', (1096, 1119), True, 'import pandas as pd\n'), ((1314, 1353), 'numpy.where', 'np.where', (["(data['FamilySize'] <= 1)", '(1)', '(0)'], {}), "(data['FamilySize'] <= 1, 1, 0)\n", (1322, 1353), True, 'import numpy as np\n'), ((2022, 2052), 'numpy.select', 'np.select', (['conditions', 'choices'], {}), '(conditions, choices)\n', (2031, 2052), True, 'import numpy as np\n'), ((2764, 2794), 'numpy.select', 'np.select', (['conditions', 'choices'], {}), '(conditions, choices)\n', (2773, 2794), True, 'import numpy as np\n'), ((3920, 3950), 'numpy.select', 'np.select', (['conditions', 'choices'], {}), '(conditions, choices)\n', (3929, 3950), True, 'import numpy as np\n'), ((4239, 4269), 'numpy.select', 'np.select', (['conditions', 'choices'], {}), '(conditions, choices)\n', (4248, 4269), True, 'import numpy as np\n'), ((4299, 4336), 'numpy.where', 'np.where', (["(data['Sex'] == 'male')", '(1)', '(0)'], {}), "(data['Sex'] == 'male', 1, 0)\n", (4307, 4336), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.stats import chi2
from tqdm import tqdm as tqdm
from ...common import (
Gaussian,
GaussianDensity,
HypothesisReduction,
normalize_log_weights,
)
from ...configs import SensorModelConfig
from ...measurement_models import MeasurementModel
from ...motion_models import MotionModel
from .base_single_object_tracker import SingleObjectTracker
class GaussSumTracker(SingleObjectTracker):
def __init__(
self,
meas_model: MeasurementModel,
sensor_model: SensorModelConfig,
motion_model: MotionModel,
M,
merging_threshold,
P_G,
w_min,
*args,
**kwargs,
) -> None:
self.meas_model = meas_model
self.sensor_model = sensor_model
self.motion_model = motion_model
self.w_min = w_min
self.P_G = P_G
self.gating_size = chi2.ppf(P_G, df=self.meas_model.d)
self.M = M
self.merging_threshold = merging_threshold
self.hypotheses_weight = None
self.multi_hypotheses_bank = None
        super(GaussSumTracker, self).__init__()
def estimate(self, initial_state: Gaussian, measurements, verbose=False):
"""Tracks a single object using Gauss sum filtering
        For each filter recursion iteration the following steps are performed:
1) for each hypothesis, create missed detection hypothesis
2) for each hypothesis, perform ellipsoidal gating
and only create object detection hypotheses for detections
inside the gate
3) normalise hypotheses weights
4) prune hypotheses with small weights and then re-normalise the weights
        5) hypotheses merging
        6) cap the number of the hypotheses and then re-normalise the weights
        7) extract object state estimate using the most probable
           hypothesis estimation
8) for each hypothesis, perform prediction
"""
prev_state = initial_state
estimations = [None for x in range(len(measurements))]
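        # start from a single hypothesis (the initial state) with unit weight,
        # i.e. log-weight 0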
self.hypotheses_weight = [np.log(1.0)]
self.multi_hypotheses_bank = [initial_state]
for timestep, measurements_in_scene in tqdm(enumerate(measurements)):
estimations[timestep] = self.estimation_step(
predicted_state=prev_state,
current_measurements=np.array(measurements_in_scene),
)
prev_state = GaussianDensity.predict(state=estimations[timestep], motion_model=self.motion_model)
return tuple(estimations)
def estimation_step(self, predicted_state: Gaussian, current_measurements: np.ndarray):
new_hypotheses, new_weights = [], []
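        # Log-weight factors: detection hypotheses are weighted by P_D over the
        # clutter intensity, missed detections by (1 - P_D)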
w_theta_factor = np.log(self.sensor_model.P_D / self.sensor_model.intensity_c)
w_theta_0 = np.log(1 - self.sensor_model.P_D) # misdetection
for _old_idx, (curr_weight, curr_hypothesis) in enumerate(
zip(self.hypotheses_weight, self.multi_hypotheses_bank)
):
# 1) for each hypothesis, create missed detection hypothesis
new_hypotheses.append(curr_hypothesis)
new_weights.append(w_theta_0 + curr_weight)
# 2) for each hypothesis, perform ellipsoidal gating
# and only create object detection hypotheses for detection
# inside the gate
z_ingate, _ = GaussianDensity.ellipsoidal_gating(
curr_hypothesis,
current_measurements,
self.meas_model,
self.gating_size,
)
predicted_likelihood = GaussianDensity.predicted_likelihood(curr_hypothesis, z_ingate, self.meas_model)
# for each measurement create detection hypotheses
            for idx, measurement in enumerate(z_ingate):
                new_hypotheses.append(GaussianDensity.update(curr_hypothesis, measurement, self.meas_model))
new_weights.append(predicted_likelihood[idx] + w_theta_factor)
self.hypotheses_weight.extend(new_weights)
self.multi_hypotheses_bank.extend(new_hypotheses)
assert len(self.hypotheses_weight) == len(self.multi_hypotheses_bank)
# 3.normalise hypotheses weights
self.hypotheses_weight, _ = normalize_log_weights(self.hypotheses_weight)
# 4. Prune hypotheses with small weights and then re-normalise the weights
self.hypotheses_weight, self.multi_hypotheses_bank = HypothesisReduction.prune(
self.hypotheses_weight, self.multi_hypotheses_bank, threshold=self.w_min
)
self.hypotheses_weight, _ = normalize_log_weights(self.hypotheses_weight)
# 5. Hypotheses merging and normalize
self.hypotheses_weight, self.multi_hypotheses_bank = HypothesisReduction.merge(
self.hypotheses_weight,
self.multi_hypotheses_bank,
threshold=self.merging_threshold,
)
self.hypotheses_weight, _ = normalize_log_weights(self.hypotheses_weight)
# 6. Cap the number of the hypotheses and then re-normalise the weights
self.hypotheses_weight, self.multi_hypotheses_bank = HypothesisReduction.cap(
self.hypotheses_weight, self.multi_hypotheses_bank, top_k=self.M
)
self.hypotheses_weight, _ = normalize_log_weights(self.hypotheses_weight)
# 7. Get object state from the most probable hypothesis
if self.multi_hypotheses_bank:
current_step_state = self.multi_hypotheses_bank[np.argmax(self.hypotheses_weight)]
estimation = current_step_state
else:
estimation = predicted_state
# 8. For each hypotheses do prediction
self.updated_states = [
GaussianDensity.predict(hypothesis, self.motion_model) for hypothesis in self.multi_hypotheses_bank
]
self.multi_hypotheses_bank = self.updated_states
return estimation
@property
def method(self):
return "gauss sum filter"
|
[
"numpy.log",
"numpy.array",
"scipy.stats.chi2.ppf",
"numpy.argmax"
] |
[((887, 922), 'scipy.stats.chi2.ppf', 'chi2.ppf', (['P_G'], {'df': 'self.meas_model.d'}), '(P_G, df=self.meas_model.d)\n', (895, 922), False, 'from scipy.stats import chi2\n'), ((2707, 2768), 'numpy.log', 'np.log', (['(self.sensor_model.P_D / self.sensor_model.intensity_c)'], {}), '(self.sensor_model.P_D / self.sensor_model.intensity_c)\n', (2713, 2768), True, 'import numpy as np\n'), ((2789, 2822), 'numpy.log', 'np.log', (['(1 - self.sensor_model.P_D)'], {}), '(1 - self.sensor_model.P_D)\n', (2795, 2822), True, 'import numpy as np\n'), ((2069, 2080), 'numpy.log', 'np.log', (['(1.0)'], {}), '(1.0)\n', (2075, 2080), True, 'import numpy as np\n'), ((5475, 5508), 'numpy.argmax', 'np.argmax', (['self.hypotheses_weight'], {}), '(self.hypotheses_weight)\n', (5484, 5508), True, 'import numpy as np\n'), ((2353, 2384), 'numpy.array', 'np.array', (['measurements_in_scene'], {}), '(measurements_in_scene)\n', (2361, 2384), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
import tensorflow as tf
import lib.weighted_layers_v2 as wl
from lib.weighted_resblock import MixtureWeight
class WeightedConv2DTest(tf.test.TestCase):
"""WeightedConv2D test class."""
def setUp(self):
"""Sets default parameters."""
super(WeightedConv2DTest, self).setUp()
self.kernel_size = 3
self.activation = 'relu'
self.filters = 40
self.input_channel = 20
self.num_templates = 10
self.kernel = np.random.rand(self.num_templates, self.kernel_size,
self.kernel_size, self.input_channel,
self.filters)
self.bias = np.random.rand(self.num_templates, self.filters)
self.kernel_init = tf.constant_initializer(self.kernel)
self.bias_init = tf.constant_initializer(self.bias)
self.padding = 'same'
xi_init = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
self.xi = MixtureWeight(num_templates=self.num_templates,
initializer=xi_init)
def _create_default_w_conv(self):
"""Creates an instance of WeightedConv2D with dedault parameters."""
return wl.WeightedConv2D(
filters=self.filters, activation=self.activation, padding=self.padding,
kernel_size=self.kernel_size, num_templates=self.num_templates,
kernel_initializer=self.kernel_init, bias_initializer=self.bias_init)
def _get_default_inputs(self, in_shape):
"""returns default layer inputs."""
layer_inputs = tf.Variable(np.random.rand(*in_shape), dtype=tf.float32)
return [layer_inputs, self.xi(None)[0]]
def test_output_shape(self):
"""checks if the shape of the output tensor is correct."""
w_conv = self._create_default_w_conv()
input_shape = (32, 16, 16, self.input_channel)
inputs = self._get_default_inputs(input_shape)
output = w_conv(inputs)
expected_shape = (32, 16, 16, self.filters)
self.assertAllEqual(expected_shape, output.shape)
def test_output_values(self):
"""checks if the output tensor is computed correctly."""
w_conv = self._create_default_w_conv()
w_conv.activation = None
input_shape = (32, 16, 16, self.input_channel)
inputs = self._get_default_inputs(input_shape)
w_output = w_conv(inputs)
    # the output of the weighted convolution should be the same as a linear
    # combination of regular convolution outputs (one per template, weighted by
    # xi) when no activation is used.
expected_output = tf.zeros_like(w_output)
conv = tf.keras.layers.Conv2D(filters=self.filters, activation=None,
padding=self.padding,
kernel_size=self.kernel_size)
conv.build(input_shape)
for t in range(self.num_templates):
conv.kernel = self.kernel[t]
conv.bias = self.bias[t]
conv_out = conv(inputs[0])
expected_output += inputs[1][t]*conv_out
self.assertAllClose(expected_output, w_output, rtol=1e-05)
class WeightedDepthwiseConv2DTest(tf.test.TestCase):
"""Weighted depthwise convolution test class."""
def setUp(self):
"""Sets default parameters."""
super(WeightedDepthwiseConv2DTest, self).setUp()
self.kernel_size = 3
self.activation = 'relu'
self.depth_multiplier = 2
self.input_channel = 20
self.num_templates = 10
self.kernel = np.random.rand(self.num_templates, self.kernel_size,
self.kernel_size, self.input_channel,
self.depth_multiplier).astype(np.float32)
self.bias = np.random.rand(
self.num_templates,
self.input_channel * self.depth_multiplier).astype(np.float32)
self.kernel_init = tf.constant_initializer(self.kernel)
self.bias_init = tf.constant_initializer(self.bias)
self.padding = 'same'
self.xi_initializer = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
self.xi = MixtureWeight(num_templates=self.num_templates,
initializer=self.xi_initializer)
def _create_default_depth_conv(self):
"""Creates a WeightedDepthwiseConv2D instance with default parameters."""
return wl.WeightedDepthwiseConv2D(
depth_multiplier=self.depth_multiplier, activation=self.activation,
padding=self.padding, kernel_size=self.kernel_size,
num_templates=self.num_templates, bias_initializer=self.bias_init,
depthwise_initializer=self.kernel_init)
def _get_default_inputs(self, in_shape):
"""returns default layer inputs."""
layer_inputs = tf.Variable(np.random.rand(*in_shape), dtype=tf.float32)
return [layer_inputs, self.xi(None)[0]]
def test_output_shape(self):
"""checks if the shape of the output tensor is correct."""
w_d_conv = self._create_default_depth_conv()
input_shape = (32, 64, 64, self.input_channel)
inputs = self._get_default_inputs(input_shape)
output = w_d_conv(inputs)
expected_shape = (32, 64, 64, self.input_channel*self.depth_multiplier)
self.assertAllEqual(expected_shape, output.shape)
def test_output_value(self):
"""checks if the value of the output tensor is correct."""
w_d_conv = self._create_default_depth_conv()
w_d_conv.activation = None
input_shape = (32, 16, 16, self.input_channel)
inputs = self._get_default_inputs(input_shape)
w_d_output = w_d_conv(inputs)
    # the output of the weighted convolution should be the same as a linear
    # combination of regular convolution outputs (one per template, weighted by
    # xi) when no activation is used.
expected_output = tf.zeros_like(w_d_output)
conv = tf.keras.layers.DepthwiseConv2D(
depth_multiplier=self.depth_multiplier, activation=None,
padding=self.padding, kernel_size=self.kernel_size)
conv.build(input_shape)
for t in range(self.num_templates):
conv.depthwise_kernel = self.kernel[t]
conv.bias = self.bias[t]
conv_out = conv(inputs[0])
expected_output += inputs[1][t]*conv_out
self.assertAllClose(expected_output, w_d_output, rtol=1e-05)
class WeightedBatchNormalizationTest(tf.test.TestCase):
""""WeightedBatchNormalizationSeparate test class."""
def setUp(self):
"""Sets default parameters."""
self.num_templates = 10
self.input_channels = 40
self.gamma_template = np.random.rand(
self.num_templates, self.input_channels).astype(np.float32)
self.beta_template = np.random.rand(
self.num_templates, self.input_channels).astype(np.float32)
self.beta_init = tf.constant_initializer(self.beta_template)
self.gamma_init = tf.constant_initializer(self.gamma_template)
self.xi_initializer = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
self.xi = MixtureWeight(num_templates=self.num_templates,
initializer=self.xi_initializer)
def test_output_shape(self):
"""checks if the output shape is same as input shape."""
input_shape = (256, 16, 16, self.input_channels)
inputs = tf.random.normal(input_shape)
bn = wl.WeightedBatchNormalizationSeparate(
num_templates=self.num_templates, gamma_initializer=self.gamma_init,
beta_initializer=self.beta_init)
outputs = bn([inputs, self.xi(None)[0]], training=True)
self.assertAllEqual(input_shape, outputs.shape)
def test_output_moments(self):
"""checks if the output moments match to the mixture of moments."""
input_shape = (256, 16, 16, self.input_channels)
inputs = tf.random.normal(input_shape, mean=2.5, stddev=8.0)
bn = wl.WeightedBatchNormalizationSeparate(
num_templates=self.num_templates, gamma_initializer=self.gamma_init,
beta_initializer=self.beta_init)
outputs = bn([inputs, self.xi(None)[0]], training=True)
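    # after normalization the per-channel mean should match the mixed beta and
    # the per-channel std the mixed gamma (both mixed with the xi weights)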
reduction_axes = range(len(input_shape) - 1)
mean, var = tf.nn.moments(outputs, reduction_axes)
reshaped_mix_w = tf.reshape(self.xi(None), [self.num_templates, 1])
mix_gamma = tf.reduce_sum(reshaped_mix_w * self.gamma_template, axis=0)
mix_beta = tf.reduce_sum(reshaped_mix_w * self.beta_template, axis=0)
self.assertAllClose(mean, mix_beta, rtol=1e-03)
self.assertAllClose(tf.math.sqrt(var), mix_gamma, rtol=1e-03)
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False)
|
[
"tensorflow.random.normal",
"lib.weighted_layers_v2.WeightedBatchNormalizationSeparate",
"lib.weighted_layers_v2.WeightedDepthwiseConv2D",
"numpy.random.rand",
"tensorflow.keras.layers.Conv2D",
"tensorflow.reduce_sum",
"tensorflow.nn.moments",
"tensorflow.math.sqrt",
"tensorflow.zeros_like",
"lib.weighted_layers_v2.WeightedConv2D",
"tensorflow.constant_initializer",
"unittest.main",
"lib.weighted_resblock.MixtureWeight",
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.random_uniform_initializer"
] |
[((8216, 8272), 'unittest.main', 'unittest.main', ([], {'argv': "['first-arg-is-ignored']", 'exit': '(False)'}), "(argv=['first-arg-is-ignored'], exit=False)\n", (8229, 8272), False, 'import unittest\n'), ((472, 581), 'numpy.random.rand', 'np.random.rand', (['self.num_templates', 'self.kernel_size', 'self.kernel_size', 'self.input_channel', 'self.filters'], {}), '(self.num_templates, self.kernel_size, self.kernel_size, self\n .input_channel, self.filters)\n', (486, 581), True, 'import numpy as np\n'), ((659, 707), 'numpy.random.rand', 'np.random.rand', (['self.num_templates', 'self.filters'], {}), '(self.num_templates, self.filters)\n', (673, 707), True, 'import numpy as np\n'), ((731, 767), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.kernel'], {}), '(self.kernel)\n', (754, 767), True, 'import tensorflow as tf\n'), ((789, 823), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.bias'], {}), '(self.bias)\n', (812, 823), True, 'import tensorflow as tf\n'), ((864, 917), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(0.0)', 'maxval': '(1.0)'}), '(minval=0.0, maxval=1.0)\n', (893, 917), True, 'import tensorflow as tf\n'), ((932, 1000), 'lib.weighted_resblock.MixtureWeight', 'MixtureWeight', ([], {'num_templates': 'self.num_templates', 'initializer': 'xi_init'}), '(num_templates=self.num_templates, initializer=xi_init)\n', (945, 1000), False, 'from lib.weighted_resblock import MixtureWeight\n'), ((1150, 1388), 'lib.weighted_layers_v2.WeightedConv2D', 'wl.WeightedConv2D', ([], {'filters': 'self.filters', 'activation': 'self.activation', 'padding': 'self.padding', 'kernel_size': 'self.kernel_size', 'num_templates': 'self.num_templates', 'kernel_initializer': 'self.kernel_init', 'bias_initializer': 'self.bias_init'}), '(filters=self.filters, activation=self.activation, padding\n =self.padding, kernel_size=self.kernel_size, num_templates=self.\n num_templates, kernel_initializer=self.kernel_init, bias_initializer=\n self.bias_init)\n', (1167, 1388), True, 'import lib.weighted_layers_v2 as wl\n'), ((2478, 2501), 'tensorflow.zeros_like', 'tf.zeros_like', (['w_output'], {}), '(w_output)\n', (2491, 2501), True, 'import tensorflow as tf\n'), ((2513, 2631), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'self.filters', 'activation': 'None', 'padding': 'self.padding', 'kernel_size': 'self.kernel_size'}), '(filters=self.filters, activation=None, padding=self.\n padding, kernel_size=self.kernel_size)\n', (2535, 2631), True, 'import tensorflow as tf\n'), ((3698, 3734), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.kernel'], {}), '(self.kernel)\n', (3721, 3734), True, 'import tensorflow as tf\n'), ((3756, 3790), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.bias'], {}), '(self.bias)\n', (3779, 3790), True, 'import tensorflow as tf\n'), ((3843, 3896), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(0.0)', 'maxval': '(1.0)'}), '(minval=0.0, maxval=1.0)\n', (3872, 3896), True, 'import tensorflow as tf\n'), ((3911, 3996), 'lib.weighted_resblock.MixtureWeight', 'MixtureWeight', ([], {'num_templates': 'self.num_templates', 'initializer': 'self.xi_initializer'}), '(num_templates=self.num_templates, initializer=self.xi_initializer\n )\n', (3924, 3996), False, 'from lib.weighted_resblock import MixtureWeight\n'), ((4150, 4417), 'lib.weighted_layers_v2.WeightedDepthwiseConv2D', 'wl.WeightedDepthwiseConv2D', ([], 
{'depth_multiplier': 'self.depth_multiplier', 'activation': 'self.activation', 'padding': 'self.padding', 'kernel_size': 'self.kernel_size', 'num_templates': 'self.num_templates', 'bias_initializer': 'self.bias_init', 'depthwise_initializer': 'self.kernel_init'}), '(depth_multiplier=self.depth_multiplier,\n activation=self.activation, padding=self.padding, kernel_size=self.\n kernel_size, num_templates=self.num_templates, bias_initializer=self.\n bias_init, depthwise_initializer=self.kernel_init)\n', (4176, 4417), True, 'import lib.weighted_layers_v2 as wl\n'), ((5563, 5588), 'tensorflow.zeros_like', 'tf.zeros_like', (['w_d_output'], {}), '(w_d_output)\n', (5576, 5588), True, 'import tensorflow as tf\n'), ((5600, 5744), 'tensorflow.keras.layers.DepthwiseConv2D', 'tf.keras.layers.DepthwiseConv2D', ([], {'depth_multiplier': 'self.depth_multiplier', 'activation': 'None', 'padding': 'self.padding', 'kernel_size': 'self.kernel_size'}), '(depth_multiplier=self.depth_multiplier,\n activation=None, padding=self.padding, kernel_size=self.kernel_size)\n', (5631, 5744), True, 'import tensorflow as tf\n'), ((6511, 6554), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.beta_template'], {}), '(self.beta_template)\n', (6534, 6554), True, 'import tensorflow as tf\n'), ((6577, 6621), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['self.gamma_template'], {}), '(self.gamma_template)\n', (6600, 6621), True, 'import tensorflow as tf\n'), ((6648, 6701), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(0.0)', 'maxval': '(1.0)'}), '(minval=0.0, maxval=1.0)\n', (6677, 6701), True, 'import tensorflow as tf\n'), ((6716, 6801), 'lib.weighted_resblock.MixtureWeight', 'MixtureWeight', ([], {'num_templates': 'self.num_templates', 'initializer': 'self.xi_initializer'}), '(num_templates=self.num_templates, initializer=self.xi_initializer\n )\n', (6729, 6801), False, 'from lib.weighted_resblock import MixtureWeight\n'), ((6984, 7013), 'tensorflow.random.normal', 'tf.random.normal', (['input_shape'], {}), '(input_shape)\n', (7000, 7013), True, 'import tensorflow as tf\n'), ((7023, 7166), 'lib.weighted_layers_v2.WeightedBatchNormalizationSeparate', 'wl.WeightedBatchNormalizationSeparate', ([], {'num_templates': 'self.num_templates', 'gamma_initializer': 'self.gamma_init', 'beta_initializer': 'self.beta_init'}), '(num_templates=self.num_templates,\n gamma_initializer=self.gamma_init, beta_initializer=self.beta_init)\n', (7060, 7166), True, 'import lib.weighted_layers_v2 as wl\n'), ((7464, 7515), 'tensorflow.random.normal', 'tf.random.normal', (['input_shape'], {'mean': '(2.5)', 'stddev': '(8.0)'}), '(input_shape, mean=2.5, stddev=8.0)\n', (7480, 7515), True, 'import tensorflow as tf\n'), ((7525, 7668), 'lib.weighted_layers_v2.WeightedBatchNormalizationSeparate', 'wl.WeightedBatchNormalizationSeparate', ([], {'num_templates': 'self.num_templates', 'gamma_initializer': 'self.gamma_init', 'beta_initializer': 'self.beta_init'}), '(num_templates=self.num_templates,\n gamma_initializer=self.gamma_init, beta_initializer=self.beta_init)\n', (7562, 7668), True, 'import lib.weighted_layers_v2 as wl\n'), ((7807, 7845), 'tensorflow.nn.moments', 'tf.nn.moments', (['outputs', 'reduction_axes'], {}), '(outputs, reduction_axes)\n', (7820, 7845), True, 'import tensorflow as tf\n'), ((7934, 7993), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(reshaped_mix_w * self.gamma_template)'], {'axis': '(0)'}), '(reshaped_mix_w * self.gamma_template, axis=0)\n', (7947, 
7993), True, 'import tensorflow as tf\n'), ((8009, 8067), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(reshaped_mix_w * self.beta_template)'], {'axis': '(0)'}), '(reshaped_mix_w * self.beta_template, axis=0)\n', (8022, 8067), True, 'import tensorflow as tf\n'), ((1514, 1539), 'numpy.random.rand', 'np.random.rand', (['*in_shape'], {}), '(*in_shape)\n', (1528, 1539), True, 'import numpy as np\n'), ((4552, 4577), 'numpy.random.rand', 'np.random.rand', (['*in_shape'], {}), '(*in_shape)\n', (4566, 4577), True, 'import numpy as np\n'), ((8144, 8161), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['var'], {}), '(var)\n', (8156, 8161), True, 'import tensorflow as tf\n'), ((3344, 3462), 'numpy.random.rand', 'np.random.rand', (['self.num_templates', 'self.kernel_size', 'self.kernel_size', 'self.input_channel', 'self.depth_multiplier'], {}), '(self.num_templates, self.kernel_size, self.kernel_size, self\n .input_channel, self.depth_multiplier)\n', (3358, 3462), True, 'import numpy as np\n'), ((3559, 3637), 'numpy.random.rand', 'np.random.rand', (['self.num_templates', '(self.input_channel * self.depth_multiplier)'], {}), '(self.num_templates, self.input_channel * self.depth_multiplier)\n', (3573, 3637), True, 'import numpy as np\n'), ((6297, 6352), 'numpy.random.rand', 'np.random.rand', (['self.num_templates', 'self.input_channels'], {}), '(self.num_templates, self.input_channels)\n', (6311, 6352), True, 'import numpy as np\n'), ((6406, 6461), 'numpy.random.rand', 'np.random.rand', (['self.num_templates', 'self.input_channels'], {}), '(self.num_templates, self.input_channels)\n', (6420, 6461), True, 'import numpy as np\n')]
|
"""
http://yutori-datascience.hatenablog.com/entry/2014/12/10/123157
"""
from numba import cuda
import numpy as np
from numba import double
from numba.decorators import jit
from numba import guvectorize
import time
import math
@jit
def pairwise_numba(X,D):
M,N=X.shape[0],X.shape[1]
for i in range(M):
for j in range(M):
d=0.0
for k in range(N):
tmp=X[i,k]-X[j,k]
d+=tmp *tmp
D[i,j]=np.sqrt(d)
@jit('void(f8[:,:],f8[:,:])')
def pairwise_numba_with_type(X,D):
M,N=X.shape[0],X.shape[1]
for i in range(M):
for j in range(M):
d=0.0
for k in range(N):
tmp=X[i,k]-X[j,k]
d+=tmp *tmp
D[i,j]=np.sqrt(d)
@guvectorize(['void(f8[:, :], f8[:, :])'], '(x, y)->(x, x)')
def pairwise_vectorize(X, D):
M = X.shape[0]
N = X.shape[1]
for i in range(M):
for j in range(M):
d = 0.0
for k in range(N):
tmp = X[i, k] - X[j, k]
d += tmp * tmp
D[i, j] = np.sqrt(d)
def pairwise_python(X,D):
M,N=X.shape[0],X.shape[1]
for i in range(M):
for j in range(M):
d=0.0
for k in range(N):
tmp=X[i,k]-X[j,k]
d+=tmp *tmp
D[i,j]=np.sqrt(d)
@cuda.jit('void(f8[:, :], f8[:, :])')
def pairwise_numba_cuda1(X, D):
M = X.shape[0]
N = X.shape[1]
i, j = cuda.grid(2)
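    # cuda.grid(2) returns this thread's absolute (row, column) index in the
    # launch grid; threads outside the matrix bounds do nothing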
if i < M and j < M:
d = 0.0
for k in range(N):
tmp = X[i, k] - X[j, k]
d += tmp * tmp
D[i, j] = math.sqrt(d)
def measure_time(func,X,D):
start=time.time()
func(X,D)
end=time.time()
print("elapsed time",end-start)
def main():
    SIZE=5000
    blockdim=(16, 16)
    # the grid must cover the full SIZE x SIZE distance matrix
    griddim = (math.ceil(SIZE / blockdim[0]), math.ceil(SIZE / blockdim[1]))
X=np.random.random((SIZE,3))
D=np.empty((SIZE,SIZE))
measure_time(pairwise_python,X,D)
measure_time(pairwise_numba,X,D)
measure_time(pairwise_numba_with_type,X,D)
measure_time(pairwise_vectorize,X, D)
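    # note: the first call to the CUDA kernel also includes JIT compilation time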
start=time.time()
pairwise_numba_cuda1[griddim, blockdim](X, D)
end=time.time()
print("elapsed gpu=",end-start)
if __name__ == '__main__':
main()
|
[
"numpy.sqrt",
"numpy.random.random",
"numba.decorators.jit",
"numba.cuda.grid",
"math.sqrt",
"numba.cuda.jit",
"numba.guvectorize",
"numpy.empty",
"time.time"
] |
[((422, 450), 'numba.decorators.jit', 'jit', (['"""void(f8[:,:],f8[:,:])"""'], {}), "('void(f8[:,:],f8[:,:])')\n", (425, 450), False, 'from numba.decorators import jit\n'), ((647, 706), 'numba.guvectorize', 'guvectorize', (["['void(f8[:, :], f8[:, :])']", '"""(x, y)->(x, x)"""'], {}), "(['void(f8[:, :], f8[:, :])'], '(x, y)->(x, x)')\n", (658, 706), False, 'from numba import guvectorize\n'), ((1167, 1203), 'numba.cuda.jit', 'cuda.jit', (['"""void(f8[:, :], f8[:, :])"""'], {}), "('void(f8[:, :], f8[:, :])')\n", (1175, 1203), False, 'from numba import cuda\n'), ((1290, 1302), 'numba.cuda.grid', 'cuda.grid', (['(2)'], {}), '(2)\n', (1299, 1302), False, 'from numba import cuda\n'), ((1518, 1529), 'time.time', 'time.time', ([], {}), '()\n', (1527, 1529), False, 'import time\n'), ((1546, 1557), 'time.time', 'time.time', ([], {}), '()\n', (1555, 1557), False, 'import time\n'), ((1660, 1687), 'numpy.random.random', 'np.random.random', (['(SIZE, 3)'], {}), '((SIZE, 3))\n', (1676, 1687), True, 'import numpy as np\n'), ((1690, 1712), 'numpy.empty', 'np.empty', (['(SIZE, SIZE)'], {}), '((SIZE, SIZE))\n', (1698, 1712), True, 'import numpy as np\n'), ((1873, 1884), 'time.time', 'time.time', ([], {}), '()\n', (1882, 1884), False, 'import time\n'), ((1937, 1948), 'time.time', 'time.time', ([], {}), '()\n', (1946, 1948), False, 'import time\n'), ((1469, 1481), 'math.sqrt', 'math.sqrt', (['d'], {}), '(d)\n', (1478, 1481), False, 'import math\n'), ((409, 419), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (416, 419), True, 'import numpy as np\n'), ((634, 644), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (641, 644), True, 'import numpy as np\n'), ((969, 979), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (976, 979), True, 'import numpy as np\n'), ((1154, 1164), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (1161, 1164), True, 'import numpy as np\n')]
|
# Copyright 2020 (c) Aalto University - All Rights Reserved
# ELEC-E8125 - Reinforcement Learning Course
# AALTO UNIVERSITY
#
#############################################################
import numpy as np
from time import sleep
from sailing import SailingGridworld
epsilon = 10e-4  # TODO: Use this criterion for Task 3
# Set up the environment
env = SailingGridworld(rock_penalty=-2)
value_est = np.zeros((env.w, env.h))
env.draw_values(value_est)
if __name__ == "__main__":
# Reset the environment
state = env.reset()
# Compute state values and the policy
# TODO: Compute the value function and policy (Tasks 1, 2 and 3)
value_est, policy = np.zeros((env.w, env.h)), np.zeros((env.w, env.h)) # 15x10 grid
gamma = 0.9 # discount factor value
number_iterations = 100
actions = [0,1,2,3]
value_est_history = []
policy_history = []
for i in range(number_iterations):
print("iteration step: ", i)
env.clear_text() # remove previously drawn values
        value_est_copy = value_est.copy() # copy to make sure that values are only taken from the current state. In the next iteration the updated value_est will be retrieved
policy_copy = policy.copy()
value_est_history.append(value_est_copy)
policy_history.append(policy_copy)
for x in range(env.w): # loop through all states/cell in the environment
for y in range(env.h):
state_values_of_actions = [] # keep track of calculated state values of each action
# calculate new state value for every action and add to list above
#for transitions in env.transitions[x,y]: # the four different actions are 1) .LEFT 2) .DOWN 3) .RIGHT 4) .UP - each transition is a list with three tuples (state, reward, done, probability)
for action in actions:
transitions = env.transitions[x,y,action]
state_value_of_action = 0
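                    # Bellman backup: sum over outcomes of P(s'|s,a) * (r + gamma * V(s'))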
for transition in transitions:
state_next = transition[0]
reward = transition[1]
done = transition[2]
probability = transition[3]
if (state_next == None):
state_value_of_action += 0
else:
state_value_of_action += probability * (reward + gamma * value_est_copy[state_next])
state_values_of_actions.append(state_value_of_action)
# update value_est and policy
value_est[x][y] = np.max(state_values_of_actions)
policy[x][y] = np.argmax(state_values_of_actions)
        max_diff_val = np.max(np.abs(value_est - value_est_copy))
if (max_diff_val < epsilon):
print("Converged! Value state converged in iteration: ", i+1)
break
max_diff_policy = np.max(abs(policy_copy - policy))
if (max_diff_policy < epsilon):
print("Converged! Policy converged in iteration: ", i+1)
#env.draw_values(value_est) # draw the new calculated values after every iteration
#env.draw_actions(policy)
#env.render()
# Just for my understanding how the data is stored/provided
#print(env.transitions[3,3][0]) # gives us one of the four actions
#print(env.transitions[3,3][0][0]) # gives us one of the three tuples
#print(env.transitions[3, 3][0][0].state) # access entries of tuple
#print(env.transitions[3, 3][0][0][3])
#print(env.transitions[6,3])
# Show the values and the policy
env.draw_values(value_est)
env.draw_actions(policy)
env.render()
sleep(1)
# Save the state values and the policy
fnames = "values.npy", "policy.npy"
np.save(fnames[0], value_est)
np.save(fnames[1], policy)
print("Saved state values and policy to", *fnames)
# Run a single episode
# TODO: Run multiple episodes and compute the discounted returns (Task 4)
number_episodes = 1000
discounted_return_history = []
for i in range(number_episodes):
done = False
counter_discount = 0
discounted_return = 0
while not done:
# Select a random action
# TODO: Use the policy to take the optimal action (Task 2)
action = policy[state]
# Step the environment
state, reward, done, _ = env.step(action)
# calculate discounted reward
discounted_return += reward * gamma**counter_discount
counter_discount += 1
# Render and sleep
#env.render()
#sleep(0.5)
discounted_return_history.append(discounted_return)
state = env.reset()
print("discounted return (initial state) - mean: ", np.mean(discounted_return_history))
print("discounted return (initial state) - std: ", np.std(discounted_return_history))
|
[
"numpy.mean",
"numpy.argmax",
"time.sleep",
"sailing.SailingGridworld",
"numpy.max",
"numpy.zeros",
"numpy.std",
"numpy.save"
] |
[((356, 389), 'sailing.SailingGridworld', 'SailingGridworld', ([], {'rock_penalty': '(-2)'}), '(rock_penalty=-2)\n', (372, 389), False, 'from sailing import SailingGridworld\n'), ((402, 426), 'numpy.zeros', 'np.zeros', (['(env.w, env.h)'], {}), '((env.w, env.h))\n', (410, 426), True, 'import numpy as np\n'), ((3714, 3722), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (3719, 3722), False, 'from time import sleep\n'), ((3811, 3840), 'numpy.save', 'np.save', (['fnames[0]', 'value_est'], {}), '(fnames[0], value_est)\n', (3818, 3840), True, 'import numpy as np\n'), ((3845, 3871), 'numpy.save', 'np.save', (['fnames[1]', 'policy'], {}), '(fnames[1], policy)\n', (3852, 3871), True, 'import numpy as np\n'), ((671, 695), 'numpy.zeros', 'np.zeros', (['(env.w, env.h)'], {}), '((env.w, env.h))\n', (679, 695), True, 'import numpy as np\n'), ((697, 721), 'numpy.zeros', 'np.zeros', (['(env.w, env.h)'], {}), '((env.w, env.h))\n', (705, 721), True, 'import numpy as np\n'), ((4844, 4878), 'numpy.mean', 'np.mean', (['discounted_return_history'], {}), '(discounted_return_history)\n', (4851, 4878), True, 'import numpy as np\n'), ((4935, 4968), 'numpy.std', 'np.std', (['discounted_return_history'], {}), '(discounted_return_history)\n', (4941, 4968), True, 'import numpy as np\n'), ((2614, 2645), 'numpy.max', 'np.max', (['state_values_of_actions'], {}), '(state_values_of_actions)\n', (2620, 2645), True, 'import numpy as np\n'), ((2677, 2711), 'numpy.argmax', 'np.argmax', (['state_values_of_actions'], {}), '(state_values_of_actions)\n', (2686, 2711), True, 'import numpy as np\n')]
|
""" Includes two functions which use shortest path policies
1) run_sss_curriculum - trains a PyMARL agent using experiences gathered
while following an epsilon greedy shortest path policy.
2) mean_sss_time - returns the mean time taken to complete a map while following
an epsilon greedy shortest path policy.
"""
import datetime
import os
from os.path import dirname, abspath
import time
#from sympy import EX
import yaml
import torch as th
from types import SimpleNamespace as SN
from utils.logging import Logger, log_mac_weights
import numpy as np
import random
from logging import getLogger, INFO
from rapport_topological.navigation import construct_shortest_path_policy
from rapport_models.markov.state import State
from learners import REGISTRY as le_REGISTRY
from runners import REGISTRY as r_REGISTRY
from controllers import REGISTRY as mac_REGISTRY
from components.episode_buffer import ReplayBuffer
from components.transforms import OneHot
from src.components.episode_buffer import EpisodeBatch
from runners import AsyncEpisodeRunner
from main import recursive_dict_update
from run import args_sanity_check
from torch.utils.tensorboard import SummaryWriter
def load_configs():
""" Load configuration dictionaries from default locations
"""
# Get the defaults from default.yaml
with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
try:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
assert False, "default.yaml error: {}".format(exc)
# Get qmix params from qmix.yaml
with open(os.path.join(os.path.dirname(__file__), "config", "algs", "qmix.yaml"), "r") as f:
try:
alg_dict = yaml.load(f, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
assert False, "default.yaml error: {}".format(exc)
# Get camas params from camas.yaml
with open(os.path.join(os.path.dirname(__file__), "config", "envs", "camas.yaml"), "r") as f:
try:
env_dict = yaml.load(f, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
assert False, "default.yaml error: {}".format(exc)
config_dict = recursive_dict_update(config_dict, alg_dict)
config_dict = recursive_dict_update(config_dict, env_dict)
return config_dict
class SSS_Runner(AsyncEpisodeRunner):
""" PyMARL Episode Runner for gathering shortest path based experience episodes
"""
debug = False
def __init__(self, args, logger, epsilon_mean=0.15, epsilon_var=0.1):
super().__init__(args, logger)
self.epsilon_mean = epsilon_mean
self.epsilon_var = epsilon_var
self.epsilon = self._draw_epsilon()
self.env.reset()
self.policies = {agent: construct_shortest_path_policy(self.env._tm, self.env._goal_states[agent])
for agent in self.env.agents}
def run(self) -> EpisodeBatch:
""" Returns an transistions for one episode for an agent acting in an
epsilon greedy fashion while following its shortest path.
"""
if self.debug: print('*** reset environment ***')
self.reset()
self.epsilon = self._draw_epsilon()
terminated = False
episode_return = 0
#self.mac.init_hidden(batch_size=self.batch_size) # NOTE not sure what this is
obs, reward, done, info = self.env.last()
k = 0
while not terminated:
k += 1
pre_transition_data = self.env.get_pretran_data()
if self.debug:
print(f'-- step {k} \nState: {self.env.state()}, Agent: {self.env.agent_selection}, Time: {self.env.sim_time()}')
print(f"Pre transition data: {pre_transition_data}")
self.batch.update(pre_transition_data, ts=self.t)
#actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=False)
#print(f'actions {actions}, type {type(actions)}, size {actions.size()}')
#print('my selector: ', self.select_actions())
actions = self.select_actions()
action = actions[0][self.env.agent_idx()].item()
if action == 4:
self.env.step(None) # terminated action to update env correctly
else:
self.env.step(action)
obs, reward, done, env_info = self.env.last()
if done:
if self.debug: print(f'{self.env.agent_selection} done!')
if len(self.env.agents) == 1: terminated = True
if self.debug: print(f'Actions: {actions}\nReward {reward}, Time {self.env.sim_time()}')
episode_return += reward
post_transition_data = {
"actions": actions,
"reward": [(reward,)],
"terminated": [[(terminated),]], # NOTE used to be: [(terminated != env_info.get("episode_limit", False),)] # env info here is info from step()
}
self.batch.update(post_transition_data, ts=self.t)
self.t += 1
if self.t == self.episode_limit:
terminated = True
pre_transition_data = self.env.get_pretran_data()
self.batch.update(pre_transition_data, ts=self.t)
actions = self.select_actions()
self.batch.update({"actions": actions}, ts=self.t)
self.t_env += self.t
return self.batch
def select_actions(self) -> th.Tensor:
""" Choose the action to stay on the shorest path or a random action
depending on epsilon test.
"""
acts = th.ones(1, self.args.n_agents, dtype=int)*4
# choose action for agent acting
agent = self.env.agent_selection
agent_loc = self.env._agent_location[agent]
agent_idx = self.env.agent_name_mapping[self.env.agent_selection]
if self.debug: print(f'choosing action for {agent}, loc: {agent_loc}, idx: {agent_idx}')
if random.uniform(0, 1) > self.epsilon: # exploit
camas_act = self.policies[agent]._state_action_map[State({'loc': agent_loc})]
if self.debug: print(f'exploiting, camas act {camas_act}')
if camas_act is None:
action = 4
else:
action = self.env.to_gym_action(agent_loc, camas_act)
else: # explore
avail_actions = self.batch["avail_actions"][:, self.t]
action = random.choice([i for i, x in enumerate(avail_actions[0, agent_idx]) if x==1])
if self.debug: print(f'random, action {action}, avail agent acts {avail_actions[0, agent_idx]}')
acts[0, agent_idx] = action
if self.debug: print(f'acts {acts}')
return acts
def _draw_epsilon(self):
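        # NB: np.random.normal takes a standard deviation as its second argument, so despite
        # its name `epsilon_var` is used as a std-dev; draws below zero are clipped to zero.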
epsilon = np.random.normal(self.epsilon_mean, self.epsilon_var)
if epsilon < 0: epsilon = 0
return epsilon
def episode_makespan(self):
return self.env.sim_time()
def run_sss_curriculum(args,
logger,
num_episodes,
cycle_after,
max_train_steps,
test_makespan_cutoff,
test_episodes=20,
epsilon_mean=0.25,
epsilon_var=0.15,
log_freq=10000,
agent_weight_log_freq=20000):
"""Trains a PyMARL method using shortest path experiences and saves the result
to the results/model directory
Args:
num_episodes (int): number of experience episodes to gather
max_train_steps (int): number of steps to train the model for
test_episodes (int): number of episodes to evaluate the model on once training is complete
"""
def _gather_data(_num_episodes, _buffer, _sss_runner, _logger, _iteration=0):
_start_time = time.time()
        _logger.console_logger.info(f'...gathering {_num_episodes} episodes of data, iteration: {_iteration}...')
ep_rewards = np.zeros(_num_episodes)
ep_epsilons = np.zeros(_num_episodes)
ep_times = np.zeros(_num_episodes)
ep_step_count = np.zeros(_num_episodes)
for k in range(_num_episodes):
episode_batch = _sss_runner.run()
_buffer.insert_episode_batch(episode_batch)
ep_rewards[k] = th.sum(episode_batch["reward"])
ep_epsilons[k] = _sss_runner.epsilon
ep_times[k] = _sss_runner.episode_makespan()
ep_step_count[k] = _sss_runner.t
if k % log_freq == 0:
_logger.console_logger.info(f'...{k} episodes complete, mean time {np.mean(ep_times)} ({np.std(ep_times)}), mean step count {np.mean(ep_step_count)} ({np.std(ep_step_count)})...')
_logger.console_logger.info(f'...mean rewards {np.mean(ep_rewards)} ({np.std(ep_rewards)}), mean epsilon {np.mean(ep_epsilons)} ({np.std(ep_epsilons)})')
save_curriculum_data([ep_rewards, ep_epsilons, ep_times, ep_step_count], _iteration)
data_gathering_time = time.time() - _start_time
_logger.console_logger.info(f'...time to gather {_num_episodes} episodes: {datetime.timedelta(seconds=data_gathering_time)}, mean time {np.mean(ep_times)} ({np.std(ep_times)}), mean step count {np.mean(ep_step_count)} ({np.std(ep_step_count)})...')
_logger.console_logger.info(f'...mean rewards {np.mean(ep_rewards)} ({np.std(ep_rewards)}), mean epsilon {np.mean(ep_epsilons)} ({np.std(ep_epsilons)})')
    def _test_env(_runner, _test_episodes):
""" Test environment using `_runner`
Returns:
tt: test sim times
sc: test step counts
gc: test reached goal %'s
"""
tt, sc, gc = [], [], []
        for _ in range(_test_episodes):
_runner.run(test_mode=True)
tt.append(_runner.env.sim_time())
sc.append(_runner.env.step_count())
gc.append(_runner.env.agents_at_goal())
return tt, sc, gc
def _save_model(_args, _logger, _learner, label):
_logger.console_logger.info('...saving model...')
_save_path = os.path.join("curriculum", _args.unique_token, str(label))
os.makedirs(_save_path, exist_ok=True)
_logger.console_logger.info("Saving models to {}".format(_save_path))
_learner.save_models(_save_path)
print(' -- Env args', args.env_args)
start_time = time.time()
tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "curriculum_tb")
tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(args.unique_token)
logger.setup_tb(tb_exp_direc)
args.log_interval = log_freq
args.learner_log_interval = log_freq
main_runner = r_REGISTRY[args.runner](args=args, logger=logger)
sss_runner = SSS_Runner(args, logger, epsilon_mean=epsilon_mean, epsilon_var=epsilon_var)
# Set up schemes and groups
env_info = sss_runner.get_env_info()
args.n_agents = env_info["n_agents"]
args.n_actions = env_info["n_actions"]
args.state_shape = env_info["state_shape"]
scheme = {
"state": {"vshape": env_info["state_shape"]},
"obs": {"vshape": env_info["obs_shape"], "group": "agents"},
"actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
"avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
"reward": {"vshape": (1,)},
"terminated": {"vshape": (1,), "dtype": th.uint8},
}
groups = {
"agents": args.n_agents
}
preprocess = {
"actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
}
buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
preprocess=preprocess,
device="cpu" if args.buffer_cpu_only else args.device)
# Setup multiagent controller here
mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
# Give runners the scheme
main_runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
sss_runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
# Learner
learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
if args.use_cuda:
learner.cuda()
## --- Save config ---
config_save_path = os.path.join("curriculum", args.unique_token)
os.makedirs(config_save_path, exist_ok=True)
with open(os.path.join(config_save_path, "config.yaml"), 'w') as outp: # NOTE this has not been tested
yaml.dump(args, outp)
## --- Gather Data ---
_gather_data(num_episodes, buffer, sss_runner, logger)
## --- Train Network ---
logger.console_logger.info(f'...training network...')
for i in range(max_train_steps):
episode_sample = buffer.sample(args.batch_size)
# Truncate batch to only filled timesteps
max_ep_t = episode_sample.max_t_filled()
episode_sample = episode_sample[:, :max_ep_t]
if episode_sample.device != args.device:
episode_sample.to(args.device)
learner.train(episode_sample, i, i)
if (i % cycle_after == 0) and (i > 0): # Gather new data with freq `cycle_after`
_gather_data(num_episodes, buffer, sss_runner, logger, i/cycle_after)
if i % log_freq == 0:
tt, sc, gc = _test_env(main_runner, test_episodes)
logger.log_stat("Test_mean_sim_time", np.mean(tt), i)
logger.log_stat("Test_mean_step_count", np.mean(sc), i)
logger.log_stat("Test_mean_goal_found", np.mean(gc), i)
logger.console_logger.info(f'...logging at step {i}, mean sim time {np.mean(tt)}...')
if np.mean(tt) < test_makespan_cutoff:
tt, _, _ = _test_env(main_runner, test_episodes)
if np.mean(tt) < test_makespan_cutoff:
logger.console_logger.info(f'Training passed evaluation at step {i}. Mean makespan: {np.mean(tt)}, cutoff: {test_makespan_cutoff}')
break
if i % agent_weight_log_freq == 0:
log_mac_weights(logger, mac, i)
_save_model(args, logger, learner, i)
_gather_data(num_episodes, buffer, sss_runner, logger, 1000)
# -- Train for 20e3 more steps --
for i in range(20000):
episode_sample = buffer.sample(args.batch_size)
# Truncate batch to only filled timesteps
max_ep_t = episode_sample.max_t_filled()
episode_sample = episode_sample[:, :max_ep_t]
if episode_sample.device != args.device:
episode_sample.to(args.device)
learner.train(episode_sample, i, i)
if i % log_freq == 0:
tt, sc, gc = _test_env(main_runner, test_episodes)
logger.log_stat("Test_mean_sim_time", np.mean(tt), i)
logger.log_stat("Test_mean_step_count", np.mean(sc), i)
logger.log_stat("Test_mean_goal_found", np.mean(gc), i)
logger.console_logger.info(f'...logging at step {i}, mean sim time {np.mean(tt)}...')
if i % agent_weight_log_freq == 0:
log_mac_weights(logger, mac, i)
tdelta = time.time()-start_time
logger.console_logger.info(f'...time taken for training: {datetime.timedelta(seconds=tdelta)}...')
## --- Evaluate final agent ---
logger.console_logger.info(f'...evaluating final agent...')
tt, sc, gc = _test_env(main_runner, test_episodes)
logger.log_stat("Test_mean_sim_time", np.mean(tt), i)
logger.log_stat("Test_mean_step_count", np.mean(sc), i)
logger.log_stat("Test_mean_goal_found", np.mean(gc), i)
logger.console_logger.info(f'-- evaluation av test time: {np.mean(tt)} ({np.var(tt)}), av step count {np.mean(sc)} ({np.var(sc)}), percentage at goal {np.mean(gc)} ({np.var(gc)}), {len(sc)} episodes')
_save_model(args, logger, learner, i+1)
def save_curriculum_data(array_to_save, iteration=0):
save_path = os.path.join(args.local_results_path, "curriculum", "ep_data", args.unique_token)
os.makedirs(save_path, exist_ok=True)
np.save('{}/ep_data_{}.npy'.format(save_path, iteration), array_to_save, allow_pickle=True)
def mean_sss_time(args, logger, num_episodes, epsilon_mean):
"""Runs a PyMARL-Camas map using an epsilon greedy shortest path policy
Args:
num_episodes (int): number of episodes to run for
        epsilon_mean (float): mean of the epsilon used in action selection
"""
print(' -- Env args', args.env_args)
start_time = time.time()
sss_runner = SSS_Runner(args, logger, epsilon_mean=epsilon_mean, epsilon_var=0.0)
# Set up schemes and groups
env_info = sss_runner.get_env_info()
args.n_agents = env_info["n_agents"]
args.n_actions = env_info["n_actions"]
args.state_shape = env_info["state_shape"]
scheme = {
"state": {"vshape": env_info["state_shape"]},
"obs": {"vshape": env_info["obs_shape"], "group": "agents"},
"actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
"avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
"reward": {"vshape": (1,)},
"terminated": {"vshape": (1,), "dtype": th.uint8},
}
groups = {
"agents": args.n_agents
}
preprocess = {
"actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
}
buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
preprocess=preprocess,
device="cpu" if args.buffer_cpu_only else args.device)
# Setup multiagent controller here
mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
# Give runners the scheme
#main_runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
sss_runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
# Learner
learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
if args.use_cuda:
learner.cuda()
logger.console_logger.info(f'...running {num_episodes} episodes...')
episode_times = []
step_count = []
rewards = []
for i in range(num_episodes):
batch = sss_runner.run()
episode_times.append(sss_runner.env.sim_time())
step_count.append(sss_runner.t)
rewards.append(th.sum(batch["reward"]))
if i % 50 == 0:
logger.console_logger.info(f'...{i} episodes complete...')
print(f'Mean sim time for {num_episodes} on {args.env_args["map_name"]} and an epsilon of {epsilon_mean}: {np.mean(episode_times)} ({np.var(episode_times)})')
print(f'mean step count {np.mean(step_count)} ({np.var(step_count)}), mean reward: {np.mean(rewards)} ({np.var(rewards)})')
return np.mean(episode_times), np.mean(step_count)
def load_default_params(map_name="bruno"):
pass
#TODO
if __name__ == "__main__":
## *** Curriculum specific variables ***
num_episodes = int(5e3)
cycle_after = int(2.5e4)
train_steps_max = int(5e5)
test_episodes = 20
test_makespan_cutoff = 60
console_logger = getLogger()
logger = Logger(console_logger)
config_dict = load_configs() # NOTE should sanity check
args = SN(**config_dict) # gives attribute access to namespace
if args.use_cuda:
args.use_cuda = th.cuda.is_available() # Check that cuda is valid
args.device = "cuda" if args.use_cuda else "cpu"
if args.use_cuda: logger.console_logger.info('... Using CUDA...')
args.unique_token = "cur<PASSWORD>{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
#args.batch_size = 64
logger.console_logger.setLevel(INFO)
if args.prioritised_replay:
logger.console_logger.warning('Turning PER off')
args.prioritised_replay = False # not implemented
#mean_sss_time(args, logger, 200, 0.0)
if num_episodes > args.buffer_size:
args.buffer_size = num_episodes
print(f'Buffer size now {args.buffer_size}')
run_sss_curriculum(args, logger, num_episodes, cycle_after, train_steps_max, test_makespan_cutoff,
test_episodes=test_episodes, log_freq=int(2e4), agent_weight_log_freq=int(4e4))
|
[
"logging.getLogger",
"yaml.load",
"main.recursive_dict_update",
"rapport_topological.navigation.construct_shortest_path_policy",
"torch.cuda.is_available",
"torch.sum",
"components.transforms.OneHot",
"datetime.timedelta",
"numpy.mean",
"components.episode_buffer.ReplayBuffer",
"numpy.random.normal",
"random.uniform",
"types.SimpleNamespace",
"yaml.dump",
"utils.logging.log_mac_weights",
"os.path.dirname",
"numpy.std",
"time.time",
"rapport_models.markov.state.State",
"os.makedirs",
"os.path.join",
"utils.logging.Logger",
"datetime.datetime.now",
"numpy.zeros",
"os.path.abspath",
"numpy.var",
"torch.ones"
] |
[((2258, 2302), 'main.recursive_dict_update', 'recursive_dict_update', (['config_dict', 'alg_dict'], {}), '(config_dict, alg_dict)\n', (2279, 2302), False, 'from main import recursive_dict_update\n'), ((2321, 2365), 'main.recursive_dict_update', 'recursive_dict_update', (['config_dict', 'env_dict'], {}), '(config_dict, env_dict)\n', (2342, 2365), False, 'from main import recursive_dict_update\n'), ((10781, 10792), 'time.time', 'time.time', ([], {}), '()\n', (10790, 10792), False, 'import time\n'), ((12049, 12213), 'components.episode_buffer.ReplayBuffer', 'ReplayBuffer', (['scheme', 'groups', 'args.buffer_size', "(env_info['episode_limit'] + 1)"], {'preprocess': 'preprocess', 'device': "('cpu' if args.buffer_cpu_only else args.device)"}), "(scheme, groups, args.buffer_size, env_info['episode_limit'] + \n 1, preprocess=preprocess, device='cpu' if args.buffer_cpu_only else\n args.device)\n", (12061, 12213), False, 'from components.episode_buffer import ReplayBuffer\n'), ((12738, 12783), 'os.path.join', 'os.path.join', (['"""curriculum"""', 'args.unique_token'], {}), "('curriculum', args.unique_token)\n", (12750, 12783), False, 'import os\n'), ((12788, 12832), 'os.makedirs', 'os.makedirs', (['config_save_path'], {'exist_ok': '(True)'}), '(config_save_path, exist_ok=True)\n', (12799, 12832), False, 'import os\n'), ((16418, 16504), 'os.path.join', 'os.path.join', (['args.local_results_path', '"""curriculum"""', '"""ep_data"""', 'args.unique_token'], {}), "(args.local_results_path, 'curriculum', 'ep_data', args.\n unique_token)\n", (16430, 16504), False, 'import os\n'), ((16504, 16541), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (16515, 16541), False, 'import os\n'), ((16979, 16990), 'time.time', 'time.time', ([], {}), '()\n', (16988, 16990), False, 'import time\n'), ((17872, 18036), 'components.episode_buffer.ReplayBuffer', 'ReplayBuffer', (['scheme', 'groups', 'args.buffer_size', "(env_info['episode_limit'] + 1)"], {'preprocess': 'preprocess', 'device': "('cpu' if args.buffer_cpu_only else args.device)"}), "(scheme, groups, args.buffer_size, env_info['episode_limit'] + \n 1, preprocess=preprocess, device='cpu' if args.buffer_cpu_only else\n args.device)\n", (17884, 18036), False, 'from components.episode_buffer import ReplayBuffer\n'), ((19628, 19639), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (19637, 19639), False, 'from logging import getLogger, INFO\n'), ((19653, 19675), 'utils.logging.Logger', 'Logger', (['console_logger'], {}), '(console_logger)\n', (19659, 19675), False, 'from utils.logging import Logger, log_mac_weights\n'), ((19753, 19770), 'types.SimpleNamespace', 'SN', ([], {}), '(**config_dict)\n', (19755, 19770), True, 'from types import SimpleNamespace as SN\n'), ((7107, 7160), 'numpy.random.normal', 'np.random.normal', (['self.epsilon_mean', 'self.epsilon_var'], {}), '(self.epsilon_mean, self.epsilon_var)\n', (7123, 7160), True, 'import numpy as np\n'), ((8212, 8223), 'time.time', 'time.time', ([], {}), '()\n', (8221, 8223), False, 'import time\n'), ((8350, 8373), 'numpy.zeros', 'np.zeros', (['_num_episodes'], {}), '(_num_episodes)\n', (8358, 8373), True, 'import numpy as np\n'), ((8396, 8419), 'numpy.zeros', 'np.zeros', (['_num_episodes'], {}), '(_num_episodes)\n', (8404, 8419), True, 'import numpy as np\n'), ((8439, 8462), 'numpy.zeros', 'np.zeros', (['_num_episodes'], {}), '(_num_episodes)\n', (8447, 8462), True, 'import numpy as np\n'), ((8487, 8510), 'numpy.zeros', 'np.zeros', (['_num_episodes'], {}), 
'(_num_episodes)\n', (8495, 8510), True, 'import numpy as np\n'), ((10559, 10597), 'os.makedirs', 'os.makedirs', (['_save_path'], {'exist_ok': '(True)'}), '(_save_path, exist_ok=True)\n', (10570, 10597), False, 'import os\n'), ((12949, 12970), 'yaml.dump', 'yaml.dump', (['args', 'outp'], {}), '(args, outp)\n', (12958, 12970), False, 'import yaml\n'), ((15620, 15631), 'time.time', 'time.time', ([], {}), '()\n', (15629, 15631), False, 'import time\n'), ((15948, 15959), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (15955, 15959), True, 'import numpy as np\n'), ((16008, 16019), 'numpy.mean', 'np.mean', (['sc'], {}), '(sc)\n', (16015, 16019), True, 'import numpy as np\n'), ((16068, 16079), 'numpy.mean', 'np.mean', (['gc'], {}), '(gc)\n', (16075, 16079), True, 'import numpy as np\n'), ((19270, 19292), 'numpy.mean', 'np.mean', (['episode_times'], {}), '(episode_times)\n', (19277, 19292), True, 'import numpy as np\n'), ((19294, 19313), 'numpy.mean', 'np.mean', (['step_count'], {}), '(step_count)\n', (19301, 19313), True, 'import numpy as np\n'), ((19856, 19878), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (19876, 19878), True, 'import torch as th\n'), ((1444, 1480), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1453, 1480), False, 'import yaml\n'), ((1765, 1801), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1774, 1801), False, 'import yaml\n'), ((2089, 2125), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (2098, 2125), False, 'import yaml\n'), ((2860, 2934), 'rapport_topological.navigation.construct_shortest_path_policy', 'construct_shortest_path_policy', (['self.env._tm', 'self.env._goal_states[agent]'], {}), '(self.env._tm, self.env._goal_states[agent])\n', (2890, 2934), False, 'from rapport_topological.navigation import construct_shortest_path_policy\n'), ((5877, 5918), 'torch.ones', 'th.ones', (['(1)', 'self.args.n_agents'], {'dtype': 'int'}), '(1, self.args.n_agents, dtype=int)\n', (5884, 5918), True, 'import torch as th\n'), ((6256, 6276), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (6270, 6276), False, 'import random\n'), ((8693, 8724), 'torch.sum', 'th.sum', (["episode_batch['reward']"], {}), "(episode_batch['reward'])\n", (8699, 8724), True, 'import torch as th\n'), ((9399, 9410), 'time.time', 'time.time', ([], {}), '()\n', (9408, 9410), False, 'import time\n'), ((10915, 10948), 'os.path.join', 'os.path.join', (['tb_logs_direc', '"""{}"""'], {}), "(tb_logs_direc, '{}')\n", (10927, 10948), False, 'import os\n'), ((12847, 12892), 'os.path.join', 'os.path.join', (['config_save_path', '"""config.yaml"""'], {}), "(config_save_path, 'config.yaml')\n", (12859, 12892), False, 'import os\n'), ((14541, 14572), 'utils.logging.log_mac_weights', 'log_mac_weights', (['logger', 'mac', 'i'], {}), '(logger, mac, i)\n', (14556, 14572), False, 'from utils.logging import Logger, log_mac_weights\n'), ((15570, 15601), 'utils.logging.log_mac_weights', 'log_mac_weights', (['logger', 'mac', 'i'], {}), '(logger, mac, i)\n', (15585, 15601), False, 'from utils.logging import Logger, log_mac_weights\n'), ((18835, 18858), 'torch.sum', 'th.sum', (["batch['reward']"], {}), "(batch['reward'])\n", (18841, 18858), True, 'import torch as th\n'), ((1340, 1365), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1355, 1365), False, 'import os\n'), ((1659, 1684), 'os.path.dirname', 
'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1674, 1684), False, 'import os\n'), ((1982, 2007), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1997, 2007), False, 'import os\n'), ((6367, 6392), 'rapport_models.markov.state.State', 'State', (["{'loc': agent_loc}"], {}), "({'loc': agent_loc})\n", (6372, 6392), False, 'from rapport_models.markov.state import State\n'), ((10847, 10864), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (10854, 10864), False, 'from os.path import dirname, abspath\n'), ((11992, 12022), 'components.transforms.OneHot', 'OneHot', ([], {'out_dim': 'args.n_actions'}), '(out_dim=args.n_actions)\n', (11998, 12022), False, 'from components.transforms import OneHot\n'), ((13865, 13876), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (13872, 13876), True, 'import numpy as np\n'), ((13933, 13944), 'numpy.mean', 'np.mean', (['sc'], {}), '(sc)\n', (13940, 13944), True, 'import numpy as np\n'), ((14001, 14012), 'numpy.mean', 'np.mean', (['gc'], {}), '(gc)\n', (14008, 14012), True, 'import numpy as np\n'), ((14143, 14154), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (14150, 14154), True, 'import numpy as np\n'), ((15256, 15267), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (15263, 15267), True, 'import numpy as np\n'), ((15324, 15335), 'numpy.mean', 'np.mean', (['sc'], {}), '(sc)\n', (15331, 15335), True, 'import numpy as np\n'), ((15392, 15403), 'numpy.mean', 'np.mean', (['gc'], {}), '(gc)\n', (15399, 15403), True, 'import numpy as np\n'), ((15705, 15739), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'tdelta'}), '(seconds=tdelta)\n', (15723, 15739), False, 'import datetime\n'), ((16146, 16157), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (16153, 16157), True, 'import numpy as np\n'), ((16161, 16171), 'numpy.var', 'np.var', (['tt'], {}), '(tt)\n', (16167, 16171), True, 'import numpy as np\n'), ((16190, 16201), 'numpy.mean', 'np.mean', (['sc'], {}), '(sc)\n', (16197, 16201), True, 'import numpy as np\n'), ((16205, 16215), 'numpy.var', 'np.var', (['sc'], {}), '(sc)\n', (16211, 16215), True, 'import numpy as np\n'), ((16239, 16250), 'numpy.mean', 'np.mean', (['gc'], {}), '(gc)\n', (16246, 16250), True, 'import numpy as np\n'), ((16254, 16264), 'numpy.var', 'np.var', (['gc'], {}), '(gc)\n', (16260, 16264), True, 'import numpy as np\n'), ((17815, 17845), 'components.transforms.OneHot', 'OneHot', ([], {'out_dim': 'args.n_actions'}), '(out_dim=args.n_actions)\n', (17821, 17845), False, 'from components.transforms import OneHot\n'), ((19075, 19097), 'numpy.mean', 'np.mean', (['episode_times'], {}), '(episode_times)\n', (19082, 19097), True, 'import numpy as np\n'), ((19101, 19122), 'numpy.var', 'np.var', (['episode_times'], {}), '(episode_times)\n', (19107, 19122), True, 'import numpy as np\n'), ((19156, 19175), 'numpy.mean', 'np.mean', (['step_count'], {}), '(step_count)\n', (19163, 19175), True, 'import numpy as np\n'), ((19179, 19197), 'numpy.var', 'np.var', (['step_count'], {}), '(step_count)\n', (19185, 19197), True, 'import numpy as np\n'), ((19215, 19231), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (19222, 19231), True, 'import numpy as np\n'), ((19235, 19250), 'numpy.var', 'np.var', (['rewards'], {}), '(rewards)\n', (19241, 19250), True, 'import numpy as np\n'), ((20094, 20117), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (20115, 20117), False, 'import datetime\n'), ((9508, 9555), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 
'data_gathering_time'}), '(seconds=data_gathering_time)\n', (9526, 9555), False, 'import datetime\n'), ((9569, 9586), 'numpy.mean', 'np.mean', (['ep_times'], {}), '(ep_times)\n', (9576, 9586), True, 'import numpy as np\n'), ((9590, 9606), 'numpy.std', 'np.std', (['ep_times'], {}), '(ep_times)\n', (9596, 9606), True, 'import numpy as np\n'), ((9627, 9649), 'numpy.mean', 'np.mean', (['ep_step_count'], {}), '(ep_step_count)\n', (9634, 9649), True, 'import numpy as np\n'), ((9653, 9674), 'numpy.std', 'np.std', (['ep_step_count'], {}), '(ep_step_count)\n', (9659, 9674), True, 'import numpy as np\n'), ((9737, 9756), 'numpy.mean', 'np.mean', (['ep_rewards'], {}), '(ep_rewards)\n', (9744, 9756), True, 'import numpy as np\n'), ((9760, 9778), 'numpy.std', 'np.std', (['ep_rewards'], {}), '(ep_rewards)\n', (9766, 9778), True, 'import numpy as np\n'), ((9796, 9816), 'numpy.mean', 'np.mean', (['ep_epsilons'], {}), '(ep_epsilons)\n', (9803, 9816), True, 'import numpy as np\n'), ((9820, 9839), 'numpy.std', 'np.std', (['ep_epsilons'], {}), '(ep_epsilons)\n', (9826, 9839), True, 'import numpy as np\n'), ((14263, 14274), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (14270, 14274), True, 'import numpy as np\n'), ((14097, 14108), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (14104, 14108), True, 'import numpy as np\n'), ((15488, 15499), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (15495, 15499), True, 'import numpy as np\n'), ((8993, 9010), 'numpy.mean', 'np.mean', (['ep_times'], {}), '(ep_times)\n', (9000, 9010), True, 'import numpy as np\n'), ((9014, 9030), 'numpy.std', 'np.std', (['ep_times'], {}), '(ep_times)\n', (9020, 9030), True, 'import numpy as np\n'), ((9051, 9073), 'numpy.mean', 'np.mean', (['ep_step_count'], {}), '(ep_step_count)\n', (9058, 9073), True, 'import numpy as np\n'), ((9077, 9098), 'numpy.std', 'np.std', (['ep_step_count'], {}), '(ep_step_count)\n', (9083, 9098), True, 'import numpy as np\n'), ((9169, 9188), 'numpy.mean', 'np.mean', (['ep_rewards'], {}), '(ep_rewards)\n', (9176, 9188), True, 'import numpy as np\n'), ((9192, 9210), 'numpy.std', 'np.std', (['ep_rewards'], {}), '(ep_rewards)\n', (9198, 9210), True, 'import numpy as np\n'), ((9228, 9248), 'numpy.mean', 'np.mean', (['ep_epsilons'], {}), '(ep_epsilons)\n', (9235, 9248), True, 'import numpy as np\n'), ((9252, 9271), 'numpy.std', 'np.std', (['ep_epsilons'], {}), '(ep_epsilons)\n', (9258, 9271), True, 'import numpy as np\n'), ((14404, 14415), 'numpy.mean', 'np.mean', (['tt'], {}), '(tt)\n', (14411, 14415), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
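# NOTE: these tests rely on a `.drawdown` accessor being registered on pandas Series by the
# package under test (e.g. via pd.api.extensions.register_series_accessor), presumably through
# a conftest or package import elsewhere in the test suite.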
def test_drawdown_and_returns_series():
index_range = pd.date_range(start=datetime(2000, 1, 1), periods=4, freq='AS-JAN')
wealth_index = pd.Series(data=[0.4, 0.3, 0.2, 0.5], index=index_range)
dd = wealth_index.drawdown
assert dd is not None
drawdown_df = dd.data
assert drawdown_df is not None
assert isinstance(drawdown_df, pd.Series)
assert drawdown_df.dtypes == 'float64'
assert drawdown_df.name == 'Drawdown'
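    # Wealth [0.4, 0.3, 0.2, 0.5] has a running peak of [0.4, 0.4, 0.4, 0.5], so the expected
    # values are consistent with drawdown = wealth / running peak - 1 = [0.0, -0.25, -0.5, 0.0].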
np.testing.assert_almost_equal(drawdown_df['2000-01-01'], 0.0)
np.testing.assert_almost_equal(drawdown_df['2001-01-01'], -0.25)
np.testing.assert_almost_equal(drawdown_df['2002-01-01'], -0.5)
np.testing.assert_almost_equal(drawdown_df['2003-01-01'], 0.0)
def test_max_drawdown():
index_range = pd.date_range(start=datetime(2000, 1, 1), periods=4, freq='AS-JAN')
wealth_index = pd.Series(data=[0.4, 0.3, 0.2, 0.5], index=index_range)
assert wealth_index.drawdown.max_drawdown == -0.5
def test_durations():
index_range = pd.date_range(start=datetime(2000, 1, 1), periods=9, freq='AS-JAN')
wealth_index = pd.Series(data=[0.4, 0.3, 0.2, 0.5, 0.4, 0.4, 0.3, 0.3, 0.5], index=index_range)
durations = wealth_index.drawdown.durations
assert isinstance(durations, pd.Series)
assert durations.dtypes == 'timedelta64[ns]'
assert durations.name == 'Durations'
assert len(durations) == 2
assert durations['2003-01-01'] == timedelta(days=1096)
assert durations['2008-01-01'] == timedelta(days=1826)
|
[
"pandas.Series",
"numpy.testing.assert_almost_equal",
"datetime.timedelta",
"datetime.datetime"
] |
[((227, 282), 'pandas.Series', 'pd.Series', ([], {'data': '[0.4, 0.3, 0.2, 0.5]', 'index': 'index_range'}), '(data=[0.4, 0.3, 0.2, 0.5], index=index_range)\n', (236, 282), True, 'import pandas as pd\n'), ((536, 598), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["drawdown_df['2000-01-01']", '(0.0)'], {}), "(drawdown_df['2000-01-01'], 0.0)\n", (566, 598), True, 'import numpy as np\n'), ((603, 667), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["drawdown_df['2001-01-01']", '(-0.25)'], {}), "(drawdown_df['2001-01-01'], -0.25)\n", (633, 667), True, 'import numpy as np\n'), ((672, 735), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["drawdown_df['2002-01-01']", '(-0.5)'], {}), "(drawdown_df['2002-01-01'], -0.5)\n", (702, 735), True, 'import numpy as np\n'), ((740, 802), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["drawdown_df['2003-01-01']", '(0.0)'], {}), "(drawdown_df['2003-01-01'], 0.0)\n", (770, 802), True, 'import numpy as np\n'), ((935, 990), 'pandas.Series', 'pd.Series', ([], {'data': '[0.4, 0.3, 0.2, 0.5]', 'index': 'index_range'}), '(data=[0.4, 0.3, 0.2, 0.5], index=index_range)\n', (944, 990), True, 'import pandas as pd\n'), ((1174, 1259), 'pandas.Series', 'pd.Series', ([], {'data': '[0.4, 0.3, 0.2, 0.5, 0.4, 0.4, 0.3, 0.3, 0.5]', 'index': 'index_range'}), '(data=[0.4, 0.3, 0.2, 0.5, 0.4, 0.4, 0.3, 0.3, 0.5], index=index_range\n )\n', (1183, 1259), True, 'import pandas as pd\n'), ((1507, 1527), 'datetime.timedelta', 'timedelta', ([], {'days': '(1096)'}), '(days=1096)\n', (1516, 1527), False, 'from datetime import datetime, timedelta\n'), ((1566, 1586), 'datetime.timedelta', 'timedelta', ([], {'days': '(1826)'}), '(days=1826)\n', (1575, 1586), False, 'from datetime import datetime, timedelta\n'), ((160, 180), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (168, 180), False, 'from datetime import datetime, timedelta\n'), ((868, 888), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (876, 888), False, 'from datetime import datetime, timedelta\n'), ((1107, 1127), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (1115, 1127), False, 'from datetime import datetime, timedelta\n')]
|
import os, json, base64, cv2, glob
import numpy as np
import matplotlib.pyplot as plt
from coco import CocoConfig
from Mask.config import Config
import Mask.utils as utils
import Mask.model as modellib
import Mask.visualize as visualize
from convert_file import load_image
def init():
np.set_printoptions(threshold=np.inf)
def run(input_df):
data = json.loads(input_df)
im = load_image(image64=data)
config = CocoConfig()
model = modellib.MaskRCNN(mode="inference", model_dir="./models/", config=config)
model.load_weights(filepath="./models/mask_rcnn_moles_0090.h5", by_name=True)
class_names = ["BG", "malignant", "benign"]
# predict the mask, bounding box and class of the image
r = model.detect([im])[0]
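    # `r` presumably follows the Matterport-style Mask R-CNN detect() output: a dict with
    # "rois", "masks", "class_ids" and "scores" for the single input image.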
prediction = None
for idx, val in enumerate(class_names):
if idx == r["class_ids"]:
prediction = val
print(val)
else:
continue
return prediction
|
[
"json.loads",
"Mask.model.MaskRCNN",
"convert_file.load_image",
"coco.CocoConfig",
"numpy.set_printoptions"
] |
[((290, 327), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (309, 327), True, 'import numpy as np\n'), ((359, 379), 'json.loads', 'json.loads', (['input_df'], {}), '(input_df)\n', (369, 379), False, 'import os, json, base64, cv2, glob\n'), ((389, 413), 'convert_file.load_image', 'load_image', ([], {'image64': 'data'}), '(image64=data)\n', (399, 413), False, 'from convert_file import load_image\n'), ((428, 440), 'coco.CocoConfig', 'CocoConfig', ([], {}), '()\n', (438, 440), False, 'from coco import CocoConfig\n'), ((454, 527), 'Mask.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'model_dir': '"""./models/"""', 'config': 'config'}), "(mode='inference', model_dir='./models/', config=config)\n", (471, 527), True, 'import Mask.model as modellib\n')]
|
import streamlit as st
import pandas as pd
import altair as alt
import pickle
import numpy as np
from map import create_map
from airdata import AirData
from utils import parse_time, parse_time_hms
from vega_datasets import data
#st.set_page_config(layout="wide")
# Getting data ready, Refresh every hour (same data when user refreshes within an hour)
@st.cache(ttl=60 * 60, suppress_st_warning=True)
def get_AD_data():
ad = AirData()
flight_df = ad.get_flights_df()
flight_df = ad.add_time_to_df(flight_df)
return ad, flight_df
# Cache to prevent computation on every rerun
@st.cache
def save_AD_data(df):
return df.to_csv().encode('utf-8')
ad, flight_df = get_AD_data()
# Definitions for flight delay
## Prepare data
# load in files
origin = pickle.load(open('flight-price/DestState.sav','rb'))
dest = pickle.load(open('flight-price/DestState.sav','rb'))
air = pickle.load(open('flight-price/AirlineCompany.sav','rb'))
miles_dic = pickle.load(open('flight-price/miles_dic.sav','rb'))
quarter_dic= {'Spring':'Q1','Summer':'Q2','Fall':'Q3','Winter':'Q4'}
df_viz = pd.read_csv('flight-price/df_viz.csv').iloc[:,:]
# fit the prediction model, get prediction and prediction interval
def get_pi(X):
all_models = pickle.load(open('flight-price/all_models.sav', 'rb'))
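    # The saved models appear to be fit on log(fare): index 0/1 give the lower/upper interval
    # bounds and index 2 the point estimate, all exponentiated back to dollars below.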
lb = all_models[0].predict(X)
pred = all_models[2].predict(X)
ub = all_models[1].predict(X)
return (round(np.exp(lb[0]),2), round(np.exp(pred[0]),2), round(np.exp(ub[0]),2))
# load data for non ML visual
def load_data_viz():
return pd.read_csv('flight-price/train_viz.csv').iloc[:,:]
# visual for price comparison
@st.cache
def get_slice_ogstate(df, ogstate=None):
labels = pd.Series([1] * len(df), index=df.index)
labels &= df['OriginState'] == ogstate
return labels
def get_slice_destate(df, destate=None):
labels = pd.Series([1] * len(df), index=df.index)
labels &= df['DestState'] == destate
return labels
def get_slice_membership(df, ogstate=None, destate=None, quarter=None,airline=None):
labels = pd.Series([1] * len(df), index=df.index)
if ogstate:
labels &= df['OriginState'] == ogstate
if destate is not None:
labels &= df['DestState'] == destate
if quarter:
labels &= df['Quarter'].isin(quarter)
if airline:
labels &= df['AirlineCompany'].isin(airline)
return labels
#-------------------- Price Heat Map-------------------------------------------
def load_data(url):
file = url
df = pd.read_csv(file)
return df
def get_season(df, quarter):
sub = df[df['Quarter']== quarter]
return sub
menu_selection = st.sidebar.radio("Menu", ["Introduction","Flight Map", "Flight Delay Analysis",
"Flight Price Analysis"])
if menu_selection == 'Introduction':
#col1, col2, col3,col4 = st.columns([0.5,1,2,1])
#col2.image("image/flight-logo.jpg", width=150)
#col3.markdown("<h1 style='text-align: left; color: #072F5F;'>Flight Traffic Brain</h1>",
# unsafe_allow_html=True)
col1, col2, col3 = st.columns([0.5,1,4])
col2.image("image/flight-logo.jpg", width=150)
col3.markdown("<h1 style='text-align: left; color: #072F5F;'>Flight Traffic Brain</h1>",
unsafe_allow_html=True)
text = "<p style='font-size:18px'>Nowadays, air traffic control has become a complicated task as there are\
more and more flights and airlines. There has also been rising cases of flight delays possibly due to poor\
management and massive volume of traffic. While air traffic is important to manage from the perspective of\
airports and airlines, flight prices are crucial for customers who usually make decisions of their travel\
plans based on them. In this project we hope to help airports better manage airlines and control airline\
traffic and passengers make wiser decisions about airline flights.</p>"
st.write(text, unsafe_allow_html=True)
text = "<p style='font-size:18px'>A <span style='color: #1167b1'> real-time map of flights </span> with interactive information such as speed and altitude can help the specialists\
to make better decisions. Meanwhile, an <span style='color: #1167b1'> interactive network graph </span> that shows the connections between airports and\
flights can also improve the handling of dependencies among the traffic. A <span style='color: #1167b1'> data visualization section of delay time </span>\
can also enable users to analyze different flights in real time and in more detail. By filtering the flight according to their\
departure airport, the users can not only view the delay time of different flights, but also have a high-level overview of\
the delay information of flights of different airlines. This information will help airport specialists to better communicate\
with the airports and passengers, and make better decisions in terms of resource distribution. In addition, a <span style='color: #1167b1'> \
machine learning model </span> using historical data to <span style='color: #1167b1'> predict flight price </span> can help passengers\
estimate the potential fare of flight of their interest. An <span style='color: #1167b1'> interactive platform with visualizations of airline comparisons </span> can also allow\
them to compare different flight prices by modifying parameters of interest, thus helping optimize their travel plan.</p>"
st.write(text, unsafe_allow_html=True)
text = "<br><br><br>This project was created by [<NAME>](<EMAIL>), [<NAME>](<EMAIL>), \
[<NAME>](<EMAIL>) and [<NAME>](<EMAIL>) for the [Interactive Data Science](https://dig.cmu.edu/ids2022) course at\
[Carnegie Mellon University](https://www.cmu.edu)"
st.write(text, unsafe_allow_html=True)
elif menu_selection == "Flight Map":
st.title("Real-time Flight Data Visualization")
# ------------ Map starts ---------------------
with st.sidebar.expander("Analysis for flights/airports"):
st.write("This is an analysis tool from the perspective of flights or airports")
to_show = st.selectbox("Data to look at", ["flight", "airport"])
if to_show == "flight":
field = st.selectbox("Variable of interest", ["heading", "altitude", "ground_speed"])
else:
field = st.selectbox("Variable of interest", ["origin_airport_iata", "destination_airport_iata"])
st.write("This is a map of real-time flights and airports. The blue circles are \
            the airports, while the red squares are the flights. You can utilize \
the tool bar on the left tab to explore the data. You can also \
move your mouse over the map to see more information.")
map_air = create_map(flight_df, field, to_show)
st.altair_chart(map_air,use_container_width=True)
st.sidebar.title("Note")
st.sidebar.write("This visualization consists of three components.\
The first component is a map that shows real-time flights and airports\
in the U.S. The second component, linked to the first component, \
is an analysis tool for the real-time flight and airport data. \
The third component displays the time information of a flight.")
st.sidebar.download_button("Download real-time data", data=save_AD_data(flight_df),
file_name='airdata.csv', mime='text/csv')
# ------------ Map ends ---------------------
# ------------ Flight time starts ---------------------
st.write("Here we display the time information of a flight.")
option = st.selectbox("Which flight number are you looking into?",
flight_df['number'].sort_values())
# Get the corresponding flight row in the dataframe
option_row = flight_df[flight_df['number'] == option]
option_id = option_row.id.values[0]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
# Display scheduled and actual time for departual and arrival using metric
col1, col2 = st.columns(2)
col1.metric("Scheduled departure time",
parse_time(option_time['scheduled']['departure']))
if option_time['real']['departure'] and option_time['scheduled']['departure']:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
else:
depart_delta = None
col2.metric("Actual departure time",
parse_time(option_time['real']['departure']),
parse_time_hms(depart_delta),
delta_color='inverse')
col3, col4 = st.columns(2)
col3.metric("Scheduled arrival time", parse_time(option_time['scheduled']['arrival']))
arrival_time = option_time['real']['arrival']
if not arrival_time:
arrival_time = option_time['estimated']['arrival']
col4.metric("Estimated/Actual arrival time", parse_time(arrival_time))
# Note that some flights are not displayed due to... so the number of routes
# may appear larger than...
# ------------ Flight time ends ---------------------
elif menu_selection == "Flight Delay Analysis":
# ------------ Delay Analysis starts ---------------------
st.title("Flight Delay Analysis")
st.sidebar.title("Note")
st.sidebar.write("This flight delay analysis consists of four parts: \
The first part is a data slicing tool that allows the users to filter any flight data according to the different departure airport.\
The second part lists out all the flights flying from the selected departure airport, and displays the relevant delay time information of the flights. \
The third part displays a stripplot graph to allow the users to visually compare the different departure delay time of flights of different airlines.\
The last part compares the average delay time of different airlines. ")
ad = AirData()
flight_df = ad.get_flights_df()
st.header("Slice Data")
st.write("You can filter the airline data by choosing the different departure airport.")
with st.expander("Airports"):
origin_airport_list = flight_df['origin_airport_iata'].drop_duplicates()
option1 = st.selectbox("Departure Airport:",
(origin_airport_list))
flight_df_selected1 = flight_df[(flight_df['origin_airport_iata'] == option1)]
st.header("Data Visualization")
with st.expander("Flight delay from different departure airports"):
st.write("This data indicates all the current flights coming from the departure airport and their related delay times.")
index = 0
for row in flight_df_selected1.iterrows():
flight_number = flight_df_selected1['number'].values[index]
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
if option_time['real']['departure'] is None:
continue
elif option_time['real']['arrival'] is None:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
arrive_delta = None
col1, col2, col3 = st.columns(3)
col1.metric("Flight number",
flight_number)
col2.metric("Departure delay",
parse_time_hms(depart_delta))
col3.metric("Arrival delay",
arrive_delta)
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
arrive_delta = option_time['real']['arrival'] - option_time['scheduled']['arrival']
col1, col2, col3 = st.columns(3)
col1.metric("Flight number",
flight_number)
col2.metric("Departure delay",
parse_time_hms(depart_delta))
col3.metric("Arrival delay",
parse_time_hms(arrive_delta))
index = index + 1
with st.expander("Flight delay of different airlines"):
st.write("This data compares the punctuality and departure delay times between different airlines.")
depart_delay = []
index = 0
for row in flight_df_selected1.iterrows():
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
if option_time['real']['departure'] is None:
continue
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
depart_delta = parse_time_hms(depart_delta)
depart_delay.append(depart_delta)
index = index + 1
flight_df_selected1['depart_delay'] = depart_delay
stripplot = alt.Chart(flight_df_selected1, width=640).mark_circle(size=30).encode(
x=alt.X(
'depart_delay',
title='Departure delay',
scale=alt.Scale()),
y=alt.Y(
'airline_iata',
title='Airline iata'),
color=alt.Color('airline_iata', legend=alt.Legend(orient="right")),
tooltip=['number', 'airline_iata', 'depart_delay']
).transform_calculate(
jitter='sqrt(-2*log(random()))*cos(2*PI*random())'
).configure_facet(
spacing=0
).configure_view(
stroke=None
)
stripplot
with st.expander("Compare average departure delay of different airlines"):
depart_delay = []
index = 0
for row in flight_df_selected1.iterrows():
option_id = flight_df_selected1['id'].values[index]
option_detail = ad.get_flight_details(option_id)
option_time = option_detail['time']
if option_time['real']['departure'] is None:
continue
else:
depart_delta = option_time['real']['departure'] - option_time['scheduled']['departure']
# depart_delta = parse_time_hms(depart_delta)
depart_delay.append(depart_delta)
index = index + 1
flight_df_selected1['depart_delay'] = depart_delay
average_delay = []
airline_average_delay_parsed = []
index = 0
for row in flight_df_selected1.iterrows():
ite_airline = flight_df_selected1['airline_iata'].values[index]
airline_data = flight_df_selected1[flight_df_selected1['airline_iata'] == ite_airline]
airline_average_delay = airline_data['depart_delay'].mean()
average_delay_parsed = parse_time_hms(airline_average_delay)
average_delay_parsed = str(average_delay_parsed).rstrip(':0')
airline_average_delay = round(airline_average_delay, 2)
# airline_average_delay = parse_time_hms(airline_average_delay)
average_delay.append(airline_average_delay)
airline_average_delay_parsed.append(average_delay_parsed)
index = index + 1
flight_df_selected1['airline_average_delay'] = average_delay
flight_df_selected1['average_delay_parsed'] = airline_average_delay_parsed
flight_df_selected2 = flight_df_selected1.drop_duplicates(subset=['airline_iata'], keep='first')
flight_df_selected2 = flight_df_selected2.sort_values(by=['airline_average_delay'], ascending=False)
barchart = alt.Chart(flight_df_selected2, width=640).mark_bar().encode(
x=alt.X('airline_average_delay', axis=alt.Axis(labels=False)),
y=alt.Y('airline_iata', sort=alt.EncodingSortField(field="airline_average_delay", op="count", order='ascending')),
tooltip=['airline_iata', 'average_delay_parsed']
)
text = barchart.mark_text(
align='left',
baseline='middle',
dx=3 # Nudges text to right so it doesn't appear on top of the bar
).encode(
text='average_delay_parsed'
)
(barchart + text).properties(height=900)
barchart + text
index = 0
for row in flight_df_selected2.iterrows():
ite_airline = flight_df_selected2['airline_iata'].values[index]
ite_delay = flight_df_selected2['average_delay_parsed'].values[index]
# ite_delay = parse_time_hms(ite_delay)
ite_delay = str(ite_delay).rstrip(':0')
col1, col2 = st.columns(2)
col1.metric("Airline",
ite_airline)
col2.metric("Average departure delay",
ite_delay)
index = index + 1
# ------------ Delay Analysis ends ---------------------
else:
# ------------------------ Flight price prediction starts ------------------------------
## Price Prediction
st.title("Flight Price Analysis")
# 1. ML prediction
st.header("Flight Price Prediction")
st.write("Tell us your intended flight information and get predicted flight price value and range.")
X_train=pd.read_csv('flight-price/X_train.csv')
features = list(X_train.columns)
del X_train
df_pred = pd.DataFrame(0, index=np.arange(1), columns=features)
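    # df_pred starts as a single all-zero row over the training feature columns; the widget
    # selections below switch on the matching one-hot indicators (origin/destination state,
    # quarter and airline) before the model is queried.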
col1, col2 = st.columns([3, 2])
with col2:
og = st.selectbox('Origin', np.array(origin),index=30)
de = st.selectbox('Destination', np.array(dest),index=4)
season = st.selectbox('Season', ['Spring','Summer','Fall','Winter'])
airline = st.selectbox('Airline Company', np.array(air))
numT = st.slider('Number of tickets', 1, 15, 1)
if og != "Virgin Islands":
df_pred[f'o{og}'] = 1
else:
df_pred['oU.S. Virgin Islands']=1
if de != "Virgin Islands":
df_pred[f'd{de}'] = 1
else:
df_pred['dU.S. Virgin Islands']=1
if season!='Spring':
df_pred[quarter_dic[season]] = 1
if airline[-3:-1]!='AA':
df_pred[airline[-3:-1]] = 1
df_pred['NumTicketsOrdered'] = numT
if og!=de:
try:
miles = miles_dic[(og,de)]
except:
miles = miles_dic[(de,og)]
df_pred['log_miles']=np.log(miles)
else:
st.markdown(" ")
if og!=de:
low, mean, high = get_pi(pd.DataFrame(df_pred))
with col1:
st.subheader("Predicted Price per Ticket")
st.metric("Low", f'${low}',"+$",delta_color="inverse")
st.metric("Mean", f'${mean}')
st.metric("High", f'${high}',"-$",delta_color="inverse")
df_interval = pd.DataFrame([[low,mean,high]],columns=['Low','Mean','High'])
st.write("See where your flight falls in the historical price distribution (2018)")
with st.expander("See price distribution"):
# plot price dist
bar = alt.Chart(df_viz).mark_bar(opacity=0.3,tooltip = True).encode(
alt.X('PricePerTicket:Q',title="Price per Ticket ($)"),#scale=alt.Scale(type='log')),
alt.Y('count()',title='Raw Frequency Count')
).properties(
title='Unit Price Distribution',
width=600,
height=400
#).transform_filter(
).interactive()
mean = alt.Chart(df_interval).mark_rule(color='purple',tooltip=True).encode(
x='Mean:Q',
size=alt.value(4),
)
low = alt.Chart(df_interval.sample(1)).mark_rule(color='darkblue',tooltip=True).encode(
x='Low:Q',
size=alt.value(2),
#strokeDash='Quarter'
)
high = alt.Chart(df_interval.sample(1)).mark_rule(color='darkblue',tooltip=True).encode(
x='High:Q',
size=alt.value(2),
#strokeDash='Quarter'
)
price_chart = bar + mean + low+ high
st.altair_chart(price_chart,use_container_width=True)
else:
with col1:
st.metric(" ", 'Not Available')
st.markdown("**Please choose a different origin or destination!**")
# ------------------------ Flight price prediction ends ------------------------------
# ------------------------ Flight price comparison starts ------------------------------
## Price comparison
st.header("Check the historical information of the flight you are interested in")
st.write('We will look at some historical data in 2018.')
df = load_data_viz()
cols = st.columns(4)
with cols[0]:
ogs = sorted(df['OriginState'].unique())
ogstate = st.selectbox('Origin State', ogs,index=ogs.index('New York'))
with cols[1]:
des = sorted(df['DestState'].unique())
destate = st.selectbox('Destination State', des,index=des.index('California'))
with cols[2]:
quarter = st.multiselect('Quarter',sorted(df['Quarter'].unique()))
with cols[3]:
airline = st.multiselect('Airline Company', sorted(df['AirlineCompany'].unique()))
slice_labels = get_slice_membership(df, ogstate, destate, quarter,airline)
slice_labels.name = "slice_membership"
df_show = df[slice_labels].iloc[:,:][['PricePerTicket','og','dest','Quarter','AirlineCompany']].sort_values(by='PricePerTicket')
df_show = df_show.rename(columns={'PricePerTicket':'Price per Ticket ($)','og':'Origin','dest':'Destination'}).reset_index(drop=True)
df_show['Price per Ticket ($)'] = df_show['Price per Ticket ($)'].apply(lambda x: "{:.2f}".format(x))
if df_show.empty:
st.metric(" ", "No Historical Data Available")
st.write("Please deselect some quarter/airline options or change origin/destination state.")
else:
st.dataframe(data=df_show)
# ------------------------ Flight price comparison ends ------------------------------
df = load_data('flight-price/train_viz.csv')
st.header("Choose the season you want to travel, find the most economical route and airline")
quarter = st.selectbox('Season(Quarter)', sorted(df['Quarter'].unique()))
season_df = get_season(df,quarter)
# Take top 20 frequency states
statelist = ['California','Florida','Texas','New York','Georgia','Illinois','Nevada','Virginia','Massachusetts',
'Washington','Pennsylvania','Arizona','New Jersey','Minnesota','Michigan','Missouri','Maryland','Hawaii']
heat_price = season_df[season_df['OriginState'].isin(statelist) ]
heat_price = heat_price[heat_price['DestState'].isin(statelist) ]
# Take average price and miles per route
heat_price = heat_price.groupby(['OriginState','DestState'])[['PricePerTicket','Miles']].mean().reset_index()
# Drop the invalid value(origin = destination)
heat_price = heat_price[heat_price['OriginState'] != heat_price['DestState']]
pts = alt.selection(type="multi", encodings=['x','y'])
heat = alt.Chart(heat_price).mark_rect().encode(
x='OriginState:O',
y='DestState:O',
color=alt.condition(pts,'PricePerTicket:Q', alt.ColorValue("grey")),
tooltip=['OriginState', 'DestState', 'PricePerTicket','Miles']
).add_selection(pts)
box = alt.Chart(df).mark_boxplot(extent='min-max').encode(
x='AirlineCompany:O',
y='PricePerTicket:Q',
color=alt.Color('AirlineCompany')
).properties(
width=500,
height=300,
).transform_filter(
pts
)
st.altair_chart(alt.vconcat(heat,box),use_container_width=True)
st.header("Compare the price of different destination based on the origin you choose")
origin = st.selectbox('Origin', sorted(df['OriginState'].unique()))
def origin_data(origin,df):
subset = df[df['OriginState']==origin]
subset = subset.groupby(['OriginState','DestState'])[['PricePerTicket','Miles']].mean().reset_index()
merged = subset.merge(data.income().groupby('name').mean().reset_index(), how = 'inner', left_on ='DestState', right_on= 'name')
return merged
subset = origin_data(origin,df)
pts = alt.selection(type="multi", encodings=['x','y'])
heat_bar = alt.Chart(subset).mark_rect().encode(
x='DestState:O',
y='OriginState:O',
color=alt.condition(pts,'PricePerTicket:Q', alt.ColorValue("grey")),
tooltip=['DestState']
).add_selection(pts)
states = alt.topo_feature(data.us_10m.url, 'states')
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
width=600,
height=400
).project('albersUsa')
foreground = alt.Chart(subset).mark_geoshape().encode(
shape='geo:G',
color=alt.condition(pts, 'name:N', alt.value('lightgray')),
tooltip=['OriginState', 'DestState', 'PricePerTicket','Miles']
).transform_lookup(
lookup='id',
from_=alt.LookupData(data=states, key='id'),
as_='geo'
).properties(
width=600,
height=400,
).project(
type='albersUsa'
)
map = background + foreground
st.altair_chart(alt.vconcat(heat_bar, map),use_container_width=True)
st.sidebar.title("Note")
st.sidebar.write("This flight price analysis consists of four parts.\
The first part is a flight customization section with predicted price range \
against historical price distribution.\
The second part presents a table of customized historical flights of interest. \
The third part displays the historical average flight price by route and airline company.\
The last part shows the available destination on the map and other information based on a chosen origin. ")
|
[
"pandas.read_csv",
"utils.parse_time_hms",
"altair.Chart",
"numpy.log",
"streamlit.sidebar.expander",
"utils.parse_time",
"numpy.array",
"altair.X",
"altair.Y",
"altair.Legend",
"streamlit.metric",
"map.create_map",
"streamlit.header",
"numpy.arange",
"streamlit.title",
"streamlit.sidebar.title",
"altair.value",
"streamlit.cache",
"streamlit.sidebar.write",
"streamlit.expander",
"altair.topo_feature",
"vega_datasets.data.income",
"numpy.exp",
"altair.vconcat",
"pandas.DataFrame",
"streamlit.columns",
"altair.LookupData",
"streamlit.markdown",
"streamlit.altair_chart",
"altair.EncodingSortField",
"altair.Axis",
"streamlit.write",
"altair.selection",
"altair.ColorValue",
"streamlit.dataframe",
"streamlit.subheader",
"streamlit.selectbox",
"altair.Color",
"altair.Scale",
"airdata.AirData",
"streamlit.sidebar.radio",
"streamlit.slider"
] |
[((358, 405), 'streamlit.cache', 'st.cache', ([], {'ttl': '(60 * 60)', 'suppress_st_warning': '(True)'}), '(ttl=60 * 60, suppress_st_warning=True)\n', (366, 405), True, 'import streamlit as st\n'), ((2654, 2764), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Menu"""', "['Introduction', 'Flight Map', 'Flight Delay Analysis', 'Flight Price Analysis'\n ]"], {}), "('Menu', ['Introduction', 'Flight Map',\n 'Flight Delay Analysis', 'Flight Price Analysis'])\n", (2670, 2764), True, 'import streamlit as st\n'), ((434, 443), 'airdata.AirData', 'AirData', ([], {}), '()\n', (441, 443), False, 'from airdata import AirData\n'), ((2518, 2535), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (2529, 2535), True, 'import pandas as pd\n'), ((3106, 3129), 'streamlit.columns', 'st.columns', (['[0.5, 1, 4]'], {}), '([0.5, 1, 4])\n', (3116, 3129), True, 'import streamlit as st\n'), ((3952, 3990), 'streamlit.write', 'st.write', (['text'], {'unsafe_allow_html': '(True)'}), '(text, unsafe_allow_html=True)\n', (3960, 3990), True, 'import streamlit as st\n'), ((5486, 5524), 'streamlit.write', 'st.write', (['text'], {'unsafe_allow_html': '(True)'}), '(text, unsafe_allow_html=True)\n', (5494, 5524), True, 'import streamlit as st\n'), ((5803, 5841), 'streamlit.write', 'st.write', (['text'], {'unsafe_allow_html': '(True)'}), '(text, unsafe_allow_html=True)\n', (5811, 5841), True, 'import streamlit as st\n'), ((1095, 1133), 'pandas.read_csv', 'pd.read_csv', (['"""flight-price/df_viz.csv"""'], {}), "('flight-price/df_viz.csv')\n", (1106, 1133), True, 'import pandas as pd\n'), ((5892, 5939), 'streamlit.title', 'st.title', (['"""Real-time Flight Data Visualization"""'], {}), "('Real-time Flight Data Visualization')\n", (5900, 5939), True, 'import streamlit as st\n'), ((6477, 6789), 'streamlit.write', 'st.write', (['"""This is a map of real-time flights and airports. The blue circles are the airport, while the red squares are the flights. You can utilize the tool bar on the left tab to explore the data. You can also move your mouse over the map to see more information."""'], {}), "(\n 'This is a map of real-time flights and airports. The blue circles are the airport, while the red squares are the flights. You can utilize the tool bar on the left tab to explore the data. You can also move your mouse over the map to see more information.'\n )\n", (6485, 6789), True, 'import streamlit as st\n'), ((6800, 6837), 'map.create_map', 'create_map', (['flight_df', 'field', 'to_show'], {}), '(flight_df, field, to_show)\n', (6810, 6837), False, 'from map import create_map\n'), ((6843, 6893), 'streamlit.altair_chart', 'st.altair_chart', (['map_air'], {'use_container_width': '(True)'}), '(map_air, use_container_width=True)\n', (6858, 6893), True, 'import streamlit as st\n'), ((6898, 6922), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Note"""'], {}), "('Note')\n", (6914, 6922), True, 'import streamlit as st\n'), ((6928, 7298), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""This visualization consists of three components. The first component is a map that shows real-time flights and airports in the U.S. The second component, linked to the first component, is an analysis tool for the real-time flight and airport data. The third component displays the time information of a flight."""'], {}), "(\n 'This visualization consists of three components. The first component is a map that shows real-time flights and airports in the U.S. 
The second component, linked to the first component, is an analysis tool for the real-time flight and airport data. The third component displays the time information of a flight.'\n )\n", (6944, 7298), True, 'import streamlit as st\n'), ((7570, 7631), 'streamlit.write', 'st.write', (['"""Here we display the time information of a flight."""'], {}), "('Here we display the time information of a flight.')\n", (7578, 7631), True, 'import streamlit as st\n'), ((8112, 8125), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (8122, 8125), True, 'import streamlit as st\n'), ((8664, 8677), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (8674, 8677), True, 'import streamlit as st\n'), ((1423, 1436), 'numpy.exp', 'np.exp', (['lb[0]'], {}), '(lb[0])\n', (1429, 1436), True, 'import numpy as np\n'), ((1447, 1462), 'numpy.exp', 'np.exp', (['pred[0]'], {}), '(pred[0])\n', (1453, 1462), True, 'import numpy as np\n'), ((1473, 1486), 'numpy.exp', 'np.exp', (['ub[0]'], {}), '(ub[0])\n', (1479, 1486), True, 'import numpy as np\n'), ((1557, 1598), 'pandas.read_csv', 'pd.read_csv', (['"""flight-price/train_viz.csv"""'], {}), "('flight-price/train_viz.csv')\n", (1568, 1598), True, 'import pandas as pd\n'), ((6002, 6054), 'streamlit.sidebar.expander', 'st.sidebar.expander', (['"""Analysis for flights/airports"""'], {}), "('Analysis for flights/airports')\n", (6021, 6054), True, 'import streamlit as st\n'), ((6064, 6149), 'streamlit.write', 'st.write', (['"""This is an analysis tool from the perspective of flights or airports"""'], {}), "('This is an analysis tool from the perspective of flights or airports'\n )\n", (6072, 6149), True, 'import streamlit as st\n'), ((6163, 6217), 'streamlit.selectbox', 'st.selectbox', (['"""Data to look at"""', "['flight', 'airport']"], {}), "('Data to look at', ['flight', 'airport'])\n", (6175, 6217), True, 'import streamlit as st\n'), ((8187, 8236), 'utils.parse_time', 'parse_time', (["option_time['scheduled']['departure']"], {}), "(option_time['scheduled']['departure'])\n", (8197, 8236), False, 'from utils import parse_time, parse_time_hms\n'), ((8514, 8558), 'utils.parse_time', 'parse_time', (["option_time['real']['departure']"], {}), "(option_time['real']['departure'])\n", (8524, 8558), False, 'from utils import parse_time, parse_time_hms\n'), ((8576, 8604), 'utils.parse_time_hms', 'parse_time_hms', (['depart_delta'], {}), '(depart_delta)\n', (8590, 8604), False, 'from utils import parse_time, parse_time_hms\n'), ((8720, 8767), 'utils.parse_time', 'parse_time', (["option_time['scheduled']['arrival']"], {}), "(option_time['scheduled']['arrival'])\n", (8730, 8767), False, 'from utils import parse_time, parse_time_hms\n'), ((8952, 8976), 'utils.parse_time', 'parse_time', (['arrival_time'], {}), '(arrival_time)\n', (8962, 8976), False, 'from utils import parse_time, parse_time_hms\n'), ((9259, 9292), 'streamlit.title', 'st.title', (['"""Flight Delay Analysis"""'], {}), "('Flight Delay Analysis')\n", (9267, 9292), True, 'import streamlit as st\n'), ((9298, 9322), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Note"""'], {}), "('Note')\n", (9314, 9322), True, 'import streamlit as st\n'), ((9328, 9941), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""This flight delay analysis consists of four parts: The first part is a data slicing tool that allows the users to filter any flight data according to the different departure airport. 
The second part lists out all the flights flying from the selected departure airport, and displays the relevant delay time information of the flights. The third part displays a stripplot graph to allow the users to visually compare the different departure delay time of flights of different airlines. The last part compares the average delay time of different airlines. """'], {}), "(\n 'This flight delay analysis consists of four parts: The first part is a data slicing tool that allows the users to filter any flight data according to the different departure airport. The second part lists out all the flights flying from the selected departure airport, and displays the relevant delay time information of the flights. The third part displays a stripplot graph to allow the users to visually compare the different departure delay time of flights of different airlines. The last part compares the average delay time of different airlines. '\n )\n", (9344, 9941), True, 'import streamlit as st\n'), ((9951, 9960), 'airdata.AirData', 'AirData', ([], {}), '()\n', (9958, 9960), False, 'from airdata import AirData\n'), ((10002, 10025), 'streamlit.header', 'st.header', (['"""Slice Data"""'], {}), "('Slice Data')\n", (10011, 10025), True, 'import streamlit as st\n'), ((10030, 10128), 'streamlit.write', 'st.write', (['"""You can filter the airline data by choosing the different departure airport."""'], {}), "(\n 'You can filter the airline data by choosing the different departure airport.'\n )\n", (10038, 10128), True, 'import streamlit as st\n'), ((10434, 10465), 'streamlit.header', 'st.header', (['"""Data Visualization"""'], {}), "('Data Visualization')\n", (10443, 10465), True, 'import streamlit as st\n'), ((17100, 17133), 'streamlit.title', 'st.title', (['"""Flight Price Analysis"""'], {}), "('Flight Price Analysis')\n", (17108, 17133), True, 'import streamlit as st\n'), ((17166, 17202), 'streamlit.header', 'st.header', (['"""Flight Price Prediction"""'], {}), "('Flight Price Prediction')\n", (17175, 17202), True, 'import streamlit as st\n'), ((17207, 17317), 'streamlit.write', 'st.write', (['"""Tell us your intended flight information and get predicted flight price value and range."""'], {}), "(\n 'Tell us your intended flight information and get predicted flight price value and range.'\n )\n", (17215, 17317), True, 'import streamlit as st\n'), ((17326, 17365), 'pandas.read_csv', 'pd.read_csv', (['"""flight-price/X_train.csv"""'], {}), "('flight-price/X_train.csv')\n", (17337, 17365), True, 'import pandas as pd\n'), ((17505, 17523), 'streamlit.columns', 'st.columns', (['[3, 2]'], {}), '([3, 2])\n', (17515, 17523), True, 'import streamlit as st\n'), ((20841, 20927), 'streamlit.header', 'st.header', (['"""Check the historical information of the flight you are interested in"""'], {}), "(\n 'Check the historical information of the flight you are interested in')\n", (20850, 20927), True, 'import streamlit as st\n'), ((20927, 20984), 'streamlit.write', 'st.write', (['"""We will look at some historical data in 2018."""'], {}), "('We will look at some historical data in 2018.')\n", (20935, 20984), True, 'import streamlit as st\n'), ((21023, 21036), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (21033, 21036), True, 'import streamlit as st\n'), ((22460, 22563), 'streamlit.header', 'st.header', (['"""Choose the season you want to travel, find the most economical route and airline"""'], {}), "(\n 'Choose the season you want to travel, find the most economical route and airline'\n )\n", (22469, 22563), 
True, 'import streamlit as st\n'), ((23395, 23444), 'altair.selection', 'alt.selection', ([], {'type': '"""multi"""', 'encodings': "['x', 'y']"}), "(type='multi', encodings=['x', 'y'])\n", (23408, 23444), True, 'import altair as alt\n'), ((24078, 24174), 'streamlit.header', 'st.header', (['"""Compare the price of different destination based on the origin you choose"""'], {}), "(\n 'Compare the price of different destination based on the origin you choose'\n )\n", (24087, 24174), True, 'import streamlit as st\n'), ((24633, 24682), 'altair.selection', 'alt.selection', ([], {'type': '"""multi"""', 'encodings': "['x', 'y']"}), "(type='multi', encodings=['x', 'y'])\n", (24646, 24682), True, 'import altair as alt\n'), ((24935, 24978), 'altair.topo_feature', 'alt.topo_feature', (['data.us_10m.url', '"""states"""'], {}), "(data.us_10m.url, 'states')\n", (24951, 24978), True, 'import altair as alt\n'), ((25737, 25761), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Note"""'], {}), "('Note')\n", (25753, 25761), True, 'import streamlit as st\n'), ((25767, 26274), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""This flight price analysis consists of four parts. The first part is a flight customization section with predicted price range against historical price distribution. The second part presents a table of customized historical flights of interest. The third part displays the historical average flight price by route and airline company. The last part shows the available destination on the map and other information based on a chosen origin. """'], {}), "(\n 'This flight price analysis consists of four parts. The first part is a flight customization section with predicted price range against historical price distribution. The second part presents a table of customized historical flights of interest. The third part displays the historical average flight price by route and airline company. The last part shows the available destination on the map and other information based on a chosen origin. 
'\n )\n", (25783, 26274), True, 'import streamlit as st\n'), ((6270, 6347), 'streamlit.selectbox', 'st.selectbox', (['"""Variable of interest"""', "['heading', 'altitude', 'ground_speed']"], {}), "('Variable of interest', ['heading', 'altitude', 'ground_speed'])\n", (6282, 6347), True, 'import streamlit as st\n'), ((6382, 6475), 'streamlit.selectbox', 'st.selectbox', (['"""Variable of interest"""', "['origin_airport_iata', 'destination_airport_iata']"], {}), "('Variable of interest', ['origin_airport_iata',\n 'destination_airport_iata'])\n", (6394, 6475), True, 'import streamlit as st\n'), ((10128, 10151), 'streamlit.expander', 'st.expander', (['"""Airports"""'], {}), "('Airports')\n", (10139, 10151), True, 'import streamlit as st\n'), ((10252, 10307), 'streamlit.selectbox', 'st.selectbox', (['"""Departure Airport:"""', 'origin_airport_list'], {}), "('Departure Airport:', origin_airport_list)\n", (10264, 10307), True, 'import streamlit as st\n'), ((10475, 10536), 'streamlit.expander', 'st.expander', (['"""Flight delay from different departure airports"""'], {}), "('Flight delay from different departure airports')\n", (10486, 10536), True, 'import streamlit as st\n'), ((10546, 10676), 'streamlit.write', 'st.write', (['"""This data indicates all the current flights coming from the departure airport and their related delay times."""'], {}), "(\n 'This data indicates all the current flights coming from the departure airport and their related delay times.'\n )\n", (10554, 10676), True, 'import streamlit as st\n'), ((12200, 12249), 'streamlit.expander', 'st.expander', (['"""Flight delay of different airlines"""'], {}), "('Flight delay of different airlines')\n", (12211, 12249), True, 'import streamlit as st\n'), ((12259, 12369), 'streamlit.write', 'st.write', (['"""This data compares the punctuality and departure delay times between different airlines."""'], {}), "(\n 'This data compares the punctuality and departure delay times between different airlines.'\n )\n", (12267, 12369), True, 'import streamlit as st\n'), ((13727, 13795), 'streamlit.expander', 'st.expander', (['"""Compare average departure delay of different airlines"""'], {}), "('Compare average departure delay of different airlines')\n", (13738, 13795), True, 'import streamlit as st\n'), ((17697, 17759), 'streamlit.selectbox', 'st.selectbox', (['"""Season"""', "['Spring', 'Summer', 'Fall', 'Winter']"], {}), "('Season', ['Spring', 'Summer', 'Fall', 'Winter'])\n", (17709, 17759), True, 'import streamlit as st\n'), ((17837, 17877), 'streamlit.slider', 'st.slider', (['"""Number of tickets"""', '(1)', '(15)', '(1)'], {}), "('Number of tickets', 1, 15, 1)\n", (17846, 17877), True, 'import streamlit as st\n'), ((19036, 19124), 'streamlit.write', 'st.write', (['"""See where your flight falls in the historical price distribution (2018)"""'], {}), "(\n 'See where your flight falls in the historical price distribution (2018)')\n", (19044, 19124), True, 'import streamlit as st\n'), ((22100, 22146), 'streamlit.metric', 'st.metric', (['""" """', '"""No Historical Data Available"""'], {}), "(' ', 'No Historical Data Available')\n", (22109, 22146), True, 'import streamlit as st\n'), ((22155, 22257), 'streamlit.write', 'st.write', (['"""Please deselect some quarter/airline options or change origin/destination state."""'], {}), "(\n 'Please deselect some quarter/airline options or change origin/destination state.'\n )\n", (22163, 22257), True, 'import streamlit as st\n'), ((22267, 22293), 'streamlit.dataframe', 'st.dataframe', ([], {'data': 
'df_show'}), '(data=df_show)\n', (22279, 22293), True, 'import streamlit as st\n'), ((24021, 24043), 'altair.vconcat', 'alt.vconcat', (['heat', 'box'], {}), '(heat, box)\n', (24032, 24043), True, 'import altair as alt\n'), ((25675, 25701), 'altair.vconcat', 'alt.vconcat', (['heat_bar', 'map'], {}), '(heat_bar, map)\n', (25686, 25701), True, 'import altair as alt\n'), ((14899, 14936), 'utils.parse_time_hms', 'parse_time_hms', (['airline_average_delay'], {}), '(airline_average_delay)\n', (14913, 14936), False, 'from utils import parse_time, parse_time_hms\n'), ((16701, 16714), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (16711, 16714), True, 'import streamlit as st\n'), ((17455, 17467), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (17464, 17467), True, 'import numpy as np\n'), ((17588, 17604), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (17596, 17604), True, 'import numpy as np\n'), ((17656, 17670), 'numpy.array', 'np.array', (['dest'], {}), '(dest)\n', (17664, 17670), True, 'import numpy as np\n'), ((17807, 17820), 'numpy.array', 'np.array', (['air'], {}), '(air)\n', (17815, 17820), True, 'import numpy as np\n'), ((18533, 18546), 'numpy.log', 'np.log', (['miles'], {}), '(miles)\n', (18539, 18546), True, 'import numpy as np\n'), ((18573, 18589), 'streamlit.markdown', 'st.markdown', (['""" """'], {}), "(' ')\n", (18584, 18589), True, 'import streamlit as st\n'), ((18652, 18673), 'pandas.DataFrame', 'pd.DataFrame', (['df_pred'], {}), '(df_pred)\n', (18664, 18673), True, 'import pandas as pd\n'), ((18706, 18748), 'streamlit.subheader', 'st.subheader', (['"""Predicted Price per Ticket"""'], {}), "('Predicted Price per Ticket')\n", (18718, 18748), True, 'import streamlit as st\n'), ((18761, 18817), 'streamlit.metric', 'st.metric', (['"""Low"""', 'f"""${low}"""', '"""+$"""'], {'delta_color': '"""inverse"""'}), "('Low', f'${low}', '+$', delta_color='inverse')\n", (18770, 18817), True, 'import streamlit as st\n'), ((18828, 18857), 'streamlit.metric', 'st.metric', (['"""Mean"""', 'f"""${mean}"""'], {}), "('Mean', f'${mean}')\n", (18837, 18857), True, 'import streamlit as st\n'), ((18870, 18928), 'streamlit.metric', 'st.metric', (['"""High"""', 'f"""${high}"""', '"""-$"""'], {'delta_color': '"""inverse"""'}), "('High', f'${high}', '-$', delta_color='inverse')\n", (18879, 18928), True, 'import streamlit as st\n'), ((18953, 19019), 'pandas.DataFrame', 'pd.DataFrame', (['[[low, mean, high]]'], {'columns': "['Low', 'Mean', 'High']"}), "([[low, mean, high]], columns=['Low', 'Mean', 'High'])\n", (18965, 19019), True, 'import pandas as pd\n'), ((19133, 19170), 'streamlit.expander', 'st.expander', (['"""See price distribution"""'], {}), "('See price distribution')\n", (19144, 19170), True, 'import streamlit as st\n'), ((20351, 20405), 'streamlit.altair_chart', 'st.altair_chart', (['price_chart'], {'use_container_width': '(True)'}), '(price_chart, use_container_width=True)\n', (20366, 20405), True, 'import streamlit as st\n'), ((20455, 20486), 'streamlit.metric', 'st.metric', (['""" """', '"""Not Available"""'], {}), "(' ', 'Not Available')\n", (20464, 20486), True, 'import streamlit as st\n'), ((20499, 20566), 'streamlit.markdown', 'st.markdown', (['"""**Please choose a different origin or destination!**"""'], {}), "('**Please choose a different origin or destination!**')\n", (20510, 20566), True, 'import streamlit as st\n'), ((12863, 12891), 'utils.parse_time_hms', 'parse_time_hms', (['depart_delta'], {}), '(depart_delta)\n', (12877, 12891), False, 'from utils import 
parse_time, parse_time_hms\n'), ((11295, 11308), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (11305, 11308), True, 'import streamlit as st\n'), ((11846, 11859), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (11856, 11859), True, 'import streamlit as st\n'), ((19776, 19788), 'altair.value', 'alt.value', (['(4)'], {}), '(4)\n', (19785, 19788), True, 'import altair as alt\n'), ((19970, 19982), 'altair.value', 'alt.value', (['(2)'], {}), '(2)\n', (19979, 19982), True, 'import altair as alt\n'), ((20187, 20199), 'altair.value', 'alt.value', (['(2)'], {}), '(2)\n', (20196, 20199), True, 'import altair as alt\n'), ((11472, 11500), 'utils.parse_time_hms', 'parse_time_hms', (['depart_delta'], {}), '(depart_delta)\n', (11486, 11500), False, 'from utils import parse_time, parse_time_hms\n'), ((12023, 12051), 'utils.parse_time_hms', 'parse_time_hms', (['depart_delta'], {}), '(depart_delta)\n', (12037, 12051), False, 'from utils import parse_time, parse_time_hms\n'), ((12126, 12154), 'utils.parse_time_hms', 'parse_time_hms', (['arrive_delta'], {}), '(arrive_delta)\n', (12140, 12154), False, 'from utils import parse_time, parse_time_hms\n'), ((15697, 15738), 'altair.Chart', 'alt.Chart', (['flight_df_selected2'], {'width': '(640)'}), '(flight_df_selected2, width=640)\n', (15706, 15738), True, 'import altair as alt\n'), ((15808, 15830), 'altair.Axis', 'alt.Axis', ([], {'labels': '(False)'}), '(labels=False)\n', (15816, 15830), True, 'import altair as alt\n'), ((15874, 15962), 'altair.EncodingSortField', 'alt.EncodingSortField', ([], {'field': '"""airline_average_delay"""', 'op': '"""count"""', 'order': '"""ascending"""'}), "(field='airline_average_delay', op='count', order=\n 'ascending')\n", (15895, 15962), True, 'import altair as alt\n'), ((23602, 23624), 'altair.ColorValue', 'alt.ColorValue', (['"""grey"""'], {}), "('grey')\n", (23616, 23624), True, 'import altair as alt\n'), ((24840, 24862), 'altair.ColorValue', 'alt.ColorValue', (['"""grey"""'], {}), "('grey')\n", (24854, 24862), True, 'import altair as alt\n'), ((19657, 19679), 'altair.Chart', 'alt.Chart', (['df_interval'], {}), '(df_interval)\n', (19666, 19679), True, 'import altair as alt\n'), ((23456, 23477), 'altair.Chart', 'alt.Chart', (['heat_price'], {}), '(heat_price)\n', (23465, 23477), True, 'import altair as alt\n'), ((23862, 23889), 'altair.Color', 'alt.Color', (['"""AirlineCompany"""'], {}), "('AirlineCompany')\n", (23871, 23889), True, 'import altair as alt\n'), ((24698, 24715), 'altair.Chart', 'alt.Chart', (['subset'], {}), '(subset)\n', (24707, 24715), True, 'import altair as alt\n'), ((25001, 25018), 'altair.Chart', 'alt.Chart', (['states'], {}), '(states)\n', (25010, 25018), True, 'import altair as alt\n'), ((25451, 25488), 'altair.LookupData', 'alt.LookupData', ([], {'data': 'states', 'key': '"""id"""'}), "(data=states, key='id')\n", (25465, 25488), True, 'import altair as alt\n'), ((19299, 19354), 'altair.X', 'alt.X', (['"""PricePerTicket:Q"""'], {'title': '"""Price per Ticket ($)"""'}), "('PricePerTicket:Q', title='Price per Ticket ($)')\n", (19304, 19354), True, 'import altair as alt\n'), ((19401, 19446), 'altair.Y', 'alt.Y', (['"""count()"""'], {'title': '"""Raw Frequency Count"""'}), "('count()', title='Raw Frequency Count')\n", (19406, 19446), True, 'import altair as alt\n'), ((23735, 23748), 'altair.Chart', 'alt.Chart', (['df'], {}), '(df)\n', (23744, 23748), True, 'import altair as alt\n'), ((24457, 24470), 'vega_datasets.data.income', 'data.income', ([], {}), '()\n', (24468, 24470), False, 
'from vega_datasets import data\n'), ((13274, 13317), 'altair.Y', 'alt.Y', (['"""airline_iata"""'], {'title': '"""Airline iata"""'}), "('airline_iata', title='Airline iata')\n", (13279, 13317), True, 'import altair as alt\n'), ((25296, 25318), 'altair.value', 'alt.value', (['"""lightgray"""'], {}), "('lightgray')\n", (25305, 25318), True, 'import altair as alt\n'), ((19220, 19237), 'altair.Chart', 'alt.Chart', (['df_viz'], {}), '(df_viz)\n', (19229, 19237), True, 'import altair as alt\n'), ((25188, 25205), 'altair.Chart', 'alt.Chart', (['subset'], {}), '(subset)\n', (25197, 25205), True, 'import altair as alt\n'), ((13059, 13100), 'altair.Chart', 'alt.Chart', (['flight_df_selected1'], {'width': '(640)'}), '(flight_df_selected1, width=640)\n', (13068, 13100), True, 'import altair as alt\n'), ((13246, 13257), 'altair.Scale', 'alt.Scale', ([], {}), '()\n', (13255, 13257), True, 'import altair as alt\n'), ((13403, 13429), 'altair.Legend', 'alt.Legend', ([], {'orient': '"""right"""'}), "(orient='right')\n", (13413, 13429), True, 'import altair as alt\n')]
|
import numpy as np
import pandas as pd
def Loader(events,args):
"""
Create a table with the pulses
"""
gb = events.groupby('Pulse',sort=False)
pulses = events.loc[gb.Sigma.idxmax()]
pulses.index = pulses.Pulse
pulses.index.name = None
pulses = pulses.drop('Pulse', axis='columns')
pulses.index.name = 'idx'
pulses['Rank'] = 0
pulses.Rank = pulses.Rank.astype(np.int8)
pulses['Candidate'] = -1
pulses.Candidate = pulses.Candidate.astype(np.int32)
pulses['N_events'] = gb.DM.count()
pulses.N_events = pulses.N_events.astype(np.int16)
pulses = pulses[pulses.N_events >= args.N_min]
if pulses.shape[0] == 0: return pulses
# Apply filters to discriminate interesting pulses
if not args.no_filter: classic_filters(events[events.Pulse.isin(pulses.index)], pulses, args)
#Store the pulses
pulses.sort_values(['DM','Time'], inplace=True)
if not args.no_store: pulses.to_hdf(args.store_name, 'pulses')
return pulses
def classic_filters(events, pulses, args):
"""
Apply RFI filters to the pulses
"""
RFI_code = 9
events = events[events.Pulse.isin(pulses.index)]
events.sort_values(by='DM',inplace=True)
gb = events.groupby('Pulse')
pulses.sort_index(inplace=True)
#Remove flat SNR pulses
pulses.Rank[pulses.Sigma / gb.Sigma.min() <= args.SNR_peak_min / args.SNR_min] = RFI_code
#Remove flat duration pulses (from Eq.6.21 of Pulsar Handbook)
pulses.Rank.loc[gb.Downfact.max() / pulses.Downfact < (args.SNR_peak_min / args.SNR_min)**2] = RFI_code
#Remove pulses peaking near the DM edges
if args.DM_range is not None:
DM_frac = (args.DM_range[1] - args.DM_range[0]) * 0.05 #Remove 5% of DM range from each edge
pulses.Rank[(pulses.DM < args.DM_range[0] + DM_frac) | (pulses.DM > args.DM_range[1] - DM_frac)] = RFI_code
#Remove pulses intersecting half the maximum SNR other than 2,4,6,8 times
def crosses(sig):
diff = sig - (sig.max() + sig.min()) / 2.
count = np.count_nonzero(np.diff(np.sign(diff)))
return (count != 2) & (count != 4) & (count != 6) & (count != 8)
pulses.Rank[gb.apply(lambda x: crosses(x.Sigma))] = RFI_code
#Remove weaker pulses within 20 ms of brighter ones
def simultaneous(p):
puls = pulses.Rank[np.abs(pulses.Time-p.Time) < 0.02]
if puls.shape[0] == 1: return False
if p.name == puls.index[0]: return False
else: return True
pulses.Rank[pulses.apply(lambda x: simultaneous(x), axis=1)] = RFI_code
return
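# Hedged usage sketch (not part of the original module): Loader expects an events
# DataFrame with at least Pulse, Sigma, DM, Time and Downfact columns, plus an args
# object carrying the parameters used above. All values below are assumptions:
#
#   from argparse import Namespace
#   args = Namespace(N_min=5, no_filter=False, no_store=True, store_name='pulses.hdf5',
#                    SNR_peak_min=8., SNR_min=5., DM_range=None)
#   pulses = Loader(events, args)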
|
[
"numpy.abs",
"numpy.sign"
] |
[((1990, 2003), 'numpy.sign', 'np.sign', (['diff'], {}), '(diff)\n', (1997, 2003), True, 'import numpy as np\n'), ((2239, 2267), 'numpy.abs', 'np.abs', (['(pulses.Time - p.Time)'], {}), '(pulses.Time - p.Time)\n', (2245, 2267), True, 'import numpy as np\n')]
|
# Author: <NAME> at 16/08/2021 <<EMAIL>>
# Licence: MIT License
# Copyright: <NAME> (2018) <<EMAIL>>
from functools import partial
import numpy as np
from scipy import linalg
from .utils import (readout_forward, _initialize_readout,
_prepare_inputs_for_learning)
from ..base.node import Node
from ..base.types import global_dtype
def _solve_ridge(XXT, YXT, ridge):
return linalg.solve(XXT + ridge, YXT.T, assume_a="sym")
def partial_backward(readout: Node, X_batch, Y_batch=None):
transient = readout.transient
X, Y = _prepare_inputs_for_learning(X_batch, Y_batch,
transient=transient,
bias=readout.input_bias,
allow_reshape=True)
xxt = X.T.dot(X)
yxt = Y.T.dot(X)
XXT = readout.get_buffer("XXT")
YXT = readout.get_buffer("YXT")
# This is not thread-safe, apparently, using Numpy memmap as buffers
# ok for parallelization then with a lock (see ESN object)
XXT += xxt
YXT += yxt
def backward(readout: Node, X=None, Y=None):
ridge = readout.ridge
XXT = readout.get_buffer("XXT")
YXT = readout.get_buffer("YXT")
input_dim = readout.input_dim
if readout.input_bias:
input_dim += 1
ridgeid = (ridge * np.eye(input_dim, dtype=global_dtype))
Wout_raw = _solve_ridge(XXT, YXT, ridgeid)
if readout.input_bias:
Wout, bias = Wout_raw[1:, :], Wout_raw[0, :][np.newaxis, :]
readout.set_param("Wout", Wout)
readout.set_param("bias", bias)
else:
readout.set_param("Wout", Wout_raw)
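# A minimal sketch (added for illustration, not part of the original module) of the
# closed-form ridge solution that the XXT/YXT buffers implement. The array names and
# sizes below are assumptions for a toy case where X already contains a bias column:
#
#   import numpy as np
#   X = np.random.rand(200, 6)                    # states (200 timesteps, 5 units + bias)
#   Y = np.random.rand(200, 2)                    # teacher outputs
#   XXT = X.T @ X                                 # accumulated batch by batch in partial_backward
#   YXT = Y.T @ X
#   ridgeid = 1e-6 * np.eye(6)
#   Wout = np.linalg.solve(XXT + ridgeid, YXT.T)  # same system _solve_ridge solves: (X^T X + ridge*I) W = X^T Y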
def initialize(readout: Node,
x=None,
y=None,
Wout_init=None):
_initialize_readout(readout, x, y, bias=readout.input_bias,
init_func=Wout_init)
def initialize_buffers(readout):
# create memmaped buffers for matrices X.X^T and Y.X^T pre-computed
# in parallel for ridge regression
# ! only memmap can be used ! Impossible to share Numpy arrays with
# different processes in r/w mode otherwise (with proper locking)
input_dim = readout.input_dim
output_dim = readout.output_dim
if readout.input_bias:
input_dim += 1
readout.create_buffer("XXT", (input_dim,
input_dim))
readout.create_buffer("YXT", (output_dim,
input_dim))
class Ridge(Node):
def __init__(self, output_dim=None, ridge=0.0, transient=0, Wout=None,
input_bias=True, name=None):
super(Ridge, self).__init__(params={"Wout": None, "bias": None},
hypers={"ridge": ridge,
"transient": transient,
"input_bias": input_bias},
forward=readout_forward,
partial_backward=partial_backward,
backward=backward,
output_dim=output_dim,
initializer=partial(initialize,
Wout_init=Wout),
buffers_initializer=initialize_buffers,
name=name)
|
[
"numpy.eye",
"functools.partial",
"scipy.linalg.solve"
] |
[((401, 449), 'scipy.linalg.solve', 'linalg.solve', (['(XXT + ridge)', 'YXT.T'], {'assume_a': '"""sym"""'}), "(XXT + ridge, YXT.T, assume_a='sym')\n", (413, 449), False, 'from scipy import linalg\n'), ((1328, 1365), 'numpy.eye', 'np.eye', (['input_dim'], {'dtype': 'global_dtype'}), '(input_dim, dtype=global_dtype)\n', (1334, 1365), True, 'import numpy as np\n'), ((3167, 3202), 'functools.partial', 'partial', (['initialize'], {'Wout_init': 'Wout'}), '(initialize, Wout_init=Wout)\n', (3174, 3202), False, 'from functools import partial\n')]
|
import pandas as pd
import numpy as np
from numpy import corrcoef
import matplotlib.pyplot as plt
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from math import *
plt.style.use('ggplot')
fig = plt.figure()
COUNTER = 1
#Return the category dictionary,categorical variables list and continuous list for every column in dataframe.
#The categories are assigned as "target(type)_feature(type)"
def get_category(df,target_name,categorical_name,columns_name):
cat_dict = {}
fin_cat_dict = {}
catg_catg = []
cont_cont = []
catg_cont = []
cont_catg = []
for col in columns_name:
if len(df[col].unique())<=2:
cat_dict[col] = "categorical"
elif col in categorical_name:
cat_dict[col] = "categorical"
else:
cat_dict[col] = "continous"
for col in cat_dict:
if cat_dict[col]=="categorical" and cat_dict[target_name]=="categorical":
fin_cat_dict[col] = "catg_catg"
catg_catg.append(col)
elif cat_dict[col]=="continous" and cat_dict[target_name]=="continous":
fin_cat_dict[col] = "cont_cont"
cont_cont.append(col)
elif cat_dict[col]=="continous" and cat_dict[target_name]=="categorical":
fin_cat_dict[col] = "catg_cont"
catg_cont.append(col)
else:
fin_cat_dict[col] = "cont_catg"
cont_catg.append(col)
return fin_cat_dict,catg_catg,cont_cont,catg_cont,cont_catg
#Return True if the categorical_name are present in the original dataframe columns.
def is_present(columns_name,categorical_name):
ls = [i for i in categorical_name if i not in columns_name]
if len(ls)==0:
return True
else:
raise ValueError(str(ls)+" is not present as a column in the data,Please check the name")
#Function removes columns containing string (non-numeric) data from the list and returns the cleaned list.
def clean_str_list(df,lst):
rem=[]
for i in lst:
res = any(isinstance(n,str) for n in df[i])
if res == True:
rem.append(i)
for j in rem:
lst.remove(j)
return lst
#Returns the Pearson Correlation Coefficient for the continuous data columns.
def pearson_correlation_cont_cont(x,y):
return corrcoef(x,y)
# This function is for the bivariate analysis between two continuous variables. Plots scatter plots and shows the correlation coefficient for the data.
def bivariate_analysis_cont_cont(cont_cont_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
clean_cont_cont_list = clean_str_list(df,cont_cont_list)
if len(clean_str_list(df,[target_name])) == 0 and len(cont_cont_list)>0:
raise ValueError("You seem to have a target variable with string values.")
clean_df = df.dropna()
for col in clean_cont_cont_list:
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = clean_df[col]
y = np.float32(clean_df[target_name])
corr = pearson_correlation_cont_cont(x,y)
plt.xlabel(col+"\n count "+str(count)+"\n Corr: "+str(np.float32(corr[0][1])), fontsize=10)
plt.ylabel(target_name, fontsize=10)
plt.scatter(x,y)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
#Chi-squared test is used to see the association between categorical vs categorical variables.
#Lower p-values are significant; they should be < 0.05
#chi value = X^2 = summation [(observed-expected)^2/expected]
# The distribution of the statistic X^2 is chi-square with (r-1)(c-1) degrees of freedom, where r represents the number of rows in the two-way table and c represents the number of columns. The distribution is denoted X^2(df), where df is the number of degrees of freedom.
#pvalue = P(chi-square with df degrees of freedom >= X^2)
def evaluate_chi(x,y):
chi,p_val = chi2(x,y)
return chi,p_val
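# Hedged sketch (not in the original script) of what evaluate_chi returns on toy data;
# the arrays below are made-up values for illustration only:
#
#   x = np.array([0, 1, 0, 1, 1, 0]).reshape(-1, 1)   # one non-negative feature column
#   y = np.array([0, 1, 0, 1, 0, 1])                  # binary target
#   chi, p_val = evaluate_chi(x, y)
#   # chi[0] is the chi-square statistic and p_val[0] its p-value; p_val[0] < 0.05
#   # would suggest an association between the feature and the target.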
def bivariate_analysis_catg_catg(catg_catg_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,bin_size="auto"):
clean_catg_catg_list = clean_str_list(df,catg_catg_list)
clean_df = df.dropna()
target_classes =df[target_name].unique()
label = [str(i) for i in target_classes]
c = 0
for col in clean_catg_catg_list:
summary = clean_df[col].describe()
binwidth = 0.7
if bin_size == 'auto':
bins_size =np.arange(min(clean_df[col].tolist()), max(clean_df[col].tolist()) + binwidth, binwidth)
else:
bins_size = bin_size
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[target_name]==i][col]) for i in target_classes]
y = clean_df[target_name]
chi,p_val = evaluate_chi(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(col+"\n chi: "+str(np.float32(chi[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel("Frequency", fontsize=10)
plt.hist(x,bins=bins_size,stacked=True,label = label)
plt.legend(prop={'size': 10})
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
c+=1
return plt,COUNTER
# Analysis of variance (ANOVA) is a collection of statistical models used to analyze the differences among group means and their associated procedures (such as "variation" among and between groups)
# In its simplest form, ANOVA provides a statistical test of whether or not the means of several groups are equal, and therefore generalizes the t-test to more than two groups. ANOVAs are useful for comparing (testing) three or more means (groups or variables) for statistical significance.
# A one-way ANOVA is used to compare the means of more than two independent groups. A one-way ANOVA comparing just two groups will give you the same results as the independent t test.
def evaluate_anova(x,y):
F_value,pvalue = f_classif(x,y)
return F_value,pvalue
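# Hedged sketch (not in the original script) of evaluate_anova on toy data; the
# values are made up for illustration only:
#
#   x = np.array([1.0, 1.2, 0.9, 3.1, 2.8, 3.0]).reshape(-1, 1)  # continuous feature
#   y = np.array([0, 0, 0, 1, 1, 1])                             # group labels
#   f_value, p_val = evaluate_anova(x, y)
#   # A large F statistic with a small p-value suggests the group means differ significantly.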
# In descriptive statistics, a box plot or boxplot is a convenient way of graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.
# Quartile: In descriptive statistics, the quartiles of a ranked set of data values are the three points that divide the data set into four equal groups, each group comprising a quarter of the data
def bivariate_analysis_cont_catg(cont_catg_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
clean_cont_catg_list = clean_str_list(df,cont_catg_list)
if len(clean_str_list(df,[target_name])) == 0 and len(cont_catg_list)>0:
raise ValueError("You seem to have a target variable with string values.")
clean_df = df.dropna()
for col in clean_cont_catg_list:
col_classes =clean_df[col].unique()
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[col]==i][target_name]) for i in col_classes]
y = np.float32(clean_df[target_name])
f_value,p_val = evaluate_anova(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(col+"\n f_value: "+str(np.float32(f_value[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel(target_name, fontsize=10)
plt.boxplot(x)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
# This function is for the bivariate analysis between categorical vs continuous variables. Plots box plots.
def bivariate_analysis_catg_cont(catg_cont_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
    # No need to remove string variables as they are handled by the chi2 function of sklearn.
# clean_catg_cont_list = clean_str_list(df,catg_cont_list)
clean_catg_cont_list = catg_cont_list
clean_df = df.dropna()
for col in clean_catg_cont_list:
col_classes =df[target_name].unique()
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[target_name]==i][col]) for i in col_classes]
y = clean_df[target_name]
f_value,p_val = evaluate_anova(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(target_name+"\n f_value: "+str(np.float32(f_value[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel(col, fontsize=10)
plt.boxplot(x)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
#returns the total number of subplots to be made.
def total_subplots(df,lst):
clean_df = df.dropna()
total = [len(clean_str_list(clean_df,i)) for i in lst]
return sum(total)
# This function returns a new categorical list after removing drop values, in case they are written in both the drop and categorical_name lists.
def remove_drop_from_catglist(drop,categorical_name):
for col in drop:
if col in categorical_name:
categorical_name.remove(col)
return categorical_name
def plot(data_input,target_name="",categorical_name=[],drop=[],PLOT_COLUMNS_SIZE = 4,bin_size="auto",wspace=0.5,hspace=0.8):
"""
This is the main function to give Bivariate analysis between the target variable and the input features.
Parameters
-----------
data_input : Dataframe
This is the input Dataframe with all data.
target_name : String
The name of the target column.
categorical_name : list
        Names of all categorical variable columns with more than 2 classes, to distinguish them from the continuous variables.
drop : list
Names of columns to be dropped.
    PLOT_COLUMNS_SIZE : int
        Number of plots to display per row in the display window. The row size is adjusted accordingly.
    bin_size : int ;default="auto"
        Number of bins for the histogram displayed in the categorical vs categorical category.
    wspace : float ;default = 0.5
        Horizontal padding between subplots on the display window.
    hspace : float ;default = 0.8
        Vertical padding between subplots on the display window.
-----------
"""
if type(data_input).__name__ == "DataFrame" :
# Column names
columns_name = data_input.columns.values
#To drop user specified columns.
if is_present(columns_name,drop):
data_input = data_input.drop(drop,axis=1)
columns_name = data_input.columns.values
categorical_name = remove_drop_from_catglist(drop,categorical_name)
else:
raise ValueError("Couldn't find it in the input Dataframe!")
if target_name == "":
raise ValueError("Please mention a target variable")
    #Checks if the categorical_name are present in the original dataframe columns.
categorical_is_present = is_present(columns_name,categorical_name)
target_is_present = is_present(columns_name,[target_name])
if categorical_is_present:
fin_cat_dict,catg_catg_list,cont_cont_list,catg_cont_list,cont_catg_list = get_category(data_input,target_name,categorical_name,columns_name)
#Subplot(Total number of graphs)
total = total_subplots(data_input,[cont_cont_list,catg_catg_list,catg_cont_list,cont_catg_list])
if total < PLOT_COLUMNS_SIZE:
total = PLOT_COLUMNS_SIZE
PLOT_ROW_SIZE = ceil(float(total)/PLOT_COLUMNS_SIZE)
#Call various functions
plot,count = bivariate_analysis_cont_cont(cont_cont_list,data_input,target_name,total,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
plot,count = bivariate_analysis_catg_catg(catg_catg_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,bin_size=bin_size)
plot,count = bivariate_analysis_cont_catg(cont_catg_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
plot,count = bivariate_analysis_catg_cont(catg_cont_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
fig.subplots_adjust(bottom=0.08,left = 0.05,right=0.97,top=0.93,wspace = wspace,hspace = hspace)
plot.show()
else:
raise ValueError("Make sure input data is a Dataframe.")
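# Hedged usage sketch (not part of the original script), assuming a pandas DataFrame
# `df` with a numeric target column 'price', a multi-class categorical column 'brand'
# and an identifier column 'id' to drop; all names are assumptions for illustration:
#
#   df = pd.read_csv("some_data.csv")
#   plot(df, target_name='price', categorical_name=['brand'], drop=['id'],
#        PLOT_COLUMNS_SIZE=4, bin_size='auto')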
|
[
"matplotlib.pyplot.boxplot",
"matplotlib.pyplot.hist",
"numpy.corrcoef",
"matplotlib.pyplot.ylabel",
"sklearn.feature_selection.f_classif",
"matplotlib.pyplot.style.use",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"sklearn.feature_selection.chi2",
"matplotlib.pyplot.subplot",
"numpy.float32",
"matplotlib.pyplot.legend"
] |
[((208, 231), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (221, 231), True, 'import matplotlib.pyplot as plt\n'), ((239, 251), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (249, 251), True, 'import matplotlib.pyplot as plt\n'), ((2265, 2279), 'numpy.corrcoef', 'corrcoef', (['x', 'y'], {}), '(x, y)\n', (2273, 2279), False, 'from numpy import corrcoef\n'), ((4005, 4015), 'sklearn.feature_selection.chi2', 'chi2', (['x', 'y'], {}), '(x, y)\n', (4009, 4015), False, 'from sklearn.feature_selection import chi2\n'), ((6159, 6174), 'sklearn.feature_selection.f_classif', 'f_classif', (['x', 'y'], {}), '(x, y)\n', (6168, 6174), False, 'from sklearn.feature_selection import f_classif\n'), ((2942, 2996), 'matplotlib.pyplot.subplot', 'plt.subplot', (['PLOT_ROW_SIZE', 'PLOT_COLUMNS_SIZE', 'COUNTER'], {}), '(PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, COUNTER)\n', (2953, 2996), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3157), 'numpy.float32', 'np.float32', (['clean_df[target_name]'], {}), '(clean_df[target_name])\n', (3134, 3157), True, 'import numpy as np\n'), ((3317, 3353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['target_name'], {'fontsize': '(10)'}), '(target_name, fontsize=10)\n', (3327, 3353), True, 'import matplotlib.pyplot as plt\n'), ((3362, 3379), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (3373, 3379), True, 'import matplotlib.pyplot as plt\n'), ((4739, 4793), 'matplotlib.pyplot.subplot', 'plt.subplot', (['PLOT_ROW_SIZE', 'PLOT_COLUMNS_SIZE', 'COUNTER'], {}), '(PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, COUNTER)\n', (4750, 4793), True, 'import matplotlib.pyplot as plt\n'), ((5188, 5224), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {'fontsize': '(10)'}), "('Frequency', fontsize=10)\n", (5198, 5224), True, 'import matplotlib.pyplot as plt\n'), ((5233, 5287), 'matplotlib.pyplot.hist', 'plt.hist', (['x'], {'bins': 'bins_size', 'stacked': '(True)', 'label': 'label'}), '(x, bins=bins_size, stacked=True, label=label)\n', (5241, 5287), True, 'import matplotlib.pyplot as plt\n'), ((5295, 5324), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': "{'size': 10}"}), "(prop={'size': 10})\n", (5305, 5324), True, 'import matplotlib.pyplot as plt\n'), ((7327, 7381), 'matplotlib.pyplot.subplot', 'plt.subplot', (['PLOT_ROW_SIZE', 'PLOT_COLUMNS_SIZE', 'COUNTER'], {}), '(PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, COUNTER)\n', (7338, 7381), True, 'import matplotlib.pyplot as plt\n'), ((7568, 7601), 'numpy.float32', 'np.float32', (['clean_df[target_name]'], {}), '(clean_df[target_name])\n', (7578, 7601), True, 'import numpy as np\n'), ((7799, 7835), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['target_name'], {'fontsize': '(10)'}), '(target_name, fontsize=10)\n', (7809, 7835), True, 'import matplotlib.pyplot as plt\n'), ((7844, 7858), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['x'], {}), '(x)\n', (7855, 7858), True, 'import matplotlib.pyplot as plt\n'), ((8617, 8671), 'matplotlib.pyplot.subplot', 'plt.subplot', (['PLOT_ROW_SIZE', 'PLOT_COLUMNS_SIZE', 'COUNTER'], {}), '(PLOT_ROW_SIZE, PLOT_COLUMNS_SIZE, COUNTER)\n', (8628, 8671), True, 'import matplotlib.pyplot as plt\n'), ((9085, 9113), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['col'], {'fontsize': '(10)'}), '(col, fontsize=10)\n', (9095, 9113), True, 'import matplotlib.pyplot as plt\n'), ((9122, 9136), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['x'], {}), '(x)\n', (9133, 9136), True, 'import matplotlib.pyplot as plt\n'), ((4896, 4947), 'numpy.array', 
'np.array', (['clean_df[clean_df[target_name] == i][col]'], {}), '(clean_df[clean_df[target_name] == i][col])\n', (4904, 4947), True, 'import numpy as np\n'), ((7484, 7535), 'numpy.array', 'np.array', (['clean_df[clean_df[col] == i][target_name]'], {}), '(clean_df[clean_df[col] == i][target_name])\n', (7492, 7535), True, 'import numpy as np\n'), ((8774, 8825), 'numpy.array', 'np.array', (['clean_df[clean_df[target_name] == i][col]'], {}), '(clean_df[clean_df[target_name] == i][col])\n', (8782, 8825), True, 'import numpy as np\n'), ((3055, 3070), 'numpy.float32', 'np.float32', (['std'], {}), '(std)\n', (3065, 3070), True, 'import numpy as np\n'), ((3271, 3293), 'numpy.float32', 'np.float32', (['corr[0][1]'], {}), '(corr[0][1])\n', (3281, 3293), True, 'import numpy as np\n'), ((4852, 4867), 'numpy.float32', 'np.float32', (['std'], {}), '(std)\n', (4862, 4867), True, 'import numpy as np\n'), ((5039, 5062), 'numpy.array', 'np.array', (['clean_df[col]'], {}), '(clean_df[col])\n', (5047, 5062), True, 'import numpy as np\n'), ((7440, 7455), 'numpy.float32', 'np.float32', (['std'], {}), '(std)\n', (7450, 7455), True, 'import numpy as np\n'), ((7642, 7665), 'numpy.array', 'np.array', (['clean_df[col]'], {}), '(clean_df[col])\n', (7650, 7665), True, 'import numpy as np\n'), ((8730, 8745), 'numpy.float32', 'np.float32', (['std'], {}), '(std)\n', (8740, 8745), True, 'import numpy as np\n'), ((8920, 8943), 'numpy.array', 'np.array', (['clean_df[col]'], {}), '(clean_df[col])\n', (8928, 8943), True, 'import numpy as np\n'), ((3025, 3041), 'numpy.float32', 'np.float32', (['mean'], {}), '(mean)\n', (3035, 3041), True, 'import numpy as np\n'), ((4822, 4838), 'numpy.float32', 'np.float32', (['mean'], {}), '(mean)\n', (4832, 4838), True, 'import numpy as np\n'), ((5119, 5137), 'numpy.float32', 'np.float32', (['chi[0]'], {}), '(chi[0])\n', (5129, 5137), True, 'import numpy as np\n'), ((7410, 7426), 'numpy.float32', 'np.float32', (['mean'], {}), '(mean)\n', (7420, 7426), True, 'import numpy as np\n'), ((7726, 7748), 'numpy.float32', 'np.float32', (['f_value[0]'], {}), '(f_value[0])\n', (7736, 7748), True, 'import numpy as np\n'), ((8700, 8716), 'numpy.float32', 'np.float32', (['mean'], {}), '(mean)\n', (8710, 8716), True, 'import numpy as np\n'), ((9012, 9034), 'numpy.float32', 'np.float32', (['f_value[0]'], {}), '(f_value[0])\n', (9022, 9034), True, 'import numpy as np\n')]
|
import os
import numpy as np
def save_samples_truncted_prob(fname, points, prob):
'''
Save the visualization of sampling to a ply file.
Red points represent positive predictions.
Green points represent negative predictions.
Parameters
fname: File name to save
points: [N, 3] array of points
prob: [1, N] array of predictions in the range [0~1]
Return:
None
'''
prob = prob.transpose(0, 1).detach().numpy()
r = (prob > 0.5).reshape([-1, 1]) * 255
g = (prob < 0.5).reshape([-1, 1]) * 255
b = np.zeros(r.shape)
to_save = np.concatenate([points, r, g, b,prob], axis=-1)
return np.savetxt(fname,
to_save,
fmt='%.6f %.6f %.6f %d %d %d %.6f',
comments='',
header=(
'ply\nformat ascii 1.0\nelement vertex {:d}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty float prob\nend_header').format(
points.shape[0])
)
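# Hedged usage sketch (not in the original file): writing a random point cloud with
# random predictions to a ply file. The shapes follow the docstring above; everything
# else is an assumption for illustration:
#
#   import torch
#   pts = np.random.rand(1000, 3)        # [N, 3] points
#   prob = torch.rand(1, 1000)           # [1, N] predictions in [0, 1]
#   save_samples_truncted_prob('demo.ply', pts, prob)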
def save_gallery(preds,samples,names,gallery_id,epoch):
pred = preds[0].cpu()
sample = samples[0].transpose(0, 1).cpu()
name = names[0]
save_gallery_path = os.path.join(gallery_id,name.split('/')[-2],"epoch_{:03d}".format(epoch))
os.makedirs(save_gallery_path,exist_ok=True)
path = os.path.join(save_gallery_path,'pred.ply')
save_samples_truncted_prob(path,sample,pred)
|
[
"os.makedirs",
"numpy.zeros",
"os.path.join",
"numpy.concatenate"
] |
[((568, 585), 'numpy.zeros', 'np.zeros', (['r.shape'], {}), '(r.shape)\n', (576, 585), True, 'import numpy as np\n'), ((601, 649), 'numpy.concatenate', 'np.concatenate', (['[points, r, g, b, prob]'], {'axis': '(-1)'}), '([points, r, g, b, prob], axis=-1)\n', (615, 649), True, 'import numpy as np\n'), ((1380, 1425), 'os.makedirs', 'os.makedirs', (['save_gallery_path'], {'exist_ok': '(True)'}), '(save_gallery_path, exist_ok=True)\n', (1391, 1425), False, 'import os\n'), ((1436, 1479), 'os.path.join', 'os.path.join', (['save_gallery_path', '"""pred.ply"""'], {}), "(save_gallery_path, 'pred.ply')\n", (1448, 1479), False, 'import os\n')]
|
from bokeh.application.handlers import FunctionHandler, DirectoryHandler
from bokeh.application import Application
import numpy as np
import holoviews as hv
import boto3
from PIL import Image
import holoviews.plotting.bokeh # important
from bokeh.io import show, curdoc
from bokeh.layouts import layout
import io
from holoviews.operation.datashader import datashade
from bokeh.models import Slider, Button
from marshmallow import Schema, fields, INCLUDE
renderer = hv.renderer('bokeh').instance(mode='server')
class BokehImageAppArgsSchema(Schema):
bucket = fields.List(fields.String())
key = fields.List(fields.String())
height = fields.List(fields.Integer())
width = fields.List(fields.Integer())
# Define valid function for FunctionHandler
# when deploying as script, simply attach to curdoc
def modify_doc(doc):
args = doc.session_context.request.arguments
args_schema = BokehImageAppArgsSchema()
loaded_args = args_schema.load(args, unknown=INCLUDE)
bucket = loaded_args['bucket'][0]
key = loaded_args['key'][0]
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket)
object = bucket.Object(key)
file_stream = io.BytesIO()
object.download_fileobj(file_stream)
pil_image = Image.open(file_stream)
hv_img_plot = hv.Image(np.asarray(pil_image)).options(
height=loaded_args['height'][0], width=loaded_args['width'][0])
# Create HoloViews plot and attach the document
hvplot = renderer.get_plot(hv_img_plot, doc)
# Combine the holoviews plot and widgets in a layout
plot = layout([
[hvplot.state]], sizing_mode='fixed')
doc.add_root(plot)
return doc
bokeh_image_app = Application(FunctionHandler(modify_doc))
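# Hedged serving sketch (not part of the original module): one way to host this
# Application with Bokeh's standalone server. The route, port and query parameters
# (bucket, key, height, width read by modify_doc) are assumptions for illustration:
#
#   from bokeh.server.server import Server
#   server = Server({'/image': bokeh_image_app}, port=5006)
#   server.start()
#   server.io_loop.start()
#   # then open e.g. http://localhost:5006/image?bucket=my-bucket&key=img.png&height=400&width=600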
|
[
"bokeh.layouts.layout",
"PIL.Image.open",
"holoviews.renderer",
"bokeh.application.handlers.FunctionHandler",
"io.BytesIO",
"numpy.asarray",
"boto3.resource",
"marshmallow.fields.String",
"marshmallow.fields.Integer"
] |
[((1073, 1093), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (1087, 1093), False, 'import boto3\n'), ((1176, 1188), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1186, 1188), False, 'import io\n'), ((1246, 1269), 'PIL.Image.open', 'Image.open', (['file_stream'], {}), '(file_stream)\n', (1256, 1269), False, 'from PIL import Image\n'), ((1572, 1617), 'bokeh.layouts.layout', 'layout', (['[[hvplot.state]]'], {'sizing_mode': '"""fixed"""'}), "([[hvplot.state]], sizing_mode='fixed')\n", (1578, 1617), False, 'from bokeh.layouts import layout\n'), ((1698, 1725), 'bokeh.application.handlers.FunctionHandler', 'FunctionHandler', (['modify_doc'], {}), '(modify_doc)\n', (1713, 1725), False, 'from bokeh.application.handlers import FunctionHandler, DirectoryHandler\n'), ((469, 489), 'holoviews.renderer', 'hv.renderer', (['"""bokeh"""'], {}), "('bokeh')\n", (480, 489), True, 'import holoviews as hv\n'), ((580, 595), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (593, 595), False, 'from marshmallow import Schema, fields, INCLUDE\n'), ((619, 634), 'marshmallow.fields.String', 'fields.String', ([], {}), '()\n', (632, 634), False, 'from marshmallow import Schema, fields, INCLUDE\n'), ((661, 677), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (675, 677), False, 'from marshmallow import Schema, fields, INCLUDE\n'), ((703, 719), 'marshmallow.fields.Integer', 'fields.Integer', ([], {}), '()\n', (717, 719), False, 'from marshmallow import Schema, fields, INCLUDE\n'), ((1298, 1319), 'numpy.asarray', 'np.asarray', (['pil_image'], {}), '(pil_image)\n', (1308, 1319), True, 'import numpy as np\n')]
|
#import hickle as hkl
import numpy as np
from keras import backend as K
from keras.preprocessing.image import Iterator
import matplotlib.pyplot as plt
# Defines one class: SequenceGenerator. a subclass of Iterator
# ====================================
# Called from kitti_train.py and kitti_evaluate.py.
# Class SequenceGenerator is a subclass of Iterator.
# Data generator that creates sequences for input into PredNet.
class SequenceGenerator(Iterator): # Iterator: can be iterated over in for-loop
def __init__(self, data_file, source_file, nt,
batch_size=8, shuffle=False, seed=None,
output_mode='error', sequence_start_mode='all', N_seq=None,
data_format=K.image_data_format()):
print("\ndata_utils_RPB.py: Instantiating sequence generator:\n")
# LOAD DATA FILE
print("data_utils_RBP.py: Data file: \n", data_file)
self.X = np.load(data_file) # X will be like (n_images, nb_cols, nb_rows, nb_channels)
#self.X =hkl.transpose(self.X, (0, 3, 2, 1))
# ===============================================================
# Added statements to print out two consecutive frames. ASM
print("data_utils.py: self.X.shape\n", self.X.shape) # e.g., (41396, 128, 160, 3)
# print("1st row:\n", self.X[0,:,:,:]) # will print the raw array
# Print 1st two consecutive frames
# NOTE: the video sequence seems to be stored in reverse order!!! Is this a bug?
# 1. When called from "kitti_train.py" the frames for X_train.py and X_val.py seem
# to be stored in reverse order.
# 2. When called from "kitti_evaluate.py" the frames for X_test.py seem to be
# in correct order.
# 3. Need to make sure that the source files properly match the data files.
my_temp = np.array(self.X[0,:,:,:], dtype = int, copy = True) # convert from float to int
plt.imshow(my_temp) # look at 1st image
plt.show()
my_temp = np.array(self.X[1,:,:,:], dtype = int, copy = True) # convert from float to int
plt.imshow(my_temp) # look at 2nd image
plt.show()
# LOAD SOURCE FILE
print("data_utils.py: Source file: \n", source_file)
self.sources = np.load(source_file) # Labels in b'string' format
# Above: source for each image so when creating sequences can assure that consecutive
# frames are from same video
print("data_utils.py: self.sources.shape\n", self.sources.shape) # e.g., (41396,)
print(self.sources[0]) # should print a byte literal representation of a string
# End of print statements
# ===============================================================
# SET OTHER PARAMS
self.nt = nt # 10
self.batch_size = batch_size # 4 if called from "kitti_train.py"
self.data_format = data_format # K.image_data_format()
assert sequence_start_mode in {'all', 'unique'}, 'sequence_start_mode must be in {all, unique}'
self.sequence_start_mode = sequence_start_mode # default is 'all'
assert output_mode in {'error', 'prediction'}, 'output_mode must be in {error, prediction}'
self.output_mode = output_mode # default is 'error'
if self.data_format == 'channels_first': # tensorflow data format is 'channels_last'
self.X = np.transpose(self.X, (0, 3, 1, 2))
self.im_shape = self.X[0].shape # (128, 160, 3) I think.
if self.sequence_start_mode == 'all': # allow for any possible sequence, starting from any frame
self.possible_starts = np.array([i for i in range(self.X.shape[0] - self.nt) if self.sources[i] == self.sources[i + self.nt - 1]])
print("data_utils.py: possible_starts all: ", self.possible_starts)
elif self.sequence_start_mode == 'unique': #create sequences where each unique frame is in at most one sequence
curr_location = 0
possible_starts = []
while curr_location < self.X.shape[0] - self.nt + 1:
if self.sources[curr_location] == self.sources[curr_location + self.nt - 1]:
possible_starts.append(curr_location)
curr_location += self.nt
else:
curr_location += 1
self.possible_starts = possible_starts
print("data_utils.py: possible_starts unique: ", self.possible_starts)
if shuffle:
self.possible_starts = np.random.permutation(self.possible_starts)
if N_seq is not None and len(self.possible_starts) > N_seq: # select a subset of sequences if want to
self.possible_starts = self.possible_starts[:N_seq]
self.N_sequences = len(self.possible_starts)
super(SequenceGenerator, self).__init__(len(self.possible_starts), batch_size, shuffle, seed)
# End of __init__()
def __getitem__(self, null):
return self.next()
def next(self): # Returns a batch of x and y data
with self.lock:
current_index = (self.batch_index * self.batch_size) % self.n
index_array, current_batch_size = next(self.index_generator), self.batch_size
batch_x = np.zeros((current_batch_size, self.nt) + self.im_shape, np.float32)
for i, idx in enumerate(index_array):
idx = self.possible_starts[idx]
batch_x[i] = self.preprocess(self.X[idx:idx+self.nt])
if self.output_mode == 'error': # model outputs errors, so y should be zeros
batch_y = np.zeros(current_batch_size, np.float32)
elif self.output_mode == 'prediction': # output actual pixels
batch_y = batch_x
return batch_x, batch_y # inputs, targets
def preprocess(self, X):
return X.astype(np.float32) / 255 # maps to [0, 1]
# Returns 10 frames
def create_all(self):
        # Below: plus operator is concatenation. Initialize multidim array of float32 zeros w/ specified shape
X_all = np.zeros((self.N_sequences, self.nt) + self.im_shape, np.float32)
for i, idx in enumerate(self.possible_starts):
X_all[i] = self.preprocess(self.X[idx:idx+self.nt]) # map [0,255] to [0,1] for 10 frames
return X_all
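# Example usage (a sketch; the .npy file names are hypothetical, mirroring kitti_train.py):
#   train_generator = SequenceGenerator('X_train.npy', 'sources_train.npy', nt=10,
#                                       batch_size=4, shuffle=True)
#   batch_x, batch_y = train_generator.next()  # batch_x: (batch_size, nt) + frame shape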
|
[
"matplotlib.pyplot.imshow",
"numpy.transpose",
"keras.backend.image_data_format",
"numpy.random.permutation",
"numpy.array",
"numpy.zeros",
"numpy.load",
"matplotlib.pyplot.show"
] |
[((718, 739), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (737, 739), True, 'from keras import backend as K\n'), ((928, 946), 'numpy.load', 'np.load', (['data_file'], {}), '(data_file)\n', (935, 946), True, 'import numpy as np\n'), ((1876, 1926), 'numpy.array', 'np.array', (['self.X[0, :, :, :]'], {'dtype': 'int', 'copy': '(True)'}), '(self.X[0, :, :, :], dtype=int, copy=True)\n', (1884, 1926), True, 'import numpy as np\n'), ((1964, 1983), 'matplotlib.pyplot.imshow', 'plt.imshow', (['my_temp'], {}), '(my_temp)\n', (1974, 1983), True, 'import matplotlib.pyplot as plt\n'), ((2012, 2022), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2020, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2041, 2091), 'numpy.array', 'np.array', (['self.X[1, :, :, :]'], {'dtype': 'int', 'copy': '(True)'}), '(self.X[1, :, :, :], dtype=int, copy=True)\n', (2049, 2091), True, 'import numpy as np\n'), ((2129, 2148), 'matplotlib.pyplot.imshow', 'plt.imshow', (['my_temp'], {}), '(my_temp)\n', (2139, 2148), True, 'import matplotlib.pyplot as plt\n'), ((2177, 2187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2185, 2187), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2328), 'numpy.load', 'np.load', (['source_file'], {}), '(source_file)\n', (2315, 2328), True, 'import numpy as np\n'), ((5287, 5354), 'numpy.zeros', 'np.zeros', (['((current_batch_size, self.nt) + self.im_shape)', 'np.float32'], {}), '((current_batch_size, self.nt) + self.im_shape, np.float32)\n', (5295, 5354), True, 'import numpy as np\n'), ((6080, 6145), 'numpy.zeros', 'np.zeros', (['((self.N_sequences, self.nt) + self.im_shape)', 'np.float32'], {}), '((self.N_sequences, self.nt) + self.im_shape, np.float32)\n', (6088, 6145), True, 'import numpy as np\n'), ((3439, 3473), 'numpy.transpose', 'np.transpose', (['self.X', '(0, 3, 1, 2)'], {}), '(self.X, (0, 3, 1, 2))\n', (3451, 3473), True, 'import numpy as np\n'), ((4567, 4610), 'numpy.random.permutation', 'np.random.permutation', (['self.possible_starts'], {}), '(self.possible_starts)\n', (4588, 4610), True, 'import numpy as np\n'), ((5619, 5659), 'numpy.zeros', 'np.zeros', (['current_batch_size', 'np.float32'], {}), '(current_batch_size, np.float32)\n', (5627, 5659), True, 'import numpy as np\n')]
|
import psycopg2
from datetime import datetime
from psycopg2 import sql
from est.fltr import county_return
from est.db.cur import con_cur
import numpy as np
import pandas as pd
def comp_find(est, a, b):
temp1 = est
temp2 = np.array(temp1[0])
county = temp2[0].strip()
state = temp2[1].strip()
cur, con = con_cur()
cur.execute("""
SELECT comp_st, comp_cty, comp_lv, comp_perc FROM est_LandValue(%s, %s, %s, %s)
""", (a, b, state, county))
comp_states = cur.fetchall()
con.close()
return(comp_states)
def find_comps(state, county, radius, population):
cur, con = con_cur()
cur.execute("""
SELECT comp_st, comp_cty, comp_lv, comp_perc FROM est_LandValue(%s, %s, %s, %s)
""", (radius, population, state, county))
comp_states = pd.DataFrame(cur.fetchall(), columns = ['State', 'County', 'Land Value', 'Perc Land Value'])
con.close()
return(comp_states)
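# Example call (hypothetical state/county values; the result is a DataFrame of
# comparable counties returned by the est_LandValue SQL function):
#   df = find_comps('Texas', 'Travis', 100, 500000)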
|
[
"numpy.array",
"est.db.cur.con_cur"
] |
[((231, 249), 'numpy.array', 'np.array', (['temp1[0]'], {}), '(temp1[0])\n', (239, 249), True, 'import numpy as np\n'), ((325, 334), 'est.db.cur.con_cur', 'con_cur', ([], {}), '()\n', (332, 334), False, 'from est.db.cur import con_cur\n'), ((632, 641), 'est.db.cur.con_cur', 'con_cur', ([], {}), '()\n', (639, 641), False, 'from est.db.cur import con_cur\n')]
|
from aiohttp import web
import socketio
import numpy as np
def load_eigenvector(k,d):
vec_path = "eigenvectors/eigen_k=" + str(k) + ",d=" + str(d) + ".npy"
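    # e.g. k=3, d=2 resolves to "eigenvectors/eigen_k=3,d=2.npy"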
eigenvector_np = np.load(vec_path)
eigenvector_str = ""
for x in np.nditer(eigenvector_np):
eigenvector_str += str(x) + " "
# print()
# print(eigenvector_str)
return eigenvector_str
# creates a new Async Socket IO Server
sio = socketio.AsyncServer(cors_allowed_origins="*")
# Creates a new Aiohttp Web Application
app = web.Application()
# Binds our Socket.IO server to our Web App
# instance
sio.attach(app)
# we can define aiohttp endpoints just as we normally
# would with no change
async def index(request):
with open('index.html') as f:
return web.Response(text=f.read(), content_type='text/html')
async def test(request):
with open('test.js') as f:
return web.Response(text=f.read(), content_type='text/js')
# If we wanted to create a new websocket endpoint,
# use this decorator, passing in the name of the
# event we wish to listen out for
@sio.on('hi')
async def print_message(sid, message, d_JS):
k = message
d = d_JS
# print(k)
# print(d)
messageToJS = load_eigenvector(k,d)
# print()
# print(messageToJS)
# print()
# print(messageToJS)
# When we receive a new event of type
# 'message' through a socket.io connection
# we print the socket ID and the message
# print("Socket ID: " , sid)
# print(message) #message is the value sent from the HTML
await sio.emit('message', messageToJS)
# notice it has to be of type 'message' and then pass the
# value to send to html doc
# @sio.on('d')
# async def get_d_val(sid, message):
# d = message
# We bind our aiohttp endpoint to our app
# router
app.router.add_get('/', index)
app.router.add_get('/test.js', test)
# We kick off our server
if __name__ == '__main__':
web.run_app(app)
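# Client-side sketch (assumes a Socket.IO JavaScript client, e.g. in index.html):
# emit 'hi' with the desired k and d, then listen for 'message' to receive the
# space-separated eigenvector string computed above.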
|
[
"aiohttp.web.run_app",
"numpy.nditer",
"aiohttp.web.Application",
"socketio.AsyncServer",
"numpy.load"
] |
[((423, 469), 'socketio.AsyncServer', 'socketio.AsyncServer', ([], {'cors_allowed_origins': '"""*"""'}), "(cors_allowed_origins='*')\n", (443, 469), False, 'import socketio\n'), ((516, 533), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (531, 533), False, 'from aiohttp import web\n'), ((182, 199), 'numpy.load', 'np.load', (['vec_path'], {}), '(vec_path)\n', (189, 199), True, 'import numpy as np\n'), ((238, 263), 'numpy.nditer', 'np.nditer', (['eigenvector_np'], {}), '(eigenvector_np)\n', (247, 263), True, 'import numpy as np\n'), ((1924, 1940), 'aiohttp.web.run_app', 'web.run_app', (['app'], {}), '(app)\n', (1935, 1940), False, 'from aiohttp import web\n')]
|
#!/usr/bin/env python
# pylint: disable=invalid-name,ungrouped-imports
import logging
import math
import os
from importlib import import_module
import coloredlogs
import numpy as np
import tensorflow as tf
from scipy.misc import imread
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import (array_ops, control_flow_ops, functional_ops,
math_ops)
def get_hand_segmentation_for_image(image_file, hand_dir):
return "{}/{}".format(hand_dir, os.path.basename(image_file).replace("image", "hand"))
def get_patho_segmentation_for_image(image_file, patho_dir):
return "{}/{}".format(patho_dir, os.path.basename(image_file).replace("image", "patho"))
def get_combined_segmentation_for_image(image_file, combined_dir):
return "{}/{}".format(combined_dir, os.path.basename(image_file).replace("image", "combined"))
image_subdir = "image"
hand_subdir = "hand"
patho_subdir = "patho"
combined_subdir = "combined"
data_subdirs = {
image_subdir: image_subdir,
hand_subdir: hand_subdir,
patho_subdir: patho_subdir,
combined_subdir: combined_subdir
}
image_transformation_functions = {
image_subdir: lambda x, y: x,
hand_subdir: get_hand_segmentation_for_image,
combined_subdir: get_combined_segmentation_for_image,
patho_subdir: get_patho_segmentation_for_image
}
def is_valid_file(file_name, pattern):
return (not pattern or pattern in file_name) and (file_name.endswith(".png") or file_name.endswith(".jpg"))
def prepare_images(images, is_colored):
tf.logging.info("Preparing {} images".format(len(images)))
# normalize the images to the range of [-1, 1]
normalized_images = np.array(images, dtype=np.float32) / 127.5 - 1
return normalized_images if is_colored else \
normalized_images.reshape(*normalized_images.shape, 1) # add dimension for "color depth"
def segmentation_score(output, ground_truth):
assert output.shape[0] == ground_truth.shape[0]
predicted = tf.cast(output >= 0, tf.uint8)
actual = tf.cast(ground_truth >= 0, tf.uint8)
tp = tf.count_nonzero(predicted * actual)
# tn = tf.count_nonzero((predicted - 1) * (actual - 1))
fp = tf.count_nonzero(predicted * (actual - 1))
fn = tf.count_nonzero((predicted - 1) * actual)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
  return 2 * precision * recall / (precision + recall)  # F1 / Dice score
def logistic(logit):
exp = np.exp(-logit) if isinstance(logit, np.ndarray) else tf.exp(-logit)
return 1 / (1 + exp)
# since it's unavailable in 1.12.0, this is copied from:
# https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
def kernel_classifier_distance_and_std_from_activations(real_activations,
generated_activations,
max_block_size=1024,
dtype=None):
# pylint: disable=no-member
"""Kernel "classifier" distance for evaluating a generative model.
This methods computes the kernel classifier distance from activations of
real images and generated images. This can be used independently of the
kernel_classifier_distance() method, especially in the case of using large
batches during evaluation where we would like to precompute all of the
activations before computing the classifier distance, or if we want to
compute multiple metrics based on the same images. It also returns a rough
estimate of the standard error of the estimator.
This technique is described in detail in https://arxiv.org/abs/1801.01401.
Given two distributions P and Q of activations, this function calculates
E_{X, X' ~ P}[k(X, X')] + E_{Y, Y' ~ Q}[k(Y, Y')]
- 2 E_{X ~ P, Y ~ Q}[k(X, Y)]
where k is the polynomial kernel
k(x, y) = ( x^T y / dimension + 1 )^3.
This captures how different the distributions of real and generated images'
visual features are. Like the Frechet distance (and unlike the Inception
score), this is a true distance and incorporates information about the
target images. Unlike the Frechet score, this function computes an
*unbiased* and asymptotically normal estimator, which makes comparing
estimates across models much more intuitive.
The estimator used takes time quadratic in max_block_size. Larger values of
max_block_size will decrease the variance of the estimator but increase the
computational cost. This differs slightly from the estimator used by the
original paper; it is the block estimator of https://arxiv.org/abs/1307.1954.
The estimate of the standard error will also be more reliable when there are
more blocks, i.e. when max_block_size is smaller.
NOTE: the blocking code assumes that real_activations and
generated_activations are both in random order. If either is sorted in a
meaningful order, the estimator will behave poorly.
Args:
real_activations: 2D Tensor containing activations of real data. Shape is
[batch_size, activation_size].
generated_activations: 2D Tensor containing activations of generated data.
Shape is [batch_size, activation_size].
max_block_size: integer, default 1024. The distance estimator splits samples
into blocks for computational efficiency. Larger values are more
computationally expensive but decrease the variance of the distance
estimate. Having a smaller block size also gives a better estimate of the
standard error.
dtype: if not None, coerce activations to this dtype before computations.
Returns:
The Kernel Inception Distance. A floating-point scalar of the same type
as the output of the activations.
An estimate of the standard error of the distance estimator (a scalar of
the same type).
"""
real_activations.shape.assert_has_rank(2)
generated_activations.shape.assert_has_rank(2)
real_activations.shape[1].assert_is_compatible_with(
generated_activations.shape[1])
if dtype is None:
dtype = real_activations.dtype
assert generated_activations.dtype == dtype
else:
real_activations = math_ops.cast(real_activations, dtype)
generated_activations = math_ops.cast(generated_activations, dtype)
# Figure out how to split the activations into blocks of approximately
# equal size, with none larger than max_block_size.
n_r = array_ops.shape(real_activations)[0]
n_g = array_ops.shape(generated_activations)[0]
n_bigger = math_ops.maximum(n_r, n_g)
n_blocks = math_ops.to_int32(math_ops.ceil(n_bigger / max_block_size))
v_r = n_r // n_blocks
v_g = n_g // n_blocks
n_plusone_r = n_r - v_r * n_blocks
n_plusone_g = n_g - v_g * n_blocks
sizes_r = array_ops.concat([
array_ops.fill([n_blocks - n_plusone_r], v_r),
array_ops.fill([n_plusone_r], v_r + 1),
], 0)
sizes_g = array_ops.concat([
array_ops.fill([n_blocks - n_plusone_g], v_g),
array_ops.fill([n_plusone_g], v_g + 1),
], 0)
zero = array_ops.zeros([1], dtype=dtypes.int32)
inds_r = array_ops.concat([zero, math_ops.cumsum(sizes_r)], 0)
inds_g = array_ops.concat([zero, math_ops.cumsum(sizes_g)], 0)
dim = math_ops.cast(real_activations.shape[1], dtype)
def compute_kid_block(i):
'Compute the ith block of the KID estimate.'
r_s = inds_r[i]
r_e = inds_r[i + 1]
r = real_activations[r_s:r_e]
m = math_ops.cast(r_e - r_s, dtype)
g_s = inds_g[i]
g_e = inds_g[i + 1]
g = generated_activations[g_s:g_e]
n = math_ops.cast(g_e - g_s, dtype)
k_rr = (math_ops.matmul(r, r, transpose_b=True) / dim + 1)**3
k_rg = (math_ops.matmul(r, g, transpose_b=True) / dim + 1)**3
k_gg = (math_ops.matmul(g, g, transpose_b=True) / dim + 1)**3
return (-2 * math_ops.reduce_mean(k_rg) +
(math_ops.reduce_sum(k_rr) - math_ops.trace(k_rr)) / (m * (m - 1)) +
(math_ops.reduce_sum(k_gg) - math_ops.trace(k_gg)) / (n * (n - 1)))
ests = functional_ops.map_fn(
compute_kid_block, math_ops.range(n_blocks), dtype=dtype, back_prop=False)
mn = math_ops.reduce_mean(ests)
# nn_impl.moments doesn't use the Bessel correction, which we want here
n_blocks_ = math_ops.cast(n_blocks, dtype)
var = control_flow_ops.cond(
math_ops.less_equal(n_blocks, 1),
lambda: array_ops.constant(float('nan'), dtype=dtype),
lambda: math_ops.reduce_sum(math_ops.square(ests - mn)) / (n_blocks_ - 1))
return mn, math_ops.sqrt(var / n_blocks_)
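# Example (a sketch): real_acts and gen_acts are 2-D [batch, feature] activation tensors,
# e.g. Inception features of real and generated samples:
#   kid_mean, kid_std = kernel_classifier_distance_and_std_from_activations(real_acts, gen_acts)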
def load_model(config):
module_names = [
"noise_to_image_models",
"image_to_image_models",
"deep_image_to_image_models",
"deep_noise_to_image_models",
"deep_noise_to_image_models",
"deep_noise_to_square_image_models",
"deep_image_super_resolution_models"
]
for module_name in module_names:
try:
return load_class_from_module(module_name, config.model_name)(config)
except AttributeError:
pass
assert False, "No model with name '{}' found".format(config.model_name)
def load_checkpoint(config, checkpoint_number=None, generator=None, discriminator=None,
first_generator=None, second_generator=None, first_discriminator=None, second_discriminator=None):
# pylint: disable=too-many-arguments
tf.logging.info("Loading model from '{}', checkpoint {}".format(config.checkpoint_dir, checkpoint_number))
models = {
"generator": generator,
"discriminator": discriminator,
"first_generator": first_generator,
"first_discriminator": first_discriminator,
"second_generator": second_generator,
"second_discriminator": second_discriminator
}
models = {key: models[key] for key in models if models[key]}
checkpoint = tf.train.Checkpoint(**models)
checkpoint_to_restore = "{}/ckpt-{}".format(config.checkpoint_dir, checkpoint_number) \
if checkpoint_number else tf.train.latest_checkpoint(config.checkpoint_dir)
checkpoint.restore(checkpoint_to_restore)
def load_image_names(data_dir, pattern=None):
image_dir = os.path.join("data", data_dir, image_subdir)
tf.logging.info("Loading image names from '{}'{}".format(
image_dir, " matching pattern '{}'".format(pattern) if pattern else ""))
return sorted([os.path.join(image_dir, file_name) for file_name in os.listdir(image_dir) if is_valid_file(file_name, pattern)])
def augment_images(images, original, flip_lr, flip_ud):
assert isinstance(images[0], (np.ndarray, tf.Tensor))
if not flip_lr and not flip_ud:
assert original
return images
augmented_images = []
if flip_lr:
tf.logging.info("Adding L-R-flipped images")
if flip_ud:
tf.logging.info("Adding U-D-flipped images")
for image in images:
if original:
augmented_images.append(image)
if flip_lr:
augmented_images.append(np.fliplr(image))
if flip_ud:
augmented_images.append(np.flipud(image))
if flip_lr and flip_ud:
augmented_images.append(np.flipud(np.fliplr(image)))
return augmented_images
def load_images(image_names, data_dir, image_type, original=True, flip_lr=False, flip_ud=False):
image_dir = os.path.join("data", data_dir, data_subdirs[image_type])
tf.logging.info("Loading {} images from '{}'".format(len(image_names), image_dir))
is_colored = image_type == "image"
get_file_name = lambda x: image_transformation_functions[image_type](x, image_dir)
return prepare_images(
augment_images(
[imread(get_file_name(file_name), mode="RGB" if is_colored else "L") for file_name in image_names],
original, flip_lr, flip_ud),
is_colored)
def configure_logging():
tf.logging.set_verbosity(tf.logging.INFO)
coloredlogs.install(level="INFO")
coloredlogs.DEFAULT_LEVEL_STYLES = {
"debug": {"color": "white", "bold": False},
"info": {"color": "white", "bold": True},
"warning": {"color": "yellow", "bold": True},
"error": {"color": "red", "bold": True},
"fatal": {"color": "magenta", "bold": True},
}
logger = logging.getLogger("tensorflow")
log_format = "%(asctime)s %(levelname)s %(message)s"
formatter = coloredlogs.ColoredFormatter(log_format)
for handler in logger.handlers:
handler.setFormatter(formatter)
logger.propagate = False
def get_memory_usage_string():
used = tf.contrib.memory_stats.BytesInUse()
total = tf.contrib.memory_stats.BytesLimit()
peak = tf.contrib.memory_stats.MaxBytesInUse()
return "{:.1f}/{:.1f}GB ({:.1f}%); peak: {:.1f}GB ({:.1f}%)".format(
used/1e3**3, total/1e3**3, 100.0*used/total, peak/1e3**3, 100.0*peak/total)
def load_class_from_module(module_name, class_name):
return getattr(import_module(module_name, class_name), class_name)
def flatten(list_of_lists):
return [item for sublist in list_of_lists for item in sublist]
def format_human(number, digits=3):
unit = 1000
if number < unit:
return str(number)
magnitude = int(math.log(number) / math.log(unit))
pre = "kMGTPE"[magnitude-1]
scaled_number = number / math.pow(unit, magnitude)
if scaled_number == int(scaled_number):
scaled_number = int(scaled_number)
else:
scaled_number = round(scaled_number, digits)
return "{}{}".format(scaled_number, pre)
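# e.g. format_human(999) -> '999', format_human(1500) -> '1.5k', format_human(2500000) -> '2.5M'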
def slerp(val, low, high):
# https://github.com/dribnet/plat/blob/master/plat/interpolate.py
"""Spherical interpolation. val has a range of 0 to 1."""
if val <= 0:
return low
if val >= 1:
return high
if np.allclose(low, high):
return low
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
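# Example: interpolate between two latent vectors z0 and z1 (NumPy arrays) at t in [0, 1]:
#   z_t = slerp(t, z0, z1)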
def truncate_input(values, threshold):
tf.logging.debug("Range before truncating: {} - {}".format(tf.reduce_min(values), tf.reduce_max(values)))
def my_elementwise_func(x):
if abs(x) < threshold:
return x
while abs(x) >= threshold:
x = tf.random_normal((1,))[0]
return x
def recursive_map(inputs):
if len(inputs.shape): # pylint: disable=len-as-condition
return tf.map_fn(recursive_map, inputs)
return my_elementwise_func(inputs)
values = recursive_map(values)
tf.logging.debug("Range after truncating: {} - {}".format(tf.reduce_min(values), tf.reduce_max(values)))
return values
|
[
"logging.getLogger",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.train.Checkpoint",
"tensorflow.logging.set_verbosity",
"math.log",
"numpy.array",
"tensorflow.python.ops.math_ops.cumsum",
"numpy.sin",
"tensorflow.contrib.memory_stats.BytesInUse",
"tensorflow.python.ops.math_ops.range",
"tensorflow.cast",
"numpy.linalg.norm",
"tensorflow.python.ops.math_ops.sqrt",
"tensorflow.reduce_min",
"os.listdir",
"tensorflow.random_normal",
"tensorflow.count_nonzero",
"tensorflow.python.ops.math_ops.trace",
"tensorflow.contrib.memory_stats.BytesLimit",
"numpy.exp",
"tensorflow.python.ops.array_ops.fill",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.square",
"numpy.allclose",
"tensorflow.python.ops.math_ops.maximum",
"importlib.import_module",
"numpy.flipud",
"numpy.fliplr",
"tensorflow.python.ops.math_ops.less_equal",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.reduce_max",
"tensorflow.train.latest_checkpoint",
"tensorflow.contrib.memory_stats.MaxBytesInUse",
"tensorflow.python.ops.math_ops.ceil",
"coloredlogs.install",
"math.pow",
"tensorflow.logging.info",
"os.path.join",
"os.path.basename",
"tensorflow.map_fn",
"tensorflow.python.ops.math_ops.reduce_mean",
"coloredlogs.ColoredFormatter",
"tensorflow.exp"
] |
[((1988, 2018), 'tensorflow.cast', 'tf.cast', (['(output >= 0)', 'tf.uint8'], {}), '(output >= 0, tf.uint8)\n', (1995, 2018), True, 'import tensorflow as tf\n'), ((2030, 2066), 'tensorflow.cast', 'tf.cast', (['(ground_truth >= 0)', 'tf.uint8'], {}), '(ground_truth >= 0, tf.uint8)\n', (2037, 2066), True, 'import tensorflow as tf\n'), ((2075, 2111), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['(predicted * actual)'], {}), '(predicted * actual)\n', (2091, 2111), True, 'import tensorflow as tf\n'), ((2177, 2219), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['(predicted * (actual - 1))'], {}), '(predicted * (actual - 1))\n', (2193, 2219), True, 'import tensorflow as tf\n'), ((2227, 2269), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['((predicted - 1) * actual)'], {}), '((predicted - 1) * actual)\n', (2243, 2269), True, 'import tensorflow as tf\n'), ((6505, 6531), 'tensorflow.python.ops.math_ops.maximum', 'math_ops.maximum', (['n_r', 'n_g'], {}), '(n_r, n_g)\n', (6521, 6531), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7016, 7056), 'tensorflow.python.ops.array_ops.zeros', 'array_ops.zeros', (['[1]'], {'dtype': 'dtypes.int32'}), '([1], dtype=dtypes.int32)\n', (7031, 7056), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7196, 7243), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['real_activations.shape[1]', 'dtype'], {}), '(real_activations.shape[1], dtype)\n', (7209, 7243), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8092, 8118), 'tensorflow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', (['ests'], {}), '(ests)\n', (8112, 8118), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8208, 8238), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['n_blocks', 'dtype'], {}), '(n_blocks, dtype)\n', (8221, 8238), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((9731, 9760), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**models)\n', (9750, 9760), True, 'import tensorflow as tf\n'), ((10038, 10082), 'os.path.join', 'os.path.join', (['"""data"""', 'data_dir', 'image_subdir'], {}), "('data', data_dir, image_subdir)\n", (10050, 10082), False, 'import os\n'), ((11118, 11174), 'os.path.join', 'os.path.join', (['"""data"""', 'data_dir', 'data_subdirs[image_type]'], {}), "('data', data_dir, data_subdirs[image_type])\n", (11130, 11174), False, 'import os\n'), ((11620, 11661), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (11644, 11661), True, 'import tensorflow as tf\n'), ((11664, 11697), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""INFO"""'}), "(level='INFO')\n", (11683, 11697), False, 'import coloredlogs\n'), ((12004, 12035), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (12021, 12035), False, 'import logging\n'), ((12105, 12145), 'coloredlogs.ColoredFormatter', 'coloredlogs.ColoredFormatter', (['log_format'], {}), '(log_format)\n', (12133, 12145), False, 'import coloredlogs\n'), ((12285, 12321), 'tensorflow.contrib.memory_stats.BytesInUse', 'tf.contrib.memory_stats.BytesInUse', ([], {}), '()\n', (12319, 12321), True, 'import tensorflow as tf\n'), ((12332, 12368), 'tensorflow.contrib.memory_stats.BytesLimit', 
'tf.contrib.memory_stats.BytesLimit', ([], {}), '()\n', (12366, 12368), True, 'import tensorflow as tf\n'), ((12378, 12417), 'tensorflow.contrib.memory_stats.MaxBytesInUse', 'tf.contrib.memory_stats.MaxBytesInUse', ([], {}), '()\n', (12415, 12417), True, 'import tensorflow as tf\n'), ((13421, 13443), 'numpy.allclose', 'np.allclose', (['low', 'high'], {}), '(low, high)\n', (13432, 13443), True, 'import numpy as np\n'), ((13547, 13560), 'numpy.sin', 'np.sin', (['omega'], {}), '(omega)\n', (13553, 13560), True, 'import numpy as np\n'), ((2411, 2425), 'numpy.exp', 'np.exp', (['(-logit)'], {}), '(-logit)\n', (2417, 2425), True, 'import numpy as np\n'), ((2464, 2478), 'tensorflow.exp', 'tf.exp', (['(-logit)'], {}), '(-logit)\n', (2470, 2478), True, 'import tensorflow as tf\n'), ((6157, 6195), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['real_activations', 'dtype'], {}), '(real_activations, dtype)\n', (6170, 6195), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6224, 6267), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['generated_activations', 'dtype'], {}), '(generated_activations, dtype)\n', (6237, 6267), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6404, 6437), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['real_activations'], {}), '(real_activations)\n', (6419, 6437), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6449, 6487), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['generated_activations'], {}), '(generated_activations)\n', (6464, 6487), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6563, 6603), 'tensorflow.python.ops.math_ops.ceil', 'math_ops.ceil', (['(n_bigger / max_block_size)'], {}), '(n_bigger / max_block_size)\n', (6576, 6603), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7408, 7439), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(r_e - r_s)', 'dtype'], {}), '(r_e - r_s, dtype)\n', (7421, 7439), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7532, 7563), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['(g_e - g_s)', 'dtype'], {}), '(g_e - g_s, dtype)\n', (7545, 7563), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8028, 8052), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['n_blocks'], {}), '(n_blocks)\n', (8042, 8052), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8276, 8308), 'tensorflow.python.ops.math_ops.less_equal', 'math_ops.less_equal', (['n_blocks', '(1)'], {}), '(n_blocks, 1)\n', (8295, 8308), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8466, 8496), 'tensorflow.python.ops.math_ops.sqrt', 'math_ops.sqrt', (['(var / n_blocks_)'], {}), '(var / n_blocks_)\n', (8479, 8496), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((9883, 9932), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['config.checkpoint_dir'], {}), '(config.checkpoint_dir)\n', (9909, 9932), True, 'import tensorflow as tf\n'), ((10579, 10623), 'tensorflow.logging.info', 'tf.logging.info', (['"""Adding L-R-flipped images"""'], {}), "('Adding 
L-R-flipped images')\n", (10594, 10623), True, 'import tensorflow as tf\n'), ((10642, 10686), 'tensorflow.logging.info', 'tf.logging.info', (['"""Adding U-D-flipped images"""'], {}), "('Adding U-D-flipped images')\n", (10657, 10686), True, 'import tensorflow as tf\n'), ((12642, 12680), 'importlib.import_module', 'import_module', (['module_name', 'class_name'], {}), '(module_name, class_name)\n', (12655, 12680), False, 'from importlib import import_module\n'), ((12992, 13017), 'math.pow', 'math.pow', (['unit', 'magnitude'], {}), '(unit, magnitude)\n', (13000, 13017), False, 'import math\n'), ((1688, 1722), 'numpy.array', 'np.array', (['images'], {'dtype': 'np.float32'}), '(images, dtype=np.float32)\n', (1696, 1722), True, 'import numpy as np\n'), ((6767, 6812), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[n_blocks - n_plusone_r]', 'v_r'], {}), '([n_blocks - n_plusone_r], v_r)\n', (6781, 6812), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6820, 6858), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[n_plusone_r]', '(v_r + 1)'], {}), '([n_plusone_r], v_r + 1)\n', (6834, 6858), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6905, 6950), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[n_blocks - n_plusone_g]', 'v_g'], {}), '([n_blocks - n_plusone_g], v_g)\n', (6919, 6950), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((6958, 6996), 'tensorflow.python.ops.array_ops.fill', 'array_ops.fill', (['[n_plusone_g]', '(v_g + 1)'], {}), '([n_plusone_g], v_g + 1)\n', (6972, 6996), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7092, 7116), 'tensorflow.python.ops.math_ops.cumsum', 'math_ops.cumsum', (['sizes_r'], {}), '(sizes_r)\n', (7107, 7116), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7157, 7181), 'tensorflow.python.ops.math_ops.cumsum', 'math_ops.cumsum', (['sizes_g'], {}), '(sizes_g)\n', (7172, 7181), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((10237, 10271), 'os.path.join', 'os.path.join', (['image_dir', 'file_name'], {}), '(image_dir, file_name)\n', (10249, 10271), False, 'import os\n'), ((12900, 12916), 'math.log', 'math.log', (['number'], {}), '(number)\n', (12908, 12916), False, 'import math\n'), ((12919, 12933), 'math.log', 'math.log', (['unit'], {}), '(unit)\n', (12927, 12933), False, 'import math\n'), ((13736, 13757), 'tensorflow.reduce_min', 'tf.reduce_min', (['values'], {}), '(values)\n', (13749, 13757), True, 'import tensorflow as tf\n'), ((13759, 13780), 'tensorflow.reduce_max', 'tf.reduce_max', (['values'], {}), '(values)\n', (13772, 13780), True, 'import tensorflow as tf\n'), ((14038, 14070), 'tensorflow.map_fn', 'tf.map_fn', (['recursive_map', 'inputs'], {}), '(recursive_map, inputs)\n', (14047, 14070), True, 'import tensorflow as tf\n'), ((14203, 14224), 'tensorflow.reduce_min', 'tf.reduce_min', (['values'], {}), '(values)\n', (14216, 14224), True, 'import tensorflow as tf\n'), ((14226, 14247), 'tensorflow.reduce_max', 'tf.reduce_max', (['values'], {}), '(values)\n', (14239, 14247), True, 'import tensorflow as tf\n'), ((506, 534), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (522, 534), False, 'import os\n'), ((658, 686), 'os.path.basename', 'os.path.basename', 
(['image_file'], {}), '(image_file)\n', (674, 686), False, 'import os\n'), ((820, 848), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (836, 848), False, 'import os\n'), ((10289, 10310), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (10299, 10310), False, 'import os\n'), ((10811, 10827), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (10820, 10827), True, 'import numpy as np\n'), ((10875, 10891), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (10884, 10891), True, 'import numpy as np\n'), ((13491, 13510), 'numpy.linalg.norm', 'np.linalg.norm', (['low'], {}), '(low)\n', (13505, 13510), True, 'import numpy as np\n'), ((13517, 13537), 'numpy.linalg.norm', 'np.linalg.norm', (['high'], {}), '(high)\n', (13531, 13537), True, 'import numpy as np\n'), ((13570, 13597), 'numpy.sin', 'np.sin', (['((1.0 - val) * omega)'], {}), '((1.0 - val) * omega)\n', (13576, 13597), True, 'import numpy as np\n'), ((13607, 13626), 'numpy.sin', 'np.sin', (['(val * omega)'], {}), '(val * omega)\n', (13613, 13626), True, 'import numpy as np\n'), ((13896, 13918), 'tensorflow.random_normal', 'tf.random_normal', (['(1,)'], {}), '((1,))\n', (13912, 13918), True, 'import tensorflow as tf\n'), ((7577, 7616), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['r', 'r'], {'transpose_b': '(True)'}), '(r, r, transpose_b=True)\n', (7592, 7616), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7643, 7682), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['r', 'g'], {'transpose_b': '(True)'}), '(r, g, transpose_b=True)\n', (7658, 7682), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7709, 7748), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['g', 'g'], {'transpose_b': '(True)'}), '(g, g, transpose_b=True)\n', (7724, 7748), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7780, 7806), 'tensorflow.python.ops.math_ops.reduce_mean', 'math_ops.reduce_mean', (['k_rg'], {}), '(k_rg)\n', (7800, 7806), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7903, 7928), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['k_gg'], {}), '(k_gg)\n', (7922, 7928), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7931, 7951), 'tensorflow.python.ops.math_ops.trace', 'math_ops.trace', (['k_gg'], {}), '(k_gg)\n', (7945, 7951), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((8405, 8431), 'tensorflow.python.ops.math_ops.square', 'math_ops.square', (['(ests - mn)'], {}), '(ests - mn)\n', (8420, 8431), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((10961, 10977), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (10970, 10977), True, 'import numpy as np\n'), ((7822, 7847), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['k_rr'], {}), '(k_rr)\n', (7841, 7847), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n'), ((7850, 7870), 'tensorflow.python.ops.math_ops.trace', 'math_ops.trace', (['k_rr'], {}), '(k_rr)\n', (7864, 7870), False, 'from tensorflow.python.ops import array_ops, control_flow_ops, functional_ops, math_ops\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PUB_DATAVIZ: Visualization tools for PINNACLE
# Copyright (c) 2020, <NAME>
#
# MIT License:
# https://github.com/IATE-CONICET-UNC/pinnacle/blob/master/LICENSE
from matplotlib import pyplot as plt
from pinnacle.plot_styles import cycling_attrs, aes_attrs
import numpy as np
import random
class pub_dataviz:
def __init__(self, inst):
'''
        Initialize an instance of a visualizer.
        Methods
----------------
- papers_histogram: histogram of the years of publications
- cumulative_per_author: cumulative number of papers per author
- authors_citations_years: scatter for number of authors and
citations.
- top_proceedings: relation between total number of
publications and papers.
- number_authors: distribution of the number of authors with
time.
'''
self.inst = inst
self.config = inst.config
# def filter_quality(self):
def papers_histogram(self, top=False, per_auth=False, quality=5):
'''
Papers_histogram: histogram of the years of publications
Parameters
----------
top: bool
If True, paper in selected journals are used, otherwise,
all papers.
'''
if top:
y = self.inst.pub_inst_top.year.values
else:
            # TODO: add a function here to filter by Q (quality)
y = self.inst.pub_inst_all.year.values
if per_auth:
y = list(self.inst.history.index)
Ht = []
for a in y:
k = self.inst.history.loc[a][0]
Ht.append(k)
w = []
for i in range(len(Ht)):
w.append(1/(max(1, Ht[i])))
sufix = '_norm'
else:
y = [int(a) for a in y]
Ht = np.ones(len(y))
w = np.ones(len(Ht))
sufix = ''
tbreaks = np.arange(int(min(y))-0.5, int(max(y)+1)+0.5, 1)
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot()
H = ax.hist(y, bins=tbreaks, weights=w)
ymax = max(H[0])
ax.set_ylim(0, ymax)
ax.grid()
ax.set_xlabel('year')
if top:
ax.set_ylabel('number of papers')
ax.set_title('publications by IATE')
fout = (f"{self.config.dir_plot}/"
f"papers_per_year_top{sufix}.png")
else:
ax.set_ylabel('number of published works')
ax.set_title('papers published by IATE')
fout = (f"{self.config.dir_plot}/"
f"papers_per_year_all{sufix}.png")
fig.savefig(fout)
plt.close()
def papers_histogram2(self, top=False, per_auth=False):
'''
Papers_histogram: histogram of the years of publications
Parameters
----------
top: bool
If True, paper in selected journals are used, otherwise,
all papers.
'''
if per_auth:
y = list(self.inst.history.index)
npp = []
for a in y:
k = self.inst.history.loc[a]
if top:
npp.append(k[2]/max(1, k[0]))
else:
npp.append(k[1]/max(1, k[0]))
sufix = '_norm'
hist = npp
else:
y = list(self.inst.history.index)
y = [int(a) for a in y]
sufix = ''
tbreaks = np.arange(int(min(y))-0.5, int(max(y)+1)+0.5, 1)
H = np.histogram(y, bins=tbreaks)
hist = H[0]
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot()
ax.step(y, hist)
ymax = max(hist)*1.05
ax.set_ylim(0, ymax)
ax.grid()
ax.set_xlabel('year')
if top:
ax.set_ylabel('number of papers')
ax.set_title('publications by IATE')
fout = (f"{self.config.dir_plot}/"
f"papers_per_year_top{sufix}.png")
else:
ax.set_ylabel('number of published works')
ax.set_title('papers published by IATE')
fout = (f"{self.config.dir_plot}/"
f"papers_per_year_all{sufix}.png")
fig.savefig(fout)
plt.close()
def cumulative_per_author(self, top=False, normalize_first=False):
'''
Parameters
----------
top: bool
Use all works or papers from selected journals
normalize_first: bool
Normalize to the year of the first publication
'''
import datetime
now = datetime.datetime.now()
current_year = now.year
if normalize_first:
tedges = np.arange(-0.5, 20.5, 1)
tmeans = np.arange(0, 20, 1)
fout = (f"{self.config.dir_plot}/papers_by_author_zero.png")
titlen = 'normalized to first'
xlab = 'years from first publication'
else:
tedges = np.arange(1995, 2021, 1)
tmeans = np.arange(1995, 2020, 1)
fout = (f"{self.config.dir_plot}/papers_by_author_year.png")
titlen = ''
xlab = 'year'
if top:
df = self.inst.pub_auth_top
titlet = 'papers'
else:
df = self.inst.pub_auth_all
titlet = 'publications'
fig = plt.figure(figsize=(14, 7))
ax = fig.add_subplot()
cycling_attrs()
y_max = 0
auth_names = list(df.author1.unique())
for a in auth_names:
d = df[df['author1'].isin([a])]
y = [int(i) for i in d.year.values]
if len(y) == 0:
continue
y = np.array(y)
if normalize_first:
active = current_year - min(y) + 1
y = y - min(y)
tedges = np.arange(-0.5, active + 0.5, 1)
tmeans = np.arange(0, active, 1)
H = np.histogram(y, bins=tedges)
ac = H[0].cumsum()
y_max = max(y_max, max(ac))
aesthetics = aes_attrs()
ax.plot(tmeans, ac, label=a, **aesthetics)
title = f'Cumulative {titlet} by IATE researchers {titlen}'
ax.set_title(title)
ax.set_xlabel(xlab)
ax.set_ylabel('cumulative number')
ax.legend(loc=2, ncol=2, fontsize='small', frameon=False,
handlelength=6)
fig.savefig(fout)
plt.close()
def authors_citations_years(self, top=True):
'''
Plot a scatter of number of authors and number of citations
Parameters
----------
top: bool
Use all works or papers from selected journals
'''
if top:
df = self.inst.pub_inst_top
else:
df = self.inst.pub_inst_all
npapers = df.shape[0]
na = []
nc = []
ye = []
for i in range(npapers):
pprs = df.iloc[i]
nauths = len(pprs.authors)
ncitas = pprs.citation_count
year = pprs.year
r = random.random()*0.6 - 0.3
na.append(nauths+r)
r = random.random()*0.6 - 0.3
nc.append(ncitas+1+r)
ye.append(int(year))
y = ((np.array(ye)-1980)*0.2)**2.6
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot()
ax.scatter(na, nc, s=y, color=(0, 0, 1, 0.3))
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('Number of authors')
ax.set_ylabel('Number of citations + 1')
ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5), labelspacing=3)
fout = (f"{self.config.dir_plot}/nauth_ncitas_year.png")
fig.savefig(fout)
plt.close()
def top_proceedings(self):
'''
Plot a scatter of number of publications vs number of papers
'''
tod = []
top = []
auth_names = list(self.inst.pub_inst_all.author1.unique())
for a in auth_names:
df = self.inst.pub_inst_all
dfa = df[df['author1'].isin([a])]
df = self.inst.pub_inst_top
dft = df[df['author1'].isin([a])]
tod.append(dfa.shape[0])
top.append(dft.shape[0])
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot()
ax.scatter(tod, top)
m = max(tod)
ax.plot([0, m], [0, m])
ax.set_title('all works vs. top papers')
ax.set_xlabel('all works')
ax.set_ylabel('papers top')
fout = (f"{self.config.dir_plot}/top_vs_all.png")
fig.savefig(fout)
plt.close()
def number_authors(self, top=True):
'''
Plot a scatter for the number of authors as a function of time
Parameters
----------
top: bool
Use all works or papers from selected journals
'''
if top:
df = self.inst.pub_inst_top
else:
df = self.inst.pub_inst_all
nauth = []
for i, p in df.iterrows():
nauth.append(len(p.authors))
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot()
years = [int(y) for y in df.year.values]
ax.scatter(years, nauth)
ax.set_yscale('log')
ax.set_title('number of authors per year')
ax.set_xlabel('year')
ax.set_ylabel('N authors')
fout = (f"{self.config.dir_plot}/year_nauth.png")
fig.savefig(fout)
plt.close()
def nauth_npprs(self, top=True):
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot()
x = list(self.inst.history.index)
y = self.inst.history['pop']
if top:
z = self.inst.history['npapers_top']
else:
z = self.inst.history['npapers_all']
ax.plot(x, y, label='authors')
ax.plot(x, z, label='papers')
ax.legend()
ax.set_title('number of authors per paper')
ax.set_xlabel('year')
ax.set_ylabel('N authors / paper')
if top:
ax.set_title('publications by IATE, top papers')
fout = (f"{self.config.dir_plot}/nauth_npprs_years_top.png")
else:
ax.set_title('papers published by IATE, all works')
fout = (f"{self.config.dir_plot}/nauth_npprs_years_all.png")
fig.savefig(fout)
plt.close()
def plot_all(self):
'''
Make all the plots.
'''
self.papers_histogram2(top=True)
self.papers_histogram2(top=False)
self.papers_histogram2(top=True, per_auth=True)
self.papers_histogram2(top=False, per_auth=True)
self.cumulative_per_author(top=False, normalize_first=False)
self.cumulative_per_author(top=False, normalize_first=True)
self.cumulative_per_author(top=True, normalize_first=False)
self.cumulative_per_author(top=True, normalize_first=True)
self.authors_citations_years()
self.top_proceedings()
self.nauth_npprs()
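# Typical usage (a sketch; `inst` is assumed to be a pinnacle institution object
# exposing the publication tables and a `config` with dir_plot set):
#   viz = pub_dataviz(inst)
#   viz.plot_all()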
|
[
"numpy.histogram",
"pinnacle.plot_styles.cycling_attrs",
"matplotlib.pyplot.close",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"numpy.array",
"pinnacle.plot_styles.aes_attrs",
"random.random",
"numpy.arange"
] |
[((2049, 2075), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (2059, 2075), True, 'from matplotlib import pyplot as plt\n'), ((2733, 2744), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2742, 2744), True, 'from matplotlib import pyplot as plt\n'), ((3678, 3704), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (3688, 3704), True, 'from matplotlib import pyplot as plt\n'), ((4344, 4355), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4353, 4355), True, 'from matplotlib import pyplot as plt\n'), ((4699, 4722), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4720, 4722), False, 'import datetime\n'), ((5458, 5485), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 7)'}), '(figsize=(14, 7))\n', (5468, 5485), True, 'from matplotlib import pyplot as plt\n'), ((5525, 5540), 'pinnacle.plot_styles.cycling_attrs', 'cycling_attrs', ([], {}), '()\n', (5538, 5540), False, 'from pinnacle.plot_styles import cycling_attrs, aes_attrs\n'), ((6544, 6555), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6553, 6555), True, 'from matplotlib import pyplot as plt\n'), ((7423, 7450), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7433, 7450), True, 'from matplotlib import pyplot as plt\n'), ((7867, 7878), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7876, 7878), True, 'from matplotlib import pyplot as plt\n'), ((8399, 8426), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (8409, 8426), True, 'from matplotlib import pyplot as plt\n'), ((8754, 8765), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8763, 8765), True, 'from matplotlib import pyplot as plt\n'), ((9242, 9269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (9252, 9269), True, 'from matplotlib import pyplot as plt\n'), ((9624, 9635), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9633, 9635), True, 'from matplotlib import pyplot as plt\n'), ((9688, 9715), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (9698, 9715), True, 'from matplotlib import pyplot as plt\n'), ((10516, 10527), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10525, 10527), True, 'from matplotlib import pyplot as plt\n'), ((3609, 3638), 'numpy.histogram', 'np.histogram', (['y'], {'bins': 'tbreaks'}), '(y, bins=tbreaks)\n', (3621, 3638), True, 'import numpy as np\n'), ((4805, 4829), 'numpy.arange', 'np.arange', (['(-0.5)', '(20.5)', '(1)'], {}), '(-0.5, 20.5, 1)\n', (4814, 4829), True, 'import numpy as np\n'), ((4851, 4870), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(1)'], {}), '(0, 20, 1)\n', (4860, 4870), True, 'import numpy as np\n'), ((5072, 5096), 'numpy.arange', 'np.arange', (['(1995)', '(2021)', '(1)'], {}), '(1995, 2021, 1)\n', (5081, 5096), True, 'import numpy as np\n'), ((5118, 5142), 'numpy.arange', 'np.arange', (['(1995)', '(2020)', '(1)'], {}), '(1995, 2020, 1)\n', (5127, 5142), True, 'import numpy as np\n'), ((5799, 5810), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5807, 5810), True, 'import numpy as np\n'), ((6049, 6077), 'numpy.histogram', 'np.histogram', (['y'], {'bins': 'tedges'}), '(y, bins=tedges)\n', (6061, 6077), True, 'import numpy as np\n'), ((6175, 6186), 'pinnacle.plot_styles.aes_attrs', 'aes_attrs', ([], {}), '()\n', (6184, 6186), False, 'from 
pinnacle.plot_styles import cycling_attrs, aes_attrs\n'), ((5950, 5982), 'numpy.arange', 'np.arange', (['(-0.5)', '(active + 0.5)', '(1)'], {}), '(-0.5, active + 0.5, 1)\n', (5959, 5982), True, 'import numpy as np\n'), ((6008, 6031), 'numpy.arange', 'np.arange', (['(0)', 'active', '(1)'], {}), '(0, active, 1)\n', (6017, 6031), True, 'import numpy as np\n'), ((7197, 7212), 'random.random', 'random.random', ([], {}), '()\n', (7210, 7212), False, 'import random\n'), ((7271, 7286), 'random.random', 'random.random', ([], {}), '()\n', (7284, 7286), False, 'import random\n'), ((7379, 7391), 'numpy.array', 'np.array', (['ye'], {}), '(ye)\n', (7387, 7391), True, 'import numpy as np\n')]
|
import numpy as np
def Topsis(weights, numerical_data, impact):
try:
if(numerical_data.shape[1] != weights.shape[0] or weights.shape != impact.shape or numerical_data.shape[1] != impact.shape[0]):
raise Exception("Given input is not correct")
except Exception as e:
print("Given input is incorrect")
return
#Converting weight matrix into percent form
weights = weights/weights.sum()
#Making normalized matrix
for i in range(numerical_data.shape[1]):
numerical_data[:,i] = (numerical_data[:,i]/np.sqrt((numerical_data[:,i]**2).sum()))
#Multiplying columns with their specific weights
numerical_data = numerical_data*(weights.reshape(1,numerical_data.shape[1]))
ideal_best_values = []
ideal_worst_values = []
for i in range(numerical_data.shape[1]):
if(impact[i] == "+"):
#It indicates this particular feature value need to be increased
ideal_best_values.append(numerical_data[:,i].max())
ideal_worst_values.append(numerical_data[:,i].min())
elif(impact[i] == "-"):
#This feature value need to be decreased
ideal_best_values.append(numerical_data[:,i].min())
ideal_worst_values.append(numerical_data[:,i].max())
ideal_best_values = np.array(ideal_best_values, dtype = np.float)
ideal_worst_values = np.array(ideal_worst_values, dtype = np.float)
euclDist_ideal_best = np.sqrt(((numerical_data - ideal_best_values)**2).sum(axis = 1))
euclDist_ideal_worst = np.sqrt(((numerical_data - ideal_worst_values)**2).sum(axis = 1))
performance_score = euclDist_ideal_worst/(euclDist_ideal_best + euclDist_ideal_worst)
ranking = np.argsort(performance_score)
    return np.argmax(performance_score)  # return the index of the row with the maximum performance score
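# Example (hypothetical data): rank three alternatives on two criteria, where the
# first criterion should be minimized ('-') and the second maximized ('+').
# Note: the dtype=np.float alias above requires an older NumPy release.
#   scores = np.array([[250., 16.], [200., 32.], [300., 8.]])
#   weights = np.array([1., 1.])
#   impact = np.array(['-', '+'])
#   best_row = Topsis(weights, scores, impact)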
|
[
"numpy.argsort",
"numpy.array",
"numpy.argmax"
] |
[((1182, 1225), 'numpy.array', 'np.array', (['ideal_best_values'], {'dtype': 'np.float'}), '(ideal_best_values, dtype=np.float)\n', (1190, 1225), True, 'import numpy as np\n'), ((1250, 1294), 'numpy.array', 'np.array', (['ideal_worst_values'], {'dtype': 'np.float'}), '(ideal_worst_values, dtype=np.float)\n', (1258, 1294), True, 'import numpy as np\n'), ((1575, 1604), 'numpy.argsort', 'np.argsort', (['performance_score'], {}), '(performance_score)\n', (1585, 1604), True, 'import numpy as np\n'), ((1613, 1641), 'numpy.argmax', 'np.argmax', (['performance_score'], {}), '(performance_score)\n', (1622, 1641), True, 'import numpy as np\n')]
|
from copy import deepcopy
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from apex import amp
from torch.cuda.amp import autocast as autocast
from transformers import BertModel, BertTokenizer
from util import text_processing
from collections import OrderedDict
from . import ops as ops
from .config import cfg
from .lcgn import LCGN, SemanLCGN
from .input_unit import Encoder
from .output_unit import Classifier
from .optimization import *
class SingleHop(nn.Module):
def __init__(self):
super().__init__()
self.proj_q = ops.Linear(cfg.ENC_DIM, cfg.CTX_DIM)
self.inter2att = ops.Linear(cfg.CTX_DIM, 1)
def forward(self, kb, vecQuestions, imagesObjectNum):
proj_q = self.proj_q(vecQuestions)
interactions = F.normalize(kb * proj_q[:, None, :], dim=-1)
        raw_att = self.inter2att(interactions).squeeze(-1)  # 128 * 49
raw_att = ops.apply_mask1d(raw_att, imagesObjectNum)
att = F.softmax(raw_att, dim=-1)
x_att = torch.bmm(att[:, None, :], kb).squeeze(1)
return x_att
class LCGNnet(nn.Module):
def __init__(self, num_vocab, num_choices):
super().__init__()
if cfg.INIT_WRD_EMB_FROM_FILE:
embeddingsInit = np.load(cfg.WRD_EMB_INIT_FILE) # 2956 * 300
assert embeddingsInit.shape == (num_vocab-1, cfg.WRD_EMB_DIM)
else:
embeddingsInit = np.random.randn(num_vocab-1, cfg.WRD_EMB_DIM)
self.num_vocab = num_vocab # 2957
self.num_choices = num_choices # 1845
self.tokenizer = BertTokenizer.from_pretrained('/home/xdjf/bert_config/bert-base-uncased')
self.model = BertModel.from_pretrained('/home/xdjf/bert_config/bert-base-uncased')
self.name_dict = text_processing.VocabDict(cfg.VOCAB_NAME_FILE)
name_embedding = self.reset_name_embedding()
self.encoder = Encoder(embeddingsInit, name_embedding)
self.lcgn = LCGN()
#self.sema_lcgn = SemanLCGN()
self.single_hop = SingleHop()
self.classifier = Classifier(num_choices)
#self.seman_encoder = ops.Linear(cfg.WRD_EMB_DIM, cfg.CMD_DIM)
def reset_name_embedding(self):
weight = torch.zeros(self.name_dict.num_vocab - 1, 768)
for word in self.name_dict.word_list:
if word == '<unk>':
continue
temp_embedding = self.extract_name_embedding(word)
weight[self.name_dict.word2idx(word) - 1] = temp_embedding
return weight
def extract_name_embedding(self, name):
token_name = self.tokenizer.encode(name, add_special_tokens=False)
input_ids = torch.tensor([token_name])
with torch.no_grad():
_, out = self.model(input_ids)
return out # 1* 768
def forward(self, batch):
#batchSize = len(batch['image_feat_batch'])
questionIndices = batch[0]
questionLengths = batch[1]
semanIndices = batch[2]
semanLengths = batch[3]
answerIndices = batch[4]
nameIndices = batch[5]
nameLengths = batch[6]
images = batch[7]
imagesObjectNum = batch[8]
batchSize = images.size(0)
# LSTM
questionCntxWords, vecQuestions, word_seman, encode_seman, name_embed = self.encoder(
questionIndices, questionLengths, # 128 * 30 * 512 128 * 512
semanIndices, semanLengths,
nameIndices, nameLengths)
encode_seman = encode_seman.permute(1, 0, 2)
#encode_seman = self.seman_encoder(encode_seman)
# semanCnt = semanCnt[:, 0, :]
# LCGN
x_out = self.lcgn(
images=images, q_encoding=vecQuestions,
lstm_outputs=questionCntxWords, word_seman=word_seman, encode_seman=encode_seman, semanIndices=semanIndices, batch_size=batchSize,
q_length=questionLengths, entity_num=imagesObjectNum, name_embed=name_embed, nameLengths=nameLengths)
# x_out_seman = self.sema_lcgn(
# images=images, seman_outputs=semanCnt,
# batch_size=batchSize, entity_num=imagesObjectNum)
# x_out = self.tensor_inter_graph_propagation(x_out, x_out_seman)
# Single-Hop
x_att = self.single_hop(x_out, vecQuestions, imagesObjectNum)
logits = self.classifier(x_att, vecQuestions) # 128 * 1845
predictions, num_correct = self.add_pred_op(logits, answerIndices)
loss = self.add_answer_loss_op(logits, answerIndices)
return {"predictions": predictions,
"batch_size": int(batchSize),
"num_correct": int(num_correct),
"loss": loss,
"accuracy": float(num_correct * 1. / batchSize)}
def tensor_inter_graph_propagation(self, x_out_1, x_out_2):
bsz, imageNum, dModel= x_out_1.size(0), x_out_1.size(1), x_out_1.size(2)
x_sum_1 = torch.sum(x_out_1, dim=1)
x_sum_2 = torch.sum(x_out_2, dim=1)
x_expand_1 = x_sum_1.repeat(1, 2)
x_expand_2 = x_sum_2.repeat(1, 2)
x_sum = torch.cat([x_expand_1, x_expand_2], -1)
x_sum = x_sum.unsqueeze(1)
x_sum = x_sum.repeat(1, imageNum, 1)
x_union = torch.cat([x_out_1, x_out_2], dim=-1)
x_union_expand = x_union.repeat(1, 1, 2)
x_kr = torch.mul(x_union_expand, x_sum)
x_kr = x_kr.view(bsz * imageNum, 4, dModel)
x_kr = x_kr.permute(0, 2, 1)
x_out = self.conv1d(x_kr)
x_out = x_out.squeeze(-1)
x_out = x_out.view(bsz, imageNum, dModel)
return x_out
def add_pred_op(self, logits, answers):
if cfg.MASK_PADUNK_IN_LOGITS:
logits = logits.clone()
logits[..., :2] += -1e30 # mask <pad> and <unk>
preds = torch.argmax(logits, dim=-1).detach() # 128
corrects = (preds == answers)
correctNum = torch.sum(corrects).item()
preds = preds.cpu()#.numpy()
return preds, correctNum
def add_answer_loss_op(self, logits, answers):
if cfg.TRAIN.LOSS_TYPE == "softmax":
loss = F.cross_entropy(logits, answers)
elif cfg.TRAIN.LOSS_TYPE == "sigmoid":
answerDist = F.one_hot(answers, self.num_choices).float() # 128 * 1845
loss = F.binary_cross_entropy_with_logits(
logits, answerDist) * self.num_choices
else:
raise Exception("non-identified loss")
return loss
class LCGNwrapper():
def __init__(self, num_vocab, num_choices, cfg=None, rank=-1, gpu=0):
self.no_decay = ['bias', 'norm']
torch.cuda.set_device(gpu)
self.model = LCGNnet(num_vocab, num_choices).cuda(gpu)
self.trainable_params = [
{
"params": [p for n, p in self.model.named_parameters() if p.requires_grad and not any(nd in n for nd in self.no_decay)],
"weight_decay": cfg.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if p.requires_grad and any(nd in n for nd in self.no_decay)],
"weight_decay": 0.0
}
]
self.optimizer = torch.optim.AdamW(
self.trainable_params, lr=cfg.TRAIN.SOLVER.LR)
#self.optimizer = AdamW(self.trainable_params, lr=cfg.TRAIN.SOLVER.LR, eps=cfg.adam_epsilon)
total_step = int(943000 / cfg.n_gpus // cfg.TRAIN.BATCH_SIZE + 1) * cfg.TRAIN.MAX_EPOCH
self.scheduler = get_cosine_schedule_with_warmup(
self.optimizer, num_warmup_steps=cfg.warmup_steps, num_training_steps=total_step)
if cfg.fp16:
self.scaler = torch.cuda.amp.GradScaler()
#self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level=cfg.fp16_opt_level)
if cfg.n_gpus > 1:
self.model = nn.parallel.DistributedDataParallel(self.model,
device_ids=[gpu], output_device=gpu, find_unused_parameters=True)
self.lr = cfg.TRAIN.SOLVER.LR
self.fp16 = cfg.fp16
self.fp16_opt_level = cfg.fp16_opt_level
if cfg.USE_EMA:
self.ema_param_dict = {
name: p for name, p in self.model.named_parameters()
if p.requires_grad}
self.ema = ops.ExponentialMovingAverage(
self.ema_param_dict, decay=cfg.EMA_DECAY_RATE)
self.using_ema_params = False
def train(self, training=True):
self.model.train(training)
if training:
self.set_params_from_original()
else:
self.set_params_from_ema()
def eval(self):
self.train(False)
    def state_dict(self):
        # Generate state dict in training mode
        current_mode = self.model.training
        self.train(True)
        assert (not cfg.USE_EMA) or (not self.using_ema_params)
        state = {
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'ema': self.ema.state_dict() if cfg.USE_EMA else None
        }
        # restore original mode
        self.train(current_mode)
        return state
def load_state_dict(self, state_dict):
# Load parameters in training mode
current_mode = self.model.training
self.train(True)
assert (not cfg.USE_EMA) or (not self.using_ema_params)
new_state_dict = OrderedDict()
for k, v in state_dict['model'].items():
name = k[7: ]
new_state_dict[name] = v
self.model.load_state_dict(new_state_dict)
if 'optimizer' in state_dict:
self.optimizer.load_state_dict(state_dict['optimizer'])
else:
print('Optimizer does not exist in checkpoint! '
'Loaded only model parameters.')
if cfg.USE_EMA:
if 'ema' in state_dict and state_dict['ema'] is not None:
self.ema.load_state_dict(state_dict['ema'])
else:
print('cfg.USE_EMA is True, but EMA does not exist in '
'checkpoint! Using model params to initialize EMA.')
self.ema.load_state_dict(
{k: p.data for k, p in self.ema_param_dict.items()})
# restore original mode
self.train(current_mode)
def set_params_from_ema(self):
if (not cfg.USE_EMA) or self.using_ema_params:
return
self.original_state_dict = deepcopy(self.model.state_dict())
self.ema.set_params_from_ema(self.ema_param_dict)
self.using_ema_params = True
def set_params_from_original(self):
if (not cfg.USE_EMA) or (not self.using_ema_params):
return
self.model.load_state_dict(self.original_state_dict)
self.using_ema_params = False
def run_batch(self, batch, train, lr=None):
assert train == self.model.training
assert (not train) or (lr is not None), 'lr must be set for training'
if train:
if lr != self.lr:
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
self.lr = lr
self.optimizer.zero_grad()
if cfg.fp16:
with autocast():
batch_res = self.model.forward(batch)
else:
batch_res = self.model.forward(batch)
loss = batch_res['loss']
if self.fp16:
self.scaler.scale(loss).backward()
# with amp.scale_loss(loss, self.optimizer) as scaled_loss:
# scaled_loss.backward()
else:
loss.backward()
if cfg.TRAIN.CLIP_GRADIENTS:
if self.fp16:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(
self.model.parameters(), cfg.TRAIN.GRAD_MAX_NORM)
#torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), cfg.TRAIN.GRAD_MAX_NORM)
else:
nn.utils.clip_grad_norm_(
self.model.parameters(), cfg.TRAIN.GRAD_MAX_NORM)
if cfg.fp16:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
self.scheduler.step()
batch_res['lr'] = self.scheduler.get_last_lr()[0]
if cfg.USE_EMA:
self.ema.step(self.ema_param_dict)
else:
with torch.no_grad():
batch_res = self.model.forward(batch)
return batch_res
|
[
"torch.mul",
"util.text_processing.VocabDict",
"torch.sum",
"torch.bmm",
"torch.nn.functional.softmax",
"torch.cuda.amp.GradScaler",
"torch.cuda.amp.autocast",
"torch.nn.parallel.DistributedDataParallel",
"torch.argmax",
"collections.OrderedDict",
"transformers.BertModel.from_pretrained",
"torch.nn.functional.normalize",
"torch.nn.functional.one_hot",
"numpy.random.randn",
"torch.cuda.set_device",
"torch.cat",
"transformers.BertTokenizer.from_pretrained",
"torch.tensor",
"torch.nn.functional.cross_entropy",
"torch.no_grad",
"numpy.load",
"torch.zeros",
"torch.optim.AdamW",
"torch.nn.functional.binary_cross_entropy_with_logits"
] |
[((796, 840), 'torch.nn.functional.normalize', 'F.normalize', (['(kb * proj_q[:, None, :])'], {'dim': '(-1)'}), '(kb * proj_q[:, None, :], dim=-1)\n', (807, 840), True, 'import torch.nn.functional as F\n'), ((985, 1011), 'torch.nn.functional.softmax', 'F.softmax', (['raw_att'], {'dim': '(-1)'}), '(raw_att, dim=-1)\n', (994, 1011), True, 'import torch.nn.functional as F\n'), ((1584, 1657), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""/home/xdjf/bert_config/bert-base-uncased"""'], {}), "('/home/xdjf/bert_config/bert-base-uncased')\n", (1613, 1657), False, 'from transformers import BertModel, BertTokenizer\n'), ((1679, 1748), 'transformers.BertModel.from_pretrained', 'BertModel.from_pretrained', (['"""/home/xdjf/bert_config/bert-base-uncased"""'], {}), "('/home/xdjf/bert_config/bert-base-uncased')\n", (1704, 1748), False, 'from transformers import BertModel, BertTokenizer\n'), ((1774, 1820), 'util.text_processing.VocabDict', 'text_processing.VocabDict', (['cfg.VOCAB_NAME_FILE'], {}), '(cfg.VOCAB_NAME_FILE)\n', (1799, 1820), False, 'from util import text_processing\n'), ((2217, 2263), 'torch.zeros', 'torch.zeros', (['(self.name_dict.num_vocab - 1)', '(768)'], {}), '(self.name_dict.num_vocab - 1, 768)\n', (2228, 2263), False, 'import torch\n'), ((2676, 2702), 'torch.tensor', 'torch.tensor', (['[token_name]'], {}), '([token_name])\n', (2688, 2702), False, 'import torch\n'), ((4912, 4937), 'torch.sum', 'torch.sum', (['x_out_1'], {'dim': '(1)'}), '(x_out_1, dim=1)\n', (4921, 4937), False, 'import torch\n'), ((4956, 4981), 'torch.sum', 'torch.sum', (['x_out_2'], {'dim': '(1)'}), '(x_out_2, dim=1)\n', (4965, 4981), False, 'import torch\n'), ((5084, 5123), 'torch.cat', 'torch.cat', (['[x_expand_1, x_expand_2]', '(-1)'], {}), '([x_expand_1, x_expand_2], -1)\n', (5093, 5123), False, 'import torch\n'), ((5223, 5260), 'torch.cat', 'torch.cat', (['[x_out_1, x_out_2]'], {'dim': '(-1)'}), '([x_out_1, x_out_2], dim=-1)\n', (5232, 5260), False, 'import torch\n'), ((5326, 5358), 'torch.mul', 'torch.mul', (['x_union_expand', 'x_sum'], {}), '(x_union_expand, x_sum)\n', (5335, 5358), False, 'import torch\n'), ((6621, 6647), 'torch.cuda.set_device', 'torch.cuda.set_device', (['gpu'], {}), '(gpu)\n', (6642, 6647), False, 'import torch\n'), ((7203, 7267), 'torch.optim.AdamW', 'torch.optim.AdamW', (['self.trainable_params'], {'lr': 'cfg.TRAIN.SOLVER.LR'}), '(self.trainable_params, lr=cfg.TRAIN.SOLVER.LR)\n', (7220, 7267), False, 'import torch\n'), ((9482, 9495), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9493, 9495), False, 'from collections import OrderedDict\n'), ((1263, 1293), 'numpy.load', 'np.load', (['cfg.WRD_EMB_INIT_FILE'], {}), '(cfg.WRD_EMB_INIT_FILE)\n', (1270, 1293), True, 'import numpy as np\n'), ((1424, 1471), 'numpy.random.randn', 'np.random.randn', (['(num_vocab - 1)', 'cfg.WRD_EMB_DIM'], {}), '(num_vocab - 1, cfg.WRD_EMB_DIM)\n', (1439, 1471), True, 'import numpy as np\n'), ((2716, 2731), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2729, 2731), False, 'import torch\n'), ((6115, 6147), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'answers'], {}), '(logits, answers)\n', (6130, 6147), True, 'import torch.nn.functional as F\n'), ((7717, 7744), 'torch.cuda.amp.GradScaler', 'torch.cuda.amp.GradScaler', ([], {}), '()\n', (7742, 7744), False, 'import torch\n'), ((7913, 8030), 'torch.nn.parallel.DistributedDataParallel', 'nn.parallel.DistributedDataParallel', (['self.model'], {'device_ids': '[gpu]', 'output_device': 
'gpu', 'find_unused_parameters': '(True)'}), '(self.model, device_ids=[gpu],\n output_device=gpu, find_unused_parameters=True)\n', (7948, 8030), False, 'from torch import nn\n'), ((1029, 1059), 'torch.bmm', 'torch.bmm', (['att[:, None, :]', 'kb'], {}), '(att[:, None, :], kb)\n', (1038, 1059), False, 'import torch\n'), ((5798, 5826), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (5810, 5826), False, 'import torch\n'), ((5901, 5920), 'torch.sum', 'torch.sum', (['corrects'], {}), '(corrects)\n', (5910, 5920), False, 'import torch\n'), ((12633, 12648), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12646, 12648), False, 'import torch\n'), ((6297, 6351), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logits', 'answerDist'], {}), '(logits, answerDist)\n', (6331, 6351), True, 'import torch.nn.functional as F\n'), ((11331, 11341), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (11339, 11341), True, 'from torch.cuda.amp import autocast as autocast\n'), ((6220, 6256), 'torch.nn.functional.one_hot', 'F.one_hot', (['answers', 'self.num_choices'], {}), '(answers, self.num_choices)\n', (6229, 6256), True, 'import torch.nn.functional as F\n')]
|
"""
3D Agn spin visualisation
"""
import logging
import os
import shutil
import corner
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyvista as pv
import scipy.stats
from bbh_simulator.calculate_kick_vel_from_samples import Samples
from matplotlib import rc
from tqdm import tqdm
logging.getLogger("bbh_simulator").setLevel(logging.ERROR)
logging.getLogger().setLevel(logging.ERROR)
rc("text", usetex=True)
N_VEC = "Num BBH"
COS_theta_12 = "cos(theta_12)"
COS_theta_1L = "cos(theta_1L)"
BILBY_BLUE_COLOR = "#0072C1"
VIOLET_COLOR = "#8E44AD"
PARAMS = dict(
chi_eff=dict(l=r"$\chi_{eff}$", r=(-1, 1)),
chi_p=dict(l=r"$\chi_{p}$", r=(0, 1)),
cos_tilt_1=dict(l=r"$\cos(t1)$", r=(-1, 1)),
cos_tilt_2=dict(l=r"$\cos(t2)$", r=(-1, 1)),
cos_theta_12=dict(l=r"$\cos \theta_{12}$", r=(-1, 1)),
cos_theta_1L=dict(l=r"$\cos \theta_{1L}$", r=(-1, 1)),
)
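# NOTE: CORNER_KWARGS is referenced by the corner plots below but is not
# defined anywhere in this file; the dict here is only an assumed, minimal
# stand-in built from standard corner.corner keyword arguments.
CORNER_KWARGS = dict(
    smooth=0.9,
    show_titles=True,
    label_kwargs=dict(fontsize=16),
    title_kwargs=dict(fontsize=16),
    quantiles=[0.16, 0.84],
    levels=(1 - np.exp(-0.5), 1 - np.exp(-2), 1 - np.exp(-9 / 2.0)),
    plot_density=False,
    plot_datapoints=False,
    fill_contours=True,
    max_n_ticks=3,
)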
def rotate_vector_along_z(v1, theta):
"""
    Rotation of v1 about the z axis by angle theta:
    |cos theta  -sin theta  0| |x|   |x cos theta - y sin theta|   |x'|
    |sin theta   cos theta  0| |y| = |x sin theta + y cos theta| = |y'|
    |    0            0     1| |z|   |            z            |   |z'|
"""
x, y, z = v1[0], v1[1], v1[2]
return [
x * np.cos(theta) - y * np.sin(theta),
x * np.sin(theta) + y * np.cos(theta),
z,
]
def rotate_vector_along_y(v1, theta):
"""
    Rotation of v1 about the y axis by angle theta:
    | cos theta  0  sin theta| |x|   | x cos theta + z sin theta|   |x'|
    |     0      1      0    | |y| = |             y            | = |y'|
    |-sin theta  0  cos theta| |z|   |-x sin theta + z cos theta|   |z'|
"""
x, y, z = v1[0], v1[1], v1[2]
return [
x * np.cos(theta) + z * np.sin(theta),
y,
-x * np.sin(theta) + z * np.cos(theta),
]
def get_isotropic_vector(std=1):
"""
    Generates a random 3D unit vector whose azimuth is uniform in [0, 2pi) and
    whose cos(theta) is drawn from a normal distribution truncated to [-1, 1],
    peaked at 1 with width std: small std clusters vectors around +z, while
    larger std approaches an isotropic (uniform spherical) distribution.
    Algo adapted from http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
    :return: [x, y, z] components of the unit vector
"""
    phi = np.random.uniform(0, np.pi * 2)  # azimuthal angle
    # truncated normal distribution --> peaks at costheta = 1
    # hyperparam --> sigma
    # costheta = np.random.uniform(std, 1)
    mean = 1
    clip_a, clip_b = -1, 1
    if std == 0:
        std = 0.00001
    a, b = (clip_a - mean) / std, (clip_b - mean) / std
    costheta = scipy.stats.truncnorm.rvs(
        a=a, b=b, loc=mean, scale=std, size=1
    )[0]
    theta = np.arccos(costheta)
    x = np.sin(theta) * np.cos(phi)
    y = np.sin(theta) * np.sin(phi)
z = np.cos(theta)
return [x, y, z]
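# Illustration (not part of the original script): std sets how tightly vectors
# cluster around +z, e.g. get_isotropic_vector(std=0.01) returns a vector with
# z very close to 1, while larger std approaches an isotropic distribution.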
def rotate_v2_to_v1(v1, v2):
azimuth = get_azimuth_angle(v1[0], v1[1])
zenith = get_zenith_angle(v1[2])
v2 = rotate_vector_along_y(v2, zenith)
v2 = rotate_vector_along_z(v2, azimuth)
return v2
def compute_vectors(mesh):
origin = 0
vectors = mesh.points - origin
vectors = normalise_vectors(vectors)
return vectors
def normalise_vectors(vectors):
return vectors / np.linalg.norm(vectors, axis=1)[:, None]
class SphereAngleAnimation:
def __init__(self):
# default parameters
self.kwargs = {
"radius": 1,
N_VEC: 100,
COS_theta_1L: 1,
COS_theta_12: 1,
}
self.s1_color = "lightblue"
self.s2_color = "lightgreen"
self.plotter = self.init_plotter()
self.add_sliders()
self.plotter.show("AGN BBH spins")
self.add_vectors()
def __call__(self, param, value):
self.kwargs[param] = value
self.update()
def add_sliders(self):
LEFT = dict(
pointa=(0.025, 0.1),
pointb=(0.31, 0.1),
)
MIDDLE = dict(pointa=(0.35, 0.1), pointb=(0.64, 0.1))
RIGHT = dict(
pointa=(0.67, 0.1),
pointb=(0.98, 0.1),
)
self.plotter.add_slider_widget(
callback=lambda value: self(COS_theta_1L, value),
rng=[0, 1],
value=1,
title=f"min {COS_theta_1L}",
style="modern",
**LEFT,
)
self.plotter.add_slider_widget(
callback=lambda value: self(COS_theta_12, value),
rng=[0, 1],
value=1,
title=f"min {COS_theta_12}",
style="modern",
**MIDDLE,
)
self.plotter.add_slider_widget(
callback=lambda value: self(N_VEC, int(value)),
rng=[1, 1000],
value=100,
title=N_VEC,
style="modern",
**RIGHT,
)
def init_plotter(self):
p = pv.Plotter()
p.add_mesh(pv.Sphere(radius=self.kwargs["radius"]))
ar_kwgs = dict(
scale=self.kwargs["radius"] * 2,
shaft_radius=0.01,
tip_radius=0.05,
tip_length=0.1,
)
p.add_mesh(pv.Arrow(direction=[1, 0, 0], **ar_kwgs), color="blue") # x
p.add_mesh(pv.Arrow(direction=[0, 1, 0], **ar_kwgs), color="red") # y
p.add_mesh(
pv.Arrow(direction=[0, 0, 1], **ar_kwgs), color="green"
) # Z
p.add_legend(
labels=[
["L", "green"],
["S1", self.s1_color],
["S2", self.s2_color],
]
)
return p
def add_vectors(self):
s1_vectors = [
get_isotropic_vector(self.kwargs[COS_theta_1L])
for _ in range(self.kwargs[N_VEC])
]
s2_vectors = [
get_isotropic_vector(self.kwargs[COS_theta_12])
for _ in range(self.kwargs[N_VEC])
]
s2_vectors = [
rotate_v2_to_v1(s1, s2) for s1, s2 in zip(s1_vectors, s2_vectors)
]
self.add_vector_list(s1_vectors, name="s1", color=self.s1_color)
self.add_vector_list(s2_vectors, name="s2", color=self.s2_color)
def add_vector_list(self, vectors, name, color):
self.plotter.remove_actor(f"{name}_pts")
self.plotter.remove_actor(f"{name}_arrows")
pt_cloud = pv.PolyData(vectors)
vectors = compute_vectors(pt_cloud)
pt_cloud["vectors"] = vectors
arrows = pt_cloud.glyph(
orient="vectors",
scale=False,
factor=0.3,
)
self.plotter.add_mesh(
pt_cloud,
color=color,
point_size=10,
render_points_as_spheres=True,
name=f"{name}_pts",
)
self.plotter.add_mesh(arrows, color=color, name=f"{name}_arrows")
def update(self):
self.add_vectors()
def get_zenith_angle(z):
"""Angle from z to vector [0, pi)"""
return np.arccos(z)
def get_azimuth_angle(x, y):
"""angle bw north vector and projected vector on the horizontal plane [0, 2pi]"""
azimuth = np.arctan2(y, x) # [-pi, pi)
if azimuth < 0.0:
azimuth += 2 * np.pi
return azimuth
def get_chi_eff(s1, s2, q=1):
    s1z, s2z = s1[2], s2[2]
    # chi_eff = (a_1z + q * a_2z) / (1 + q) with q = m2/m1 (unit spin magnitudes)
    return (s1z + q * s2z) / (1 + q)
def get_chi_p(s1, s2, q=1):
chi1p = np.sqrt(s1[0] ** 2 + s1[1] ** 2)
chi2p = np.sqrt(s2[0] ** 2 + s2[1] ** 2)
qfactor = q * ((4 * q) + 3) / (4 + (3 * q))
return np.maximum(chi1p, chi2p * qfactor)
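# Quick illustrative check (not part of the original script): perfectly aligned
# unit spins have no in-plane component, so
#   get_chi_p([0., 0., 1.], [0., 0., 1.])  # -> 0.0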
N = 1000
def convert_vectors_to_bbh_param(cos_theta1L_std, cos_theta12_std):
"""Generate BBH spin vectors and convert to LIGO BBH params
cos_tilt_i:
Cosine of the zenith angle between the s and j [-1,1]
theta_12:
diff bw azimuthal angles of the s1hat+s2 projections on orbital plane [0, 2pi]
theta_jl:
diff bw L and J azimuthal angles [0, 2pi]
"""
n = N
lhat = normalise_vectors([[0, 0, 1] for _ in range(n)])
s1hat = normalise_vectors(
[get_isotropic_vector(cos_theta1L_std) for _ in range(n)]
)
s2hat = normalise_vectors(
[get_isotropic_vector(cos_theta12_std) for _ in range(n)]
)
s2hat = normalise_vectors(
[rotate_v2_to_v1(s1v, s2v) for s1v, s2v in zip(s1hat, s2hat)]
)
df = pd.DataFrame(
dict(
spin_1x=s1hat[:, 0],
spin_1y=s1hat[:, 1],
spin_1z=s1hat[:, 2],
spin_2x=s2hat[:, 0],
spin_2y=s2hat[:, 1],
spin_2z=s2hat[:, 2],
cos_tilt_1=np.cos([get_zenith_angle(v[2]) for v in s1hat]),
cos_tilt_2=np.cos([get_zenith_angle(v[2]) for v in s2hat]),
chi_eff=[get_chi_eff(s1, s2) for s1, s2 in zip(s1hat, s2hat)],
chi_p=[get_chi_p(s1, s2) for s1, s2 in zip(s1hat, s2hat)],
cos_theta_12=[
np.cos(get_angle_bw_vectors(s1, s2))
for s1, s2 in zip(s1hat, s2hat)
],
cos_theta_1L=[
np.cos(get_angle_bw_vectors(s1, l))
for s1, l in zip(s1hat, lhat)
],
mass_1_source=[25 for _ in s1hat],
mass_2_source=[25 for _ in s1hat],
)
)
s = Samples(posterior=df)
# s.calculate_remnant_kick_velocity()
return s.posterior
def get_angle_bw_vectors(v1, v2):
unit_vector1 = v1 / np.linalg.norm(v1)
unit_vector2 = v2 / np.linalg.norm(v2)
dot_product = np.dot(unit_vector1, unit_vector2)
return np.arccos(dot_product)
def plot_corner_of_spins(cos_theta1L_std, cos_theta12_std, save=True):
bbh_vectors = convert_vectors_to_bbh_param(
cos_theta1L_std=cos_theta1L_std, cos_theta12_std=cos_theta12_std
)
params = [p for p in PARAMS.keys()]
bbh_vectors = bbh_vectors[params]
labels = [PARAMS[p]["l"] for p in params]
range = [PARAMS[p]["r"] for p in params]
corner.corner(bbh_vectors, **CORNER_KWARGS, labels=labels, range=range)
if save:
plt.savefig(
f"spins_theta1L{cos_theta1L_std:.2f}_theta12{cos_theta12_std:.2f}.png"
)
def get_normalisation_weight(len_current_samples, len_of_longest_samples):
return np.ones(len_current_samples) * (
len_of_longest_samples / len_current_samples
)
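# The weights scale the smaller posterior so that both overlaid corner plots
# integrate to the same effective number of samples.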
def plot_overlaid_corners(cos_theta1L_std_vals, cos_theta12_std_vals, pltdir):
params = dict(
chi_eff=dict(l=r"$\chi_{eff}$", r=(-1, 1)),
chi_p=dict(l=r"$\chi_{p}$", r=(-1, 1)),
cos_tilt_1=dict(l=r"$\cos(t1)$", r=(-1, 1)),
cos_theta_12=dict(l=r"$\cos \theta_{12}$", r=(-1, 1)),
remnant_kick_mag=dict(l=r"$|\vec{v}_k|\ $km/s", r=(0, 3000)),
)
base = convert_vectors_to_bbh_param(cos_theta1L_std=1, cos_theta12_std=1)
labels = [params[p]["l"] for p in params]
range = [params[p]["r"] for p in params]
kwargs = dict(**CORNER_KWARGS, labels=labels, range=range)
if os.path.isdir(pltdir):
shutil.rmtree(pltdir)
os.makedirs(pltdir, exist_ok=False)
i = 0
for min_cos_theta1L, min_cos_theta12 in tqdm(
zip(cos_theta1L_std_vals, cos_theta12_std_vals),
total=len(cos_theta1L_std_vals),
desc="Hyper-Param settings",
):
f = f"{pltdir}/{i:02}_p12{min_cos_theta12:.1f}_p1L{min_cos_theta1L:.1f}.png"
compare = convert_vectors_to_bbh_param(
cos_theta1L_std=min_cos_theta1L, cos_theta12_std=min_cos_theta12
)
compare.to_csv(f.replace(".png", ".csv"))
fig = corner.corner(base[params], **kwargs, color=BILBY_BLUE_COLOR)
normalising_weights = get_normalisation_weight(
len(compare), max(len(compare), len(base))
)
corner.corner(
compare[params],
fig=fig,
weights=normalising_weights,
**kwargs,
color=VIOLET_COLOR,
)
orig_line = mlines.Line2D(
[], [], color=BILBY_BLUE_COLOR, label="Isotropic Spins"
)
weighted_line = mlines.Line2D(
[],
[],
color=VIOLET_COLOR,
label=f"Adjusted spins $\sigma \cos(12)={min_cos_theta12:.1f}, \sigma \cos(1L)={min_cos_theta1L:.1f}$",
)
plt.legend(
handles=[orig_line, weighted_line],
fontsize=25,
frameon=False,
bbox_to_anchor=(1, len(labels)),
loc="upper right",
)
plt.savefig(f)
plt.close()
i += 1
import glob
from bilby_report.tools import image_utils
def save_gif(gifname, outdir="gif", loop=False):
image_paths = glob.glob(f"{outdir}/*.png")
gif_filename = os.path.join(outdir, gifname)
orig_len = len(image_paths)
image_paths.sort()
if loop:
image_paths += image_paths[::-1]
assert orig_len <= len(image_paths)
image_utils.make_gif(
image_paths=image_paths, duration=50, gif_save_path=gif_filename
)
print(f"Saved gif {gif_filename}")
if __name__ == "__main__":
r = SphereAngleAnimation()
# varying = list(np.arange(0, 2.1, 0.5))
# constant = [1 for i in range(len(varying))]
#
# outdir = "../output/vary_12"
# plot_overlaid_corners(cos_theta1L_std_vals=constant,
# cos_theta12_std_vals=varying, pltdir=outdir)
# save_gif("vary_12.gif", outdir=outdir, loop=True)
#
# outdir = "../output/vary_1L"
# plot_overlaid_corners(cos_theta1L_std_vals=varying,
# cos_theta12_std_vals=constant, pltdir=outdir)
# save_gif("vary_1L.gif", outdir=outdir, loop=True)
|
[
"logging.getLogger",
"numpy.arccos",
"numpy.sqrt",
"bilby_report.tools.image_utils.make_gif",
"numpy.arctan2",
"matplotlib.rc",
"numpy.linalg.norm",
"numpy.sin",
"corner.corner",
"matplotlib.lines.Line2D",
"pyvista.Arrow",
"bbh_simulator.calculate_kick_vel_from_samples.Samples",
"pyvista.PolyData",
"matplotlib.pyplot.close",
"numpy.dot",
"os.path.isdir",
"numpy.maximum",
"glob.glob",
"matplotlib.pyplot.savefig",
"numpy.ones",
"shutil.rmtree",
"numpy.cos",
"os.makedirs",
"os.path.join",
"pyvista.Sphere",
"numpy.random.uniform",
"pyvista.Plotter"
] |
[((452, 475), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (454, 475), False, 'from matplotlib import rc\n'), ((2082, 2113), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(np.pi * 2)'], {}), '(0, np.pi * 2)\n', (2099, 2113), True, 'import numpy as np\n'), ((2492, 2511), 'numpy.arccos', 'np.arccos', (['costheta'], {}), '(costheta)\n', (2501, 2511), True, 'import numpy as np\n'), ((2596, 2609), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2602, 2609), True, 'import numpy as np\n'), ((6720, 6732), 'numpy.arccos', 'np.arccos', (['z'], {}), '(z)\n', (6729, 6732), True, 'import numpy as np\n'), ((6864, 6880), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (6874, 6880), True, 'import numpy as np\n'), ((7105, 7137), 'numpy.sqrt', 'np.sqrt', (['(s1[0] ** 2 + s1[1] ** 2)'], {}), '(s1[0] ** 2 + s1[1] ** 2)\n', (7112, 7137), True, 'import numpy as np\n'), ((7150, 7182), 'numpy.sqrt', 'np.sqrt', (['(s2[0] ** 2 + s2[1] ** 2)'], {}), '(s2[0] ** 2 + s2[1] ** 2)\n', (7157, 7182), True, 'import numpy as np\n'), ((7242, 7276), 'numpy.maximum', 'np.maximum', (['chi1p', '(chi2p * qfactor)'], {}), '(chi1p, chi2p * qfactor)\n', (7252, 7276), True, 'import numpy as np\n'), ((8983, 9004), 'bbh_simulator.calculate_kick_vel_from_samples.Samples', 'Samples', ([], {'posterior': 'df'}), '(posterior=df)\n', (8990, 9004), False, 'from bbh_simulator.calculate_kick_vel_from_samples import Samples\n'), ((9211, 9245), 'numpy.dot', 'np.dot', (['unit_vector1', 'unit_vector2'], {}), '(unit_vector1, unit_vector2)\n', (9217, 9245), True, 'import numpy as np\n'), ((9257, 9279), 'numpy.arccos', 'np.arccos', (['dot_product'], {}), '(dot_product)\n', (9266, 9279), True, 'import numpy as np\n'), ((9653, 9724), 'corner.corner', 'corner.corner', (['bbh_vectors'], {'labels': 'labels', 'range': 'range'}), '(bbh_vectors, **CORNER_KWARGS, labels=labels, range=range)\n', (9666, 9724), False, 'import corner\n'), ((10665, 10686), 'os.path.isdir', 'os.path.isdir', (['pltdir'], {}), '(pltdir)\n', (10678, 10686), False, 'import os\n'), ((10722, 10757), 'os.makedirs', 'os.makedirs', (['pltdir'], {'exist_ok': '(False)'}), '(pltdir, exist_ok=False)\n', (10733, 10757), False, 'import os\n'), ((12340, 12368), 'glob.glob', 'glob.glob', (['f"""{outdir}/*.png"""'], {}), "(f'{outdir}/*.png')\n", (12349, 12368), False, 'import glob\n'), ((12388, 12417), 'os.path.join', 'os.path.join', (['outdir', 'gifname'], {}), '(outdir, gifname)\n', (12400, 12417), False, 'import os\n'), ((12571, 12662), 'bilby_report.tools.image_utils.make_gif', 'image_utils.make_gif', ([], {'image_paths': 'image_paths', 'duration': '(50)', 'gif_save_path': 'gif_filename'}), '(image_paths=image_paths, duration=50, gif_save_path=\n gif_filename)\n', (12591, 12662), False, 'from bilby_report.tools import image_utils\n'), ((348, 382), 'logging.getLogger', 'logging.getLogger', (['"""bbh_simulator"""'], {}), "('bbh_simulator')\n", (365, 382), False, 'import logging\n'), ((407, 426), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (424, 426), False, 'import logging\n'), ((2520, 2533), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2526, 2533), True, 'import numpy as np\n'), ((2536, 2549), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2542, 2549), True, 'import numpy as np\n'), ((2558, 2571), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2564, 2571), True, 'import numpy as np\n'), ((2574, 2587), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2580, 2587), True, 'import numpy 
as np\n'), ((4666, 4678), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (4676, 4678), True, 'import pyvista as pv\n'), ((6102, 6122), 'pyvista.PolyData', 'pv.PolyData', (['vectors'], {}), '(vectors)\n', (6113, 6122), True, 'import pyvista as pv\n'), ((9131, 9149), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (9145, 9149), True, 'import numpy as np\n'), ((9174, 9192), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (9188, 9192), True, 'import numpy as np\n'), ((9746, 9834), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""spins_theta1L{cos_theta1L_std:.2f}_theta12{cos_theta12_std:.2f}.png"""'], {}), "(\n f'spins_theta1L{cos_theta1L_std:.2f}_theta12{cos_theta12_std:.2f}.png')\n", (9757, 9834), True, 'import matplotlib.pyplot as plt\n'), ((9940, 9968), 'numpy.ones', 'np.ones', (['len_current_samples'], {}), '(len_current_samples)\n', (9947, 9968), True, 'import numpy as np\n'), ((10696, 10717), 'shutil.rmtree', 'shutil.rmtree', (['pltdir'], {}), '(pltdir)\n', (10709, 10717), False, 'import shutil\n'), ((11246, 11307), 'corner.corner', 'corner.corner', (['base[params]'], {'color': 'BILBY_BLUE_COLOR'}), '(base[params], **kwargs, color=BILBY_BLUE_COLOR)\n', (11259, 11307), False, 'import corner\n'), ((11437, 11540), 'corner.corner', 'corner.corner', (['compare[params]'], {'fig': 'fig', 'weights': 'normalising_weights', 'color': 'VIOLET_COLOR'}), '(compare[params], fig=fig, weights=normalising_weights, **\n kwargs, color=VIOLET_COLOR)\n', (11450, 11540), False, 'import corner\n'), ((11628, 11698), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': 'BILBY_BLUE_COLOR', 'label': '"""Isotropic Spins"""'}), "([], [], color=BILBY_BLUE_COLOR, label='Isotropic Spins')\n", (11641, 11698), True, 'import matplotlib.lines as mlines\n'), ((11745, 11904), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': 'VIOLET_COLOR', 'label': 'f"""Adjusted spins $\\\\sigma \\\\cos(12)={min_cos_theta12:.1f}, \\\\sigma \\\\cos(1L)={min_cos_theta1L:.1f}$"""'}), "([], [], color=VIOLET_COLOR, label=\n f'Adjusted spins $\\\\sigma \\\\cos(12)={min_cos_theta12:.1f}, \\\\sigma \\\\cos(1L)={min_cos_theta1L:.1f}$'\n )\n", (11758, 11904), True, 'import matplotlib.lines as mlines\n'), ((12164, 12178), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f'], {}), '(f)\n', (12175, 12178), True, 'import matplotlib.pyplot as plt\n'), ((12187, 12198), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12196, 12198), True, 'import matplotlib.pyplot as plt\n'), ((3040, 3071), 'numpy.linalg.norm', 'np.linalg.norm', (['vectors'], {'axis': '(1)'}), '(vectors, axis=1)\n', (3054, 3071), True, 'import numpy as np\n'), ((4698, 4737), 'pyvista.Sphere', 'pv.Sphere', ([], {'radius': "self.kwargs['radius']"}), "(radius=self.kwargs['radius'])\n", (4707, 4737), True, 'import pyvista as pv\n'), ((4925, 4965), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '[1, 0, 0]'}), '(direction=[1, 0, 0], **ar_kwgs)\n', (4933, 4965), True, 'import pyvista as pv\n'), ((5005, 5045), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '[0, 1, 0]'}), '(direction=[0, 1, 0], **ar_kwgs)\n', (5013, 5045), True, 'import pyvista as pv\n'), ((5097, 5137), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '[0, 0, 1]'}), '(direction=[0, 0, 1], **ar_kwgs)\n', (5105, 5137), True, 'import pyvista as pv\n'), ((1250, 1263), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1256, 1263), True, 'import numpy as np\n'), ((1270, 1283), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1276, 1283), True, 'import 
numpy as np\n'), ((1297, 1310), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1303, 1310), True, 'import numpy as np\n'), ((1317, 1330), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1323, 1330), True, 'import numpy as np\n'), ((1723, 1736), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1729, 1736), True, 'import numpy as np\n'), ((1743, 1756), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1749, 1756), True, 'import numpy as np\n'), ((1782, 1795), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1788, 1795), True, 'import numpy as np\n'), ((1802, 1815), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1808, 1815), True, 'import numpy as np\n')]
|
#encoding=utf8
## Reference: https://blog.csdn.net/dengxing1234/article/details/73739836
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc, roc_auc_score
import joblib
import numpy as np
from scipy.sparse import hstack
from sklearn.preprocessing import OneHotEncoder
def xgboost_lr_train(libsvmFileNameInitial):
    # load the sample data
    X_all, y_all = load_svmlight_file(libsvmFileNameInitial)
    # train/test split
    X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size = 0.3, random_state = 42)
    # define the xgboost model
xgboost = xgb.XGBClassifier(nthread=4, learning_rate=0.08,
n_estimators=50, max_depth=5, gamma=0, subsample=0.9, colsample_bytree=0.5)
    # train the xgboost model
    xgboost.fit(X_train, y_train)
    # xgboost prediction and AUC evaluation
y_pred_test = xgboost.predict_proba(X_test)[:, 1]
xgb_test_auc = roc_auc_score(y_test, y_pred_test)
print('xgboost test auc: %.5f' % xgb_test_auc)
    # encode the original features with xgboost leaf indices
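    # XGBClassifier.apply returns, for each sample, the index of the leaf it
    # falls into in every tree: an (n_samples, n_estimators) array of leaf indices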
X_train_leaves = xgboost.apply(X_train)
X_test_leaves = xgboost.apply(X_test)
    # concatenate the encoded training and test data
All_leaves = np.concatenate((X_train_leaves, X_test_leaves), axis=0)
All_leaves = All_leaves.astype(np.int32)
    # one-hot encode all leaf-index features
xgbenc = OneHotEncoder()
X_trans = xgbenc.fit_transform(All_leaves)
(train_rows, cols) = X_train_leaves.shape
    # define the LR model
    lr = LogisticRegression()
    # train LR on the xgboost-encoded samples
lr.fit(X_trans[:train_rows, :], y_train)
    # prediction and AUC evaluation
y_pred_xgblr1 = lr.predict_proba(X_trans[train_rows:, :])[:, 1]
xgb_lr_auc1 = roc_auc_score(y_test, y_pred_xgblr1)
    print('LR AUC with xgboost-encoded features: %.5f' % xgb_lr_auc1)
    # define the LR model
lr = LogisticRegression(n_jobs=-1)
    # combine the raw and encoded features
X_train_ext = hstack([X_trans[:train_rows, :], X_train])
X_test_ext = hstack([X_trans[train_rows:, :], X_test])
    # train LR on the combined features
lr.fit(X_train_ext, y_train)
    # prediction and AUC evaluation
y_pred_xgblr2 = lr.predict_proba(X_test_ext)[:, 1]
xgb_lr_auc2 = roc_auc_score(y_test, y_pred_xgblr2)
    print('LR AUC with combined features: %.5f' % xgb_lr_auc2)
if __name__ == '__main__':
xgboost_lr_train("data/sample_libsvm_data.txt")
|
[
"sklearn.datasets.load_svmlight_file",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.roc_auc_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.data.OneHotEncoder",
"scipy.sparse.hstack",
"numpy.concatenate",
"xgboost.XGBClassifier"
] |
[((536, 577), 'sklearn.datasets.load_svmlight_file', 'load_svmlight_file', (['libsvmFileNameInitial'], {}), '(libsvmFileNameInitial)\n', (554, 577), False, 'from sklearn.datasets import load_svmlight_file\n'), ((634, 696), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_all', 'y_all'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(X_all, y_all, test_size=0.3, random_state=42)\n', (650, 696), False, 'from sklearn.model_selection import train_test_split\n'), ((730, 859), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'nthread': '(4)', 'learning_rate': '(0.08)', 'n_estimators': '(50)', 'max_depth': '(5)', 'gamma': '(0)', 'subsample': '(0.9)', 'colsample_bytree': '(0.5)'}), '(nthread=4, learning_rate=0.08, n_estimators=50, max_depth\n =5, gamma=0, subsample=0.9, colsample_bytree=0.5)\n', (747, 859), True, 'import xgboost as xgb\n'), ((1023, 1057), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (1036, 1057), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((1257, 1312), 'numpy.concatenate', 'np.concatenate', (['(X_train_leaves, X_test_leaves)'], {'axis': '(0)'}), '((X_train_leaves, X_test_leaves), axis=0)\n', (1271, 1312), True, 'import numpy as np\n'), ((1395, 1410), 'sklearn.preprocessing.data.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1408, 1410), False, 'from sklearn.preprocessing.data import OneHotEncoder\n'), ((1528, 1548), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1546, 1548), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1724, 1760), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_xgblr1'], {}), '(y_test, y_pred_xgblr1)\n', (1737, 1760), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n'), ((1835, 1864), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1853, 1864), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1894, 1936), 'scipy.sparse.hstack', 'hstack', (['[X_trans[:train_rows, :], X_train]'], {}), '([X_trans[:train_rows, :], X_train])\n', (1900, 1936), False, 'from scipy.sparse import hstack\n'), ((1954, 1995), 'scipy.sparse.hstack', 'hstack', (['[X_trans[train_rows:, :], X_test]'], {}), '([X_trans[train_rows:, :], X_test])\n', (1960, 1995), False, 'from scipy.sparse import hstack\n'), ((2140, 2176), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_xgblr2'], {}), '(y_test, y_pred_xgblr2)\n', (2153, 2176), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score\n')]
|
#!/usr/bin/env python
# encoding: utf-8
"""
calc beat score of files
copyright: www.mgtv.com
"""
import os
import sys
import argparse
import numpy as np
import traceback
import beat_evaluation_toolbox as be
def calc_beat_score_of_file(annotation_file, beat_file):
#check input params
if os.path.exists(annotation_file) == False:
print("failed! annotation_file:%s not exist\n" % (annotation_file))
return False, 0.0
if os.path.exists(beat_file) == False:
print("failed! beat_file:%s not exist\n" % (beat_file))
return False, 0.0
data_annotation = np.loadtxt(annotation_file, usecols=(0))
data_annotation = np.expand_dims(data_annotation, axis=0)
data_beat = np.loadtxt(beat_file, usecols=(0))
data_beat = np.expand_dims(data_beat, axis=0)
R = be.evaluate_db(data_annotation, data_beat, 'all', doCI=False)
    # print the evaluation results
print(R['scores'])
pscore = R['scores']['pScore'][0]
f_measure = R['scores']['fMeasure'][0]
aml_c = R['scores']['amlC'][0]
aml_t = R['scores']['amlT'][0]
cml_c = R['scores']['cmlC'][0]
cml_t = R['scores']['cmlT'][0]
cem_acc = R['scores']['cemgilAcc'][0]
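    # average the seven beat-tracking metrics (pScore, fMeasure, amlC/amlT,
    # cmlC/cmlT, cemgilAcc) into a single overall score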
total_score = (aml_c + cem_acc + cml_c + f_measure + pscore + cml_t + aml_t) / 7
print("[%s] score:%.4f"%(beat_file, total_score))
return True, total_score
def calc_avg_score_of_files(annotation_files_dir, beat_files_dir, file_extension):
#check input params
if os.path.exists(annotation_files_dir) == False:
print("failed! annotation_files_dir:%s not exist\n" % (annotation_files_dir))
return False, 0.0
if os.path.exists(beat_files_dir) == False:
print("failed! beat_files_dir:%s not exist\n" % (beat_files_dir))
return False, 0.0
if not annotation_files_dir.endswith("/"):
annotation_files_dir += "/"
if not beat_files_dir.endswith("/"):
beat_files_dir += "/"
annotation_files_url = [f for f in os.listdir(annotation_files_dir) if f.endswith((file_extension))]
nb_annotation_files = len(annotation_files_url)
beat_files_url = [f for f in os.listdir(beat_files_dir) if f.endswith((file_extension))]
nb_beat_files = len(beat_files_url)
if nb_annotation_files != nb_beat_files or nb_annotation_files == 0:
print("failed! annotation files num:%d beat files num:%d\n" % (nb_annotation_files, nb_beat_files))
return False, 0.0
sum_score = 0.0
for i in range(nb_annotation_files):
annotation_file = annotation_files_dir + annotation_files_url[i]
beat_file = beat_files_dir + annotation_files_url[i]
if os.path.exists(beat_file) == False:
print("failed! beat file:%s not exist\n" % (beat_file))
return False, 0.0
ret, score = calc_beat_score_of_file(annotation_file, beat_file)
if ret == False:
print("failed! calc_beat_score_of_file failed for file:%s\n" % (beat_file))
return False, 0.0
sum_score = sum_score + score
avg_score = sum_score / nb_annotation_files
return True, avg_score
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="calc avg score of beat(downbeat) files")
parser.add_argument("--annotation_files_dir", required=True, help="Path to input annotation files dir", default="")
parser.add_argument("--beat_files_dir", required=True, help="Path to input beats files dir", default="")
parser.add_argument("--file_extension", required=True, help="File ext, beat or downbeat", default="")
    # get the working directory and module name, then switch to that directory
s_work_path, s_module_name = os.path.split(os.path.abspath(sys.argv[0]))
print(s_work_path, s_module_name)
os.chdir(s_work_path)
try:
args = parser.parse_args()
ret, score = calc_avg_score_of_files(args.annotation_files_dir, args.beat_files_dir, args.file_extension)
print("Final score:%.4f" % score)
except Exception as e:
traceback.print_exc()
print("Exception running beat_score_calc: [%s]" % (str(e)))
ret = False
if ret == True:
sys.exit(0)
else:
sys.exit(1)
|
[
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"beat_evaluation_toolbox.evaluate_db",
"os.chdir",
"numpy.expand_dims",
"sys.exit",
"os.path.abspath",
"numpy.loadtxt",
"traceback.print_exc"
] |
[((640, 678), 'numpy.loadtxt', 'np.loadtxt', (['annotation_file'], {'usecols': '(0)'}), '(annotation_file, usecols=0)\n', (650, 678), True, 'import numpy as np\n'), ((703, 742), 'numpy.expand_dims', 'np.expand_dims', (['data_annotation'], {'axis': '(0)'}), '(data_annotation, axis=0)\n', (717, 742), True, 'import numpy as np\n'), ((769, 801), 'numpy.loadtxt', 'np.loadtxt', (['beat_file'], {'usecols': '(0)'}), '(beat_file, usecols=0)\n', (779, 801), True, 'import numpy as np\n'), ((820, 853), 'numpy.expand_dims', 'np.expand_dims', (['data_beat'], {'axis': '(0)'}), '(data_beat, axis=0)\n', (834, 853), True, 'import numpy as np\n'), ((867, 928), 'beat_evaluation_toolbox.evaluate_db', 'be.evaluate_db', (['data_annotation', 'data_beat', '"""all"""'], {'doCI': '(False)'}), "(data_annotation, data_beat, 'all', doCI=False)\n", (881, 928), True, 'import beat_evaluation_toolbox as be\n'), ((3285, 3362), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""calc avg score of beat(downbeat) files"""'}), "(description='calc avg score of beat(downbeat) files')\n", (3308, 3362), False, 'import argparse\n'), ((3850, 3871), 'os.chdir', 'os.chdir', (['s_work_path'], {}), '(s_work_path)\n', (3858, 3871), False, 'import os\n'), ((314, 345), 'os.path.exists', 'os.path.exists', (['annotation_file'], {}), '(annotation_file)\n', (328, 345), False, 'import os\n'), ((474, 499), 'os.path.exists', 'os.path.exists', (['beat_file'], {}), '(beat_file)\n', (488, 499), False, 'import os\n'), ((1539, 1575), 'os.path.exists', 'os.path.exists', (['annotation_files_dir'], {}), '(annotation_files_dir)\n', (1553, 1575), False, 'import os\n'), ((1714, 1744), 'os.path.exists', 'os.path.exists', (['beat_files_dir'], {}), '(beat_files_dir)\n', (1728, 1744), False, 'import os\n'), ((3778, 3806), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (3793, 3806), False, 'import os\n'), ((4280, 4291), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4288, 4291), False, 'import sys\n'), ((4310, 4321), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4318, 4321), False, 'import sys\n'), ((2059, 2091), 'os.listdir', 'os.listdir', (['annotation_files_dir'], {}), '(annotation_files_dir)\n', (2069, 2091), False, 'import os\n'), ((2211, 2237), 'os.listdir', 'os.listdir', (['beat_files_dir'], {}), '(beat_files_dir)\n', (2221, 2237), False, 'import os\n'), ((2744, 2769), 'os.path.exists', 'os.path.exists', (['beat_file'], {}), '(beat_file)\n', (2758, 2769), False, 'import os\n'), ((4133, 4154), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4152, 4154), False, 'import traceback\n')]
|
import numpy as np
def permutation_value(num_list, max_num):
return sum([num_list[i]*max_num**(len(num_list)-1-i) for i in range(len(num_list))])
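# Worked example: permutation_value([1, 2, 3], 10) == 1*10**2 + 2*10**1 + 3*10**0 == 123,
# i.e. num_list is interpreted as digits in base max_num.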
def permutation_update(in_list):
pass
init_array = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
init_array = init_array.astype('int64')
# UNSOLVED!
|
[
"numpy.array"
] |
[((222, 262), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (230, 262), True, 'import numpy as np\n')]
|
import sys
from PyQt4 import QtGui, QtCore
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import numpy
from math import sqrt, sin, cos, pi
from geometry.quaternion import Quaternion
class StripChart(QtGui.QWidget):
""" a class to implement a stripchart using the pyqtgraph plotting
utilities
"""
def __init__(self, plt=None, dim=1, relative_time=False, max_pts=200):
super(StripChart, self).__init__()
self.plt = plt
self.curve = []
self.xdata = []
self.ydata = []
for i in range(0,dim):
self.curve.append(plt.plot(pen='w'))
self.ydata.append([])
self.xdata.append([])
self.dim = dim
self.max_pts = max_pts
self._npts = [0,] * dim
self.pens = [None,] * dim
self.brushes = [None,] * dim
self._use_relative_time = relative_time
def _update_plot(self):
offset = 0.0
if self._use_relative_time:
for xd in self.xdata:
if len(xd) == 0:
continue
offset = max(numpy.amax(numpy.array(xd)), offset)
for xd,yd,c,i in zip(self.xdata, self.ydata, self.curve, range(self.dim)):
if numpy.isscalar(xd):
xd = [xd,]
yd = [yd,]
plot_xdata = numpy.array(xd) - offset
plot_ydata = numpy.array(yd)
c.setData(x=plot_xdata, y=plot_ydata)
if self.brushes is not None:
assert len(self.brushes) == self.dim, "Number of brush\
collections must match number of samples"
nbrush = 0
if self.brushes[i] is not None:
c.setBrush(self.brushes[i])
if self.pens is not None:
assert len(self.pens) == self.dim, "Number of pens\
collections must match number of samples"
npen = 0
if self.pens[i] is not None:
c.setPen(self.pens[i])
def update_data(self, x_new, y_new, idx=None, brushes=None, pens=None):
if idx is None:
idx = range(0,self.dim)
if self.dim == 1:
if not isinstance(x_new, tuple):
x_new = (x_new,)
if not isinstance(y_new, tuple):
y_new = (y_new,)
for x,y,i in zip(x_new, y_new, idx):
if self._npts[i] < self.max_pts:
if numpy.isnan(x) or numpy.isnan(y):
continue
self.xdata[i] = numpy.append(self.xdata[i], x)
self.ydata[i] = numpy.append(self.ydata[i], y)
self._npts[i] += 1
else:
if numpy.isnan(x) or numpy.isnan(y):
continue
self.xdata[i] = numpy.append(self.xdata[i][1:], x)
self.ydata[i] = numpy.append(self.ydata[i][1:], y)
if brushes is None:
brushes = [None,]*len(idx)
if pens is None:
pens = [None,]*len(idx)
for b,p,i in zip(brushes, pens, idx):
self.brushes[i] = b
self.pens[i] = p
self._update_plot()
class ImageDisplay(QtGui.QWidget):
""" image view widget
"""
def __init__(self, img=None, img_data=None, is_colorize=False):
super(ImageDisplay, self).__init__()
self._img_view = img
if img_data is not None:
self.img_data = img_data
else:
self.img_data = numpy.zeros((2,2))
self.cmax = 1.0
self.cmin = 0.0
self.is_colorize = is_colorize
def update_data(self, img_new=None):
if img_new is None or self._img_view is None:
return
self.img_data = img_new
if self.is_colorize:
self._img_view.setImage(self.colorize())
else:
self._img_view.setImage(self.img_data)
def colorize(self):
len_x = self.img_data.shape[0]
len_y = self.img_data.shape[1]
c = numpy.zeros((len_x, len_y, 3))
crange = self.cmax - self.cmin
c[:,:,0] = (self.img_data - self.cmin)/crange
c[:,:,1] = 1 - abs(self.img_data/crange)
c[:,:,2] = -(self.img_data - self.cmax)/crange
return c
class xyPlot(QtGui.QWidget):
""" Plot Widget for x-y data
"""
def __init__(self, plt=None, dim=1, xlim=None, ylim=None):
super(xyPlot, self).__init__()
self.plt = plt
self.curve = []
self.xdata = []
self.ydata = []
self.pens = [None,] * dim
self.brushes = [None,] * dim
self._xlim = xlim
self._ylim = ylim
self.dim = dim
def _update_plot(self):
for xd,yd,c,i in zip(self.xdata, self.ydata, self.curve, range(self.dim)):
if numpy.isscalar(xd):
xd = [xd,]
yd = [yd,]
if self.size is not None:
c.setData(x=xd, y=yd, size=self.size)
else:
c.setData(x=xd, y=yd)
if self.brushes is not None:
assert len(self.brushes) == self.dim, "Number of brush\
collections must match number of samples"
nbrush = 0
if self.brushes[i] is not None:
c.setBrush(self.brushes[i])
if self.pens is not None:
assert len(self.pens) == self.dim, "Number of pens\
collections must match number of samples"
npen = 0
if self.pens[i] is not None:
c.setPen(self.pens[i])
if self._xlim:
self.plt.setXRange(self._xlim[0], self._xlim[1])
if self._ylim:
self.plt.setYRange(self._ylim[0], self._ylim[1])
def update_data(self, x_new, y_new, curve_index=None, auto_update=True,
brushes=None, pens=None, size=None):
"""Update the xy plot
Arguments:
x_new: new x data to update. Must either be a numpy array or a tuple
of numpy arrays
y_new: new y data to update. Must either be a numpy array or a tuple
of numpy arrays
curve_index: tuple of indices which indicate the curves which the
tuples in x_new and y_new should update
auto_update: optional, boolean indicating if we should redraw the
plot, defaults to True
brushes: tuple of brushes corresponding to the data to update
pens: tuple of pens corresponding to the data to update
            size: optional point size forwarded to setData (mainly used by scatter plot items)
Returns:
no returns
"""
assert type(x_new) is type(y_new), "x and y data must either be\
numpy arrays or tuples containing them"
if type(x_new) is not tuple:
            assert self.dim == 1, "must specify tuple of data if there is\
                more than one data series"
x_new = (x_new,)
y_new = (y_new,)
curve_index = (0,)
assert curve_index is not None, "must specify the data series that\
correspond to data in x_new and y_new tuples"
if brushes is None:
brushes = [None,]*len(curve_index)
if pens is None:
pens = [None,]*len(curve_index)
for xd,yd,i,b,p in zip(x_new, y_new, curve_index, brushes, pens):
self.xdata[i] = xd
self.ydata[i] = yd
if b is not None:
self.brushes[i] = b
if p is not None:
self.pens[i] = p
self.size = size
if auto_update:
self._update_plot()
class ScatterPlot(xyPlot):
""" Widget for scatterplots. Inherits from xyPlot
"""
def __init__(self, plt=None, dim=1):
super(ScatterPlot, self).__init__(plt, dim)
for i in range(0, self.dim):
self.curve.append(pg.ScatterPlotItem(pen='w'))
plt.addItem(self.curve[-1])
self.ydata.append(numpy.zeros((1,)))
self.xdata.append(numpy.zeros((1,)))
class LinePlot(xyPlot):
""" Widget for lineplots. Inherits from xyPlot
"""
def __init__(self, plt=None, dim=1, xlim=None, ylim=None):
super(LinePlot, self).__init__(plt, dim, xlim, ylim)
for i in range(0, self.dim):
self.curve.append(pg.PlotCurveItem(pen='w'))
plt.addItem(self.curve[-1])
self.ydata.append(numpy.zeros((1,)))
self.xdata.append(numpy.zeros((1,)))
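# Minimal usage sketch (not part of the original module; plot_item, t, altitude
# and speed are hypothetical, and a running QApplication plus a pyqtgraph
# PlotItem are assumed):
#   chart = StripChart(plt=plot_item, dim=2, relative_time=True, max_pts=500)
#   chart.update_data((t, t), (altitude, speed), idx=(0, 1))
# Each call appends one sample per curve and redraws, dropping the oldest
# points once max_pts is reached.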
|
[
"pyqtgraph.PlotCurveItem",
"numpy.isscalar",
"pyqtgraph.ScatterPlotItem",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.isnan"
] |
[((4031, 4061), 'numpy.zeros', 'numpy.zeros', (['(len_x, len_y, 3)'], {}), '((len_x, len_y, 3))\n', (4042, 4061), False, 'import numpy\n'), ((1242, 1260), 'numpy.isscalar', 'numpy.isscalar', (['xd'], {}), '(xd)\n', (1256, 1260), False, 'import numpy\n'), ((1392, 1407), 'numpy.array', 'numpy.array', (['yd'], {}), '(yd)\n', (1403, 1407), False, 'import numpy\n'), ((3516, 3535), 'numpy.zeros', 'numpy.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3527, 3535), False, 'import numpy\n'), ((4820, 4838), 'numpy.isscalar', 'numpy.isscalar', (['xd'], {}), '(xd)\n', (4834, 4838), False, 'import numpy\n'), ((1342, 1357), 'numpy.array', 'numpy.array', (['xd'], {}), '(xd)\n', (1353, 1357), False, 'import numpy\n'), ((2564, 2594), 'numpy.append', 'numpy.append', (['self.xdata[i]', 'x'], {}), '(self.xdata[i], x)\n', (2576, 2594), False, 'import numpy\n'), ((2627, 2657), 'numpy.append', 'numpy.append', (['self.ydata[i]', 'y'], {}), '(self.ydata[i], y)\n', (2639, 2657), False, 'import numpy\n'), ((2825, 2859), 'numpy.append', 'numpy.append', (['self.xdata[i][1:]', 'x'], {}), '(self.xdata[i][1:], x)\n', (2837, 2859), False, 'import numpy\n'), ((2892, 2926), 'numpy.append', 'numpy.append', (['self.ydata[i][1:]', 'y'], {}), '(self.ydata[i][1:], y)\n', (2904, 2926), False, 'import numpy\n'), ((7918, 7945), 'pyqtgraph.ScatterPlotItem', 'pg.ScatterPlotItem', ([], {'pen': '"""w"""'}), "(pen='w')\n", (7936, 7945), True, 'import pyqtgraph as pg\n'), ((8017, 8034), 'numpy.zeros', 'numpy.zeros', (['(1,)'], {}), '((1,))\n', (8028, 8034), False, 'import numpy\n'), ((8066, 8083), 'numpy.zeros', 'numpy.zeros', (['(1,)'], {}), '((1,))\n', (8077, 8083), False, 'import numpy\n'), ((8361, 8386), 'pyqtgraph.PlotCurveItem', 'pg.PlotCurveItem', ([], {'pen': '"""w"""'}), "(pen='w')\n", (8377, 8386), True, 'import pyqtgraph as pg\n'), ((8458, 8475), 'numpy.zeros', 'numpy.zeros', (['(1,)'], {}), '((1,))\n', (8469, 8475), False, 'import numpy\n'), ((8507, 8524), 'numpy.zeros', 'numpy.zeros', (['(1,)'], {}), '((1,))\n', (8518, 8524), False, 'import numpy\n'), ((2469, 2483), 'numpy.isnan', 'numpy.isnan', (['x'], {}), '(x)\n', (2480, 2483), False, 'import numpy\n'), ((2487, 2501), 'numpy.isnan', 'numpy.isnan', (['y'], {}), '(y)\n', (2498, 2501), False, 'import numpy\n'), ((2730, 2744), 'numpy.isnan', 'numpy.isnan', (['x'], {}), '(x)\n', (2741, 2744), False, 'import numpy\n'), ((2748, 2762), 'numpy.isnan', 'numpy.isnan', (['y'], {}), '(y)\n', (2759, 2762), False, 'import numpy\n'), ((1116, 1131), 'numpy.array', 'numpy.array', (['xd'], {}), '(xd)\n', (1127, 1131), False, 'import numpy\n')]
|
from typing import Tuple, Dict
import h5py
import pandas as pd
import numpy as np
from loguru import logger
from ruamel.yaml import YAML
from joblib import load, dump
from umda import EmbeddingModel
from sklearn.gaussian_process import GaussianProcessRegressor, kernels
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import KFold, GridSearchCV, ShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
USE_DASK = False
models = {
"linear_regression": [
LinearRegression(fit_intercept=False),
[{"normalize": [True, False],}],
],
"svr": [
SVR(),
[
{
"kernel": ["rbf",],# "poly"],
#"degree": [2, 3, 4],
"C": 10**np.linspace(1.5, 2., 20),
"gamma": ["auto", 0.05, 0.1],
"epsilon": 10**np.linspace(-3., -1., 20),
}
],
],
"knn": [
KNeighborsRegressor(),
[
{
"n_neighbors": [2, 4, 10, 15, 30, 50, 70],
"metric": ["cosine", "euclidean",],
"weights": ["uniform", "distance"]
}
],
],
"rfr": [
RandomForestRegressor(max_features=None, random_state=1205),
[
{"n_estimators": [10, 20, 50, 80, 100, 125, 150, 200],
"max_leaf_nodes": [None, 5, 10, 15, 20, 40],
"min_samples_leaf": [1, 3, 5, 10, 15, 20, 25, 35],
"max_depth": [None, 5, 10, 15, 20]
}
],
],
"gbr": [
GradientBoostingRegressor(random_state=1205),
[
{
"learning_rate": 10 ** np.linspace(-3.0, 1.0, 20),
"n_estimators": [5, 10, 30, 50, 80, 100, 125, 150, 200],
"subsample": [0.2, 0.4, 0.6, 0.8, 1.],
"max_depth": [1, 2, 3, 4, 5, 6]
}
],
],
"gpr": [
None,
[{"alpha": 10 ** np.linspace(-10.0, 1.0, 5), "n_restarts_optimizer": [5, 10, 15, 20]}],
],
}
def standardize_test(
estimator: "sklearn model",
search_params: Tuple[Dict],
data: Tuple[np.ndarray, np.ndarray],
seed: int = 42,
n_jobs: int = 8,
cv: int = 20
):
# split data into X and y for regression
X, y = data
# Manually specify 10-fold cross-validation for the grid search
kfold = KFold(cv, random_state=seed, shuffle=True)
grid_search = GridSearchCV(
estimator,
search_params,
scoring="neg_mean_squared_error",
cv=kfold,
n_jobs=n_jobs,
)
# run the grid search
grid_search.fit(X, y)
# give some summary statistics
y_mask = y != 0.0
y_pred = grid_search.best_estimator_.predict(X)
# masked error is excluding negative examples
mse = metrics.mean_squared_error(y_pred, y)
masked_mse = metrics.mean_squared_error(y_pred[y_mask], y[y_mask])
r2 = metrics.r2_score(y, y_pred)
errors = {"mse": float(mse), "masked_mse": float(masked_mse), "r^2": float(r2)}
return grid_search, grid_search.best_estimator_, errors
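# Illustrative call (hypothetical X/y arrays; each `models` entry pairs an
# estimator with its grid-search parameter grid):
#   estimator, grid = models["knn"]
#   search, best_model, errors = standardize_test(estimator, grid, (X, y), n_jobs=4)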
def mask_distant_species(
target: np.ndarray, fullset: np.ndarray, upper_percentile: float = 97.
) -> np.ndarray:
distances = cosine_distances(target, fullset)
logger.info(f"Min/max distance: {distances.min()}/{distances.max()}")
logger.info(f"Mean/std distance: {distances.mean()}/{distances.std()}")
lower, mean, upper = np.percentile(distances, [3., 50., upper_percentile])
logger.info(f"3%/50%/{upper_percentile}%: {lower:.3f}/{mean:.3f}/{upper:.3f}")
dist_mask = distances.mean(axis=0) > upper
return dist_mask
def main(
prediction_output: str,
seed: int = 42,
distance_threshold: float = 0.8,
n_jobs: int = 8,
cv: int = 10
):
logger.add("model_training.log")
logger.info(f"Using seed {seed}, cosine distance zeroing: {distance_threshold}")
logger.info(f"Cross-validation will be done with {n_jobs} workers.")
#rng = np.random.default_rng(seed)
logger.info("Loading data")
# prepare and load data
data = h5py.File("../data/processed/pipeline_embeddings_70.h5", "r")
original = h5py.File("../data/processed/smiles_embeddings_300.h5", "r")
pipeline = load("../models/embedding_pipeline.pkl")
pca = load("../models/pca_model.pkl")
embedding_model = load("../models/EmbeddingModel.pkl")
## load in the TMC-1 data and grab the embedding vectors
tmc1_df = pd.read_pickle("../data/processed/tmc1_ready.pkl")
# ignore H2 lol
#tmc1_df = tmc1_df.loc[tmc1_df["canonical"] != "[HH]"]
tmc1_df.reset_index(inplace=True, drop=True)
## get into NumPy array
#tmc1_vecs = np.vstack(tmc1_df["vectors"])
##indices = np.arange(len(data["pca"]))
#for step in pipeline.steps[:2]:
# tmc1_vecs = step[1].transform(tmc1_vecs)
## get the TMC-1 cluster IDs
#tmc1_cluster_ids = pipeline.steps[-1][1].predict(tmc1_vecs)
tmc1_vecs = np.vstack([embedding_model.vectorize(smi) for smi in tmc1_df["canonical"]])
tmc1_cluster_ids = np.array([embedding_model.cluster(smi) for smi in tmc1_df["canonical"]])
#if USE_DASK:
# tmc1_cluster_ids = tmc1_cluster_ids.compute()
## holdout_cluster_ids = pipeline.predict(holdout_vecs).compute()
## compute the PCA embedding for the TMC-1 molecules
#tmc1_embedding = pipeline.steps[0][1].transform(tmc1_vecs)
# holdout_embedding = pipeline.steps[0][1].transform(holdout_vecs)
# for computational efficiency, just grab the most relevant
# molecules to TMC-1
mask = np.zeros_like(data["cluster_ids"], dtype=bool)
for i in np.unique(tmc1_cluster_ids):
mask += data["cluster_ids"][:] == i
logger.info(f"There are {mask.sum()} molecules in the TMC-1 cluster(s)")
# Extract out the molecules contained within our cluster
all_pca = (data["pca"][:])[mask, :]
logger.info(f"Shape of the PCA vectors: {all_pca.shape}")
logger.info(f"Shape of the TMC1-1 vectors: {tmc1_vecs.shape}")
pca_dim = all_pca.shape[-1]
# subset_smiles = (data["smiles"][:])[mask]
# set them as "X" and "Y" for ease of reference
X = tmc1_vecs.copy()
Y = np.log10(tmc1_df["Column density (cm^-2)"].to_numpy())
# convert to abundance
#Y = tmc1_df["Column density (cm^-2)"].to_numpy() / 1e22
# what we want to do now is to set molecules we have little chance of
# detecting to have zero column densities
dist_mask = mask_distant_species(X, all_pca, distance_threshold)
dummies = all_pca[dist_mask,:]
logger.info(f"Setting {dist_mask.sum()} entries to zero column density.")
# logger.info(f"Examples of excluded molecules: {subset_smiles[dist_mask][:5]}")
dummy_y = np.zeros(dummies.shape[0])
logger.info("Preparing training data")
# add the constrained values to our training data
train_x = np.vstack([X, dummies])
train_y = np.hstack([Y, dummy_y])
logger.info(f"Shape of X: {train_x.shape} and Y: {train_y.shape}")
results = dict()
with h5py.File(prediction_output, "a") as h5_output:
try:
del h5_output["tmc1_cluster_mask"]
except:
pass
# save the intercluster mask
h5_output["tmc1_cluster_mask"] = mask
# now do the standardized training and testing for every model
for model_name, conditions in models.items():
# see if we can delete the key
try:
del h5_output[model_name]
except KeyError:
pass
logger.info(f"Performing {cv}-fold CV on {model_name}")
model, hyperparams = conditions
# for gaussian process, define the covariance function
if model_name == "gpr":
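                # Composite covariance: a scaled RBF term plus a scaled
                # RationalQuadratic term and a constant offset; the GP fit
                # refines the hyperparameters within the given bounds.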
kernel = kernels.ConstantKernel() * kernels.RBF(
3.0, (1e-1, 10.0)
) + kernels.RationalQuadratic(
200.0, 20.0, alpha_bounds=(1e-3, 5e2), length_scale_bounds=(50.0, 1e4)
) * kernels.ConstantKernel() + kernels.ConstantKernel()
model = GaussianProcessRegressor(kernel, random_state=1205)
grid, best_model, errors = standardize_test(
model, hyperparams, (train_x, train_y), n_jobs=n_jobs, cv=cv, seed=seed
)
# log the model results
results[model_name] = errors
logger.info(f"Best errors for {model_name}: {errors}")
# pickle the CV grid
dump(grid, f"../models/{model_name}_grid.pkl")
cv_df = pd.DataFrame.from_dict(grid.cv_results_)
cv_df.to_csv(f"../models/{model_name}_grid_summary.csv", index=False)
logger.info(f"Caching predictions for best model")
if model_name != "gpr":
pred_Y = best_model.predict(all_pca)
h5_output[f"{model_name}"] = pred_Y
else:
pred_Y, pred_std = best_model.predict(all_pca, return_std=True)
gpr_tmc_y, gpr_tmc_cov = best_model.predict(
X, return_cov=True
)
# save a bunch of stuff for Gaussian Process
for target, name in zip(
[pred_Y, pred_std, gpr_tmc_y, gpr_tmc_cov],
["all", "all_std", "tmc_reproduction", "tmc_cov"],
):
try:
del h5_output[f"{model_name}_{name}"]
except KeyError:
pass
h5_output[f"{model_name}_{name}"] = target
tmc1_df[model_name] = best_model.predict(X)
tmc1_df.to_csv("tmc1_results.csv", index=False)
# save the errors for later reporting
yaml = YAML()
with open("../models/training_errors.yml", "w+") as write_file:
yaml.dump(results, write_file)
if __name__ == "__main__":
params = {
"prediction_output": "../data/processed/model_predictions.h5",
"seed": 42,
"distance_threshold": 99.98,
"n_jobs": 16,
"cv": 10
}
main(**params)
|
[
"sklearn.model_selection.GridSearchCV",
"sklearn.metrics.pairwise.cosine_distances",
"numpy.hstack",
"ruamel.yaml.YAML",
"sklearn.model_selection.KFold",
"sklearn.metrics.r2_score",
"pandas.read_pickle",
"loguru.logger.add",
"sklearn.gaussian_process.GaussianProcessRegressor",
"sklearn.ensemble.RandomForestRegressor",
"pandas.DataFrame.from_dict",
"sklearn.gaussian_process.kernels.ConstantKernel",
"numpy.linspace",
"numpy.vstack",
"joblib.load",
"sklearn.svm.SVR",
"sklearn.ensemble.GradientBoostingRegressor",
"joblib.dump",
"sklearn.gaussian_process.kernels.RBF",
"sklearn.metrics.mean_squared_error",
"h5py.File",
"sklearn.gaussian_process.kernels.RationalQuadratic",
"sklearn.linear_model.LinearRegression",
"numpy.unique",
"loguru.logger.info",
"sklearn.neighbors.KNeighborsRegressor",
"numpy.zeros",
"numpy.percentile",
"numpy.zeros_like"
] |
[((2628, 2670), 'sklearn.model_selection.KFold', 'KFold', (['cv'], {'random_state': 'seed', 'shuffle': '(True)'}), '(cv, random_state=seed, shuffle=True)\n', (2633, 2670), False, 'from sklearn.model_selection import KFold, GridSearchCV, ShuffleSplit\n'), ((2689, 2791), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['estimator', 'search_params'], {'scoring': '"""neg_mean_squared_error"""', 'cv': 'kfold', 'n_jobs': 'n_jobs'}), "(estimator, search_params, scoring='neg_mean_squared_error', cv\n =kfold, n_jobs=n_jobs)\n", (2701, 2791), False, 'from sklearn.model_selection import KFold, GridSearchCV, ShuffleSplit\n'), ((3055, 3092), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_pred', 'y'], {}), '(y_pred, y)\n', (3081, 3092), False, 'from sklearn import metrics\n'), ((3110, 3163), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_pred[y_mask]', 'y[y_mask]'], {}), '(y_pred[y_mask], y[y_mask])\n', (3136, 3163), False, 'from sklearn import metrics\n'), ((3173, 3200), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (3189, 3200), False, 'from sklearn import metrics\n'), ((3485, 3518), 'sklearn.metrics.pairwise.cosine_distances', 'cosine_distances', (['target', 'fullset'], {}), '(target, fullset)\n', (3501, 3518), False, 'from sklearn.metrics.pairwise import cosine_distances, euclidean_distances\n'), ((3694, 3749), 'numpy.percentile', 'np.percentile', (['distances', '[3.0, 50.0, upper_percentile]'], {}), '(distances, [3.0, 50.0, upper_percentile])\n', (3707, 3749), True, 'import numpy as np\n'), ((3752, 3830), 'loguru.logger.info', 'logger.info', (['f"""3%/50%/{upper_percentile}%: {lower:.3f}/{mean:.3f}/{upper:.3f}"""'], {}), "(f'3%/50%/{upper_percentile}%: {lower:.3f}/{mean:.3f}/{upper:.3f}')\n", (3763, 3830), False, 'from loguru import logger\n'), ((4041, 4073), 'loguru.logger.add', 'logger.add', (['"""model_training.log"""'], {}), "('model_training.log')\n", (4051, 4073), False, 'from loguru import logger\n'), ((4078, 4163), 'loguru.logger.info', 'logger.info', (['f"""Using seed {seed}, cosine distance zeroing: {distance_threshold}"""'], {}), "(f'Using seed {seed}, cosine distance zeroing: {distance_threshold}'\n )\n", (4089, 4163), False, 'from loguru import logger\n'), ((4163, 4231), 'loguru.logger.info', 'logger.info', (['f"""Cross-validation will be done with {n_jobs} workers."""'], {}), "(f'Cross-validation will be done with {n_jobs} workers.')\n", (4174, 4231), False, 'from loguru import logger\n'), ((4275, 4302), 'loguru.logger.info', 'logger.info', (['"""Loading data"""'], {}), "('Loading data')\n", (4286, 4302), False, 'from loguru import logger\n'), ((4342, 4403), 'h5py.File', 'h5py.File', (['"""../data/processed/pipeline_embeddings_70.h5"""', '"""r"""'], {}), "('../data/processed/pipeline_embeddings_70.h5', 'r')\n", (4351, 4403), False, 'import h5py\n'), ((4419, 4479), 'h5py.File', 'h5py.File', (['"""../data/processed/smiles_embeddings_300.h5"""', '"""r"""'], {}), "('../data/processed/smiles_embeddings_300.h5', 'r')\n", (4428, 4479), False, 'import h5py\n'), ((4495, 4535), 'joblib.load', 'load', (['"""../models/embedding_pipeline.pkl"""'], {}), "('../models/embedding_pipeline.pkl')\n", (4499, 4535), False, 'from joblib import load, dump\n'), ((4546, 4577), 'joblib.load', 'load', (['"""../models/pca_model.pkl"""'], {}), "('../models/pca_model.pkl')\n", (4550, 4577), False, 'from joblib import load, dump\n'), ((4600, 4636), 'joblib.load', 'load', (['"""../models/EmbeddingModel.pkl"""'], 
{}), "('../models/EmbeddingModel.pkl')\n", (4604, 4636), False, 'from joblib import load, dump\n'), ((4712, 4762), 'pandas.read_pickle', 'pd.read_pickle', (['"""../data/processed/tmc1_ready.pkl"""'], {}), "('../data/processed/tmc1_ready.pkl')\n", (4726, 4762), True, 'import pandas as pd\n'), ((5818, 5864), 'numpy.zeros_like', 'np.zeros_like', (["data['cluster_ids']"], {'dtype': 'bool'}), "(data['cluster_ids'], dtype=bool)\n", (5831, 5864), True, 'import numpy as np\n'), ((5878, 5905), 'numpy.unique', 'np.unique', (['tmc1_cluster_ids'], {}), '(tmc1_cluster_ids)\n', (5887, 5905), True, 'import numpy as np\n'), ((6133, 6190), 'loguru.logger.info', 'logger.info', (['f"""Shape of the PCA vectors: {all_pca.shape}"""'], {}), "(f'Shape of the PCA vectors: {all_pca.shape}')\n", (6144, 6190), False, 'from loguru import logger\n'), ((6195, 6257), 'loguru.logger.info', 'logger.info', (['f"""Shape of the TMC1-1 vectors: {tmc1_vecs.shape}"""'], {}), "(f'Shape of the TMC1-1 vectors: {tmc1_vecs.shape}')\n", (6206, 6257), False, 'from loguru import logger\n'), ((6965, 6991), 'numpy.zeros', 'np.zeros', (['dummies.shape[0]'], {}), '(dummies.shape[0])\n', (6973, 6991), True, 'import numpy as np\n'), ((6996, 7034), 'loguru.logger.info', 'logger.info', (['"""Preparing training data"""'], {}), "('Preparing training data')\n", (7007, 7034), False, 'from loguru import logger\n'), ((7103, 7126), 'numpy.vstack', 'np.vstack', (['[X, dummies]'], {}), '([X, dummies])\n', (7112, 7126), True, 'import numpy as np\n'), ((7141, 7164), 'numpy.hstack', 'np.hstack', (['[Y, dummy_y]'], {}), '([Y, dummy_y])\n', (7150, 7164), True, 'import numpy as np\n'), ((7169, 7235), 'loguru.logger.info', 'logger.info', (['f"""Shape of X: {train_x.shape} and Y: {train_y.shape}"""'], {}), "(f'Shape of X: {train_x.shape} and Y: {train_y.shape}')\n", (7180, 7235), False, 'from loguru import logger\n'), ((9974, 9980), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (9978, 9980), False, 'from ruamel.yaml import YAML\n'), ((764, 801), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (780, 801), False, 'from sklearn.linear_model import LinearRegression\n'), ((872, 877), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (875, 877), False, 'from sklearn.svm import SVR\n'), ((1195, 1216), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {}), '()\n', (1214, 1216), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((1457, 1516), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'max_features': 'None', 'random_state': '(1205)'}), '(max_features=None, random_state=1205)\n', (1478, 1516), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((1819, 1863), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': '(1205)'}), '(random_state=1205)\n', (1844, 1863), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((7266, 7299), 'h5py.File', 'h5py.File', (['prediction_output', '"""a"""'], {}), "(prediction_output, 'a')\n", (7275, 7299), False, 'import h5py\n'), ((7779, 7834), 'loguru.logger.info', 'logger.info', (['f"""Performing {cv}-fold CV on {model_name}"""'], {}), "(f'Performing {cv}-fold CV on {model_name}')\n", (7790, 7834), False, 'from loguru import logger\n'), ((8623, 8677), 'loguru.logger.info', 'logger.info', (['f"""Best errors for {model_name}: {errors}"""'], {}), "(f'Best errors for {model_name}: 
{errors}')\n", (8634, 8677), False, 'from loguru import logger\n'), ((8723, 8769), 'joblib.dump', 'dump', (['grid', 'f"""../models/{model_name}_grid.pkl"""'], {}), "(grid, f'../models/{model_name}_grid.pkl')\n", (8727, 8769), False, 'from joblib import load, dump\n'), ((8790, 8830), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['grid.cv_results_'], {}), '(grid.cv_results_)\n', (8812, 8830), True, 'import pandas as pd\n'), ((8925, 8975), 'loguru.logger.info', 'logger.info', (['f"""Caching predictions for best model"""'], {}), "(f'Caching predictions for best model')\n", (8936, 8975), False, 'from loguru import logger\n'), ((8323, 8374), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', (['kernel'], {'random_state': '(1205)'}), '(kernel, random_state=1205)\n', (8347, 8374), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((1012, 1037), 'numpy.linspace', 'np.linspace', (['(1.5)', '(2.0)', '(20)'], {}), '(1.5, 2.0, 20)\n', (1023, 1037), True, 'import numpy as np\n'), ((1115, 1142), 'numpy.linspace', 'np.linspace', (['(-3.0)', '(-1.0)', '(20)'], {}), '(-3.0, -1.0, 20)\n', (1126, 1142), True, 'import numpy as np\n'), ((1928, 1954), 'numpy.linspace', 'np.linspace', (['(-3.0)', '(1.0)', '(20)'], {}), '(-3.0, 1.0, 20)\n', (1939, 1954), True, 'import numpy as np\n'), ((2217, 2243), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(1.0)', '(5)'], {}), '(-10.0, 1.0, 5)\n', (2228, 2243), True, 'import numpy as np\n'), ((8274, 8298), 'sklearn.gaussian_process.kernels.ConstantKernel', 'kernels.ConstantKernel', ([], {}), '()\n', (8296, 8298), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((8007, 8031), 'sklearn.gaussian_process.kernels.ConstantKernel', 'kernels.ConstantKernel', ([], {}), '()\n', (8029, 8031), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((8034, 8063), 'sklearn.gaussian_process.kernels.RBF', 'kernels.RBF', (['(3.0)', '(0.1, 10.0)'], {}), '(3.0, (0.1, 10.0))\n', (8045, 8063), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((8105, 8213), 'sklearn.gaussian_process.kernels.RationalQuadratic', 'kernels.RationalQuadratic', (['(200.0)', '(20.0)'], {'alpha_bounds': '(0.001, 500.0)', 'length_scale_bounds': '(50.0, 10000.0)'}), '(200.0, 20.0, alpha_bounds=(0.001, 500.0),\n length_scale_bounds=(50.0, 10000.0))\n', (8130, 8213), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n'), ((8247, 8271), 'sklearn.gaussian_process.kernels.ConstantKernel', 'kernels.ConstantKernel', ([], {}), '()\n', (8269, 8271), False, 'from sklearn.gaussian_process import GaussianProcessRegressor, kernels\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from ibidem.advent_of_code.util import get_input_name
BASE_PATTERN = [0, 1, 0, -1]
def pattern_generator(i):
first = True
while True:
for v in BASE_PATTERN:
for _ in range(i + 1):
if first:
first = False
continue
yield v
def process_signal(i, signal):
return abs(sum((v * p) for (v, p) in zip(signal, pattern_generator(i)))) % 10
def process_phase(signal):
return [process_signal(i, signal) for i in range(len(signal))]
def process_phase_offset(signal):
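    # Past the signal midpoint the FFT pattern for digit i is zeros followed by
    # ones, so each output digit is simply sum(signal[i:]) % 10; a reversed
    # cumulative sum therefore computes a whole phase in linear time.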
cs = np.flip(np.cumsum(np.flip(signal)))
return np.mod(np.abs(cs), 10)
def process(data, repetitions=1, offset=None):
print("Starting new process")
signal = np.fromiter((int(c) for c in data), dtype=np.int8)
signal = np.tile(signal, repetitions)
print("Signal is {} digits long".format(len(signal)))
if offset is None:
offset = int(data[:7])
assert offset > len(signal) / 2
print("Dropping first {} digits".format(offset))
pp = process_phase_offset
else:
pp = process_phase
signal = np.array(signal[offset:])
print("Signal is {} digits long after dropping offset".format(len(signal)))
for phase in range(100):
signal = pp(signal)
if phase % 10 == 0:
print("Completed phase {}".format(phase))
return "".join(str(d) for d in signal)[:8]
def part1():
with open(get_input_name(16, 2019)) as fobj:
data = fobj.read().strip()
result = process(data, offset=0)
print("After 100 phases, the cleaned signal starts with these 8 digits: {}".format(result))
def part2():
with open(get_input_name(16, 2019)) as fobj:
data = fobj.read().strip()
result = process(data, repetitions=10000)
print("After 100 phases, the cleaned signal starts with these 8 digits: {}".format(result))
if __name__ == "__main__":
part1()
part2()
|
[
"numpy.tile",
"numpy.flip",
"numpy.abs",
"numpy.array",
"ibidem.advent_of_code.util.get_input_name"
] |
[((873, 901), 'numpy.tile', 'np.tile', (['signal', 'repetitions'], {}), '(signal, repetitions)\n', (880, 901), True, 'import numpy as np\n'), ((1195, 1220), 'numpy.array', 'np.array', (['signal[offset:]'], {}), '(signal[offset:])\n', (1203, 1220), True, 'import numpy as np\n'), ((697, 707), 'numpy.abs', 'np.abs', (['cs'], {}), '(cs)\n', (703, 707), True, 'import numpy as np\n'), ((661, 676), 'numpy.flip', 'np.flip', (['signal'], {}), '(signal)\n', (668, 676), True, 'import numpy as np\n'), ((1516, 1540), 'ibidem.advent_of_code.util.get_input_name', 'get_input_name', (['(16)', '(2019)'], {}), '(16, 2019)\n', (1530, 1540), False, 'from ibidem.advent_of_code.util import get_input_name\n'), ((1748, 1772), 'ibidem.advent_of_code.util.get_input_name', 'get_input_name', (['(16)', '(2019)'], {}), '(16, 2019)\n', (1762, 1772), False, 'from ibidem.advent_of_code.util import get_input_name\n')]
|
import pandas as pd
import os
import numpy as np
import math
import ast
sigma_list = [ math.pow(2,i) for i in range(8)]
for sigma in sigma_list:
test_case = 'mnist'
data_dict={}
data_dict_sum={}
# for key in def_data.keys():
# data_dict[key] = def_data[key].tolist()
file_name=os.path.join('saved_results_1000',test_case+str(sigma).zfill(3))
file_name_sum=file_name+ '_sum'
df = pd.read_csv(file_name,sep='\t')
df_sum = pd.read_csv(file_name_sum,sep='\t')
a0 = df['1'][0].strip('][').split(', ')
a1 = df['1'][1].strip('][').split(', ')
a2 = df['1'][2].strip('][').split(', ')
a3 = df['1'][3].strip('][').split(', ')
a4 = df['1'][4].strip('][').split(', ')
a5 = df['1'][5].strip('][').split(', ')
data_dict['deformed_labels'] = np.asarray([ int(i) for i in a0])
data_dict['original_labels'] = np.asarray([ int(i) for i in a1])
data_dict['norms'] = np.asarray([ float(i) for i in a2])
data_dict['iterations'] = np.asarray([ int(i) for i in a3])
    # bool('False') is truthy, so compare the label strings explicitly
    data_dict['overshot'] = np.asarray([i == 'True' for i in a4])
    data_dict['same_label'] = np.asarray([i == 'True' for i in a5])
data_dict_sum['test_case'] = test_case
data_dict_sum['sigma'] = sigma
data_dict_sum['def_suc_rate'] = np.sum(data_dict['same_label'])/data_dict['same_label'].shape[0]
data_dict_sum['avg_iter'] = np.sum(data_dict['iterations'])/data_dict['iterations'].shape[0]
data_dict_sum['norm'] = np.sum(data_dict['norms'])/data_dict['norms'].shape[0]
df = pd.DataFrame.from_dict(data_dict)
df_sum = pd.DataFrame.from_dict(data_dict_sum)
df.to_csv(file_name, sep='\t')
df_sum.to_csv(file_name_sum, sep='\t')
|
[
"math.pow",
"numpy.sum",
"pandas.DataFrame.from_dict",
"pandas.read_csv"
] |
[((88, 102), 'math.pow', 'math.pow', (['(2)', 'i'], {}), '(2, i)\n', (96, 102), False, 'import math\n'), ((423, 455), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'sep': '"""\t"""'}), "(file_name, sep='\\t')\n", (434, 455), True, 'import pandas as pd\n'), ((468, 504), 'pandas.read_csv', 'pd.read_csv', (['file_name_sum'], {'sep': '"""\t"""'}), "(file_name_sum, sep='\\t')\n", (479, 504), True, 'import pandas as pd\n'), ((1534, 1567), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict'], {}), '(data_dict)\n', (1556, 1567), True, 'import pandas as pd\n'), ((1581, 1618), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict_sum'], {}), '(data_dict_sum)\n', (1603, 1618), True, 'import pandas as pd\n'), ((1278, 1309), 'numpy.sum', 'np.sum', (["data_dict['same_label']"], {}), "(data_dict['same_label'])\n", (1284, 1309), True, 'import numpy as np\n'), ((1375, 1406), 'numpy.sum', 'np.sum', (["data_dict['iterations']"], {}), "(data_dict['iterations'])\n", (1381, 1406), True, 'import numpy as np\n'), ((1468, 1494), 'numpy.sum', 'np.sum', (["data_dict['norms']"], {}), "(data_dict['norms'])\n", (1474, 1494), True, 'import numpy as np\n')]
|
from keras.models import load_model
import numpy as np
import cv2
import pickle
from image_segmentation import segment_image
from neural_network import resize_to_fit
MODEL_FILENAME = "captcha_model.hdf5"
MODEL_LABELS_FILENAME = "model_labels.dat"
def solve_captcha(image):
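    # Segment the captcha into character bounding boxes, classify each crop with
    # the trained CNN, and return the annotated image plus the decoded text.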
# Load up the model labels
with open(MODEL_LABELS_FILENAME, "rb") as f:
lb = pickle.load(f)
# Load up the trained model
model = load_model(MODEL_FILENAME)
# We do not know the number of characters here
chars = segment_image(image, -1)
if len(chars) > 0:
output = cv2.merge([image] * 3)
predictions = []
# Loop over the characters
for bounding_box in chars:
x, y, w, h = bounding_box
# Extract the char from the input image
char_image = image[y - 2:y + h + 2, x - 2:x + w + 2]
# Re-size the letter image to 60x60 pixels to match training data
char_image = resize_to_fit(char_image, 60, 60)
if char_image is not None:
# Expand dimensions
char_image = np.expand_dims(char_image, axis=2)
char_image = np.expand_dims(char_image, axis=0)
# Use the model to make a prediction
prediction = model.predict(char_image)
# Convert the encoded prediction to specific label
label = lb.inverse_transform(prediction)[0]
predictions.append(label)
# draw the prediction on the output image
cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1)
cv2.putText(output, label, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
# Print captcha
captcha_text = "".join(predictions)
print("CAPTCHA is: {}".format(captcha_text))
return output, captcha_text
return None, ''
|
[
"cv2.rectangle",
"cv2.merge",
"keras.models.load_model",
"pickle.load",
"cv2.putText",
"numpy.expand_dims",
"image_segmentation.segment_image",
"neural_network.resize_to_fit"
] |
[((429, 455), 'keras.models.load_model', 'load_model', (['MODEL_FILENAME'], {}), '(MODEL_FILENAME)\n', (439, 455), False, 'from keras.models import load_model\n'), ((520, 544), 'image_segmentation.segment_image', 'segment_image', (['image', '(-1)'], {}), '(image, -1)\n', (533, 544), False, 'from image_segmentation import segment_image\n'), ((369, 383), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (380, 383), False, 'import pickle\n'), ((586, 608), 'cv2.merge', 'cv2.merge', (['([image] * 3)'], {}), '([image] * 3)\n', (595, 608), False, 'import cv2\n'), ((963, 996), 'neural_network.resize_to_fit', 'resize_to_fit', (['char_image', '(60)', '(60)'], {}), '(char_image, 60, 60)\n', (976, 996), False, 'from neural_network import resize_to_fit\n'), ((1102, 1136), 'numpy.expand_dims', 'np.expand_dims', (['char_image'], {'axis': '(2)'}), '(char_image, axis=2)\n', (1116, 1136), True, 'import numpy as np\n'), ((1166, 1200), 'numpy.expand_dims', 'np.expand_dims', (['char_image'], {'axis': '(0)'}), '(char_image, axis=0)\n', (1180, 1200), True, 'import numpy as np\n'), ((1555, 1632), 'cv2.rectangle', 'cv2.rectangle', (['output', '(x - 2, y - 2)', '(x + w + 4, y + h + 4)', '(0, 255, 0)', '(1)'], {}), '(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1)\n', (1568, 1632), False, 'import cv2\n'), ((1649, 1743), 'cv2.putText', 'cv2.putText', (['output', 'label', '(x - 5, y - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.55)', '(0, 255, 0)', '(2)'], {}), '(output, label, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55,\n (0, 255, 0), 2)\n', (1660, 1743), False, 'import cv2\n')]
|
from bs4 import BeautifulSoup as bs
import threading
import time
import numpy as np
import sys
from io import StringIO
import scrapeconfig as cng
import consoleconfig as ccng
import os
def print_html(html_test):
'''To print html containers returned by beautifulsoup4'''
try:
strhtml = str(html_test.prettify())
except:
strhtml = str(html_test)
print(strhtml)
return strhtml
def join_threads(threads: list, verbose: bool = False, blink_interval: int = cng.BLINK_INTERVAL):
'''
Join ongoing threads from threading module, has a verbose functionality showing
the number of active threads.
'''
if verbose:
space = ' '
backspace = '\b'
basemsg = "Active threads: "
basemsglen = len(basemsg)
sys.stdout.write(basemsg)
while threading.activeCount() > 1:
countstring = str(threading.activeCount()-1)
countlen = len(countstring)
sys.stdout.write(countstring)
sys.stdout.flush()
time.sleep(blink_interval)
# Clears current number of threads from terminal and "resets" cursor
sys.stdout.write(backspace*countlen + space*countlen + backspace*countlen)
sys.stdout.flush()
time.sleep(blink_interval)
sys.stdout.write(f'\r{space*basemsglen}\r')
sys.stdout.write('All threads done!')
[worker.join() for worker in threads]
return
def case_decorator(func):
'''Decorator to enforce commmon behavior for cases'''
def wrapboi(*args, **kwargs):
        os.system('cls' if os.name == 'nt' else 'clear')  # clear the terminal
retobj = func(*args, **kwargs)
time.sleep(ccng.CASE_EXIT_WAIT_TIME)
return retobj
# "Inherit docstring"
wrapboi.__doc__ = func.__doc__
return wrapboi
if __name__ == '__main__':
def test_join_threads():
'''Test join_threads using dummy threads'''
def dummywaiter(maxwait: int=10):
'''Dummy thread, sleeps for random time between 1 and maxwait (seconds)'''
time.sleep(np.random.randint(1, maxwait))
return
workers = [threading.Thread(target=dummywaiter) for i in range(500)]
[worker.start() for worker in workers]
join_threads(workers, verbose=True)
test_join_threads()
|
[
"threading.activeCount",
"time.sleep",
"numpy.random.randint",
"threading.Thread",
"sys.stdout.flush",
"sys.stdout.write"
] |
[((820, 845), 'sys.stdout.write', 'sys.stdout.write', (['basemsg'], {}), '(basemsg)\n', (836, 845), False, 'import sys\n'), ((1388, 1433), 'sys.stdout.write', 'sys.stdout.write', (["f'\\r{space * basemsglen}\\r'"], {}), "(f'\\r{space * basemsglen}\\r')\n", (1404, 1433), False, 'import sys\n'), ((1441, 1478), 'sys.stdout.write', 'sys.stdout.write', (['"""All threads done!"""'], {}), "('All threads done!')\n", (1457, 1478), False, 'import sys\n'), ((1732, 1768), 'time.sleep', 'time.sleep', (['ccng.CASE_EXIT_WAIT_TIME'], {}), '(ccng.CASE_EXIT_WAIT_TIME)\n', (1742, 1768), False, 'import time\n'), ((861, 884), 'threading.activeCount', 'threading.activeCount', ([], {}), '()\n', (882, 884), False, 'import threading\n'), ((1002, 1031), 'sys.stdout.write', 'sys.stdout.write', (['countstring'], {}), '(countstring)\n', (1018, 1031), False, 'import sys\n'), ((1045, 1063), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1061, 1063), False, 'import sys\n'), ((1079, 1105), 'time.sleep', 'time.sleep', (['blink_interval'], {}), '(blink_interval)\n', (1089, 1105), False, 'import time\n'), ((1216, 1301), 'sys.stdout.write', 'sys.stdout.write', (['(backspace * countlen + space * countlen + backspace * countlen)'], {}), '(backspace * countlen + space * countlen + backspace * countlen\n )\n', (1232, 1301), False, 'import sys\n'), ((1304, 1322), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1320, 1322), False, 'import sys\n'), ((1350, 1376), 'time.sleep', 'time.sleep', (['blink_interval'], {}), '(blink_interval)\n', (1360, 1376), False, 'import time\n'), ((2221, 2257), 'threading.Thread', 'threading.Thread', ([], {'target': 'dummywaiter'}), '(target=dummywaiter)\n', (2237, 2257), False, 'import threading\n'), ((2148, 2177), 'numpy.random.randint', 'np.random.randint', (['(1)', 'maxwait'], {}), '(1, maxwait)\n', (2165, 2177), True, 'import numpy as np\n'), ((921, 944), 'threading.activeCount', 'threading.activeCount', ([], {}), '()\n', (942, 944), False, 'import threading\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='talk')
palette = sns.color_palette()
beta = 1
landa = 1./beta
reps = 25
pois = np.random.exponential(beta, reps)
pois = pois.cumsum()
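# Pre-scheduled random arrivals: arrival i is scheduled at i*beta and perturbed
# by exponential jitter, re-centred by subtracting lambda.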
psra = np.arange(reps)*beta + np.random.exponential(beta, reps) - landa
psra.sort()
f, ax = plt.subplots(1, 2, sharex=True, figsize=(24, 10))
yy = np.arange(reps) + 1
for x, y in zip(pois, yy):
ax[0].plot([x, x], [0, y], c=palette[0], ls='--', lw=2)
ax[0].step(pois, yy, lw=5)
ax[0].scatter(pois, np.zeros(reps))
ax[0].set_title(r'Poisson arrivals, $\lambda$ = {:.1f}'.format(landa))
ax[0].set_xlabel('time')
ax[0].set_ylabel('count')
for x, y in zip(psra, yy):
ax[1].plot([x, x], [0, y], c=palette[0], ls='--', lw=2)
ax[1].step(psra, yy, lw=5)
ax[1].scatter(psra, np.zeros(reps))
title = r'Pre-scheduled random arrivals, $\sigma$ = {:.1f}'.format(landa)
ax[1].set_title(title)
ax[1].set_xlabel('time')
plt.savefig('pois_psra.png')
|
[
"seaborn.set",
"matplotlib.pyplot.savefig",
"seaborn.color_palette",
"numpy.random.exponential",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.arange"
] |
[((74, 97), 'seaborn.set', 'sns.set', ([], {'context': '"""talk"""'}), "(context='talk')\n", (81, 97), True, 'import seaborn as sns\n'), ((108, 127), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (125, 127), True, 'import seaborn as sns\n'), ((172, 205), 'numpy.random.exponential', 'np.random.exponential', (['beta', 'reps'], {}), '(beta, reps)\n', (193, 205), True, 'import numpy as np\n'), ((321, 370), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)', 'figsize': '(24, 10)'}), '(1, 2, sharex=True, figsize=(24, 10))\n', (333, 370), True, 'import matplotlib.pyplot as plt\n'), ((944, 972), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pois_psra.png"""'], {}), "('pois_psra.png')\n", (955, 972), True, 'import matplotlib.pyplot as plt\n'), ((377, 392), 'numpy.arange', 'np.arange', (['reps'], {}), '(reps)\n', (386, 392), True, 'import numpy as np\n'), ((532, 546), 'numpy.zeros', 'np.zeros', (['reps'], {}), '(reps)\n', (540, 546), True, 'import numpy as np\n'), ((805, 819), 'numpy.zeros', 'np.zeros', (['reps'], {}), '(reps)\n', (813, 819), True, 'import numpy as np\n'), ((258, 291), 'numpy.random.exponential', 'np.random.exponential', (['beta', 'reps'], {}), '(beta, reps)\n', (279, 291), True, 'import numpy as np\n'), ((235, 250), 'numpy.arange', 'np.arange', (['reps'], {}), '(reps)\n', (244, 250), True, 'import numpy as np\n')]
|
import classy.datasets
from .Struct import Struct
import numpy as np
from numpy import sqrt,sum,exp,pi,min,max,linspace
def normal(x,mu,sd):
return 1.0/sqrt(2*pi*sd**2)*exp(-(x-mu)**2/(2*sd**2))
def overlap(means_,covars_):
# http://en.wikipedia.org/wiki/Bhattacharyya_distance
# overlap is a dot product
s1,s2=covars_
m1,m2=means_
minx=min([m1-4*s1,m2-4*s2])
maxx=min([m1+4*s1,m2+4*s2])
x=linspace(minx,maxx,1000)
dx=x[1]-x[0]
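    # Bhattacharyya coefficient: the integral of sqrt(p1(x)*p2(x)) dx,
    # approximated here by a Riemann sum on a 1000-point grid.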
BC=sum(dx*sqrt(normal(x,m1,s1)*normal(x,m2,s2)))
return BC
def GMM_features_from_1D_vectors2(origdata,number_of_gaussians_list,verbose=True):
from sklearn.mixture import GMM
data=Struct(origdata)
data.vectors=[]
data.feature_names=[]
for M in number_of_gaussians_list:
for G in range(M):
data.feature_names+=['M%d mu%d' % (M,G+1),'M%d sd%d' % (M,G+1)]
for X in origdata.vectors:
vec=[]
for M in number_of_gaussians_list:
model = GMM(M).fit(X)
means=model.means_.ravel()
stddevs=model.covars_.ravel()
for m,s in zip(means,stddevs):
vec.append(m)
vec.append(s)
data.vectors.append(vec)
data.vectors=np.array(data.vectors)
if verbose:
classy.datasets.summary(data)
return data
def GMM_features_from_1D_vectors(origdata,number_of_gaussians,verbose=True):
from sklearn.mixture import GMM
data=Struct(origdata)
data.vectors=[]
data.feature_names=[]
for i in range(number_of_gaussians):
data.feature_names+=['mu%d' % (i+1),'sd%d' % (i+1)]
L=number_of_gaussians
for i in range(L):
for j in range(i+1,L):
data.feature_names+=['overlap %d-%d' % (i+1,j+1)]
for X in origdata.vectors:
model = GMM(number_of_gaussians).fit(X)
means=model.means_.ravel()
stddevs=model.covars_.ravel()
vec=[]
for m,s in zip(means,stddevs):
vec.append(m)
vec.append(s)
L=number_of_gaussians
for i in range(L):
for j in range(i+1,L):
vec.append(overlap([means[i],means[j]],[stddevs[i],stddevs[j]]))
data.vectors.append(vec)
data.vectors=np.array(data.vectors)
if verbose:
classy.datasets.summary(data)
return data
|
[
"numpy.sqrt",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"numpy.min",
"sklearn.mixture.GMM"
] |
[((365, 396), 'numpy.min', 'min', (['[m1 - 4 * s1, m2 - 4 * s2]'], {}), '([m1 - 4 * s1, m2 - 4 * s2])\n', (368, 396), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((397, 428), 'numpy.min', 'min', (['[m1 + 4 * s1, m2 + 4 * s2]'], {}), '([m1 + 4 * s1, m2 + 4 * s2])\n', (400, 428), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((431, 457), 'numpy.linspace', 'linspace', (['minx', 'maxx', '(1000)'], {}), '(minx, maxx, 1000)\n', (439, 457), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((1276, 1298), 'numpy.array', 'np.array', (['data.vectors'], {}), '(data.vectors)\n', (1284, 1298), True, 'import numpy as np\n'), ((2329, 2351), 'numpy.array', 'np.array', (['data.vectors'], {}), '(data.vectors)\n', (2337, 2351), True, 'import numpy as np\n'), ((174, 209), 'numpy.exp', 'exp', (['(-(x - mu) ** 2 / (2 * sd ** 2))'], {}), '(-(x - mu) ** 2 / (2 * sd ** 2))\n', (177, 209), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((157, 179), 'numpy.sqrt', 'sqrt', (['(2 * pi * sd ** 2)'], {}), '(2 * pi * sd ** 2)\n', (161, 179), False, 'from numpy import sqrt, sum, exp, pi, min, max, linspace\n'), ((1871, 1895), 'sklearn.mixture.GMM', 'GMM', (['number_of_gaussians'], {}), '(number_of_gaussians)\n', (1874, 1895), False, 'from sklearn.mixture import GMM\n'), ((1013, 1019), 'sklearn.mixture.GMM', 'GMM', (['M'], {}), '(M)\n', (1016, 1019), False, 'from sklearn.mixture import GMM\n')]
|
import pandas as pd
import numpy as np
import sys
import matplotlib.pyplot as plt
import seaborn as sns
def plot_conservation(out_path):
"""
Plotting the fraction of conserved binding sites for Brn2, Ebf2 and
Onecut2, based on multiGPS and edgeR results from Aydin et al., 2019
    (Nature Neuroscience: PMID 31086315)
Parameters:
out_path: Filepath prefix for output bar plots (Manuscript Fig. 6A)
Returns: None
"""
# Defining the dataFrames using multiGPS and edgeR results \
# from Aydin et al., (2019) Nat. Neuroscience.
# Brn2
brn2 = pd.DataFrame([['shared', 6776], ['iA>iN', 2432], ['iN>iA', 1242]],
columns=['category', '#'])
brn2['#'] = brn2['#']/np.sum(brn2['#'])
# Ebf2
ebf2 = pd.DataFrame([['shared', 23331], ['iA>iN', 10687], ['iN>iA', 7921]],
columns=['category', '#'])
ebf2['#'] = ebf2['#']/np.sum(ebf2['#'])
# Onecut2
onecut2 = pd.DataFrame([['shared', 45416], ['iA>iN', 4622], ['iN>iA', 2965]],
columns=['category', '#'])
onecut2['#'] = onecut2['#']/np.sum(onecut2['#'])
# plot bar plots
sns.set_style('ticks')
fig, ax = plt.subplots()
plt.subplot(1, 3, 1)
plt.bar([0, 1, 2], onecut2['#'], width=0.5, color='#687466')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
plt.subplot(1, 3, 2)
plt.bar([0, 1, 2], brn2['#'], width=0.5, color='#cd8d7b')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
plt.subplot(1, 3, 3)
plt.bar([0, 1, 2], ebf2['#'], width=0.5, color='#fbc490')
plt.yticks(fontsize=12)
plt.ylim(0, 1)
#
sns.despine()
fig.tight_layout()
fig.set_size_inches(6, 4)
plt.savefig(out_path + 'Fig_6a.pdf')
def plot_embeddings(data_path, outpath):
"""
Plot 2-D latent embeddings for Brn2, Ebf2 and Onecut2.
Parameters:
data_path: Input file paths (N rows * 2 columns) storing the 2-D co-ordinates
for each binding site in the latent space. The embeddings must be derived
using latent_embeddings/get_latent_embeddings.py
Note: This function assumes that the files are saved with an \
".embedding.txt" extension. Provide only the prefix as an argument.
For example, if the 2-D embedding is stored in "~/Prefix/Oct4.embedding.txt",
call function as: plot_embeddings("~/Prefix/Oct4")
outpath: Output file path.
Returns: None
"""
transcription_factors = ['Brn2', 'Ebf2', 'Onecut2']
for tf in transcription_factors:
dat = np.loadtxt(data_path + tf + '.embedding.txt')
plt.scatter(dat[:, 0], dat[:, 1], s=3, alpha=0.3)
plt.savefig(outpath)
def plot_correlation(data_path, outpath):
"""
Plotting the correlation between ATAC-seq data at individual sites and the
associated chromatin sub-network (Bichrom-CHR) scores.
Parameters:
data_path: Prefix for the ".bound.chromtracks.npy" file. This file stores the
chromatin data at each binding site.
outpath: Output file path.
Returns: None
"""
sns.set_style('whitegrid')
fig, axs = plt.subplots()
for idx, tf in enumerate(['Onecut2', 'Brn2', 'Ebf2']):
# load chromatin data
chrom_data = np.load(data_path + tf + '.bound.chromtracks.npy')
chrom_sum = np.sum(chrom_data, axis=1)
# load scores
embedding = np.loadtxt(data_path + tf + '.embedding.txt')
chrom_score = embedding[:, 1]
plt.subplot(1, 3, idx+1)
plt.scatter(chrom_sum, chrom_score, color='#084177', s=1,
alpha=0.05)
fig.set_size_inches(6, 2)
plt.subplots_adjust(left=0.1, bottom=0.2, right=0.95, top=0.95)
plt.savefig(outpath + 'fig_b.png', dpi=960, layout='tight')
def plot_motif_heatmaps(out_path):
"""
Run MEME-ChIP & FIMO to get the number of motifs enriched at \
chromatin predicted (CP) and sequence predicted (SP) sites.
Parameters:
out_path: Output file path
"""
# Brn2
fig, ax = plt.subplots()
    brn2 = np.array([[919.0, 320], [999, 305], [318, 717], [142, 1769], [72, 612]])
brn2[:, 0] = brn2[:, 0]/933.0 # Total # of sites: 933
brn2[:, 1] = brn2[:, 1]/1055.0 # Total # of sites: 1055
sns.heatmap(brn2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,
linewidths=5.3, linecolor='white')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.95)
fig.set_size_inches(2, 3)
plt.savefig(out_path + 'fig_c1.pdf')
# Ebf2
fig, ax = plt.subplots()
ebf2 = np.array([[3146.0, 700], [2922, 1864], [3544, 1228], [1865, 6496],
[2882, 2124], [104, 1214]])
ebf2[:, 0] = ebf2[:, 0] / 4146.0 # Total # of sites: 4146
ebf2[:, 1] = ebf2[:, 1] / 3469.0 # Total # of sites: 3469
sns.heatmap(ebf2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,
linewidths=5.3, linecolor='white')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.95)
fig.set_size_inches(2, 3)
plt.savefig(out_path + 'fig_c2.pdf')
# Onecut2
fig, ax = plt.subplots()
    oc2 = np.array([[1055.0, 6234], [3637, 542], [5227, 1245], [1282, 10372],
                    [1266, 10067]])
oc2[:, 0] = oc2[:, 0]/5771.0 # Total # of sites: 5771
oc2[:, 1] = oc2[:, 1]/4627.0 # Total # of sites: 4627
sns.heatmap(oc2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,
linewidths=5.3, linecolor='white')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.95)
fig.set_size_inches(2, 3)
plt.savefig(out_path + 'fig_c3.pdf')
def plot_ebo_boxplots(data_path, outpath):
"""
Plot violin plots (manuscript figure 6) for the iAscl1 TFs.
Parameters:
        data_path: Path to the directory which contains TF.iA.summary files
For example, the GATA summary file looks as follows:
...
bichrom, GATA, 0.49097278959035834
bichrom, GATA, 0.515491844830841
bichrom, GATA, 0.572293273059536
bichrom, GATA, 0.4909197931794813
bichrom, GATA, 0.519433898153947
seq, GATA, 0.40140515853838615
seq, GATA, 0.4071458624248806
seq, GATA, 0.4944029049796368
seq, GATA, 0.3942885914448734
seq, GATA, 0.4207938581419808
...
Note that seq refers to a sequence-only model.
outpath: Output file path.
Returns:
None
"""
sns.set_style('darkgrid')
fig, ax = plt.subplots()
for idx, tf in enumerate(['Brn2', 'Ebf2', 'Onecut2']):
dat = pd.read_csv(data_path + tf + '.iA.summary', sep=',', header=None,
names=['condition', 'tf', 'auprc'])
plt.subplot(1, 3, idx+1)
sns.violinplot(x=dat['condition'], y=dat['auprc'],
palette=('#ecce6d', '#5b8c85'),
order=['seq', 'bichrom'], cut=0)
plt.ylim(0, 1)
plt.xlabel("")
plt.ylabel("")
fig.set_size_inches(6, 3)
plt.savefig(data_path + 'violinplots.pdf')
if __name__ == "__main__":
out_path = sys.argv[1]
data_path = sys.argv[2]
plot_conservation(out_path)
plot_embeddings(data_path=data_path, outpath=out_path)
plot_correlation(data_path=data_path, outpath=out_path)
plot_motif_heatmaps(out_path=out_path)
plot_ebo_boxplots(data_path=data_path, outpath=out_path)
|
[
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"seaborn.set_style",
"numpy.array",
"seaborn.violinplot",
"seaborn.despine",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"seaborn.heatmap",
"matplotlib.pyplot.subplots_adjust",
"numpy.sum",
"matplotlib.pyplot.bar",
"numpy.loadtxt",
"numpy.load",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots"
] |
[((589, 687), 'pandas.DataFrame', 'pd.DataFrame', (["[['shared', 6776], ['iA>iN', 2432], ['iN>iA', 1242]]"], {'columns': "['category', '#']"}), "([['shared', 6776], ['iA>iN', 2432], ['iN>iA', 1242]], columns=\n ['category', '#'])\n", (601, 687), True, 'import pandas as pd\n'), ((774, 873), 'pandas.DataFrame', 'pd.DataFrame', (["[['shared', 23331], ['iA>iN', 10687], ['iN>iA', 7921]]"], {'columns': "['category', '#']"}), "([['shared', 23331], ['iA>iN', 10687], ['iN>iA', 7921]],\n columns=['category', '#'])\n", (786, 873), True, 'import pandas as pd\n'), ((967, 1066), 'pandas.DataFrame', 'pd.DataFrame', (["[['shared', 45416], ['iA>iN', 4622], ['iN>iA', 2965]]"], {'columns': "['category', '#']"}), "([['shared', 45416], ['iA>iN', 4622], ['iN>iA', 2965]], columns\n =['category', '#'])\n", (979, 1066), True, 'import pandas as pd\n'), ((1168, 1190), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (1181, 1190), True, 'import seaborn as sns\n'), ((1205, 1219), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1217, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1244), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (1235, 1244), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1309), 'matplotlib.pyplot.bar', 'plt.bar', (['[0, 1, 2]', "onecut2['#']"], {'width': '(0.5)', 'color': '"""#687466"""'}), "([0, 1, 2], onecut2['#'], width=0.5, color='#687466')\n", (1256, 1309), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1337), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (1324, 1337), True, 'import matplotlib.pyplot as plt\n'), ((1342, 1356), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1350, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1387), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (1378, 1387), True, 'import matplotlib.pyplot as plt\n'), ((1392, 1449), 'matplotlib.pyplot.bar', 'plt.bar', (['[0, 1, 2]', "brn2['#']"], {'width': '(0.5)', 'color': '"""#cd8d7b"""'}), "([0, 1, 2], brn2['#'], width=0.5, color='#cd8d7b')\n", (1399, 1449), True, 'import matplotlib.pyplot as plt\n'), ((1454, 1477), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (1464, 1477), True, 'import matplotlib.pyplot as plt\n'), ((1482, 1496), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1490, 1496), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1527), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (1518, 1527), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1589), 'matplotlib.pyplot.bar', 'plt.bar', (['[0, 1, 2]', "ebf2['#']"], {'width': '(0.5)', 'color': '"""#fbc490"""'}), "([0, 1, 2], ebf2['#'], width=0.5, color='#fbc490')\n", (1539, 1589), True, 'import matplotlib.pyplot as plt\n'), ((1594, 1617), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (1604, 1617), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1636), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1630, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1647, 1660), 'seaborn.despine', 'sns.despine', ([], {}), '()\n', (1658, 1660), True, 'import seaborn as sns\n'), ((1718, 1754), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'Fig_6a.pdf')"], {}), "(out_path + 'Fig_6a.pdf')\n", (1729, 1754), True, 'import matplotlib.pyplot as plt\n'), 
((3102, 3128), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (3115, 3128), True, 'import seaborn as sns\n'), ((3144, 3158), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3156, 3158), True, 'import matplotlib.pyplot as plt\n'), ((3659, 3722), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.1)', 'bottom': '(0.2)', 'right': '(0.95)', 'top': '(0.95)'}), '(left=0.1, bottom=0.2, right=0.95, top=0.95)\n', (3678, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3727, 3786), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outpath + 'fig_b.png')"], {'dpi': '(960)', 'layout': '"""tight"""'}), "(outpath + 'fig_b.png', dpi=960, layout='tight')\n", (3738, 3786), True, 'import matplotlib.pyplot as plt\n'), ((4047, 4061), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4059, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4144), 'numpy.array', 'np.array', (['[[919.0, 320], [999, 305], [318, 717], [142, 1769], [72, 612]]'], {}), '([[919.0, 320], [999, 305], [318, 717], [142, 1769], [72, 612]])\n', (4080, 4144), True, 'import numpy as np\n'), ((4270, 4377), 'seaborn.heatmap', 'sns.heatmap', (['brn2'], {'cmap': '"""bone_r"""', 'cbar_kws': "{'shrink': 0.5}", 'vmax': '(1.5)', 'linewidths': '(5.3)', 'linecolor': '"""white"""'}), "(brn2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,\n linewidths=5.3, linecolor='white')\n", (4281, 4377), True, 'import seaborn as sns\n'), ((4394, 4458), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.1)', 'right': '(0.85)', 'top': '(0.95)'}), '(left=0.15, bottom=0.1, right=0.85, top=0.95)\n', (4413, 4458), True, 'import matplotlib.pyplot as plt\n'), ((4493, 4529), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'fig_c1.pdf')"], {}), "(out_path + 'fig_c1.pdf')\n", (4504, 4529), True, 'import matplotlib.pyplot as plt\n'), ((4556, 4570), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4568, 4570), True, 'import matplotlib.pyplot as plt\n'), ((4582, 4681), 'numpy.array', 'np.array', (['[[3146.0, 700], [2922, 1864], [3544, 1228], [1865, 6496], [2882, 2124], [\n 104, 1214]]'], {}), '([[3146.0, 700], [2922, 1864], [3544, 1228], [1865, 6496], [2882, \n 2124], [104, 1214]])\n', (4590, 4681), True, 'import numpy as np\n'), ((4827, 4934), 'seaborn.heatmap', 'sns.heatmap', (['ebf2'], {'cmap': '"""bone_r"""', 'cbar_kws': "{'shrink': 0.5}", 'vmax': '(1.5)', 'linewidths': '(5.3)', 'linecolor': '"""white"""'}), "(ebf2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,\n linewidths=5.3, linecolor='white')\n", (4838, 4934), True, 'import seaborn as sns\n'), ((4951, 5015), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.1)', 'right': '(0.85)', 'top': '(0.95)'}), '(left=0.15, bottom=0.1, right=0.85, top=0.95)\n', (4970, 5015), True, 'import matplotlib.pyplot as plt\n'), ((5050, 5086), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'fig_c2.pdf')"], {}), "(out_path + 'fig_c2.pdf')\n", (5061, 5086), True, 'import matplotlib.pyplot as plt\n'), ((5116, 5130), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5128, 5130), True, 'import matplotlib.pyplot as plt\n'), ((5140, 5228), 'numpy.array', 'np.array', (['[[1055.0, 6234], [3637, 542], [5227, 1245], [1282, 10372], [1266, 10067]]'], {}), '([[1055.0, 6234], [3637, 542], [5227, 1245], [1282, 10372], [1266, \n 10067]])\n', (5148, 5228), True, 'import numpy as np\n'), ((5364, 5470), 
'seaborn.heatmap', 'sns.heatmap', (['oc2'], {'cmap': '"""bone_r"""', 'cbar_kws': "{'shrink': 0.5}", 'vmax': '(1.5)', 'linewidths': '(5.3)', 'linecolor': '"""white"""'}), "(oc2, cmap='bone_r', cbar_kws={'shrink': 0.5}, vmax=1.5,\n linewidths=5.3, linecolor='white')\n", (5375, 5470), True, 'import seaborn as sns\n'), ((5487, 5551), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.1)', 'right': '(0.85)', 'top': '(0.95)'}), '(left=0.15, bottom=0.1, right=0.85, top=0.95)\n', (5506, 5551), True, 'import matplotlib.pyplot as plt\n'), ((5586, 5622), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'fig_c3.pdf')"], {}), "(out_path + 'fig_c3.pdf')\n", (5597, 5622), True, 'import matplotlib.pyplot as plt\n'), ((6444, 6469), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (6457, 6469), True, 'import seaborn as sns\n'), ((6484, 6498), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6496, 6498), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7048), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(data_path + 'violinplots.pdf')"], {}), "(data_path + 'violinplots.pdf')\n", (7017, 7048), True, 'import matplotlib.pyplot as plt\n'), ((733, 750), 'numpy.sum', 'np.sum', (["brn2['#']"], {}), "(brn2['#'])\n", (739, 750), True, 'import numpy as np\n'), ((920, 937), 'numpy.sum', 'np.sum', (["ebf2['#']"], {}), "(ebf2['#'])\n", (926, 937), True, 'import numpy as np\n'), ((1121, 1141), 'numpy.sum', 'np.sum', (["onecut2['#']"], {}), "(onecut2['#'])\n", (1127, 1141), True, 'import numpy as np\n'), ((2567, 2612), 'numpy.loadtxt', 'np.loadtxt', (["(data_path + tf + '.embedding.txt')"], {}), "(data_path + tf + '.embedding.txt')\n", (2577, 2612), True, 'import numpy as np\n'), ((2621, 2670), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dat[:, 0]', 'dat[:, 1]'], {'s': '(3)', 'alpha': '(0.3)'}), '(dat[:, 0], dat[:, 1], s=3, alpha=0.3)\n', (2632, 2670), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2699), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outpath'], {}), '(outpath)\n', (2690, 2699), True, 'import matplotlib.pyplot as plt\n'), ((3270, 3320), 'numpy.load', 'np.load', (["(data_path + tf + '.bound.chromtracks.npy')"], {}), "(data_path + tf + '.bound.chromtracks.npy')\n", (3277, 3320), True, 'import numpy as np\n'), ((3341, 3367), 'numpy.sum', 'np.sum', (['chrom_data'], {'axis': '(1)'}), '(chrom_data, axis=1)\n', (3347, 3367), True, 'import numpy as np\n'), ((3410, 3455), 'numpy.loadtxt', 'np.loadtxt', (["(data_path + tf + '.embedding.txt')"], {}), "(data_path + tf + '.embedding.txt')\n", (3420, 3455), True, 'import numpy as np\n'), ((3502, 3528), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(idx + 1)'], {}), '(1, 3, idx + 1)\n', (3513, 3528), True, 'import matplotlib.pyplot as plt\n'), ((3535, 3604), 'matplotlib.pyplot.scatter', 'plt.scatter', (['chrom_sum', 'chrom_score'], {'color': '"""#084177"""', 's': '(1)', 'alpha': '(0.05)'}), "(chrom_sum, chrom_score, color='#084177', s=1, alpha=0.05)\n", (3546, 3604), True, 'import matplotlib.pyplot as plt\n'), ((6572, 6678), 'pandas.read_csv', 'pd.read_csv', (["(data_path + tf + '.iA.summary')"], {'sep': '""","""', 'header': 'None', 'names': "['condition', 'tf', 'auprc']"}), "(data_path + tf + '.iA.summary', sep=',', header=None, names=[\n 'condition', 'tf', 'auprc'])\n", (6583, 6678), True, 'import pandas as pd\n'), ((6708, 6734), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(idx + 1)'], {}), '(1, 3, idx + 1)\n', (6719, 
6734), True, 'import matplotlib.pyplot as plt\n'), ((6741, 6860), 'seaborn.violinplot', 'sns.violinplot', ([], {'x': "dat['condition']", 'y': "dat['auprc']", 'palette': "('#ecce6d', '#5b8c85')", 'order': "['seq', 'bichrom']", 'cut': '(0)'}), "(x=dat['condition'], y=dat['auprc'], palette=('#ecce6d',\n '#5b8c85'), order=['seq', 'bichrom'], cut=0)\n", (6755, 6860), True, 'import seaborn as sns\n'), ((6911, 6925), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (6919, 6925), True, 'import matplotlib.pyplot as plt\n'), ((6934, 6948), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""""""'], {}), "('')\n", (6944, 6948), True, 'import matplotlib.pyplot as plt\n'), ((6957, 6971), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (6967, 6971), True, 'import matplotlib.pyplot as plt\n')]
|
"""
refer to https://github.com/jfzhang95/pytorch-deeplab-xception/blob/master/utils/metrics.py
"""
import numpy as np
__all__ = ['SegmentationMetric']
"""
confusionMetric
P\L P N
P TP FP
N FN TN
"""
class SegmentationMetric(object):
def __init__(self, numClass):
self.numClass = numClass
self.confusionMatrix = np.zeros((self.numClass,) * 2)
def pixelAccuracy(self):
# return all class overall pixel accuracy
        # acc = (TP + TN) / (TP + TN + FP + FN)
acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()
return acc
def classPixelAccuracy(self):
# return each category pixel accuracy(A more accurate way to call it precision)
# acc = (TP) / TP + FP
classAcc = np.diag(self.confusionMatrix) / self.confusionMatrix.sum(axis=1)
return classAcc
def meanPixelAccuracy(self):
classAcc = self.classPixelAccuracy()
meanAcc = np.nanmean(classAcc)
return meanAcc
def meanIntersectionOverUnion(self):
# Intersection = TP Union = TP + FP + FN
# IoU = TP / (TP + FP + FN)
intersection = np.diag(self.confusionMatrix)
union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(
self.confusionMatrix)
IoU = intersection / union
mIoU = np.nanmean(IoU)
return mIoU
def genConfusionMatrix(self, imgPredict, imgLabel):
# remove classes from unlabeled pixels in gt image and predict
mask = (imgLabel >= 0) & (imgLabel < self.numClass)
label = self.numClass * imgLabel[mask] + imgPredict[mask]
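        # Each (ground truth, prediction) pair maps to the unique index
        # gt*numClass + pred, so one bincount builds the flattened confusion matrix.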
count = np.bincount(label, minlength=self.numClass ** 2)
confusionMatrix = count.reshape(self.numClass, self.numClass)
return confusionMatrix
def Frequency_Weighted_Intersection_over_Union(self):
# FWIOU = [(TP+FN)/(TP+FP+TN+FN)] *[TP / (TP + FP + FN)]
freq = np.sum(self.confusionMatrix, axis=1) / np.sum(self.confusionMatrix)
iu = np.diag(self.confusionMatrix) / (
np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) -
np.diag(self.confusionMatrix))
FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
return FWIoU
def addBatch(self, imgPredict, imgLabel):
assert imgPredict.shape == imgLabel.shape
self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)
def reset(self):
self.confusionMatrix = np.zeros((self.numClass, self.numClass))
if __name__ == '__main__':
imgPredict = np.array([0, 0, 1, 1, 2, 2])
imgLabel = np.array([0, 0, 1, 1, 2, 2])
metric = SegmentationMetric(3)
metric.addBatch(imgPredict, imgLabel)
acc = metric.pixelAccuracy()
mIoU = metric.meanIntersectionOverUnion()
print(acc, mIoU)
|
[
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.nanmean",
"numpy.sum",
"numpy.bincount"
] |
[((2630, 2658), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 2, 2])\n', (2638, 2658), True, 'import numpy as np\n'), ((2674, 2702), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 2, 2]'], {}), '([0, 0, 1, 1, 2, 2])\n', (2682, 2702), True, 'import numpy as np\n'), ((359, 389), 'numpy.zeros', 'np.zeros', (['((self.numClass,) * 2)'], {}), '((self.numClass,) * 2)\n', (367, 389), True, 'import numpy as np\n'), ((975, 995), 'numpy.nanmean', 'np.nanmean', (['classAcc'], {}), '(classAcc)\n', (985, 995), True, 'import numpy as np\n'), ((1169, 1198), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (1176, 1198), True, 'import numpy as np\n'), ((1386, 1401), 'numpy.nanmean', 'np.nanmean', (['IoU'], {}), '(IoU)\n', (1396, 1401), True, 'import numpy as np\n'), ((1692, 1740), 'numpy.bincount', 'np.bincount', (['label'], {'minlength': '(self.numClass ** 2)'}), '(label, minlength=self.numClass ** 2)\n', (1703, 1740), True, 'import numpy as np\n'), ((2543, 2583), 'numpy.zeros', 'np.zeros', (['(self.numClass, self.numClass)'], {}), '((self.numClass, self.numClass))\n', (2551, 2583), True, 'import numpy as np\n'), ((789, 818), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (796, 818), True, 'import numpy as np\n'), ((1293, 1322), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (1300, 1322), True, 'import numpy as np\n'), ((1985, 2021), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(1)'}), '(self.confusionMatrix, axis=1)\n', (1991, 2021), True, 'import numpy as np\n'), ((2024, 2052), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (2030, 2052), True, 'import numpy as np\n'), ((2066, 2095), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (2073, 2095), True, 'import numpy as np\n'), ((1215, 1251), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(1)'}), '(self.confusionMatrix, axis=1)\n', (1221, 1251), True, 'import numpy as np\n'), ((1254, 1290), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(0)'}), '(self.confusionMatrix, axis=0)\n', (1260, 1290), True, 'import numpy as np\n'), ((2210, 2239), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (2217, 2239), True, 'import numpy as np\n'), ((532, 561), 'numpy.diag', 'np.diag', (['self.confusionMatrix'], {}), '(self.confusionMatrix)\n', (539, 561), True, 'import numpy as np\n'), ((2116, 2152), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(1)'}), '(self.confusionMatrix, axis=1)\n', (2122, 2152), True, 'import numpy as np\n'), ((2155, 2191), 'numpy.sum', 'np.sum', (['self.confusionMatrix'], {'axis': '(0)'}), '(self.confusionMatrix, axis=0)\n', (2161, 2191), True, 'import numpy as np\n')]
|
'''
@inproceedings{golestaneh2017spatially,
title={Spatially-Varying Blur Detection Based on Multiscale Fused and Sorted Transform Coefficients of Gradient Magnitudes},
author={<NAME> and Karam, <NAME>},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
year={2017}
}
'''
import cv2
import numpy as np
import os
from skimage.filters.rank import entropy
from skimage.morphology import square
import copy
import time
class BlurDetector(object):
def __init__(self, downsampling_factor=4, num_scales=4, scale_start=3, entropy_filt_kernel_sze=7, sigma_s_RF_filter=15, sigma_r_RF_filter=0.25, num_iterations_RF_filter=3):
self.downsampling_factor = downsampling_factor
self.num_scales = num_scales
self.scale_start = scale_start
self.entropy_filt_kernel_sze = entropy_filt_kernel_sze
self.sigma_s_RF_filter = sigma_s_RF_filter
self.sigma_r_RF_filter = sigma_r_RF_filter
self.num_iterations_RF_filter = num_iterations_RF_filter
self.scales = self.createScalePyramid()
self.__freqBands = []
self.__dct_matrices = []
self.freq_index = []
def disp_progress(self, i, rows, old_progress):
progress_dict = {10:'[| ] 10%',
20:'[| | ] 20%',
30:'[| | | ] 30%',
40:'[| | | | ] 40%',
50:'[| | | | | ] 50%',
60:'[| | | | | | ] 60%',
70:'[| | | | | | | ] 70%',
80:'[| | | | | | | | ] 80%',
90:'[| | | | | | | | | ] 90%',
100:'[| | | | | | | | | |] 100%'}
i_done = i / rows * 100;
p_done = round(i_done / 10) * 10;
if(p_done != old_progress):
os.system('cls' if os.name == 'nt' else 'clear')
print(progress_dict[p_done])
old_progress = p_done
return(p_done)
def createScalePyramid(self):
scales = []
for i in range(self.num_scales):
scales.append((2**(self.scale_start + i)) - 1) # Scales would be 7, 15, 31, 63 ...
return(scales)
def computeImageGradientMagnitude(self, img):
__sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, borderType=cv2.BORDER_REFLECT) # Find x and y gradients
__sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, borderType=cv2.BORDER_REFLECT)
# Find gradient magnitude
__magnitude = np.sqrt(__sobelx ** 2.0 + __sobely ** 2.0)
return(__magnitude)
def __computeFrequencyBands(self):
for current_scale in self.scales:
matrixInds = np.zeros((current_scale, current_scale))
for i in range(current_scale):
matrixInds[0 : max(0, int(((current_scale-1)/2) - i +1)), i] = 1
for i in range(current_scale):
if (current_scale-((current_scale-1)/2) - i) <= 0:
matrixInds[0:current_scale - i - 1, i] = 2
else:
matrixInds[int(current_scale - ((current_scale - 1) / 2) - i - 1): int(current_scale - i - 1), i]=2;
matrixInds[0, 0] = 3
self.__freqBands.append(matrixInds)
def __dctmtx(self, n):
[mesh_cols, mesh_rows] = np.meshgrid(np.linspace(0, n-1, n), np.linspace(0, n-1, n))
dct_matrix = np.sqrt(2/n) * np.cos(np.pi * np.multiply((2 * mesh_cols + 1), mesh_rows) / (2*n));
dct_matrix[0, :] = dct_matrix[0, :] / np.sqrt(2)
return(dct_matrix)
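# Sanity-check sketch (not part of the original class): __dctmtx returns the
# orthonormal DCT-II basis, so D times its transpose should be close to identity, e.g.
#   D = self.__dctmtx(7); np.allclose(D @ D.T, np.eye(7))  # -> True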
def __createDCT_Matrices(self):
if(len(self.__dct_matrices) > 0):
raise TypeError("dct matrices are already defined. Redefinition is not allowed.")
for curr_scale in self.scales:
dct_matrix = self.__dctmtx(curr_scale)
self.__dct_matrices.append(dct_matrix)
def __getDCTCoefficients(self, img_blk, ind):
rows, cols = np.shape(img_blk)
# D = self.__dctmtx(rows)
D = self.__dct_matrices[ind]
dct_coeff = np.matmul(np.matmul(D, img_blk), np.transpose(D))
return(dct_coeff)
def entropyFilt(self, img):
return(entropy(img, square(self.entropy_filt_kernel_sze)))
def computeScore(self, weighted_local_entropy, T_max):
# normalize weighted T max matrix
min_val = weighted_local_entropy.min()
weighted_T_Max = weighted_local_entropy - min_val
max_val = weighted_local_entropy.max()
weighted_T_Max = weighted_local_entropy / max_val
score = np.median(weighted_local_entropy)
return(score)
def TransformedDomainRecursiveFilter_Horizontal(self, I, D, sigma):
# Feedback Coefficient (Appendix of the paper)
a = np.exp(-np.sqrt(2) / sigma)
F = copy.deepcopy(I)
V = a ** D
rows, cols = np.shape(I)
# Left --> Right Filter
for i in range(1, cols):
F[:, i] = F[:, i] + np.multiply(V[:, i], (F[:, i-1] - F[:, i]))
# Right --> Left Filter
for i in range(cols-2, 1, -1):
F[:, i] = F[:, i] + np.multiply(V[:, i+1], (F[:, i + 1] - F[:, i]))
return(F)
def RF(self, img, joint_img):
if(len(joint_img) == 0):
joint_img = img
joint_img = joint_img.astype('float64')
joint_img = joint_img / 255
if(len(np.shape(joint_img)) == 2):
cols, rows = np.shape(joint_img)
channels = 1
elif(len(np.shape(joint_img)) == 3):
cols, rows, channels = np.shape(joint_img)
# Estimate horizontal and vertical partial derivatives using finite differences.
dIcdx = np.diff(joint_img, n=1, axis=1)
dIcdy = np.diff(joint_img, n=1, axis=0)
dIdx = np.zeros((cols, rows));
dIdy = np.zeros((cols, rows));
# Compute the l1 - norm distance of neighbor pixels.
dIdx[:, 1::] = abs(dIcdx)
dIdy[1::, :] = abs(dIcdy)
dHdx = (1 + self.sigma_s_RF_filter / self.sigma_r_RF_filter * dIdx)
dVdy = (1 + self.sigma_s_RF_filter / self.sigma_r_RF_filter * dIdy)
dVdy = np.transpose(dVdy)
N = self.num_iterations_RF_filter
F = copy.deepcopy(img)
for i in range(self.num_iterations_RF_filter):
# Compute the sigma value for this iteration (Equation 14 of our paper).
sigma_H_i = self.sigma_s_RF_filter * np.sqrt(3) * 2 ** (N - (i + 1)) / np.sqrt(4 ** N - 1)
F = self.TransformedDomainRecursiveFilter_Horizontal(F, dHdx, sigma_H_i)
F = np.transpose(F)
F = self.TransformedDomainRecursiveFilter_Horizontal(F, dVdy, sigma_H_i)
F = np.transpose(F)
return(F)
def detectBlur(self, img, ):
ori_rows, ori_cols = np.shape(img)
# perform initial Gaussian smoothing
InputImageGaus = cv2.GaussianBlur(img, (3, 3), sigmaX=0.5, sigmaY=0.5)
__gradient_image = self.computeImageGradientMagnitude(InputImageGaus)
total_num_layers = 1 + sum(self.scales)
# create all dct_matrices beforehand to save computation time
self.__createDCT_Matrices()
# Create frequency labels at all the scales
self.__computeFrequencyBands()
# Compute the indices of the high frequency content inside each frequency band
for i in range(self.num_scales):
curr_freq_band = self.__freqBands[i]
self.freq_index.append(np.where(curr_freq_band == 0))
__padded_image = np.pad(__gradient_image, int(np.floor(max(self.scales)/2)), mode='constant')
rows, cols = np.shape(__padded_image)
L = []
total_num_points = len([i for i in range(int(max(self.scales)/2), rows - int(max(self.scales)/2), self.downsampling_factor)]) * len([j for j in range(int(max(self.scales) / 2), cols - int(max(self.scales) / 2), self.downsampling_factor)])
L = np.zeros((total_num_points, total_num_layers))
iter = 0
n = 0
old_progress = 0
for i in range(int(max(self.scales)/2), rows - int(max(self.scales)/2), self.downsampling_factor):
old_progress = self.disp_progress(i, rows, old_progress)
m = 0
n += 1
for j in range(int(max(self.scales) / 2), cols - int(max(self.scales) / 2), self.downsampling_factor):
m += 1
high_freq_components = []
for ind, curr_scale in enumerate(self.scales):
Patch = __padded_image[i-np.int(curr_scale/2) : i+np.int(curr_scale/2) + 1, j-np.int(curr_scale/2) : j+np.int(curr_scale/2) + 1]
dct_coefficients = np.abs(self.__getDCTCoefficients(Patch, ind))
# store all high frequency components
high_freq_components.append(dct_coefficients[self.freq_index[ind]])
# Find the first `total_num_layers` smallest values in all the high frequency components - we must not sort the entire array since that is very inefficient
high_freq_components = np.hstack(high_freq_components)
result = np.argpartition(high_freq_components, total_num_layers)
L[iter, :] = high_freq_components[result[:total_num_layers]]
iter += 1
L = np.array(L)
# normalize the L matrix
for i in range(total_num_layers):
max_val = max(L[:, i])
L[:, i] = L[:, i] / max_val
# perform max pooling on the normalized frequencies
ind1d = 0
T_max = np.zeros((n, m))
max_val = 0
min_val = 99999
for i in range(n):
for j in range(m):
T_max[i][j] = max(L[ind1d, :])
max_val = max(max_val, T_max[i][j])
min_val = min(min_val, T_max[i][j])
ind1d += 1
# Final Map and Post Processing
local_entropy = self.entropyFilt(T_max)
weighted_local_entropy = np.multiply(local_entropy, T_max)
score = self.computeScore(weighted_local_entropy, T_max)
rows, cols = np.shape(weighted_local_entropy)
# resize the input image to match the size of local_entropy matrix
resized_input_image = cv2.resize(InputImageGaus, (cols, rows))
aSmooth = cv2.GaussianBlur(resized_input_image, (3, 3), sigmaX=1, sigmaY=1)
final_map = self.RF(weighted_local_entropy, aSmooth)
# resize the map to the original resolution
final_map = cv2.resize(final_map, (ori_cols, ori_rows))
# normalize the map
final_map = final_map / np.max(final_map)
return(final_map)
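# --- Hedged usage sketch (not part of the original module) ---
# Shows how the detector might be driven end-to-end; the file names are hypothetical
# and the input is assumed to be a single-channel (grayscale) image.
if __name__ == '__main__':
    BD = BlurDetector(downsampling_factor=4, num_scales=4, scale_start=3)
    img = cv2.imread('example.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical path
    if img is not None:
        blur_map = BD.detectBlur(img)  # map is normalized to [0, 1] inside detectBlur
        cv2.imwrite('blur_map.png', (blur_map * 255).astype(np.uint8))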
|
[
"numpy.sqrt",
"numpy.hstack",
"numpy.array",
"copy.deepcopy",
"numpy.multiply",
"numpy.where",
"numpy.diff",
"numpy.max",
"numpy.linspace",
"numpy.matmul",
"skimage.morphology.square",
"cv2.resize",
"cv2.GaussianBlur",
"numpy.shape",
"numpy.transpose",
"numpy.int",
"numpy.median",
"numpy.argpartition",
"numpy.zeros",
"os.system",
"cv2.Sobel"
] |
[((2361, 2424), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(1)', '(0)'], {'borderType': 'cv2.BORDER_REFLECT'}), '(img, cv2.CV_64F, 1, 0, borderType=cv2.BORDER_REFLECT)\n', (2370, 2424), False, 'import cv2\n'), ((2470, 2533), 'cv2.Sobel', 'cv2.Sobel', (['img', 'cv2.CV_64F', '(0)', '(1)'], {'borderType': 'cv2.BORDER_REFLECT'}), '(img, cv2.CV_64F, 0, 1, borderType=cv2.BORDER_REFLECT)\n', (2479, 2533), False, 'import cv2\n'), ((2591, 2633), 'numpy.sqrt', 'np.sqrt', (['(__sobelx ** 2.0 + __sobely ** 2.0)'], {}), '(__sobelx ** 2.0 + __sobely ** 2.0)\n', (2598, 2633), True, 'import numpy as np\n'), ((4029, 4046), 'numpy.shape', 'np.shape', (['img_blk'], {}), '(img_blk)\n', (4037, 4046), True, 'import numpy as np\n'), ((4643, 4676), 'numpy.median', 'np.median', (['weighted_local_entropy'], {}), '(weighted_local_entropy)\n', (4652, 4676), True, 'import numpy as np\n'), ((4879, 4895), 'copy.deepcopy', 'copy.deepcopy', (['I'], {}), '(I)\n', (4892, 4895), False, 'import copy\n'), ((4936, 4947), 'numpy.shape', 'np.shape', (['I'], {}), '(I)\n', (4944, 4947), True, 'import numpy as np\n'), ((5760, 5791), 'numpy.diff', 'np.diff', (['joint_img'], {'n': '(1)', 'axis': '(1)'}), '(joint_img, n=1, axis=1)\n', (5767, 5791), True, 'import numpy as np\n'), ((5808, 5839), 'numpy.diff', 'np.diff', (['joint_img'], {'n': '(1)', 'axis': '(0)'}), '(joint_img, n=1, axis=0)\n', (5815, 5839), True, 'import numpy as np\n'), ((5856, 5878), 'numpy.zeros', 'np.zeros', (['(cols, rows)'], {}), '((cols, rows))\n', (5864, 5878), True, 'import numpy as np\n'), ((5895, 5917), 'numpy.zeros', 'np.zeros', (['(cols, rows)'], {}), '((cols, rows))\n', (5903, 5917), True, 'import numpy as np\n'), ((6218, 6236), 'numpy.transpose', 'np.transpose', (['dVdy'], {}), '(dVdy)\n', (6230, 6236), True, 'import numpy as np\n'), ((6292, 6310), 'copy.deepcopy', 'copy.deepcopy', (['img'], {}), '(img)\n', (6305, 6310), False, 'import copy\n'), ((6871, 6884), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (6879, 6884), True, 'import numpy as np\n'), ((6956, 7009), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(3, 3)'], {'sigmaX': '(0.5)', 'sigmaY': '(0.5)'}), '(img, (3, 3), sigmaX=0.5, sigmaY=0.5)\n', (6972, 7009), False, 'import cv2\n'), ((7706, 7730), 'numpy.shape', 'np.shape', (['__padded_image'], {}), '(__padded_image)\n', (7714, 7730), True, 'import numpy as np\n'), ((8006, 8052), 'numpy.zeros', 'np.zeros', (['(total_num_points, total_num_layers)'], {}), '((total_num_points, total_num_layers))\n', (8014, 8052), True, 'import numpy as np\n'), ((9389, 9400), 'numpy.array', 'np.array', (['L'], {}), '(L)\n', (9397, 9400), True, 'import numpy as np\n'), ((9647, 9663), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (9655, 9663), True, 'import numpy as np\n'), ((10066, 10099), 'numpy.multiply', 'np.multiply', (['local_entropy', 'T_max'], {}), '(local_entropy, T_max)\n', (10077, 10099), True, 'import numpy as np\n'), ((10187, 10219), 'numpy.shape', 'np.shape', (['weighted_local_entropy'], {}), '(weighted_local_entropy)\n', (10195, 10219), True, 'import numpy as np\n'), ((10326, 10366), 'cv2.resize', 'cv2.resize', (['InputImageGaus', '(cols, rows)'], {}), '(InputImageGaus, (cols, rows))\n', (10336, 10366), False, 'import cv2\n'), ((10385, 10450), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['resized_input_image', '(3, 3)'], {'sigmaX': '(1)', 'sigmaY': '(1)'}), '(resized_input_image, (3, 3), sigmaX=1, sigmaY=1)\n', (10401, 10450), False, 'import cv2\n'), ((10585, 10628), 'cv2.resize', 'cv2.resize', (['final_map', '(ori_cols, 
ori_rows)'], {}), '(final_map, (ori_cols, ori_rows))\n', (10595, 10628), False, 'import cv2\n'), ((1921, 1969), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (1930, 1969), False, 'import os\n'), ((2769, 2809), 'numpy.zeros', 'np.zeros', (['(current_scale, current_scale)'], {}), '((current_scale, current_scale))\n', (2777, 2809), True, 'import numpy as np\n'), ((3406, 3430), 'numpy.linspace', 'np.linspace', (['(0)', '(n - 1)', 'n'], {}), '(0, n - 1, n)\n', (3417, 3430), True, 'import numpy as np\n'), ((3430, 3454), 'numpy.linspace', 'np.linspace', (['(0)', '(n - 1)', 'n'], {}), '(0, n - 1, n)\n', (3441, 3454), True, 'import numpy as np\n'), ((3475, 3489), 'numpy.sqrt', 'np.sqrt', (['(2 / n)'], {}), '(2 / n)\n', (3482, 3489), True, 'import numpy as np\n'), ((3605, 3615), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3612, 3615), True, 'import numpy as np\n'), ((4148, 4169), 'numpy.matmul', 'np.matmul', (['D', 'img_blk'], {}), '(D, img_blk)\n', (4157, 4169), True, 'import numpy as np\n'), ((4171, 4186), 'numpy.transpose', 'np.transpose', (['D'], {}), '(D)\n', (4183, 4186), True, 'import numpy as np\n'), ((4275, 4311), 'skimage.morphology.square', 'square', (['self.entropy_filt_kernel_sze'], {}), '(self.entropy_filt_kernel_sze)\n', (4281, 4311), False, 'from skimage.morphology import square\n'), ((5510, 5529), 'numpy.shape', 'np.shape', (['joint_img'], {}), '(joint_img)\n', (5518, 5529), True, 'import numpy as np\n'), ((6655, 6670), 'numpy.transpose', 'np.transpose', (['F'], {}), '(F)\n', (6667, 6670), True, 'import numpy as np\n'), ((6773, 6788), 'numpy.transpose', 'np.transpose', (['F'], {}), '(F)\n', (6785, 6788), True, 'import numpy as np\n'), ((10690, 10707), 'numpy.max', 'np.max', (['final_map'], {}), '(final_map)\n', (10696, 10707), True, 'import numpy as np\n'), ((5046, 5089), 'numpy.multiply', 'np.multiply', (['V[:, i]', '(F[:, i - 1] - F[:, i])'], {}), '(V[:, i], F[:, i - 1] - F[:, i])\n', (5057, 5089), True, 'import numpy as np\n'), ((5194, 5241), 'numpy.multiply', 'np.multiply', (['V[:, i + 1]', '(F[:, i + 1] - F[:, i])'], {}), '(V[:, i + 1], F[:, i + 1] - F[:, i])\n', (5205, 5241), True, 'import numpy as np\n'), ((5457, 5476), 'numpy.shape', 'np.shape', (['joint_img'], {}), '(joint_img)\n', (5465, 5476), True, 'import numpy as np\n'), ((5635, 5654), 'numpy.shape', 'np.shape', (['joint_img'], {}), '(joint_img)\n', (5643, 5654), True, 'import numpy as np\n'), ((6534, 6553), 'numpy.sqrt', 'np.sqrt', (['(4 ** N - 1)'], {}), '(4 ** N - 1)\n', (6541, 6553), True, 'import numpy as np\n'), ((7550, 7579), 'numpy.where', 'np.where', (['(curr_freq_band == 0)'], {}), '(curr_freq_band == 0)\n', (7558, 7579), True, 'import numpy as np\n'), ((9159, 9190), 'numpy.hstack', 'np.hstack', (['high_freq_components'], {}), '(high_freq_components)\n', (9168, 9190), True, 'import numpy as np\n'), ((9216, 9271), 'numpy.argpartition', 'np.argpartition', (['high_freq_components', 'total_num_layers'], {}), '(high_freq_components, total_num_layers)\n', (9231, 9271), True, 'import numpy as np\n'), ((4847, 4857), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4854, 4857), True, 'import numpy as np\n'), ((5572, 5591), 'numpy.shape', 'np.shape', (['joint_img'], {}), '(joint_img)\n', (5580, 5591), True, 'import numpy as np\n'), ((3505, 3546), 'numpy.multiply', 'np.multiply', (['(2 * mesh_cols + 1)', 'mesh_rows'], {}), '(2 * mesh_cols + 1, mesh_rows)\n', (3516, 3546), True, 'import numpy as np\n'), ((6500, 6510), 'numpy.sqrt', 
'np.sqrt', (['(3)'], {}), '(3)\n', (6507, 6510), True, 'import numpy as np\n'), ((8611, 8633), 'numpy.int', 'np.int', (['(curr_scale / 2)'], {}), '(curr_scale / 2)\n', (8617, 8633), True, 'import numpy as np\n'), ((8664, 8686), 'numpy.int', 'np.int', (['(curr_scale / 2)'], {}), '(curr_scale / 2)\n', (8670, 8686), True, 'import numpy as np\n'), ((8636, 8658), 'numpy.int', 'np.int', (['(curr_scale / 2)'], {}), '(curr_scale / 2)\n', (8642, 8658), True, 'import numpy as np\n'), ((8689, 8711), 'numpy.int', 'np.int', (['(curr_scale / 2)'], {}), '(curr_scale / 2)\n', (8695, 8711), True, 'import numpy as np\n')]
|
"""
This module contains all the functions needed for extracting satellite-derived
shorelines (SDS)
Author: <NAME>, Water Research Laboratory, University of New South Wales
"""
# load modules
import os
import numpy as np
import matplotlib.pyplot as plt
import pdb
# image processing modules
import skimage.filters as filters
import skimage.measure as measure
import skimage.morphology as morphology
# machine learning modules
import sklearn
if sklearn.__version__[:4] == '0.20':
from sklearn.externals import joblib
else:
import joblib
from shapely.geometry import LineString
# other modules
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.cm as cm
from matplotlib import gridspec
import pickle
from datetime import datetime
from pylab import ginput
# CoastSat modules
from coastsat import SDS_tools, SDS_preprocess
np.seterr(all='ignore') # raise/ignore divisions by 0 and nans
# Main function for batch shoreline detection
def extract_shorelines(metadata, settings):
"""
Main function to extract shorelines from satellite images
KV WRL 2018
Arguments:
-----------
metadata: dict
contains all the information about the satellite images that were downloaded
settings: dict with the following keys
'inputs': dict
input parameters (sitename, filepath, polygon, dates, sat_list)
'cloud_thresh': float
value between 0 and 1 indicating the maximum cloud fraction in
the cropped image that is accepted
'cloud_mask_issue': boolean
True if there is an issue with the cloud mask and sand pixels
are erroneously being masked on the images
'buffer_size': int
size of the buffer (m) around the sandy pixels over which the pixels
are considered in the thresholding algorithm
'min_beach_area': int
minimum allowable object area (in metres^2) for the class 'sand',
the area is converted to number of connected pixels
'min_length_sl': int
minimum length (in metres) of shoreline contour to be valid
'sand_color': str
'default', 'dark' (for grey/black sand beaches) or 'bright' (for white sand beaches)
'output_epsg': int
output spatial reference system as EPSG code
'check_detection': bool
if True, lets user manually accept/reject the mapped shorelines
'save_figure': bool
if True, saves a -jpg file for each mapped shoreline
'adjust_detection': bool
if True, allows user to manually adjust the detected shoreline
Returns:
-----------
output: dict
contains the extracted shorelines and corresponding dates + metadata
"""
sitename = settings['inputs']['sitename']
filepath_data = settings['inputs']['filepath']
filepath_models = os.path.join(os.getcwd(), 'classification', 'models')
# initialise output structure
output = dict([])
# create a subfolder to store the .jpg images showing the detection
filepath_jpg = os.path.join(filepath_data, sitename, 'jpg_files', 'detection')
if not os.path.exists(filepath_jpg):
os.makedirs(filepath_jpg)
# close all open figures
plt.close('all')
print('Mapping shorelines:')
# loop through satellite list
for satname in metadata.keys():
# get images
filepath = SDS_tools.get_filepath(settings['inputs'],satname)
filenames = metadata[satname]['filenames']
# initialise the output variables
output_timestamp = [] # datetime at which the image was acquired (UTC time)
output_shoreline = [] # vector of shoreline points
output_filename = [] # filename of the images from which the shorelines were derived
output_cloudcover = [] # cloud cover of the images
output_geoaccuracy = []# georeferencing accuracy of the images
output_idxkeep = [] # index that were kept during the analysis (cloudy images are skipped)
output_t_mndwi = [] # MNDWI threshold used to map the shoreline
# load classifiers (if the sklearn version is not 0.20, load the '_new' model files)
str_new = ''
if not sklearn.__version__[:4] == '0.20':
str_new = '_new'
if satname in ['L5','L7','L8']:
pixel_size = 15
if settings['sand_color'] == 'dark':
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat_dark%s.pkl'%str_new))
elif settings['sand_color'] == 'bright':
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat_bright%s.pkl'%str_new))
else:
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_Landsat%s.pkl'%str_new))
elif satname == 'S2':
pixel_size = 10
clf = joblib.load(os.path.join(filepath_models, 'NN_4classes_S2%s.pkl'%str_new))
# convert settings['min_beach_area'] and settings['buffer_size'] from metres to pixels
buffer_size_pixels = np.ceil(settings['buffer_size']/pixel_size)
min_beach_area_pixels = np.ceil(settings['min_beach_area']/pixel_size**2)
# loop through the images
for i in range(len(filenames)):
print('\r%s: %d%%' % (satname,int(((i+1)/len(filenames))*100)), end='')
# get image filename
fn = SDS_tools.get_filenames(filenames[i],filepath, satname)
# preprocess image (cloud mask + pansharpening/downsampling)
im_ms, georef, cloud_mask, im_extra, im_QA, im_nodata = SDS_preprocess.preprocess_single(fn, satname, settings['cloud_mask_issue'])
# get image spatial reference system (epsg code) from metadata dict
image_epsg = metadata[satname]['epsg'][i]
# compute cloud_cover percentage (with no data pixels)
cloud_cover_combined = np.divide(sum(sum(cloud_mask.astype(int))),
(cloud_mask.shape[0]*cloud_mask.shape[1]))
if cloud_cover_combined > 0.99: # skip image if more than 99% of its pixels are cloudy
continue
# remove no data pixels from the cloud mask
# (for example L7 bands of no data should not be accounted for)
cloud_mask_adv = np.logical_xor(cloud_mask, im_nodata)
# compute updated cloud cover percentage (without no data pixels)
cloud_cover = np.divide(sum(sum(cloud_mask_adv.astype(int))),
(sum(sum((~im_nodata).astype(int)))))
# skip image if cloud cover is above user-defined threshold
if cloud_cover > settings['cloud_thresh']:
continue
# calculate a buffer around the reference shoreline (if any has been digitised)
im_ref_buffer = create_shoreline_buffer(cloud_mask.shape, georef, image_epsg,
pixel_size, settings)
# classify image in 4 classes (sand, whitewater, water, other) with NN classifier
im_classif, im_labels = classify_image_NN(im_ms, im_extra, cloud_mask,
min_beach_area_pixels, clf)
# if adjust_detection is True, let the user adjust the detected shoreline
if settings['adjust_detection']:
date = filenames[i][:19]
skip_image, shoreline, t_mndwi = adjust_detection(im_ms, cloud_mask, im_labels,
im_ref_buffer, image_epsg, georef,
settings, date, satname, buffer_size_pixels)
# if the user decides to skip the image, continue and do not save the mapped shoreline
if skip_image:
continue
# otherwise map the contours automatically with one of the two following functions:
# if there are pixels in the 'sand' class --> use find_wl_contours2 (enhanced)
# otherwise use find_wl_contours1 (traditional)
else:
try: # use try/except structure for long runs
if sum(sum(im_labels[:,:,0])) < 10 : # minimum number of sand pixels
# compute MNDWI image (SWIR-G)
im_mndwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
# find water contours on MNDWI grayscale image
contours_mwi, t_mndwi = find_wl_contours1(im_mndwi, cloud_mask, im_ref_buffer)
else:
# use classification to refine threshold and extract the sand/water interface
contours_mwi, t_mndwi = find_wl_contours2(im_ms, im_labels, cloud_mask,
buffer_size_pixels, im_ref_buffer)
except:
print('Could not map shoreline for this image: ' + filenames[i])
continue
# process the water contours into a shoreline
shoreline = process_shoreline(contours_mwi, cloud_mask, georef, image_epsg, settings)
# visualise the mapped shorelines, there are two options:
# if settings['check_detection'] = True, shows the detection to the user for accept/reject
# if settings['save_figure'] = True, saves a figure for each mapped shoreline
if settings['check_detection'] or settings['save_figure']:
date = filenames[i][:19]
if not settings['check_detection']:
plt.ioff() # turning interactive plotting off
skip_image = show_detection(im_ms, cloud_mask, im_labels, shoreline,
image_epsg, georef, settings, date, satname)
# if the user decides to skip the image, continue and do not save the mapped shoreline
if skip_image:
continue
# append to output variables
output_timestamp.append(metadata[satname]['dates'][i])
output_shoreline.append(shoreline)
output_filename.append(filenames[i])
output_cloudcover.append(cloud_cover)
output_geoaccuracy.append(metadata[satname]['acc_georef'][i])
output_idxkeep.append(i)
output_t_mndwi.append(t_mndwi)
# create dictionary of output
output[satname] = {
'dates': output_timestamp,
'shorelines': output_shoreline,
'filename': output_filename,
'cloud_cover': output_cloudcover,
'geoaccuracy': output_geoaccuracy,
'idx': output_idxkeep,
'MNDWI_threshold': output_t_mndwi,
}
print('')
# close figure window if still open
if plt.get_fignums():
plt.close()
# change the format to have one list sorted by date with all the shorelines (easier to use)
output = SDS_tools.merge_output(output)
# save output structure as output.pkl
filepath = os.path.join(filepath_data, sitename)
with open(os.path.join(filepath, sitename + '_output.pkl'), 'wb') as f:
pickle.dump(output, f)
return output
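# --- Hedged usage sketch (not part of the original module) ---
# Example of the settings dictionary expected by extract_shorelines(); every value
# below is hypothetical and should be adapted to the site being processed.
def _example_settings(inputs):
    return {
        'inputs': inputs,            # dict with sitename, filepath, polygon, dates, sat_list
        'cloud_thresh': 0.5,         # maximum accepted cloud fraction per image
        'cloud_mask_issue': False,
        'buffer_size': 150,          # metres around sandy pixels used in the thresholding
        'min_beach_area': 4500,      # minimum sand object area in m^2
        'min_length_sl': 200,        # minimum shoreline length in metres
        'sand_color': 'default',
        'output_epsg': 3857,         # hypothetical output spatial reference
        'check_detection': False,
        'save_figure': True,
        'adjust_detection': False,
    }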
###################################################################################################
# IMAGE CLASSIFICATION FUNCTIONS
###################################################################################################
def calculate_features(im_ms, cloud_mask, im_bool):
"""
Calculates features on the image that are used for the supervised classification.
The features include spectral normalized-difference indices and standard
deviation of the image for all the bands and indices.
KV WRL 2018
Arguments:
-----------
im_ms: np.array
RGB + downsampled NIR and SWIR
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
im_bool: np.array
2D array of boolean indicating where on the image to calculate the features
Returns:
-----------
features: np.array
matrix containing each feature (columns) calculated for all
the pixels (rows) indicated in im_bool
"""
# add all the multispectral bands
features = np.expand_dims(im_ms[im_bool,0],axis=1)
for k in range(1,im_ms.shape[2]):
feature = np.expand_dims(im_ms[im_bool,k],axis=1)
features = np.append(features, feature, axis=-1)
# NIR-G
im_NIRG = SDS_tools.nd_index(im_ms[:,:,3], im_ms[:,:,1], cloud_mask)
features = np.append(features, np.expand_dims(im_NIRG[im_bool],axis=1), axis=-1)
# SWIR-G
im_SWIRG = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
features = np.append(features, np.expand_dims(im_SWIRG[im_bool],axis=1), axis=-1)
# NIR-R
im_NIRR = SDS_tools.nd_index(im_ms[:,:,3], im_ms[:,:,2], cloud_mask)
features = np.append(features, np.expand_dims(im_NIRR[im_bool],axis=1), axis=-1)
# SWIR-NIR
im_SWIRNIR = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,3], cloud_mask)
features = np.append(features, np.expand_dims(im_SWIRNIR[im_bool],axis=1), axis=-1)
# B-R
im_BR = SDS_tools.nd_index(im_ms[:,:,0], im_ms[:,:,2], cloud_mask)
features = np.append(features, np.expand_dims(im_BR[im_bool],axis=1), axis=-1)
# calculate standard deviation of individual bands
for k in range(im_ms.shape[2]):
im_std = SDS_tools.image_std(im_ms[:,:,k], 1)
features = np.append(features, np.expand_dims(im_std[im_bool],axis=1), axis=-1)
# calculate standard deviation of the spectral indices
im_std = SDS_tools.image_std(im_NIRG, 1)
features = np.append(features, np.expand_dims(im_std[im_bool],axis=1), axis=-1)
im_std = SDS_tools.image_std(im_SWIRG, 1)
features = np.append(features, np.expand_dims(im_std[im_bool],axis=1), axis=-1)
im_std = SDS_tools.image_std(im_NIRR, 1)
features = np.append(features, np.expand_dims(im_std[im_bool],axis=1), axis=-1)
im_std = SDS_tools.image_std(im_SWIRNIR, 1)
features = np.append(features, np.expand_dims(im_std[im_bool],axis=1), axis=-1)
im_std = SDS_tools.image_std(im_BR, 1)
features = np.append(features, np.expand_dims(im_std[im_bool],axis=1), axis=-1)
return features
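# Hedged sketch (not in the original module): for a 5-band im_ms (B, G, R, NIR, SWIR)
# this produces 20 features per pixel (5 bands + 5 indices + 10 standard deviations).
def _example_feature_matrix(im_ms, cloud_mask):
    vec_features = calculate_features(im_ms, cloud_mask, np.ones(cloud_mask.shape).astype(bool))
    return vec_features.shape  # expected (n_pixels, 20) for a 5-band input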
def classify_image_NN(im_ms, im_extra, cloud_mask, min_beach_area, clf):
"""
Classifies every pixel in the image in one of 4 classes:
- sand --> label = 1
- whitewater (breaking waves and swash) --> label = 2
- water --> label = 3
- other (vegetation, buildings, rocks...) --> label = 0
The classifier is a Neural Network that is already trained.
KV WRL 2018
Arguments:
-----------
im_ms: np.array
Pansharpened RGB + downsampled NIR and SWIR
im_extra:
only used for Landsat 7 and 8 where im_extra is the panchromatic band
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
min_beach_area: int
minimum number of pixels that have to be connected to belong to the SAND class
clf: joblib object
pre-trained classifier
Returns:
-----------
im_classif: np.array
2D image containing labels
im_labels: np.array of booleans
3D image containing a boolean image for each class (im_classif == label)
"""
# calculate features
vec_features = calculate_features(im_ms, cloud_mask, np.ones(cloud_mask.shape).astype(bool))
vec_features[np.isnan(vec_features)] = 1e-9 # NaN values are created when std is too close to 0
# remove NaNs and cloudy pixels
vec_cloud = cloud_mask.reshape(cloud_mask.shape[0]*cloud_mask.shape[1])
vec_nan = np.any(np.isnan(vec_features), axis=1)
vec_inf = np.any(np.isinf(vec_features), axis=1)
vec_mask = np.logical_or(vec_cloud,np.logical_or(vec_nan,vec_inf))
vec_features = vec_features[~vec_mask, :]
# classify pixels
labels = clf.predict(vec_features)
# recompose image
vec_classif = np.nan*np.ones((cloud_mask.shape[0]*cloud_mask.shape[1]))
vec_classif[~vec_mask] = labels
im_classif = vec_classif.reshape((cloud_mask.shape[0], cloud_mask.shape[1]))
# create a stack of boolean images for each label
im_sand = im_classif == 1
im_swash = im_classif == 2
im_water = im_classif == 3
# remove small patches of sand or water that could be around the image (usually noise)
im_sand = morphology.remove_small_objects(im_sand, min_size=min_beach_area, connectivity=2)
im_water = morphology.remove_small_objects(im_water, min_size=min_beach_area, connectivity=2)
im_labels = np.stack((im_sand,im_swash,im_water), axis=-1)
return im_classif, im_labels
###################################################################################################
# CONTOUR MAPPING FUNCTIONS
###################################################################################################
def find_wl_contours1(im_ndwi, cloud_mask, im_ref_buffer):
"""
Traditional method for shoreline detection using a global threshold.
Finds the water line by thresholding the Normalized Difference Water Index
and applying the Marching Squares Algorithm to contour the iso-value
corresponding to the threshold.
KV WRL 2018
Arguments:
-----------
im_ndwi: np.ndarray
Image (2D) with the NDWI (water index)
cloud_mask: np.ndarray
2D cloud mask with True where cloud pixels are
im_ref_buffer: np.array
Binary image containing a buffer around the reference shoreline
Returns:
-----------
contours: list of np.arrays
contains the coordinates of the contour lines
t_otsu: float
Otsu threshold used to map the contours
"""
# reshape image to vector
vec_ndwi = im_ndwi.reshape(im_ndwi.shape[0] * im_ndwi.shape[1])
vec_mask = cloud_mask.reshape(cloud_mask.shape[0] * cloud_mask.shape[1])
vec = vec_ndwi[~vec_mask]
# apply otsu's threshold
vec = vec[~np.isnan(vec)]
t_otsu = filters.threshold_otsu(vec)
# use Marching Squares algorithm to detect contours on ndwi image
im_ndwi_buffer = np.copy(im_ndwi)
im_ndwi_buffer[~im_ref_buffer] = np.nan
contours = measure.find_contours(im_ndwi_buffer, t_otsu)
# remove contours that contain NaNs (due to cloud pixels in the contour)
contours = process_contours(contours)
return contours, t_otsu
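# Hedged illustration (not part of the original module): on a synthetic NDWI image
# with land (-0.5) on the left half and water (+0.5) on the right half, Otsu picks a
# threshold between the two modes and a single, roughly vertical contour is returned.
def _example_find_wl_contours1():
    im_ndwi = np.full((20, 20), -0.5)
    im_ndwi[:, 10:] = 0.5
    cloud_mask = np.zeros(im_ndwi.shape, dtype=bool)
    im_ref_buffer = np.ones(im_ndwi.shape, dtype=bool)
    return find_wl_contours1(im_ndwi, cloud_mask, im_ref_buffer)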
def find_wl_contours2(im_ms, im_labels, cloud_mask, buffer_size, im_ref_buffer):
"""
New robust method for extracting shorelines. Incorporates the classification
component to refine the threshold and make it specific to the sand/water interface.
KV WRL 2018
Arguments:
-----------
im_ms: np.array
RGB + downsampled NIR and SWIR
im_labels: np.array
3D image containing a boolean image for each class in the order (sand, swash, water)
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
buffer_size: int
size of the buffer around the sandy beach over which the pixels are considered in the
thresholding algorithm.
im_ref_buffer: np.array
binary image containing a buffer around the reference shoreline
Returns:
-----------
contours_mwi: list of np.arrays
contains the coordinates of the contour lines extracted from the
MNDWI (Modified Normalized Difference Water Index) image
t_mwi: float
Otsu sand/water threshold used to map the contours
"""
nrows = cloud_mask.shape[0]
ncols = cloud_mask.shape[1]
# calculate Normalized Difference Modified Water Index (SWIR - G)
im_mwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
# calculate Normalized Difference Modified Water Index (NIR - G)
im_wi = SDS_tools.nd_index(im_ms[:,:,3], im_ms[:,:,1], cloud_mask)
# stack indices together
im_ind = np.stack((im_wi, im_mwi), axis=-1)
vec_ind = im_ind.reshape(nrows*ncols,2)
# reshape labels into vectors
vec_sand = im_labels[:,:,0].reshape(ncols*nrows)
vec_water = im_labels[:,:,2].reshape(ncols*nrows)
# create a buffer around the sandy beach
se = morphology.disk(buffer_size)
im_buffer = morphology.binary_dilation(im_labels[:,:,0], se)
vec_buffer = im_buffer.reshape(nrows*ncols)
# select water/sand/swash pixels that are within the buffer
int_water = vec_ind[np.logical_and(vec_buffer,vec_water),:]
int_sand = vec_ind[np.logical_and(vec_buffer,vec_sand),:]
# make sure both classes have the same number of pixels before thresholding
if len(int_water) > 0 and len(int_sand) > 0:
if np.argmin([int_sand.shape[0],int_water.shape[0]]) == 1:
int_sand = int_sand[np.random.choice(int_sand.shape[0],int_water.shape[0], replace=False),:]
else:
int_water = int_water[np.random.choice(int_water.shape[0],int_sand.shape[0], replace=False),:]
# threshold the sand/water intensities
int_all = np.append(int_water,int_sand, axis=0)
t_mwi = filters.threshold_otsu(int_all[:,0])
t_wi = filters.threshold_otsu(int_all[:,1])
# find contour with MS algorithm
im_wi_buffer = np.copy(im_wi)
im_wi_buffer[~im_ref_buffer] = np.nan
im_mwi_buffer = np.copy(im_mwi)
im_mwi_buffer[~im_ref_buffer] = np.nan
contours_wi = measure.find_contours(im_wi_buffer, t_wi)
contours_mwi = measure.find_contours(im_mwi_buffer, t_mwi)
# remove contour points that are NaNs (around clouds)
contours_wi = process_contours(contours_wi)
contours_mwi = process_contours(contours_mwi)
# only return MNDWI contours and threshold
return contours_mwi, t_mwi
###################################################################################################
# SHORELINE PROCESSING FUNCTIONS
###################################################################################################
def create_shoreline_buffer(im_shape, georef, image_epsg, pixel_size, settings):
"""
Creates a buffer around the reference shoreline. The size of the buffer is
given by settings['max_dist_ref'].
KV WRL 2018
Arguments:
-----------
im_shape: np.array
size of the image (rows,columns)
georef: np.array
vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
image_epsg: int
spatial reference system of the image from which the contours were extracted
pixel_size: int
size of the pixel in metres (15 for Landsat, 10 for Sentinel-2)
settings: dict with the following keys
'output_epsg': int
output spatial reference system
'reference_shoreline': np.array
coordinates of the reference shoreline
'max_dist_ref': int
maximum distance from the reference shoreline in metres
Returns:
-----------
im_buffer: np.array
binary image, True where the buffer is, False otherwise
"""
# initialise the image buffer
im_buffer = np.ones(im_shape).astype(bool)
if 'reference_shoreline' in settings.keys():
# convert reference shoreline to pixel coordinates
ref_sl = settings['reference_shoreline']
ref_sl_conv = SDS_tools.convert_epsg(ref_sl, settings['output_epsg'],image_epsg)[:,:-1]
ref_sl_pix = SDS_tools.convert_world2pix(ref_sl_conv, georef)
ref_sl_pix_rounded = np.round(ref_sl_pix).astype(int)
# make sure that the pixel coordinates of the reference shoreline are inside the image
idx_row = np.logical_and(ref_sl_pix_rounded[:,0] > 0, ref_sl_pix_rounded[:,0] < im_shape[1])
idx_col = np.logical_and(ref_sl_pix_rounded[:,1] > 0, ref_sl_pix_rounded[:,1] < im_shape[0])
idx_inside = np.logical_and(idx_row, idx_col)
ref_sl_pix_rounded = ref_sl_pix_rounded[idx_inside,:]
# create binary image of the reference shoreline (1 where the shoreline is, 0 otherwise)
im_binary = np.zeros(im_shape)
for j in range(len(ref_sl_pix_rounded)):
im_binary[ref_sl_pix_rounded[j,1], ref_sl_pix_rounded[j,0]] = 1
im_binary = im_binary.astype(bool)
# dilate the binary image to create a buffer around the reference shoreline
max_dist_ref_pixels = np.ceil(settings['max_dist_ref']/pixel_size)
se = morphology.disk(max_dist_ref_pixels)
im_buffer = morphology.binary_dilation(im_binary, se)
return im_buffer
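# Hedged worked example (hypothetical values): with settings['max_dist_ref'] = 100 m
# and a 15 m Landsat pixel, the dilation uses a disk of radius ceil(100/15) = 7 pixels,
# so only pixels within roughly 105 m of the reference shoreline stay in the buffer.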
def process_contours(contours):
"""
Remove contours that contain NaNs, usually these are contours that are in contact
with clouds.
KV WRL 2020
Arguments:
-----------
contours: list of np.array
image contours as detected by the function skimage.measure.find_contours
Returns:
-----------
contours: list of np.array
processed image contours (only the ones that do not contain NaNs)
"""
# initialise variable
contours_nonans = []
# loop through contours and only keep the ones without NaNs
for k in range(len(contours)):
if np.any(np.isnan(contours[k])):
index_nan = np.where(np.isnan(contours[k]))[0]
contours_temp = np.delete(contours[k], index_nan, axis=0)
if len(contours_temp) > 1:
contours_nonans.append(contours_temp)
else:
contours_nonans.append(contours[k])
return contours_nonans
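# Hedged illustration (not in the original module): rows containing NaNs are removed
# from a contour, and the contour itself is discarded if fewer than 2 points remain.
def _example_process_contours():
    contours = [np.array([[0.0, 0.0], [np.nan, 1.0], [2.0, 2.0]])]
    return process_contours(contours)  # -> [array([[0., 0.], [2., 2.]])]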
def process_shoreline(contours, cloud_mask, georef, image_epsg, settings):
"""
Converts the contours from image coordinates to world coordinates.
This function also removes the contours that are too small to be a shoreline
(based on the parameter settings['min_length_sl'])
KV WRL 2018
Arguments:
-----------
contours: np.array or list of np.array
image contours as detected by the function find_contours
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
georef: np.array
vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
image_epsg: int
spatial reference system of the image from which the contours were extracted
settings: dict with the following keys
'output_epsg': int
output spatial reference system
'min_length_sl': float
minimum length of shoreline contour to be kept (in meters)
Returns:
-----------
shoreline: np.array
array of points with the X and Y coordinates of the shoreline
"""
# convert pixel coordinates to world coordinates
contours_world = SDS_tools.convert_pix2world(contours, georef)
# convert world coordinates to desired spatial reference system
contours_epsg = SDS_tools.convert_epsg(contours_world, image_epsg, settings['output_epsg'])
# remove contours that have a perimeter < min_length_sl (provided in settings dict)
# this enables to remove the very small contours that do not correspond to the shoreline
contours_long = []
for l, wl in enumerate(contours_epsg):
coords = [(wl[k,0], wl[k,1]) for k in range(len(wl))]
a = LineString(coords) # shapely LineString structure
if a.length >= settings['min_length_sl']:
contours_long.append(wl)
# format points into np.array
x_points = np.array([])
y_points = np.array([])
for k in range(len(contours_long)):
x_points = np.append(x_points,contours_long[k][:,0])
y_points = np.append(y_points,contours_long[k][:,1])
contours_array = np.transpose(np.array([x_points,y_points]))
shoreline = contours_array
# now remove any shoreline points that are attached to cloud pixels
if sum(sum(cloud_mask)) > 0:
# get the coordinates of the cloud pixels
idx_cloud = np.where(cloud_mask)
idx_cloud = np.array([(idx_cloud[0][k], idx_cloud[1][k]) for k in range(len(idx_cloud[0]))])
# convert to world coordinates and same epsg as the shoreline points
coords_cloud = SDS_tools.convert_epsg(SDS_tools.convert_pix2world(idx_cloud, georef),
image_epsg, settings['output_epsg'])[:,:-1]
# only keep the shoreline points that are at least 30m from any cloud pixel
idx_keep = np.ones(len(shoreline)).astype(bool)
for k in range(len(shoreline)):
if np.any(np.linalg.norm(shoreline[k,:] - coords_cloud, axis=1) < 30):
idx_keep[k] = False
shoreline = shoreline[idx_keep]
return shoreline
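# Hedged illustration (not part of the original module) of the minimum-length filter
# used above: a 10 m contour is dropped when settings['min_length_sl'] is 50 m.
def _example_min_length_filter():
    wl = LineString([(0, 0), (0, 10)])  # hypothetical contour, 10 m long
    return wl.length >= 50             # False -> this contour would be discarded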
###################################################################################################
# PLOTTING FUNCTIONS
###################################################################################################
def show_detection(im_ms, cloud_mask, im_labels, shoreline,image_epsg, georef,
settings, date, satname):
"""
Shows the detected shoreline to the user for visual quality control.
The user can accept/reject the detected shorelines by using keep/skip
buttons.
KV WRL 2018
Arguments:
-----------
im_ms: np.array
RGB + downsampled NIR and SWIR
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
im_labels: np.array
3D image containing a boolean image for each class in the order (sand, swash, water)
shoreline: np.array
array of points with the X and Y coordinates of the shoreline
image_epsg: int
spatial reference system of the image from which the contours were extracted
georef: np.array
vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
date: string
date at which the image was taken
satname: string
indicates the satname (L5,L7,L8 or S2)
settings: dict with the following keys
'inputs': dict
input parameters (sitename, filepath, polygon, dates, sat_list)
'output_epsg': int
output spatial reference system as EPSG code
'check_detection': bool
if True, lets user manually accept/reject the mapped shorelines
'save_figure': bool
if True, saves a -jpg file for each mapped shoreline
Returns:
-----------
skip_image: boolean
True if the user wants to skip the image, False otherwise
"""
sitename = settings['inputs']['sitename']
filepath_data = settings['inputs']['filepath']
# subfolder where the .jpg file is stored if the user accepts the shoreline detection
filepath = os.path.join(filepath_data, sitename, 'jpg_files', 'detection')
im_RGB = SDS_preprocess.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)
# compute classified image
im_class = np.copy(im_RGB)
cmap = cm.get_cmap('tab20c')
colorpalette = cmap(np.arange(0,13,1))
colours = np.zeros((3,4))
colours[0,:] = colorpalette[5]
colours[1,:] = np.array([204/255,1,1,1])
colours[2,:] = np.array([0,91/255,1,1])
for k in range(0,im_labels.shape[2]):
im_class[im_labels[:,:,k],0] = colours[k,0]
im_class[im_labels[:,:,k],1] = colours[k,1]
im_class[im_labels[:,:,k],2] = colours[k,2]
# compute MNDWI grayscale image
im_mwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
# transform world coordinates of shoreline into pixel coordinates
# use try/except in case there are no coordinates to be transformed (shoreline = [])
try:
sl_pix = SDS_tools.convert_world2pix(SDS_tools.convert_epsg(shoreline,
settings['output_epsg'],
image_epsg)[:,[0,1]], georef)
except:
# if try fails, just add nan into the shoreline vector so the next parts can still run
sl_pix = np.array([[np.nan, np.nan],[np.nan, np.nan]])
if plt.get_fignums():
# get open figure if it exists
fig = plt.gcf()
ax1 = fig.axes[0]
ax2 = fig.axes[1]
ax3 = fig.axes[2]
else:
# else create a new figure
fig = plt.figure()
fig.set_size_inches([18, 9])
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
# according to the image shape, decide whether it is better to have the images
# in vertical subplots or horizontal subplots
if im_RGB.shape[1] > 2.5*im_RGB.shape[0]:
# vertical subplots
gs = gridspec.GridSpec(3, 1)
gs.update(bottom=0.03, top=0.97, left=0.03, right=0.97)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0], sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(gs[2,0], sharex=ax1, sharey=ax1)
else:
# horizontal subplots
gs = gridspec.GridSpec(1, 3)
gs.update(bottom=0.05, top=0.95, left=0.05, right=0.95)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[0,1], sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(gs[0,2], sharex=ax1, sharey=ax1)
# change the color of nans to either black (0.0) or white (1.0) or somewhere in between
nan_color = 1.0
im_RGB = np.where(np.isnan(im_RGB), nan_color, im_RGB)
im_class = np.where(np.isnan(im_class), 1.0, im_class)
# create image 1 (RGB)
ax1.imshow(im_RGB)
ax1.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
ax1.axis('off')
ax1.set_title(sitename, fontweight='bold', fontsize=16)
# create image 2 (classification)
ax2.imshow(im_class)
ax2.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
ax2.axis('off')
orange_patch = mpatches.Patch(color=colours[0,:], label='sand')
white_patch = mpatches.Patch(color=colours[1,:], label='whitewater')
blue_patch = mpatches.Patch(color=colours[2,:], label='water')
black_line = mlines.Line2D([],[],color='k',linestyle='-', label='shoreline')
ax2.legend(handles=[orange_patch,white_patch,blue_patch, black_line],
bbox_to_anchor=(1, 0.5), fontsize=10)
ax2.set_title(date, fontweight='bold', fontsize=16)
# create image 3 (MNDWI)
ax3.imshow(im_mwi, cmap='bwr')
ax3.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
ax3.axis('off')
ax3.set_title(satname, fontweight='bold', fontsize=16)
# additional options
# ax1.set_anchor('W')
# ax2.set_anchor('W')
# cb = plt.colorbar()
# cb.ax.tick_params(labelsize=10)
# cb.set_label('MNDWI values')
# ax3.set_anchor('W')
# if check_detection is True, let user manually accept/reject the images
skip_image = False
if settings['check_detection']:
# set a key event to accept/reject the detections (see https://stackoverflow.com/a/15033071)
# this variable needs to be mutable so we can access it after the keypress event
key_event = {}
def press(event):
# store what key was pressed in the dictionary
key_event['pressed'] = event.key
# let the user press a key, right arrow to keep the image, left arrow to skip it
# to break the loop the user can press 'escape'
while True:
btn_keep = plt.text(1.1, 0.9, 'keep ⇨', size=12, ha="right", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
btn_skip = plt.text(-0.1, 0.9, '⇦ skip', size=12, ha="left", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
btn_esc = plt.text(0.5, 0, '<esc> to quit', size=12, ha="center", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
plt.draw()
fig.canvas.mpl_connect('key_press_event', press)
plt.waitforbuttonpress()
# after button is pressed, remove the buttons
btn_skip.remove()
btn_keep.remove()
btn_esc.remove()
# keep/skip image according to the pressed key, 'escape' to break the loop
if key_event.get('pressed') == 'right':
skip_image = False
break
elif key_event.get('pressed') == 'left':
skip_image = True
break
elif key_event.get('pressed') == 'escape':
plt.close()
raise StopIteration('User cancelled checking shoreline detection')
else:
plt.waitforbuttonpress()
# if save_figure is True, save a .jpg under /jpg_files/detection
if settings['save_figure'] and not skip_image:
fig.savefig(os.path.join(filepath, date + '_' + satname + '.jpg'), dpi=150)
# don't close the figure window, but remove all axes and settings, ready for next plot
for ax in fig.axes:
ax.clear()
return skip_image
def adjust_detection(im_ms, cloud_mask, im_labels, im_ref_buffer, image_epsg, georef,
settings, date, satname, buffer_size_pixels):
"""
Advanced version of show_detection where the user can adjust the detected
shoreline by clicking on the MNDWI threshold histogram.
KV WRL 2020
Arguments:
-----------
im_ms: np.array
RGB + downsampled NIR and SWIR
cloud_mask: np.array
2D cloud mask with True where cloud pixels are
im_labels: np.array
3D image containing a boolean image for each class in the order (sand, swash, water)
im_ref_buffer: np.array
Binary image containing a buffer around the reference shoreline
image_epsg: int
spatial reference system of the image from which the contours were extracted
georef: np.array
vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
date: string
date at which the image was taken
satname: string
indicates the satname (L5,L7,L8 or S2)
buffer_size_pixels: int
buffer_size converted to number of pixels
settings: dict with the following keys
'inputs': dict
input parameters (sitename, filepath, polygon, dates, sat_list)
'output_epsg': int
output spatial reference system as EPSG code
'save_figure': bool
if True, saves a -jpg file for each mapped shoreline
Returns:
-----------
skip_image: boolean
True if the user wants to skip the image, False otherwise
shoreline: np.array
array of points with the X and Y coordinates of the shoreline
t_mndwi: float
value of the MNDWI threshold used to map the shoreline
"""
sitename = settings['inputs']['sitename']
filepath_data = settings['inputs']['filepath']
# subfolder where the .jpg file is stored if the user accepts the shoreline detection
filepath = os.path.join(filepath_data, sitename, 'jpg_files', 'detection')
# format date
date_str = datetime.strptime(date,'%Y-%m-%d-%H-%M-%S').strftime('%Y-%m-%d %H:%M:%S')
im_RGB = SDS_preprocess.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)
# compute classified image
im_class = np.copy(im_RGB)
cmap = cm.get_cmap('tab20c')
colorpalette = cmap(np.arange(0,13,1))
colours = np.zeros((3,4))
colours[0,:] = colorpalette[5]
colours[1,:] = np.array([204/255,1,1,1])
colours[2,:] = np.array([0,91/255,1,1])
for k in range(0,im_labels.shape[2]):
im_class[im_labels[:,:,k],0] = colours[k,0]
im_class[im_labels[:,:,k],1] = colours[k,1]
im_class[im_labels[:,:,k],2] = colours[k,2]
# compute MNDWI grayscale image
im_mndwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
# buffer MNDWI using reference shoreline
im_mndwi_buffer = np.copy(im_mndwi)
im_mndwi_buffer[~im_ref_buffer] = np.nan
# get MNDWI pixel intensity in each class (for histogram plot)
int_sand = im_mndwi[im_labels[:,:,0]]
int_ww = im_mndwi[im_labels[:,:,1]]
int_water = im_mndwi[im_labels[:,:,2]]
labels_other = np.logical_and(np.logical_and(~im_labels[:,:,0],~im_labels[:,:,1]),~im_labels[:,:,2])
int_other = im_mndwi[labels_other]
# create figure
if plt.get_fignums():
# if it exists, open the figure
fig = plt.gcf()
ax1 = fig.axes[0]
ax2 = fig.axes[1]
ax3 = fig.axes[2]
ax4 = fig.axes[3]
else:
# else create a new figure
fig = plt.figure()
fig.set_size_inches([18, 9])
mng = plt.get_current_fig_manager()
mng.window.showMaximized()
gs = gridspec.GridSpec(2, 3, height_ratios=[4,1])
gs.update(bottom=0.05, top=0.95, left=0.03, right=0.97)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[0,1], sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(gs[0,2], sharex=ax1, sharey=ax1)
ax4 = fig.add_subplot(gs[1,:])
##########################################################################
# to do: rotate image if too wide
##########################################################################
# change the color of nans to either black (0.0) or white (1.0) or somewhere in between
nan_color = 1.0
im_RGB = np.where(np.isnan(im_RGB), nan_color, im_RGB)
im_class = np.where(np.isnan(im_class), 1.0, im_class)
# plot image 1 (RGB)
ax1.imshow(im_RGB)
ax1.axis('off')
ax1.set_title('%s - %s'%(sitename, satname), fontsize=12)
# plot image 2 (classification)
ax2.imshow(im_class)
ax2.axis('off')
orange_patch = mpatches.Patch(color=colours[0,:], label='sand')
white_patch = mpatches.Patch(color=colours[1,:], label='whitewater')
blue_patch = mpatches.Patch(color=colours[2,:], label='water')
black_line = mlines.Line2D([],[],color='k',linestyle='-', label='shoreline')
ax2.legend(handles=[orange_patch,white_patch,blue_patch, black_line],
bbox_to_anchor=(1.1, 0.5), fontsize=10)
ax2.set_title(date_str, fontsize=12)
# plot image 3 (MNDWI)
ax3.imshow(im_mndwi, cmap='bwr')
ax3.axis('off')
ax3.set_title('MNDWI', fontsize=12)
# plot histogram of MNDWI values
binwidth = 0.01
ax4.set_facecolor('0.75')
ax4.yaxis.grid(color='w', linestyle='--', linewidth=0.5)
ax4.set(ylabel='PDF',yticklabels=[], xlim=[-1,1])
if len(int_sand) > 0 and sum(~np.isnan(int_sand)) > 0:
bins = np.arange(np.nanmin(int_sand), np.nanmax(int_sand) + binwidth, binwidth)
ax4.hist(int_sand, bins=bins, density=True, color=colours[0,:], label='sand')
if len(int_ww) > 0 and sum(~np.isnan(int_ww)) > 0:
bins = np.arange(np.nanmin(int_ww), np.nanmax(int_ww) + binwidth, binwidth)
ax4.hist(int_ww, bins=bins, density=True, color=colours[1,:], label='whitewater', alpha=0.75)
if len(int_water) > 0 and sum(~np.isnan(int_water)) > 0:
bins = np.arange(np.nanmin(int_water), np.nanmax(int_water) + binwidth, binwidth)
ax4.hist(int_water, bins=bins, density=True, color=colours[2,:], label='water', alpha=0.75)
if len(int_other) > 0 and sum(~np.isnan(int_other)) > 0:
bins = np.arange(np.nanmin(int_other), np.nanmax(int_other) + binwidth, binwidth)
ax4.hist(int_other, bins=bins, density=True, color='C4', label='other', alpha=0.5)
# automatically map the shoreline based on the classifier if enough sand pixels
try:
if sum(sum(im_labels[:,:,0])) > 10:
# use classification to refine threshold and extract the sand/water interface
contours_mndwi, t_mndwi = find_wl_contours2(im_ms, im_labels, cloud_mask,
buffer_size_pixels, im_ref_buffer)
else:
# find water contours on MNDWI grayscale image
contours_mndwi, t_mndwi = find_wl_contours1(im_mndwi, cloud_mask, im_ref_buffer)
except:
print('Could not map shoreline so image was skipped')
# clear axes and return skip_image=True, so that image is skipped above
for ax in fig.axes:
ax.clear()
return True,[],[]
# process the water contours into a shoreline
shoreline = process_shoreline(contours_mndwi, cloud_mask, georef, image_epsg, settings)
# convert shoreline to pixels
if len(shoreline) > 0:
sl_pix = SDS_tools.convert_world2pix(SDS_tools.convert_epsg(shoreline,
settings['output_epsg'],
image_epsg)[:,[0,1]], georef)
else: sl_pix = np.array([[np.nan, np.nan],[np.nan, np.nan]])
# plot the shoreline on the images
sl_plot1 = ax1.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
sl_plot2 = ax2.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
sl_plot3 = ax3.plot(sl_pix[:,0], sl_pix[:,1], 'k.', markersize=3)
t_line = ax4.axvline(x=t_mndwi,ls='--', c='k', lw=1.5, label='threshold')
ax4.legend(loc=1)
plt.draw() # to update the plot
# adjust the threshold manually by letting the user change the threshold
ax4.set_title('Click on the plot below to change the location of the threshold and adjust the shoreline detection. When finished, press <Enter>')
while True:
# let the user click on the threshold plot
pt = ginput(n=1, show_clicks=True, timeout=-1)
# if a point was clicked
if len(pt) > 0:
# if user clicked somewhere wrong and value is not between -1 and 1
if np.abs(pt[0][0]) >= 1: continue
# update the threshold value
t_mndwi = pt[0][0]
# update the plot
t_line.set_xdata([t_mndwi,t_mndwi])
# map contours with new threshold
contours = measure.find_contours(im_mndwi_buffer, t_mndwi)
# remove contours that contain NaNs (due to cloud pixels in the contour)
contours = process_contours(contours)
# process the water contours into a shoreline
shoreline = process_shoreline(contours, cloud_mask, georef, image_epsg, settings)
# convert shoreline to pixels
if len(shoreline) > 0:
sl_pix = SDS_tools.convert_world2pix(SDS_tools.convert_epsg(shoreline,
settings['output_epsg'],
image_epsg)[:,[0,1]], georef)
else: sl_pix = np.array([[np.nan, np.nan],[np.nan, np.nan]])
# update the plotted shorelines
sl_plot1[0].set_data([sl_pix[:,0], sl_pix[:,1]])
sl_plot2[0].set_data([sl_pix[:,0], sl_pix[:,1]])
sl_plot3[0].set_data([sl_pix[:,0], sl_pix[:,1]])
fig.canvas.draw_idle()
else:
ax4.set_title('MNDWI pixel intensities and threshold')
break
# let user manually accept/reject the image
skip_image = False
# set a key event to accept/reject the detections (see https://stackoverflow.com/a/15033071)
# this variable needs to be mutable so we can access it after the keypress event
key_event = {}
def press(event):
# store what key was pressed in the dictionary
key_event['pressed'] = event.key
# let the user press a key, right arrow to keep the image, left arrow to skip it
# to break the loop the user can press 'escape'
while True:
btn_keep = plt.text(1.1, 0.9, 'keep ⇨', size=12, ha="right", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
btn_skip = plt.text(-0.1, 0.9, '⇦ skip', size=12, ha="left", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
btn_esc = plt.text(0.5, 0, '<esc> to quit', size=12, ha="center", va="top",
transform=ax1.transAxes,
bbox=dict(boxstyle="square", ec='k',fc='w'))
plt.draw()
fig.canvas.mpl_connect('key_press_event', press)
plt.waitforbuttonpress()
# after button is pressed, remove the buttons
btn_skip.remove()
btn_keep.remove()
btn_esc.remove()
# keep/skip image according to the pressed key, 'escape' to break the loop
if key_event.get('pressed') == 'right':
skip_image = False
break
elif key_event.get('pressed') == 'left':
skip_image = True
break
elif key_event.get('pressed') == 'escape':
plt.close()
raise StopIteration('User cancelled checking shoreline detection')
else:
plt.waitforbuttonpress()
# if save_figure is True, save a .jpg under /jpg_files/detection
if settings['save_figure'] and not skip_image:
fig.savefig(os.path.join(filepath, date + '_' + satname + '.jpg'), dpi=150)
# don't close the figure window, but remove all axes and settings, ready for next plot
for ax in fig.axes:
ax.clear()
return skip_image, shoreline, t_mndwi
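# --- Editor's illustration: added sketch, not part of the original module ---
# The accept/reject loop above relies on a small matplotlib pattern: a mutable
# dict captured by the key-press callback records which key was pressed, and
# plt.waitforbuttonpress() blocks until the user acts. A stripped-down,
# hypothetical version of that pattern:
def _wait_for_keypress(fig):
    key_event = {}
    def press(event):
        # the dict is mutable, so the nested handler can store the pressed key
        key_event['pressed'] = event.key
    cid = fig.canvas.mpl_connect('key_press_event', press)
    plt.waitforbuttonpress()
    fig.canvas.mpl_disconnect(cid)
    return key_event.get('pressed')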
|
[
"coastsat.SDS_tools.get_filepath",
"skimage.filters.threshold_otsu",
"numpy.array",
"coastsat.SDS_preprocess.preprocess_single",
"coastsat.SDS_tools.merge_output",
"numpy.linalg.norm",
"numpy.nanmin",
"matplotlib.lines.Line2D",
"numpy.arange",
"skimage.morphology.binary_dilation",
"os.path.exists",
"matplotlib.pyplot.waitforbuttonpress",
"coastsat.SDS_preprocess.rescale_image_intensity",
"numpy.where",
"numpy.delete",
"coastsat.SDS_tools.convert_world2pix",
"matplotlib.pyplot.close",
"numpy.stack",
"matplotlib.gridspec.GridSpec",
"numpy.nanmax",
"numpy.argmin",
"numpy.isinf",
"matplotlib.cm.get_cmap",
"numpy.round",
"numpy.abs",
"numpy.ceil",
"numpy.ones",
"numpy.random.choice",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.ioff",
"numpy.logical_xor",
"coastsat.SDS_tools.get_filenames",
"shapely.geometry.LineString",
"numpy.isnan",
"matplotlib.patches.Patch",
"skimage.measure.find_contours",
"matplotlib.pyplot.draw",
"skimage.morphology.disk",
"numpy.copy",
"skimage.morphology.remove_small_objects",
"coastsat.SDS_tools.convert_pix2world",
"pickle.dump",
"os.makedirs",
"numpy.logical_and",
"coastsat.SDS_tools.image_std",
"datetime.datetime.strptime",
"matplotlib.pyplot.get_fignums",
"os.path.join",
"numpy.logical_or",
"os.getcwd",
"coastsat.SDS_tools.nd_index",
"numpy.append",
"coastsat.SDS_tools.convert_epsg",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.expand_dims",
"matplotlib.pyplot.get_current_fig_manager",
"numpy.seterr",
"pylab.ginput"
] |
[((874, 897), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (883, 897), True, 'import numpy as np\n'), ((3131, 3194), 'os.path.join', 'os.path.join', (['filepath_data', 'sitename', '"""jpg_files"""', '"""detection"""'], {}), "(filepath_data, sitename, 'jpg_files', 'detection')\n", (3143, 3194), False, 'import os\n'), ((3307, 3323), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3316, 3323), True, 'import matplotlib.pyplot as plt\n'), ((11121, 11138), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (11136, 11138), True, 'import matplotlib.pyplot as plt\n'), ((11270, 11300), 'coastsat.SDS_tools.merge_output', 'SDS_tools.merge_output', (['output'], {}), '(output)\n', (11292, 11300), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((11362, 11399), 'os.path.join', 'os.path.join', (['filepath_data', 'sitename'], {}), '(filepath_data, sitename)\n', (11374, 11399), False, 'import os\n'), ((12581, 12622), 'numpy.expand_dims', 'np.expand_dims', (['im_ms[im_bool, 0]'], {'axis': '(1)'}), '(im_ms[im_bool, 0], axis=1)\n', (12595, 12622), True, 'import numpy as np\n'), ((12800, 12862), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 3]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 3], im_ms[:, :, 1], cloud_mask)\n', (12818, 12862), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((12972, 13034), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (12990, 13034), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((13143, 13205), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 3]', 'im_ms[:, :, 2]', 'cloud_mask'], {}), '(im_ms[:, :, 3], im_ms[:, :, 2], cloud_mask)\n', (13161, 13205), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((13319, 13381), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 3]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 3], cloud_mask)\n', (13337, 13381), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((13488, 13550), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 0]', 'im_ms[:, :, 2]', 'cloud_mask'], {}), '(im_ms[:, :, 0], im_ms[:, :, 2], cloud_mask)\n', (13506, 13550), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((13936, 13967), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_NIRG', '(1)'], {}), '(im_NIRG, 1)\n', (13955, 13967), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14065, 14097), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_SWIRG', '(1)'], {}), '(im_SWIRG, 1)\n', (14084, 14097), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14195, 14226), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_NIRR', '(1)'], {}), '(im_NIRR, 1)\n', (14214, 14226), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14324, 14358), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_SWIRNIR', '(1)'], {}), '(im_SWIRNIR, 1)\n', (14343, 14358), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14456, 14485), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_BR', '(1)'], {}), '(im_BR, 1)\n', (14475, 14485), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((16849, 16934), 'skimage.morphology.remove_small_objects', 'morphology.remove_small_objects', (['im_sand'], {'min_size': 'min_beach_area', 
'connectivity': '(2)'}), '(im_sand, min_size=min_beach_area,\n connectivity=2)\n', (16880, 16934), True, 'import skimage.morphology as morphology\n'), ((16946, 17032), 'skimage.morphology.remove_small_objects', 'morphology.remove_small_objects', (['im_water'], {'min_size': 'min_beach_area', 'connectivity': '(2)'}), '(im_water, min_size=min_beach_area,\n connectivity=2)\n', (16977, 17032), True, 'import skimage.morphology as morphology\n'), ((17046, 17094), 'numpy.stack', 'np.stack', (['(im_sand, im_swash, im_water)'], {'axis': '(-1)'}), '((im_sand, im_swash, im_water), axis=-1)\n', (17054, 17094), True, 'import numpy as np\n'), ((18461, 18488), 'skimage.filters.threshold_otsu', 'filters.threshold_otsu', (['vec'], {}), '(vec)\n', (18483, 18488), True, 'import skimage.filters as filters\n'), ((18580, 18596), 'numpy.copy', 'np.copy', (['im_ndwi'], {}), '(im_ndwi)\n', (18587, 18596), True, 'import numpy as np\n'), ((18656, 18701), 'skimage.measure.find_contours', 'measure.find_contours', (['im_ndwi_buffer', 't_otsu'], {}), '(im_ndwi_buffer, t_otsu)\n', (18677, 18701), True, 'import skimage.measure as measure\n'), ((20102, 20164), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (20120, 20164), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((20242, 20304), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 3]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 3], im_ms[:, :, 1], cloud_mask)\n', (20260, 20304), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((20343, 20377), 'numpy.stack', 'np.stack', (['(im_wi, im_mwi)'], {'axis': '(-1)'}), '((im_wi, im_mwi), axis=-1)\n', (20351, 20377), True, 'import numpy as np\n'), ((20619, 20647), 'skimage.morphology.disk', 'morphology.disk', (['buffer_size'], {}), '(buffer_size)\n', (20634, 20647), True, 'import skimage.morphology as morphology\n'), ((20664, 20714), 'skimage.morphology.binary_dilation', 'morphology.binary_dilation', (['im_labels[:, :, 0]', 'se'], {}), '(im_labels[:, :, 0], se)\n', (20690, 20714), True, 'import skimage.morphology as morphology\n'), ((21433, 21471), 'numpy.append', 'np.append', (['int_water', 'int_sand'], {'axis': '(0)'}), '(int_water, int_sand, axis=0)\n', (21442, 21471), True, 'import numpy as np\n'), ((21483, 21520), 'skimage.filters.threshold_otsu', 'filters.threshold_otsu', (['int_all[:, 0]'], {}), '(int_all[:, 0])\n', (21505, 21520), True, 'import skimage.filters as filters\n'), ((21531, 21568), 'skimage.filters.threshold_otsu', 'filters.threshold_otsu', (['int_all[:, 1]'], {}), '(int_all[:, 1])\n', (21553, 21568), True, 'import skimage.filters as filters\n'), ((21625, 21639), 'numpy.copy', 'np.copy', (['im_wi'], {}), '(im_wi)\n', (21632, 21639), True, 'import numpy as np\n'), ((21702, 21717), 'numpy.copy', 'np.copy', (['im_mwi'], {}), '(im_mwi)\n', (21709, 21717), True, 'import numpy as np\n'), ((21779, 21820), 'skimage.measure.find_contours', 'measure.find_contours', (['im_wi_buffer', 't_wi'], {}), '(im_wi_buffer, t_wi)\n', (21800, 21820), True, 'import skimage.measure as measure\n'), ((21840, 21883), 'skimage.measure.find_contours', 'measure.find_contours', (['im_mwi_buffer', 't_mwi'], {}), '(im_mwi_buffer, t_mwi)\n', (21861, 21883), True, 'import skimage.measure as measure\n'), ((27022, 27067), 'coastsat.SDS_tools.convert_pix2world', 'SDS_tools.convert_pix2world', (['contours', 'georef'], {}), '(contours, georef)\n', (27049, 27067), False, 'from coastsat 
import SDS_tools, SDS_preprocess\n'), ((27156, 27231), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['contours_world', 'image_epsg', "settings['output_epsg']"], {}), "(contours_world, image_epsg, settings['output_epsg'])\n", (27178, 27231), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((27739, 27751), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (27747, 27751), True, 'import numpy as np\n'), ((27767, 27779), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (27775, 27779), True, 'import numpy as np\n'), ((30959, 31022), 'os.path.join', 'os.path.join', (['filepath_data', 'sitename', '"""jpg_files"""', '"""detection"""'], {}), "(filepath_data, sitename, 'jpg_files', 'detection')\n", (30971, 31022), False, 'import os\n'), ((31037, 31122), 'coastsat.SDS_preprocess.rescale_image_intensity', 'SDS_preprocess.rescale_image_intensity', (['im_ms[:, :, [2, 1, 0]]', 'cloud_mask', '(99.9)'], {}), '(im_ms[:, :, [2, 1, 0]], cloud_mask, 99.9\n )\n', (31075, 31122), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((31161, 31176), 'numpy.copy', 'np.copy', (['im_RGB'], {}), '(im_RGB)\n', (31168, 31176), True, 'import numpy as np\n'), ((31188, 31209), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""tab20c"""'], {}), "('tab20c')\n", (31199, 31209), True, 'import matplotlib.cm as cm\n'), ((31267, 31283), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (31275, 31283), True, 'import numpy as np\n'), ((31337, 31367), 'numpy.array', 'np.array', (['[204 / 255, 1, 1, 1]'], {}), '([204 / 255, 1, 1, 1])\n', (31345, 31367), True, 'import numpy as np\n'), ((31382, 31411), 'numpy.array', 'np.array', (['[0, 91 / 255, 1, 1]'], {}), '([0, 91 / 255, 1, 1])\n', (31390, 31411), True, 'import numpy as np\n'), ((31655, 31717), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (31673, 31717), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((32331, 32348), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (32346, 32348), True, 'import matplotlib.pyplot as plt\n'), ((34126, 34175), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[0, :]', 'label': '"""sand"""'}), "(color=colours[0, :], label='sand')\n", (34140, 34175), True, 'import matplotlib.patches as mpatches\n'), ((34193, 34248), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[1, :]', 'label': '"""whitewater"""'}), "(color=colours[1, :], label='whitewater')\n", (34207, 34248), True, 'import matplotlib.patches as mpatches\n'), ((34265, 34315), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[2, :]', 'label': '"""water"""'}), "(color=colours[2, :], label='water')\n", (34279, 34315), True, 'import matplotlib.patches as mpatches\n'), ((34332, 34398), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""k"""', 'linestyle': '"""-"""', 'label': '"""shoreline"""'}), "([], [], color='k', linestyle='-', label='shoreline')\n", (34345, 34398), True, 'import matplotlib.lines as mlines\n'), ((39381, 39444), 'os.path.join', 'os.path.join', (['filepath_data', 'sitename', '"""jpg_files"""', '"""detection"""'], {}), "(filepath_data, sitename, 'jpg_files', 'detection')\n", (39393, 39444), False, 'import os\n'), ((39566, 39651), 'coastsat.SDS_preprocess.rescale_image_intensity', 'SDS_preprocess.rescale_image_intensity', (['im_ms[:, :, [2, 1, 0]]', 'cloud_mask', '(99.9)'], {}), '(im_ms[:, :, [2, 1, 0]], cloud_mask, 
99.9\n )\n', (39604, 39651), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((39690, 39705), 'numpy.copy', 'np.copy', (['im_RGB'], {}), '(im_RGB)\n', (39697, 39705), True, 'import numpy as np\n'), ((39717, 39738), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""tab20c"""'], {}), "('tab20c')\n", (39728, 39738), True, 'import matplotlib.cm as cm\n'), ((39796, 39812), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (39804, 39812), True, 'import numpy as np\n'), ((39866, 39896), 'numpy.array', 'np.array', (['[204 / 255, 1, 1, 1]'], {}), '([204 / 255, 1, 1, 1])\n', (39874, 39896), True, 'import numpy as np\n'), ((39911, 39940), 'numpy.array', 'np.array', (['[0, 91 / 255, 1, 1]'], {}), '([0, 91 / 255, 1, 1])\n', (39919, 39940), True, 'import numpy as np\n'), ((40186, 40248), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (40204, 40248), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((40312, 40329), 'numpy.copy', 'np.copy', (['im_mndwi'], {}), '(im_mndwi)\n', (40319, 40329), True, 'import numpy as np\n'), ((40744, 40761), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (40759, 40761), True, 'import matplotlib.pyplot as plt\n'), ((42135, 42184), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[0, :]', 'label': '"""sand"""'}), "(color=colours[0, :], label='sand')\n", (42149, 42184), True, 'import matplotlib.patches as mpatches\n'), ((42202, 42257), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[1, :]', 'label': '"""whitewater"""'}), "(color=colours[1, :], label='whitewater')\n", (42216, 42257), True, 'import matplotlib.patches as mpatches\n'), ((42274, 42324), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[2, :]', 'label': '"""water"""'}), "(color=colours[2, :], label='water')\n", (42288, 42324), True, 'import matplotlib.patches as mpatches\n'), ((42341, 42407), 'matplotlib.lines.Line2D', 'mlines.Line2D', (['[]', '[]'], {'color': '"""k"""', 'linestyle': '"""-"""', 'label': '"""shoreline"""'}), "([], [], color='k', linestyle='-', label='shoreline')\n", (42354, 42407), True, 'import matplotlib.lines as mlines\n'), ((45586, 45596), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (45594, 45596), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2954), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2952, 2954), False, 'import os\n'), ((3206, 3234), 'os.path.exists', 'os.path.exists', (['filepath_jpg'], {}), '(filepath_jpg)\n', (3220, 3234), False, 'import os\n'), ((3248, 3273), 'os.makedirs', 'os.makedirs', (['filepath_jpg'], {}), '(filepath_jpg)\n', (3259, 3273), False, 'import os\n'), ((3470, 3521), 'coastsat.SDS_tools.get_filepath', 'SDS_tools.get_filepath', (["settings['inputs']", 'satname'], {}), "(settings['inputs'], satname)\n", (3492, 3521), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((5135, 5180), 'numpy.ceil', 'np.ceil', (["(settings['buffer_size'] / pixel_size)"], {}), "(settings['buffer_size'] / pixel_size)\n", (5142, 5180), True, 'import numpy as np\n'), ((5211, 5264), 'numpy.ceil', 'np.ceil', (["(settings['min_beach_area'] / pixel_size ** 2)"], {}), "(settings['min_beach_area'] / pixel_size ** 2)\n", (5218, 5264), True, 'import numpy as np\n'), ((11148, 11159), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11157, 11159), True, 'import matplotlib.pyplot as plt\n'), ((11484, 11506), 'pickle.dump', 'pickle.dump', 
(['output', 'f'], {}), '(output, f)\n', (11495, 11506), False, 'import pickle\n'), ((12677, 12718), 'numpy.expand_dims', 'np.expand_dims', (['im_ms[im_bool, k]'], {'axis': '(1)'}), '(im_ms[im_bool, k], axis=1)\n', (12691, 12718), True, 'import numpy as np\n'), ((12736, 12773), 'numpy.append', 'np.append', (['features', 'feature'], {'axis': '(-1)'}), '(features, feature, axis=-1)\n', (12745, 12773), True, 'import numpy as np\n'), ((12894, 12934), 'numpy.expand_dims', 'np.expand_dims', (['im_NIRG[im_bool]'], {'axis': '(1)'}), '(im_NIRG[im_bool], axis=1)\n', (12908, 12934), True, 'import numpy as np\n'), ((13066, 13107), 'numpy.expand_dims', 'np.expand_dims', (['im_SWIRG[im_bool]'], {'axis': '(1)'}), '(im_SWIRG[im_bool], axis=1)\n', (13080, 13107), True, 'import numpy as np\n'), ((13237, 13277), 'numpy.expand_dims', 'np.expand_dims', (['im_NIRR[im_bool]'], {'axis': '(1)'}), '(im_NIRR[im_bool], axis=1)\n', (13251, 13277), True, 'import numpy as np\n'), ((13413, 13456), 'numpy.expand_dims', 'np.expand_dims', (['im_SWIRNIR[im_bool]'], {'axis': '(1)'}), '(im_SWIRNIR[im_bool], axis=1)\n', (13427, 13456), True, 'import numpy as np\n'), ((13582, 13620), 'numpy.expand_dims', 'np.expand_dims', (['im_BR[im_bool]'], {'axis': '(1)'}), '(im_BR[im_bool], axis=1)\n', (13596, 13620), True, 'import numpy as np\n'), ((13739, 13777), 'coastsat.SDS_tools.image_std', 'SDS_tools.image_std', (['im_ms[:, :, k]', '(1)'], {}), '(im_ms[:, :, k], 1)\n', (13758, 13777), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((14003, 14042), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14017, 14042), True, 'import numpy as np\n'), ((14133, 14172), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14147, 14172), True, 'import numpy as np\n'), ((14262, 14301), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14276, 14301), True, 'import numpy as np\n'), ((14394, 14433), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14408, 14433), True, 'import numpy as np\n'), ((14521, 14560), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (14535, 14560), True, 'import numpy as np\n'), ((15897, 15919), 'numpy.isnan', 'np.isnan', (['vec_features'], {}), '(vec_features)\n', (15905, 15919), True, 'import numpy as np\n'), ((16113, 16135), 'numpy.isnan', 'np.isnan', (['vec_features'], {}), '(vec_features)\n', (16121, 16135), True, 'import numpy as np\n'), ((16166, 16188), 'numpy.isinf', 'np.isinf', (['vec_features'], {}), '(vec_features)\n', (16174, 16188), True, 'import numpy as np\n'), ((16241, 16272), 'numpy.logical_or', 'np.logical_or', (['vec_nan', 'vec_inf'], {}), '(vec_nan, vec_inf)\n', (16254, 16272), True, 'import numpy as np\n'), ((16429, 16479), 'numpy.ones', 'np.ones', (['(cloud_mask.shape[0] * cloud_mask.shape[1])'], {}), '(cloud_mask.shape[0] * cloud_mask.shape[1])\n', (16436, 16479), True, 'import numpy as np\n'), ((23754, 23802), 'coastsat.SDS_tools.convert_world2pix', 'SDS_tools.convert_world2pix', (['ref_sl_conv', 'georef'], {}), '(ref_sl_conv, georef)\n', (23781, 23802), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((23979, 24067), 'numpy.logical_and', 'np.logical_and', (['(ref_sl_pix_rounded[:, 0] > 0)', '(ref_sl_pix_rounded[:, 0] < im_shape[1])'], {}), '(ref_sl_pix_rounded[:, 0] > 0, 
ref_sl_pix_rounded[:, 0] <\n im_shape[1])\n', (23993, 24067), True, 'import numpy as np\n'), ((24080, 24168), 'numpy.logical_and', 'np.logical_and', (['(ref_sl_pix_rounded[:, 1] > 0)', '(ref_sl_pix_rounded[:, 1] < im_shape[0])'], {}), '(ref_sl_pix_rounded[:, 1] > 0, ref_sl_pix_rounded[:, 1] <\n im_shape[0])\n', (24094, 24168), True, 'import numpy as np\n'), ((24184, 24216), 'numpy.logical_and', 'np.logical_and', (['idx_row', 'idx_col'], {}), '(idx_row, idx_col)\n', (24198, 24216), True, 'import numpy as np\n'), ((24396, 24414), 'numpy.zeros', 'np.zeros', (['im_shape'], {}), '(im_shape)\n', (24404, 24414), True, 'import numpy as np\n'), ((24698, 24744), 'numpy.ceil', 'np.ceil', (["(settings['max_dist_ref'] / pixel_size)"], {}), "(settings['max_dist_ref'] / pixel_size)\n", (24705, 24744), True, 'import numpy as np\n'), ((24756, 24792), 'skimage.morphology.disk', 'morphology.disk', (['max_dist_ref_pixels'], {}), '(max_dist_ref_pixels)\n', (24771, 24792), True, 'import skimage.morphology as morphology\n'), ((24813, 24854), 'skimage.morphology.binary_dilation', 'morphology.binary_dilation', (['im_binary', 'se'], {}), '(im_binary, se)\n', (24839, 24854), True, 'import skimage.morphology as morphology\n'), ((27553, 27571), 'shapely.geometry.LineString', 'LineString', (['coords'], {}), '(coords)\n', (27563, 27571), False, 'from shapely.geometry import LineString\n'), ((27839, 27882), 'numpy.append', 'np.append', (['x_points', 'contours_long[k][:, 0]'], {}), '(x_points, contours_long[k][:, 0])\n', (27848, 27882), True, 'import numpy as np\n'), ((27900, 27943), 'numpy.append', 'np.append', (['y_points', 'contours_long[k][:, 1]'], {}), '(y_points, contours_long[k][:, 1])\n', (27909, 27943), True, 'import numpy as np\n'), ((27976, 28006), 'numpy.array', 'np.array', (['[x_points, y_points]'], {}), '([x_points, y_points])\n', (27984, 28006), True, 'import numpy as np\n'), ((28215, 28235), 'numpy.where', 'np.where', (['cloud_mask'], {}), '(cloud_mask)\n', (28223, 28235), True, 'import numpy as np\n'), ((31234, 31253), 'numpy.arange', 'np.arange', (['(0)', '(13)', '(1)'], {}), '(0, 13, 1)\n', (31243, 31253), True, 'import numpy as np\n'), ((32411, 32420), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (32418, 32420), True, 'import matplotlib.pyplot as plt\n'), ((32570, 32582), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (32580, 32582), True, 'import matplotlib.pyplot as plt\n'), ((32634, 32663), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (32661, 32663), True, 'import matplotlib.pyplot as plt\n'), ((33678, 33694), 'numpy.isnan', 'np.isnan', (['im_RGB'], {}), '(im_RGB)\n', (33686, 33694), True, 'import numpy as np\n'), ((33739, 33757), 'numpy.isnan', 'np.isnan', (['im_class'], {}), '(im_class)\n', (33747, 33757), True, 'import numpy as np\n'), ((39763, 39782), 'numpy.arange', 'np.arange', (['(0)', '(13)', '(1)'], {}), '(0, 13, 1)\n', (39772, 39782), True, 'import numpy as np\n'), ((40602, 40658), 'numpy.logical_and', 'np.logical_and', (['(~im_labels[:, :, 0])', '(~im_labels[:, :, 1])'], {}), '(~im_labels[:, :, 0], ~im_labels[:, :, 1])\n', (40616, 40658), True, 'import numpy as np\n'), ((40826, 40835), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (40833, 40835), True, 'import matplotlib.pyplot as plt\n'), ((41021, 41033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (41031, 41033), True, 'import matplotlib.pyplot as plt\n'), ((41085, 41114), 'matplotlib.pyplot.get_current_fig_manager', 
'plt.get_current_fig_manager', ([], {}), '()\n', (41112, 41114), True, 'import matplotlib.pyplot as plt\n'), ((41163, 41208), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(3)'], {'height_ratios': '[4, 1]'}), '(2, 3, height_ratios=[4, 1])\n', (41180, 41208), False, 'from matplotlib import gridspec\n'), ((41807, 41823), 'numpy.isnan', 'np.isnan', (['im_RGB'], {}), '(im_RGB)\n', (41815, 41823), True, 'import numpy as np\n'), ((41868, 41886), 'numpy.isnan', 'np.isnan', (['im_class'], {}), '(im_class)\n', (41876, 41886), True, 'import numpy as np\n'), ((45187, 45233), 'numpy.array', 'np.array', (['[[np.nan, np.nan], [np.nan, np.nan]]'], {}), '([[np.nan, np.nan], [np.nan, np.nan]])\n', (45195, 45233), True, 'import numpy as np\n'), ((45927, 45968), 'pylab.ginput', 'ginput', ([], {'n': '(1)', 'show_clicks': '(True)', 'timeout': '(-1)'}), '(n=1, show_clicks=True, timeout=-1)\n', (45933, 45968), False, 'from pylab import ginput\n'), ((48693, 48703), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (48701, 48703), True, 'import matplotlib.pyplot as plt\n'), ((48769, 48793), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (48791, 48793), True, 'import matplotlib.pyplot as plt\n'), ((5474, 5530), 'coastsat.SDS_tools.get_filenames', 'SDS_tools.get_filenames', (['filenames[i]', 'filepath', 'satname'], {}), '(filenames[i], filepath, satname)\n', (5497, 5530), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((5671, 5746), 'coastsat.SDS_preprocess.preprocess_single', 'SDS_preprocess.preprocess_single', (['fn', 'satname', "settings['cloud_mask_issue']"], {}), "(fn, satname, settings['cloud_mask_issue'])\n", (5703, 5746), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((6390, 6427), 'numpy.logical_xor', 'np.logical_xor', (['cloud_mask', 'im_nodata'], {}), '(cloud_mask, im_nodata)\n', (6404, 6427), True, 'import numpy as np\n'), ((11414, 11462), 'os.path.join', 'os.path.join', (['filepath', "(sitename + '_output.pkl')"], {}), "(filepath, sitename + '_output.pkl')\n", (11426, 11462), False, 'import os\n'), ((13815, 13854), 'numpy.expand_dims', 'np.expand_dims', (['im_std[im_bool]'], {'axis': '(1)'}), '(im_std[im_bool], axis=1)\n', (13829, 13854), True, 'import numpy as np\n'), ((18433, 18446), 'numpy.isnan', 'np.isnan', (['vec'], {}), '(vec)\n', (18441, 18446), True, 'import numpy as np\n'), ((20850, 20887), 'numpy.logical_and', 'np.logical_and', (['vec_buffer', 'vec_water'], {}), '(vec_buffer, vec_water)\n', (20864, 20887), True, 'import numpy as np\n'), ((20913, 20949), 'numpy.logical_and', 'np.logical_and', (['vec_buffer', 'vec_sand'], {}), '(vec_buffer, vec_sand)\n', (20927, 20949), True, 'import numpy as np\n'), ((21093, 21143), 'numpy.argmin', 'np.argmin', (['[int_sand.shape[0], int_water.shape[0]]'], {}), '([int_sand.shape[0], int_water.shape[0]])\n', (21102, 21143), True, 'import numpy as np\n'), ((23447, 23464), 'numpy.ones', 'np.ones', (['im_shape'], {}), '(im_shape)\n', (23454, 23464), True, 'import numpy as np\n'), ((23659, 23726), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['ref_sl', "settings['output_epsg']", 'image_epsg'], {}), "(ref_sl, settings['output_epsg'], image_epsg)\n", (23681, 23726), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((25526, 25547), 'numpy.isnan', 'np.isnan', (['contours[k]'], {}), '(contours[k])\n', (25534, 25547), True, 'import numpy as np\n'), ((25637, 25678), 'numpy.delete', 'np.delete', (['contours[k]', 'index_nan'], {'axis': '(0)'}), '(contours[k], 
index_nan, axis=0)\n', (25646, 25678), True, 'import numpy as np\n'), ((32277, 32323), 'numpy.array', 'np.array', (['[[np.nan, np.nan], [np.nan, np.nan]]'], {}), '([[np.nan, np.nan], [np.nan, np.nan]])\n', (32285, 32323), True, 'import numpy as np\n'), ((32940, 32963), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(3)', '(1)'], {}), '(3, 1)\n', (32957, 32963), False, 'from matplotlib import gridspec\n'), ((33274, 33297), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(3)'], {}), '(1, 3)\n', (33291, 33297), False, 'from matplotlib import gridspec\n'), ((36320, 36330), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (36328, 36330), True, 'import matplotlib.pyplot as plt\n'), ((36404, 36428), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (36426, 36428), True, 'import matplotlib.pyplot as plt\n'), ((37248, 37301), 'os.path.join', 'os.path.join', (['filepath', "(date + '_' + satname + '.jpg')"], {}), "(filepath, date + '_' + satname + '.jpg')\n", (37260, 37301), False, 'import os\n'), ((39478, 39522), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d-%H-%M-%S"""'], {}), "(date, '%Y-%m-%d-%H-%M-%S')\n", (39495, 39522), False, 'from datetime import datetime\n'), ((42991, 43010), 'numpy.nanmin', 'np.nanmin', (['int_sand'], {}), '(int_sand)\n', (43000, 43010), True, 'import numpy as np\n'), ((43220, 43237), 'numpy.nanmin', 'np.nanmin', (['int_ww'], {}), '(int_ww)\n', (43229, 43237), True, 'import numpy as np\n'), ((43468, 43488), 'numpy.nanmin', 'np.nanmin', (['int_water'], {}), '(int_water)\n', (43477, 43488), True, 'import numpy as np\n'), ((43720, 43740), 'numpy.nanmin', 'np.nanmin', (['int_other'], {}), '(int_other)\n', (43729, 43740), True, 'import numpy as np\n'), ((46373, 46420), 'skimage.measure.find_contours', 'measure.find_contours', (['im_mndwi_buffer', 't_mndwi'], {}), '(im_mndwi_buffer, t_mndwi)\n', (46394, 46420), True, 'import skimage.measure as measure\n'), ((49549, 49602), 'os.path.join', 'os.path.join', (['filepath', "(date + '_' + satname + '.jpg')"], {}), "(filepath, date + '_' + satname + '.jpg')\n", (49561, 49602), False, 'import os\n'), ((15840, 15865), 'numpy.ones', 'np.ones', (['cloud_mask.shape'], {}), '(cloud_mask.shape)\n', (15847, 15865), True, 'import numpy as np\n'), ((23832, 23852), 'numpy.round', 'np.round', (['ref_sl_pix'], {}), '(ref_sl_pix)\n', (23840, 23852), True, 'import numpy as np\n'), ((28460, 28506), 'coastsat.SDS_tools.convert_pix2world', 'SDS_tools.convert_pix2world', (['idx_cloud', 'georef'], {}), '(idx_cloud, georef)\n', (28487, 28506), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((31928, 31998), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['shoreline', "settings['output_epsg']", 'image_epsg'], {}), "(shoreline, settings['output_epsg'], image_epsg)\n", (31950, 31998), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((43012, 43031), 'numpy.nanmax', 'np.nanmax', (['int_sand'], {}), '(int_sand)\n', (43021, 43031), True, 'import numpy as np\n'), ((43239, 43256), 'numpy.nanmax', 'np.nanmax', (['int_ww'], {}), '(int_ww)\n', (43248, 43256), True, 'import numpy as np\n'), ((43490, 43510), 'numpy.nanmax', 'np.nanmax', (['int_water'], {}), '(int_water)\n', (43499, 43510), True, 'import numpy as np\n'), ((43742, 43762), 'numpy.nanmax', 'np.nanmax', (['int_other'], {}), '(int_other)\n', (43751, 43762), True, 'import numpy as np\n'), ((44943, 45013), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['shoreline', 
"settings['output_epsg']", 'image_epsg'], {}), "(shoreline, settings['output_epsg'], image_epsg)\n", (44965, 45013), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((46122, 46138), 'numpy.abs', 'np.abs', (['pt[0][0]'], {}), '(pt[0][0])\n', (46128, 46138), True, 'import numpy as np\n'), ((47107, 47153), 'numpy.array', 'np.array', (['[[np.nan, np.nan], [np.nan, np.nan]]'], {}), '([[np.nan, np.nan], [np.nan, np.nan]])\n', (47115, 47153), True, 'import numpy as np\n'), ((4503, 4576), 'os.path.join', 'os.path.join', (['filepath_models', "('NN_4classes_Landsat_dark%s.pkl' % str_new)"], {}), "(filepath_models, 'NN_4classes_Landsat_dark%s.pkl' % str_new)\n", (4515, 4576), False, 'import os\n'), ((4947, 5010), 'os.path.join', 'os.path.join', (['filepath_models', "('NN_4classes_S2%s.pkl' % str_new)"], {}), "(filepath_models, 'NN_4classes_S2%s.pkl' % str_new)\n", (4959, 5010), False, 'import os\n'), ((21181, 21251), 'numpy.random.choice', 'np.random.choice', (['int_sand.shape[0]', 'int_water.shape[0]'], {'replace': '(False)'}), '(int_sand.shape[0], int_water.shape[0], replace=False)\n', (21197, 21251), True, 'import numpy as np\n'), ((21302, 21372), 'numpy.random.choice', 'np.random.choice', (['int_water.shape[0]', 'int_sand.shape[0]'], {'replace': '(False)'}), '(int_water.shape[0], int_sand.shape[0], replace=False)\n', (21318, 21372), True, 'import numpy as np\n'), ((25583, 25604), 'numpy.isnan', 'np.isnan', (['contours[k]'], {}), '(contours[k])\n', (25591, 25604), True, 'import numpy as np\n'), ((28801, 28855), 'numpy.linalg.norm', 'np.linalg.norm', (['(shoreline[k, :] - coords_cloud)'], {'axis': '(1)'}), '(shoreline[k, :] - coords_cloud, axis=1)\n', (28815, 28855), True, 'import numpy as np\n'), ((42941, 42959), 'numpy.isnan', 'np.isnan', (['int_sand'], {}), '(int_sand)\n', (42949, 42959), True, 'import numpy as np\n'), ((43172, 43188), 'numpy.isnan', 'np.isnan', (['int_ww'], {}), '(int_ww)\n', (43180, 43188), True, 'import numpy as np\n'), ((43417, 43436), 'numpy.isnan', 'np.isnan', (['int_water'], {}), '(int_water)\n', (43425, 43436), True, 'import numpy as np\n'), ((43669, 43688), 'numpy.isnan', 'np.isnan', (['int_other'], {}), '(int_other)\n', (43677, 43688), True, 'import numpy as np\n'), ((49266, 49277), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (49275, 49277), True, 'import matplotlib.pyplot as plt\n'), ((49383, 49407), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (49405, 49407), True, 'import matplotlib.pyplot as plt\n'), ((4663, 4738), 'os.path.join', 'os.path.join', (['filepath_models', "('NN_4classes_Landsat_bright%s.pkl' % str_new)"], {}), "(filepath_models, 'NN_4classes_Landsat_bright%s.pkl' % str_new)\n", (4675, 4738), False, 'import os\n'), ((4790, 4858), 'os.path.join', 'os.path.join', (['filepath_models', "('NN_4classes_Landsat%s.pkl' % str_new)"], {}), "(filepath_models, 'NN_4classes_Landsat%s.pkl' % str_new)\n", (4802, 4858), False, 'import os\n'), ((8485, 8547), 'coastsat.SDS_tools.nd_index', 'SDS_tools.nd_index', (['im_ms[:, :, 4]', 'im_ms[:, :, 1]', 'cloud_mask'], {}), '(im_ms[:, :, 4], im_ms[:, :, 1], cloud_mask)\n', (8503, 8547), False, 'from coastsat import SDS_tools, SDS_preprocess\n'), ((9830, 9840), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (9838, 9840), True, 'import matplotlib.pyplot as plt\n'), ((36953, 36964), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (36962, 36964), True, 'import matplotlib.pyplot as plt\n'), ((37082, 37106), 'matplotlib.pyplot.waitforbuttonpress', 
'plt.waitforbuttonpress', ([], {}), '()\n', (37104, 37106), True, 'import matplotlib.pyplot as plt\n'), ((46839, 46909), 'coastsat.SDS_tools.convert_epsg', 'SDS_tools.convert_epsg', (['shoreline', "settings['output_epsg']", 'image_epsg'], {}), "(shoreline, settings['output_epsg'], image_epsg)\n", (46861, 46909), False, 'from coastsat import SDS_tools, SDS_preprocess\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File name: tool_func.py
"""
Created on Thu Apr 23 17:39:40 2020
@author: Neo(<EMAIL>)
Some tool functions. Comments will be added when I am free.
"""
from myprogs.vsh.vsh_fit import rotgli_fit_4_table
from myprogs.catalog.pos_diff import radio_cat_diff_calc
from myprogs.catalog.read_icrf import read_icrf3, read_icrf2
from astropy.stats import sigma_clip, mad_std
from astropy import units as u
from astropy.table import Table
from sklearn.utils import resample
import numpy as np
# np.random.seed(28)
# ----------------------------- FUNCTIONS -----------------------------
def bootstrap_sample(tab, sam_size=500, with_replace=True):
"""Randomly select items of some number.
"""
N = len(tab)
ind = resample(np.arange(N), replace=with_replace, n_samples=sam_size)
new_tab = tab[ind]
return new_tab
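# --- Editor's illustration: added sketch, not part of the original module ---
# A minimal, hypothetical use of bootstrap_sample; the table and its column
# names below are invented for the example.
def _example_bootstrap_usage():
    demo_tab = Table({"dra": np.random.normal(0, 1, 3410),
                      "ddec": np.random.normal(0, 1, 3410)})
    sub_tab = bootstrap_sample(demo_tab, sam_size=2000, with_replace=False)
    return len(sub_tab)  # 2000 rows drawn without replacement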
def sample_clean(pos_oft, rho0=10, print_log=False):
"""Outlier elimination for VSH fitting.
"""
# Remove the outlier (consider the normalized separation)
N0 = len(pos_oft)
    X0 = np.sqrt(np.log(N0) * 2)
    # NOTE: the two overrides below raise the cuts so high that the outlier
    # elimination is effectively disabled and the full sample is kept.
    X0 = 10000000
    rho0 = 1000000
mask = ((pos_oft["nor_sep"] <= X0)
& (pos_oft["ang_sep"] < rho0))
# Table of a clean sample
pos_oft_cln = pos_oft[mask]
N1 = len(pos_oft_cln)
if print_log:
print("For a sample of %d sources, "
"the number of the outlier is smaller than 1 when X >= %.2f." % (N0, X0))
print("After elimination, there are %d sources in the clean sample." % N1)
print("The outlier rate is %.2f%%.\n" % ((N0 - N1) / N0 * 100))
return pos_oft_cln
def vsh_fit_for_pos(pos_oft, print_log=False):
"""Estimate the VSH coefficient.
"""
output = rotgli_fit_4_table(pos_oft, verbose=False)
# Keep rotation only and mas -> uas
pmt = output["pmt1"] * 1.e3
sig = output["sig1"] * 1.e3
# Calculate the total rotation
w, w_err = output["R"] * 1.e3, output["R_err"] * 1.e3
g, g_err = output["G"] * 1.e3, output["G_err"] * 1.e3
# Concatenate the results
pmt = np.hstack((pmt, [g, w]))
sig = np.hstack((sig, [g_err, w_err]))
    # Print results
if print_log:
print("Estimates (%6d sources)\n"
"----------------------------------------------\n"
" Rotation [uas] \n"
" x y z \n"
"----------------------------------------------\n"
" %+4.0f +/- %3.0f %+4.0f +/- %3.0f %+4.0f +/- %3.0f\n"
"----------------------------------------------\n" %
(len(pos_oft),
pmt[3], sig[3], pmt[4], sig[4], pmt[5], sig[5]))
return pmt, sig
def calc_orient(pos_oft):
"""Calculate orientation angles based on positional difference
"""
N0 = len(pos_oft)
pos_oft_cln = sample_clean(pos_oft, rho0=10)
N1 = len(pos_oft_cln)
pmt, sig = vsh_fit_for_pos(pos_oft_cln)
return N0, N1, pmt, sig
def calc_orient_new(pos_oft):
"""Calculate orientation angles based on positional difference
"""
N0 = len(pos_oft)
pmt, sig = vsh_fit_for_pos(pos_oft)
return N0, pmt, sig
def orientation_estimate(pos_oft, opt, clean=False):
"""Estimate the orientation offsets
"""
if clean:
pos_oft = sample_clean(pos_oft, rho0=10)
pmti, sigi = vsh_fit_for_pos(pos_oft)
opti = np.hstack((pmti, sigi))
opt = np.vstack((opt, opti))
return opt
def orient_angle_sampling(pos_oft, iter_time=1000, sam_size=2000, with_replace=False):
"""Orientation angle sampling.
"""
# Array to store data in the form of
# [g1, g2, g3, r1, r2, r3, g, r,
    #  sig_g1, sig_g2, sig_g3, sig_r1, sig_r2, sig_r3, sig_g, sig_r]
opt = np.empty(dtype=np.float, shape=(16,)) # For all sources
opt1 = np.empty(dtype=np.float, shape=(16,)) # For a clean sample
for i in range(iter_time):
print(">>>>{:4d}th iteration:".format(i+1), end="")
pos_oft_sub = bootstrap_sample(pos_oft, sam_size, with_replace)
# Sample size is around 60% of the common sources, 3410 * 0.6 = 2046
# All sources
opt = orientation_estimate(pos_oft_sub, opt)
# Removing Outliers
opt1 = orientation_estimate(pos_oft_sub, opt1, True)
print(" done!")
return opt, opt1
def save_data(data, fname):
tab = Table(data, names=["G1", "G2", "G3", "R1", "R2", "R3",
"G", "R",
"G1_err", "G2_err", "G3_err",
"R1_err", "R2_err", "R3_err",
"G_err", "R_err"])
tab.write(fname, overwrite=True)
def vsh_fit_for_pm(apm_table):
"""Estimate VSH coefficients from apparent proper motion.
"""
output = rotgli_fit_4_table(apm_table, verbose=False)
# Keep rotation only
pmt = output["pmt1"]
sig = output["sig1"]
# Calculate the total rotation
w, w_err = output["R"], output["R_err"]
g, g_err = output["G"], output["G_err"]
# Concatenate the results
pmt = np.hstack((pmt[3:], [w], pmt[:3], [g]))
sig = np.hstack((sig[3:], [w_err], sig[:3], [g_err]))
return pmt, sig, output
def vsh_fit_for_pm2(apm_table):
"""Estimate VSH coefficients from apparent proper motion.
    Only the rotation vector is estimated.
"""
output = rotgli_fit_4_table(apm_table, fit_type="T", verbose=False)
# Keep rotation only
pmt = output["pmt1"]
sig = output["sig1"]
# Calculate the total rotation
w, w_err = output["R"], output["R_err"]
# Concatenate the results
pmt = np.hstack((pmt, [w]))
sig = np.hstack((sig, [w_err]))
return pmt, sig, output
def calc_mean_std(y):
"""Esimate robustly the mean and standard deviation
"""
filtered_data = sigma_clip(y, sigma=3, maxiters=1, stdfunc=mad_std)
ymean, ystd = np.mean(filtered_data), np.std(filtered_data)
return ymean, ystd
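# --- Editor's illustration: added sketch, not part of the original module ---
# Sigma-clipped statistics on synthetic data with two gross outliers; the
# numbers are invented for the example.
def _example_clipped_stats():
    y = np.concatenate([np.random.normal(0.0, 1.0, 500), [50.0, -60.0]])
    ymean, ystd = calc_mean_std(y)
    # the clipped mean/std stay close to 0 and 1; the two outliers are rejected
    return ymean, ystd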
def random_walk(epoch, t_scale=5, sigma_var=2):
"""
"""
dt = epoch[1:] - epoch[:-1]
dt = np.concatenate(([0], dt))
# Positional offset
dra = np.zeros(len(epoch))
ddec = np.zeros(len(epoch))
dra[0] = np.random.random() - 0.5
ddec[0] = np.random.random() - 0.5
for i in range(len(epoch)):
# Exponential factor
exp_fac_i = np.exp(-dt[i]/t_scale)
        # innovation scale (Gauss-Markov form); note the deviates below are uniform, not Gaussian
sigma_i = sigma_var * np.sqrt(1-np.exp(-2*dt[i]/t_scale))
g_ra_i = (np.random.random_sample()-0.5) * sigma_i
g_dec_i = (np.random.random_sample()-0.5) * sigma_i
dra[i] = exp_fac_i * dra[i-1] + g_ra_i
ddec[i] = exp_fac_i * ddec[i-1] + g_dec_i
return dra, ddec
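# --- Editor's illustration: added sketch, not part of the original module ---
# Hypothetical call of random_walk on yearly epochs, purely for illustration.
def _example_random_walk():
    epoch = np.arange(2000.0, 2021.0, 1.0)
    dra, ddec = random_walk(epoch, t_scale=5, sigma_var=2)
    # the offsets decorrelate over roughly t_scale epochs
    return dra, ddec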
# --------------------------------- END --------------------------------
|
[
"numpy.mean",
"numpy.random.random_sample",
"astropy.table.Table",
"numpy.hstack",
"astropy.stats.sigma_clip",
"numpy.random.random",
"numpy.log",
"myprogs.vsh.vsh_fit.rotgli_fit_4_table",
"numpy.exp",
"numpy.empty",
"numpy.vstack",
"numpy.concatenate",
"numpy.std",
"numpy.arange"
] |
[((1757, 1799), 'myprogs.vsh.vsh_fit.rotgli_fit_4_table', 'rotgli_fit_4_table', (['pos_oft'], {'verbose': '(False)'}), '(pos_oft, verbose=False)\n', (1775, 1799), False, 'from myprogs.vsh.vsh_fit import rotgli_fit_4_table\n'), ((2098, 2122), 'numpy.hstack', 'np.hstack', (['(pmt, [g, w])'], {}), '((pmt, [g, w]))\n', (2107, 2122), True, 'import numpy as np\n'), ((2133, 2165), 'numpy.hstack', 'np.hstack', (['(sig, [g_err, w_err])'], {}), '((sig, [g_err, w_err]))\n', (2142, 2165), True, 'import numpy as np\n'), ((3452, 3475), 'numpy.hstack', 'np.hstack', (['(pmti, sigi)'], {}), '((pmti, sigi))\n', (3461, 3475), True, 'import numpy as np\n'), ((3486, 3508), 'numpy.vstack', 'np.vstack', (['(opt, opti)'], {}), '((opt, opti))\n', (3495, 3508), True, 'import numpy as np\n'), ((3814, 3851), 'numpy.empty', 'np.empty', ([], {'dtype': 'np.float', 'shape': '(16,)'}), '(dtype=np.float, shape=(16,))\n', (3822, 3851), True, 'import numpy as np\n'), ((3882, 3919), 'numpy.empty', 'np.empty', ([], {'dtype': 'np.float', 'shape': '(16,)'}), '(dtype=np.float, shape=(16,))\n', (3890, 3919), True, 'import numpy as np\n'), ((4435, 4582), 'astropy.table.Table', 'Table', (['data'], {'names': "['G1', 'G2', 'G3', 'R1', 'R2', 'R3', 'G', 'R', 'G1_err', 'G2_err', 'G3_err',\n 'R1_err', 'R2_err', 'R3_err', 'G_err', 'R_err']"}), "(data, names=['G1', 'G2', 'G3', 'R1', 'R2', 'R3', 'G', 'R', 'G1_err',\n 'G2_err', 'G3_err', 'R1_err', 'R2_err', 'R3_err', 'G_err', 'R_err'])\n", (4440, 4582), False, 'from astropy.table import Table\n'), ((4849, 4893), 'myprogs.vsh.vsh_fit.rotgli_fit_4_table', 'rotgli_fit_4_table', (['apm_table'], {'verbose': '(False)'}), '(apm_table, verbose=False)\n', (4867, 4893), False, 'from myprogs.vsh.vsh_fit import rotgli_fit_4_table\n'), ((5135, 5174), 'numpy.hstack', 'np.hstack', (['(pmt[3:], [w], pmt[:3], [g])'], {}), '((pmt[3:], [w], pmt[:3], [g]))\n', (5144, 5174), True, 'import numpy as np\n'), ((5185, 5232), 'numpy.hstack', 'np.hstack', (['(sig[3:], [w_err], sig[:3], [g_err])'], {}), '((sig[3:], [w_err], sig[:3], [g_err]))\n', (5194, 5232), True, 'import numpy as np\n'), ((5420, 5478), 'myprogs.vsh.vsh_fit.rotgli_fit_4_table', 'rotgli_fit_4_table', (['apm_table'], {'fit_type': '"""T"""', 'verbose': '(False)'}), "(apm_table, fit_type='T', verbose=False)\n", (5438, 5478), False, 'from myprogs.vsh.vsh_fit import rotgli_fit_4_table\n'), ((5676, 5697), 'numpy.hstack', 'np.hstack', (['(pmt, [w])'], {}), '((pmt, [w]))\n', (5685, 5697), True, 'import numpy as np\n'), ((5708, 5733), 'numpy.hstack', 'np.hstack', (['(sig, [w_err])'], {}), '((sig, [w_err]))\n', (5717, 5733), True, 'import numpy as np\n'), ((5872, 5923), 'astropy.stats.sigma_clip', 'sigma_clip', (['y'], {'sigma': '(3)', 'maxiters': '(1)', 'stdfunc': 'mad_std'}), '(y, sigma=3, maxiters=1, stdfunc=mad_std)\n', (5882, 5923), False, 'from astropy.stats import sigma_clip, mad_std\n'), ((6120, 6145), 'numpy.concatenate', 'np.concatenate', (['([0], dt)'], {}), '(([0], dt))\n', (6134, 6145), True, 'import numpy as np\n'), ((785, 797), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (794, 797), True, 'import numpy as np\n'), ((5942, 5964), 'numpy.mean', 'np.mean', (['filtered_data'], {}), '(filtered_data)\n', (5949, 5964), True, 'import numpy as np\n'), ((5966, 5987), 'numpy.std', 'np.std', (['filtered_data'], {}), '(filtered_data)\n', (5972, 5987), True, 'import numpy as np\n'), ((6248, 6266), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6264, 6266), True, 'import numpy as np\n'), ((6287, 6305), 'numpy.random.random', 
'np.random.random', ([], {}), '()\n', (6303, 6305), True, 'import numpy as np\n'), ((6394, 6418), 'numpy.exp', 'np.exp', (['(-dt[i] / t_scale)'], {}), '(-dt[i] / t_scale)\n', (6400, 6418), True, 'import numpy as np\n'), ((1093, 1103), 'numpy.log', 'np.log', (['N0'], {}), '(N0)\n', (1099, 1103), True, 'import numpy as np\n'), ((6528, 6553), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (6551, 6553), True, 'import numpy as np\n'), ((6588, 6613), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (6611, 6613), True, 'import numpy as np\n'), ((6484, 6512), 'numpy.exp', 'np.exp', (['(-2 * dt[i] / t_scale)'], {}), '(-2 * dt[i] / t_scale)\n', (6490, 6512), True, 'import numpy as np\n')]
|
import numpy as np
from opytimizer.optimizers.swarm import sso
from opytimizer.spaces import search
def test_sso_params():
params = {
'C_w': 0.1,
'C_p': 0.4,
'C_g': 0.9
}
new_sso = sso.SSO(params=params)
assert new_sso.C_w == 0.1
assert new_sso.C_p == 0.4
assert new_sso.C_g == 0.9
def test_sso_params_setter():
new_sso = sso.SSO()
try:
new_sso.C_w = 'a'
except:
new_sso.C_w = 0.1
try:
new_sso.C_w = -1
except:
new_sso.C_w = 0.1
assert new_sso.C_w == 0.1
try:
new_sso.C_p = 'b'
except:
new_sso.C_p = 0.4
try:
new_sso.C_p = 0.05
except:
new_sso.C_p = 0.4
assert new_sso.C_p == 0.4
try:
new_sso.C_g = 'c'
except:
new_sso.C_g = 0.9
try:
new_sso.C_g = 0.35
except:
new_sso.C_g = 0.9
assert new_sso.C_g == 0.9
def test_sso_compile():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
try:
new_sso.local_position = 1
except:
new_sso.local_position = np.array([1])
assert new_sso.local_position == np.array([1])
def test_sso_evaluate():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
new_sso.evaluate(search_space, square)
def test_sso_update():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
new_sso.update(search_space)
|
[
"numpy.sum",
"opytimizer.spaces.search.SearchSpace",
"numpy.array",
"opytimizer.optimizers.swarm.sso.SSO"
] |
[((221, 243), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {'params': 'params'}), '(params=params)\n', (228, 243), False, 'from opytimizer.optimizers.swarm import sso\n'), ((383, 392), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {}), '()\n', (390, 392), False, 'from opytimizer.optimizers.swarm import sso\n'), ((976, 1068), 'opytimizer.spaces.search.SearchSpace', 'search.SearchSpace', ([], {'n_agents': '(10)', 'n_variables': '(2)', 'lower_bound': '[0, 0]', 'upper_bound': '[10, 10]'}), '(n_agents=10, n_variables=2, lower_bound=[0, 0],\n upper_bound=[10, 10])\n', (994, 1068), False, 'from opytimizer.spaces import search\n'), ((1118, 1127), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {}), '()\n', (1125, 1127), False, 'from opytimizer.optimizers.swarm import sso\n'), ((1412, 1504), 'opytimizer.spaces.search.SearchSpace', 'search.SearchSpace', ([], {'n_agents': '(10)', 'n_variables': '(2)', 'lower_bound': '[0, 0]', 'upper_bound': '[10, 10]'}), '(n_agents=10, n_variables=2, lower_bound=[0, 0],\n upper_bound=[10, 10])\n', (1430, 1504), False, 'from opytimizer.spaces import search\n'), ((1554, 1563), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {}), '()\n', (1561, 1563), False, 'from opytimizer.optimizers.swarm import sso\n'), ((1686, 1778), 'opytimizer.spaces.search.SearchSpace', 'search.SearchSpace', ([], {'n_agents': '(10)', 'n_variables': '(2)', 'lower_bound': '[0, 0]', 'upper_bound': '[10, 10]'}), '(n_agents=10, n_variables=2, lower_bound=[0, 0],\n upper_bound=[10, 10])\n', (1704, 1778), False, 'from opytimizer.spaces import search\n'), ((1828, 1837), 'opytimizer.optimizers.swarm.sso.SSO', 'sso.SSO', ([], {}), '()\n', (1835, 1837), False, 'from opytimizer.optimizers.swarm import sso\n'), ((1304, 1317), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1312, 1317), True, 'import numpy as np\n'), ((1379, 1393), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (1385, 1393), True, 'import numpy as np\n'), ((1252, 1265), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1260, 1265), True, 'import numpy as np\n')]
|
import numpy as np
from agents import Agent
from connectboard import ConnectBoard
import time
import math
class AlphaBeta(Agent):
"""Agent that implements minimax with alpha-beta pruning to select its next move."""
def get_move(self, game_board: np.ndarray) -> np.ndarray:
"""Recursively runs minimax to determine the best move to make.
Recursively runs minimax algorithm with alpha-beta pruning starting at the current game state.
This player is assumed to be maximizing.
Args:
game_board (np.ndarray): current board with a 1 for current player, -1 for
opponent, and 0 for open space
Returns:
An ndarray representing the move, with a 1 in the row,col of the new
piece, and all other entries zero.
"""
start = time.time()
move_val, move = self.alpha_beta(game_board, depth=5)
end = time.time()
print(
"Found optimal move with value: {}, in {}s".format(move_val, (end - start))
)
return move
def alpha_beta(
self,
game_board: np.ndarray,
alpha: float = -np.inf,
beta: float = np.inf,
depth: int = np.inf,
max_player: bool = True,
) -> (int, np.ndarray):
"""Perform minimax with alpha-beta pruning to determine best move to take from current game_board.
Performs minimax starting at the current position and ending after looking depth moves ahead, or when all leaf
nodes are end_game states.
TODO: If multiple winning moves, it picks the first one. Change so agent chooses the quickest win.
Args:
game_board (np.ndarray): 2D array representing the current pieces as 1 or -1 if they
are for the maximizing or minimizing player respectively.
alpha (float, optional): The best score achieved by the maximizing player. Defaults to -np.inf,
the worst possible value for the maximizing player.
beta (float, optional): The best score achieved by the minimizing player. Defaults to np.inf.
            depth (int, optional): The number of layers to check using minimax. Default is np.inf, which will
check all layers.
max_player (bool, optional): Indicates whether the turn at the root node belongs to the minimizing or
maximizing player. Default is True, meaning the maximizing player is next to move.
Returns:
move_val (int): The optimal value of this node.
move (np.ndarray): A 6x7 numpy array with a 1 in the spot of the move to take from the current
node that will result in the optimal value.
"""
legal_moves = ConnectBoard.get_legal_moves(game_board)
if legal_moves.size == 0 or depth == 0:
# Leaf node, perform static value checking.
return self.get_static_value(game_board), None
next_states = (
game_board + legal_moves if max_player else game_board - legal_moves
)
best_move = legal_moves[0]
while next_states.size > 0:
best_idx = self.get_most_valuable(next_states, max_player)
state = next_states[best_idx]
next_states = np.delete(next_states, best_idx, 0)
# Only recurse farther if the current state is not an end game state
if math.isinf(self.get_static_value(state)):
val = self.get_static_value(state)
else:
val, _ = self.alpha_beta(
state,
alpha=alpha,
beta=beta,
depth=depth - 1,
max_player=not max_player,
)
if max_player and val > alpha:
alpha = val
best_move = legal_moves[best_idx]
elif not max_player and val < beta:
best_move = legal_moves[best_idx]
beta = val
legal_moves = np.delete(legal_moves, best_idx, 0)
if beta < alpha:
break
if max_player:
return alpha, best_move
else:
return beta, best_move
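    # --- Editor's note (added illustration, not part of the original) ---
    # Expanding the statically most promising child first (via
    # get_most_valuable below) is a move-ordering heuristic: the sooner a
    # strong move raises alpha (or lowers beta), the earlier the
    # "if beta < alpha: break" cutoff above prunes the remaining siblings.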
def get_most_valuable(self, states: np.ndarray, max_player: bool) -> int:
"""Return the index of next_states corresponding to the best static value for current player.
Args:
states (np.ndarray): Numpy array of 6x7 board states. Maximizing player is 1,minimizing
player is -1.
max_player (bool): If max_player is true, return the index with maximum static value,
if false, return the index that minimizes static value.
"""
idx = 0
best_val = self.get_static_value(states[0])
for i in range(1, states.shape[0]):
val = self.get_static_value(states[i])
if max_player and val > best_val:
idx = i
best_val = val
elif val < best_val:
idx = i
best_val = val
return idx
def get_static_value(self, game_board: np.ndarray) -> float:
"""Returns the static value of game_board.
For each possible way to get four in a row, check if the line contains only 1 or -1.
If that row contains pieces from only one player, add the sum of their pieces to value.
If either player has 4 in a row, return +/- inf.
Args:
game_board (np.ndarray): The current minimax board with maximing player as 1
and minimizing player as -1.
Returns:
value (float): The static value of the current position.
"""
windows = game_board.flatten()[ConnectBoard.WINDOW_INDICES].reshape(-1, 4)
uncontested_windows = windows[windows.min(axis=1) != -windows.max(axis=1)]
if uncontested_windows.size == 0:
return 0
window_sums = uncontested_windows.sum(axis=1)
if window_sums.max() == 4:
return np.inf
elif window_sums.min() == -4:
return -np.inf
else:
return (abs(window_sums) * window_sums ** 2 / window_sums).sum()
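    # --- Editor's note (added illustration, not part of the original) ---
    # In the return above, abs(s) * s**2 / s equals sign(s) * s**2, so each
    # uncontested window contributes the square of its piece count with the
    # sign of its owner: a window summing to +3 adds 9, one summing to -2
    # adds -4; contested windows were filtered out and contribute nothing.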
def handle_invalid_move(self) -> None:
# Throw exception during development
# TODO: Add some nice handler later on
raise Exception
|
[
"numpy.delete",
"time.time",
"connectboard.ConnectBoard.get_legal_moves"
] |
[((833, 844), 'time.time', 'time.time', ([], {}), '()\n', (842, 844), False, 'import time\n'), ((921, 932), 'time.time', 'time.time', ([], {}), '()\n', (930, 932), False, 'import time\n'), ((2759, 2799), 'connectboard.ConnectBoard.get_legal_moves', 'ConnectBoard.get_legal_moves', (['game_board'], {}), '(game_board)\n', (2787, 2799), False, 'from connectboard import ConnectBoard\n'), ((3291, 3326), 'numpy.delete', 'np.delete', (['next_states', 'best_idx', '(0)'], {}), '(next_states, best_idx, 0)\n', (3300, 3326), True, 'import numpy as np\n'), ((4044, 4079), 'numpy.delete', 'np.delete', (['legal_moves', 'best_idx', '(0)'], {}), '(legal_moves, best_idx, 0)\n', (4053, 4079), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import netomaton as ntm
import numpy as np
if __name__ == '__main__':
# NKS page 443 - Rule 122R
network = ntm.topology.cellular_automaton(n=100)
# carefully chosen initial conditions
previous_state = [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1,
0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0,
1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1,
0, 0, 1, 1]
initial_conditions = [1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1,
1, 1, 1, 0, 1, 1, 1]
trajectory = ntm.evolve(initial_conditions=initial_conditions, network=network,
activity_rule=ntm.ReversibleRule(ntm.rules.nks_ca_rule(122)),
past_conditions=[previous_state], timesteps=1002)
timestep = []
average_node_entropies = []
activities = ntm.get_activities_over_time_as_list(trajectory)
for i, c in enumerate(activities):
timestep.append(i)
bit_string = ''.join([str(x) for x in c])
average_node_entropies.append(ntm.average_node_entropy(activities[:i+1]))
print("%s, %s" % (i, average_node_entropies[-1]))
plt.subplot(3, 1, (1, 2))
plt.title("Avg. Node (Shannon) Entropy")
plt.gca().set_xlim(0, 1002)
plt.gca().axes.xaxis.set_ticks([])
plt.plot(timestep, average_node_entropies)
plt.subplot(3, 1, 3)
plt.gca().axes.yaxis.set_ticks([])
ntm.plot_grid(np.array(activities).T.tolist())
|
[
"netomaton.rules.nks_ca_rule",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"numpy.array",
"netomaton.average_node_entropy",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"netomaton.get_activities_over_time_as_list",
"netomaton.topology.cellular_automaton"
] |
[((150, 188), 'netomaton.topology.cellular_automaton', 'ntm.topology.cellular_automaton', ([], {'n': '(100)'}), '(n=100)\n', (181, 188), True, 'import netomaton as ntm\n'), ((1346, 1394), 'netomaton.get_activities_over_time_as_list', 'ntm.get_activities_over_time_as_list', (['trajectory'], {}), '(trajectory)\n', (1382, 1394), True, 'import netomaton as ntm\n'), ((1656, 1681), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1, 2)'], {}), '(3, 1, (1, 2))\n', (1667, 1681), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1726), 'matplotlib.pyplot.title', 'plt.title', (['"""Avg. Node (Shannon) Entropy"""'], {}), "('Avg. Node (Shannon) Entropy')\n", (1695, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1802, 1844), 'matplotlib.pyplot.plot', 'plt.plot', (['timestep', 'average_node_entropies'], {}), '(timestep, average_node_entropies)\n', (1810, 1844), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1870), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (1861, 1870), True, 'import matplotlib.pyplot as plt\n'), ((1549, 1593), 'netomaton.average_node_entropy', 'ntm.average_node_entropy', (['activities[:i + 1]'], {}), '(activities[:i + 1])\n', (1573, 1593), True, 'import netomaton as ntm\n'), ((1731, 1740), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1738, 1740), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1196), 'netomaton.rules.nks_ca_rule', 'ntm.rules.nks_ca_rule', (['(122)'], {}), '(122)\n', (1191, 1196), True, 'import netomaton as ntm\n'), ((1763, 1772), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1770, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1884), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1882, 1884), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1948), 'numpy.array', 'np.array', (['activities'], {}), '(activities)\n', (1936, 1948), True, 'import numpy as np\n')]
|
import numpy as np
__all__ = ["plot_spectrum_datasets_off_regions", "plot_contour_line"]
def plot_spectrum_datasets_off_regions(datasets, ax=None):
"""Plot spectrum datasets of regions.
Parameters
----------
datasets : list of `SpectrumDatasetOnOff`
List of spectrum on-off datasets
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
    ax = ax or plt.gca(projection=datasets[0].counts_off.geom.wcs)
color_cycle = plt.rcParams["axes.prop_cycle"]
colors = color_cycle.by_key()["color"]
handles = []
for color, dataset in zip(colors, datasets):
kwargs = {"edgecolor": color, "facecolor": "none"}
dataset.counts_off.plot_region(ax=ax, **kwargs)
# create proxy artist for the custom legend
handle = mpatches.Patch(label=dataset.name, **kwargs)
handles.append(handle)
plt.legend(handles=handles)
def plot_contour_line(ax, x, y, **kwargs):
"""Plot smooth curve from contour points"""
from scipy.interpolate import CubicSpline
    # close the contour
xf = np.append(x, x[0])
yf = np.append(y, y[0])
# curve parametrization must be strictly increasing
# so we use the cumulative distance of each point from the first one
dist = np.sqrt(np.diff(xf) ** 2.0 + np.diff(yf) ** 2.0)
dist = [0] + list(dist)
t = np.cumsum(dist)
ts = np.linspace(0, t[-1], 50)
# 1D cubic spline interpolation
cs = CubicSpline(t, np.c_[xf, yf], bc_type="periodic")
out = cs(ts)
# plot
if "marker" in kwargs.keys():
marker = kwargs.pop("marker")
else:
marker = "+"
if "color" in kwargs.keys():
color = kwargs.pop("color")
else:
color = "b"
ax.plot(out[:, 0], out[:, 1], "-", color=color, **kwargs)
ax.plot(xf, yf, linestyle='', marker=marker, color=color)
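# Example usage (a sketch; assumes `x` and `y` are 1-D arrays of contour vertices):
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     plot_contour_line(ax, x, y, color="r", marker="o")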
|
[
"scipy.interpolate.CubicSpline",
"matplotlib.pyplot.gca",
"numpy.diff",
"numpy.append",
"numpy.linspace",
"matplotlib.patches.Patch",
"numpy.cumsum",
"matplotlib.pyplot.legend"
] |
[((892, 919), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'handles'}), '(handles=handles)\n', (902, 919), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1108), 'numpy.append', 'np.append', (['x', 'x[0]'], {}), '(x, x[0])\n', (1099, 1108), True, 'import numpy as np\n'), ((1118, 1136), 'numpy.append', 'np.append', (['y', 'y[0]'], {}), '(y, y[0])\n', (1127, 1136), True, 'import numpy as np\n'), ((1363, 1378), 'numpy.cumsum', 'np.cumsum', (['dist'], {}), '(dist)\n', (1372, 1378), True, 'import numpy as np\n'), ((1388, 1413), 'numpy.linspace', 'np.linspace', (['(0)', 't[-1]', '(50)'], {}), '(0, t[-1], 50)\n', (1399, 1413), True, 'import numpy as np\n'), ((1460, 1509), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['t', 'np.c_[xf, yf]'], {'bc_type': '"""periodic"""'}), "(t, np.c_[xf, yf], bc_type='periodic')\n", (1471, 1509), False, 'from scipy.interpolate import CubicSpline\n'), ((407, 458), 'matplotlib.pyplot.gca', 'plt.gca', ([], {'projection': 'datasets[0].counts_off.geom.wcs'}), '(projection=datasets[0].counts_off.geom.wcs)\n', (414, 458), True, 'import matplotlib.pyplot as plt\n'), ((811, 855), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'label': 'dataset.name'}), '(label=dataset.name, **kwargs)\n', (825, 855), True, 'import matplotlib.patches as mpatches\n'), ((1286, 1297), 'numpy.diff', 'np.diff', (['xf'], {}), '(xf)\n', (1293, 1297), True, 'import numpy as np\n'), ((1307, 1318), 'numpy.diff', 'np.diff', (['yf'], {}), '(yf)\n', (1314, 1318), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 11 15:05:10 2019
The Simple Linear Regression model
@author: Dr. Dr. <NAME>
@web : https://dannyvanpoucke.be
"""
import pandas as pd
import numpy as np
from TModelClass import TModelClass
from TModelResults import TModelResults
from Bootstrap import TBootstrap
from TModelQualityData import TModelQualityData
from sklearn.pipeline import Pipeline
class TLinearModel(TModelClass):
"""
Child class representing the linear regression model
"""
def __init__(self,name,Target, Feature: pd.DataFrame,
Target_test, Feature_test: pd.DataFrame,
Pipeline: Pipeline
):
"""
Constructor of the TLinearModel class.
It requires:
- name : the name of the object instance
- Feature : a pandas dataframe containing the features
- Target : the training target data
- Target_test: the test target data
- Feature_test: the untransformed features for testing.
- Pipeline : a pipeline generated by the PipelineFactory
It sets the following properties
- pipeline : a pipeline object containing the preprocessing transformations (excluding the fitter function)
- model : the fitter function to be used (should be an sklearn function with "fit" method)
- feature_tf: the transformed features as obtained by the pipeline
"""
from sklearn.linear_model import LinearRegression
super().__init__(name,Target, Feature,Target_test, Feature_test)
self.nameModel='Linear Model'
self.name=name
print("Initialising the child class:",self.nameModel)
#create a pipeline (can be extended to contain more functions, p67)
self.pipeline=Pipeline
#self.pipeline = Pipeline([
# #('std_scaler', StandardScaler()), #scaling to be centered on 0, with unit variance...since the values are quite different, this will help things
# ('std_scaler', StandardScaler(with_mean=False, with_std=False)),
#])
self.feature_tf = self.pipeline.fit_transform(Feature) #this is a numpy array...
self.model = LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None) #default values..explicitly set
#def fit(self):
# """ test to check if it is possible to manually set the coef_ and intercept_
# --> conclusion: it seems to work, but only for fit & predict,
# not for cross_val_score...which just does new fittings
# Class-method wrapping the fit-method of the sklearn model.
#
# - Target : a pandas dataframe with the Target data belonging to the
# Features provided upon initialisation.
# """
# import numpy as np
# self.model.intercept_ = 0
# self.model.coef_= np.array([-0,0,6.1]).reshape((1,-1))
# self.setCoefficients()
# print("FIT COEFF=",self.model.coef_," INTERCEPT=",self.model.intercept_)
# print("did some fitting, Parent-style:",type(self.model).__name__)
def fitSanityCheck(self)->int:
"""
Class method which should cover/deal with failures of sklearn.
For some reason, sklearn LinearRegression randomly fails on small datasets.
        This failure gives rise to huge coefficients. However, just shuffling the
data seems to resolve the issue.
This function returns the number of shuffles needed to regain sanity.
"""
import sys
#first find out if we have "infinite" coefficients
cnt=0
insane=(abs(sum(self.model.coef_)/len(self.model.coef_))>1.0E9) #larger than 1 billion should be a clear sign
        while (insane and (cnt<100)): #try up to 100x ... if none are OK, then it will never be fixed
cnt+=1
#then we shuffle the features & targets...
#1) recombine in 1 pandas dataframe
combo=pd.concat([self.feature,self.target], axis=1, sort=False, join='outer')
#2) shuffle: https://stackoverflow.com/questions/29576430/shuffle-dataframe-rows
combo=combo.sample(frac=1).reset_index(drop=True)
#3) re-store in target/feature/feature_tf
self.target=combo[combo.columns[-1]].copy()
self.feature=combo.drop(combo.columns[-1],axis=1)
self.feature_tf = self.pipeline.fit_transform(self.feature) #this is a numpy array...
#4) finally refit
self.fit()
            insane=(abs(sum(abs(self.model.coef_))/len(self.model.coef_))>self.sanityThresshold) #normally values of 1E14 are reached, but on occasion as low as 1E08 was found
if (cnt>0):#update the coefficients
self.setCoefficients()
if insane:
print("EPIC FAIL, 100 attempts at sanity failed in the ",self.name,". Terminating this sick job!")
sys.exit()
return cnt
#serial version
def setAverageCoefficients(self,EnsembleData: TModelResults, setCI: bool):
"""
Use the ensemble data to create an "average" model, and set the "coefficients"
in the current model. This should be performed in each model separately
"""
#import time
# 1. Calculate the average coefficients
# 1.1. transform them to arrays
#start = time.perf_counter_ns()
#print("3.1) Average Coefficients : AVG")
intercept=np.zeros(EnsembleData.NData)
coef=np.zeros((EnsembleData.NData,EnsembleData.modelCoef[0]['coef_'][1].shape[1]))
for i in range(EnsembleData.NData):
mcf=EnsembleData.modelCoef[i]
intercept[i]=np.asarray(mcf['intercept_'][1]).ravel()
coef[i,:]=np.asarray(mcf['coef_'][1]).ravel()
mean_intercept=np.mean(intercept,axis=0)#axis is the varying direction, so 0 means we calculate the average of a column by varying the row
mean_coef=np.mean(coef,axis=0)
# 2. Set the model coefficients to these averaged values
self.model.intercept_=mean_intercept
self.model.coef_=mean_coef
self.isAverage = True
self.hasCI=False
if setCI:
#end = time.perf_counter_ns()
#print("3.2.a) Average Coefficients : CI Intercept ",(end-start)/10E9)
# 3. Calculate Confidence Interval using Bootstrapper tech?
# & 4. Store the CI data
## For the intercept
boot=TBootstrap(data=intercept,Func=np.mean)
#end = time.perf_counter_ns()
#print("3.2.b) NPboot",(end-start)/1E9)
boot.NPbootstrap(n_iter=2000, Jackknife=True)
#end = time.perf_counter_ns()
#print("3.2.c) Con Int",(end-start)/1E9)
avgm, avgp = boot.ConfidenceInterval(CItype="BCa",alpha=0.05,n_samples=2000)#95%confidence interval
self.CI["intercept_lo"]=avgm
self.CI["intercept_hi"]=avgp
## For the coefficients
avgml=list()
avgpl=list()
for col in range(EnsembleData.modelCoef[0]['coef_'][1].shape[1]):
#end = time.perf_counter_ns()
#print("3.2) Average Coefficients : CI Coef ",col," ",(end-start)/1E9)
boot=TBootstrap(data=coef[:,col],Func=np.mean)
boot.NPbootstrap(n_iter=2000, Jackknife=True)
avgm, avgp = boot.ConfidenceInterval(CItype="BCa",alpha=0.05)#95%confidence interval
avgml.append(avgm)
avgpl.append(avgp)
self.CI["coef_lo"]=avgml
self.CI["coef_hi"]=avgpl
self.hasCI = True
#store the resulting coefficients in our wrapper tracker...and we are done
self.setCoefficients()
self.Quality=TModelQualityData(EData=EnsembleData)
def printAverageCoefficients(self, File: str=None):
"""
Print a block of information to a file, containing the averaged coefficients.
parameters:
- self:
- File: string containing a filename, if None standard output is used. Default=None
"""
if File is None:
print("======= THE AVERAGED MODEL ==============")
print(" Model : ",self.name)
print(self.Quality.QualitiesText())
if self.hasCI:
print("Intercept : ",self.model.intercept_," and CI=[",self.CI["intercept_lo"]," ; ",self.CI["intercept_hi"],"]")
for col in range(len(self.model.coef_)):
print("coef ",col," : ",self.model.coef_[col]," and CI=[",self.CI["coef_lo"][col]," ; ",self.CI["coef_hi"][col],"]")
else:
print("Intercept : ",self.model.intercept_)
for col in range(len(self.model.coef_)):
print("coef ",col," : ",self.model.coef_[col])
print("====================================\n\n")
else:
foo=open(File,"a+",)
foo.write("======= THE AVERAGED MODEL ==============\n")
line=" Model : "+self.name+"\n"
foo.write(line)
foo.write(self.Quality.QualitiesText())
if self.hasCI:
line="Intercept : "+str(self.model.intercept_)+" and CI=["+str(self.CI["intercept_lo"])+" ; "+str(self.CI["intercept_hi"])+"] \n"
foo.write(line)
for col in range(len(self.model.coef_)):
line="coef "+str(col)+" : "+str(self.model.coef_[col])+" and CI=["+str(self.CI["coef_lo"][col])+" ; "+str(self.CI["coef_hi"][col])+"] \n"
foo.write(line)
else:
line="Intercept : "+str(self.model.intercept_)+"\n"
foo.write(line)
for col in range(len(self.model.coef_)):
line="coef "+str(col)+" : "+str(self.model.coef_[col])+"\n"
foo.write(line)
foo.write("====================================\n\n")
foo.close()
def setCoefficients(self):
"""
Class-method collecting and storing the fitting coefficients for a
linear regression in the object
"""
import numpy as np
super().setCoefficients()
self.modelcoef['header_coef']=[self.coefindex,"The coefficients for each target (one per row) are given by:"]
self.modelcoef['coef_']=[self.coefindex+1,np.array([self.model.coef_])]
self.modelcoef['header_intercept']=[self.coefindex+2,"The intercepts for each target (one per row) are given by:"]
self.modelcoef['intercept_']=[self.coefindex+3,np.array([self.model.intercept_])]
self.coefindex+=4
|
[
"numpy.mean",
"Bootstrap.TBootstrap",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"sys.exit",
"pandas.concat",
"sklearn.linear_model.LinearRegression",
"TModelQualityData.TModelQualityData"
] |
[((2265, 2344), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)', 'normalize': '(False)', 'copy_X': '(True)', 'n_jobs': 'None'}), '(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None)\n', (2281, 2344), False, 'from sklearn.linear_model import LinearRegression\n'), ((5620, 5648), 'numpy.zeros', 'np.zeros', (['EnsembleData.NData'], {}), '(EnsembleData.NData)\n', (5628, 5648), True, 'import numpy as np\n'), ((5662, 5740), 'numpy.zeros', 'np.zeros', (["(EnsembleData.NData, EnsembleData.modelCoef[0]['coef_'][1].shape[1])"], {}), "((EnsembleData.NData, EnsembleData.modelCoef[0]['coef_'][1].shape[1]))\n", (5670, 5740), True, 'import numpy as np\n'), ((5986, 6012), 'numpy.mean', 'np.mean', (['intercept'], {'axis': '(0)'}), '(intercept, axis=0)\n', (5993, 6012), True, 'import numpy as np\n'), ((6128, 6149), 'numpy.mean', 'np.mean', (['coef'], {'axis': '(0)'}), '(coef, axis=0)\n', (6135, 6149), True, 'import numpy as np\n'), ((7995, 8032), 'TModelQualityData.TModelQualityData', 'TModelQualityData', ([], {'EData': 'EnsembleData'}), '(EData=EnsembleData)\n', (8012, 8032), False, 'from TModelQualityData import TModelQualityData\n'), ((4088, 4160), 'pandas.concat', 'pd.concat', (['[self.feature, self.target]'], {'axis': '(1)', 'sort': '(False)', 'join': '"""outer"""'}), "([self.feature, self.target], axis=1, sort=False, join='outer')\n", (4097, 4160), True, 'import pandas as pd\n'), ((5060, 5070), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5068, 5070), False, 'import sys\n'), ((6652, 6692), 'Bootstrap.TBootstrap', 'TBootstrap', ([], {'data': 'intercept', 'Func': 'np.mean'}), '(data=intercept, Func=np.mean)\n', (6662, 6692), False, 'from Bootstrap import TBootstrap\n'), ((10681, 10709), 'numpy.array', 'np.array', (['[self.model.coef_]'], {}), '([self.model.coef_])\n', (10689, 10709), True, 'import numpy as np\n'), ((10889, 10922), 'numpy.array', 'np.array', (['[self.model.intercept_]'], {}), '([self.model.intercept_])\n', (10897, 10922), True, 'import numpy as np\n'), ((7451, 7494), 'Bootstrap.TBootstrap', 'TBootstrap', ([], {'data': 'coef[:, col]', 'Func': 'np.mean'}), '(data=coef[:, col], Func=np.mean)\n', (7461, 7494), False, 'from Bootstrap import TBootstrap\n'), ((5851, 5883), 'numpy.asarray', 'np.asarray', (["mcf['intercept_'][1]"], {}), "(mcf['intercept_'][1])\n", (5861, 5883), True, 'import numpy as np\n'), ((5914, 5941), 'numpy.asarray', 'np.asarray', (["mcf['coef_'][1]"], {}), "(mcf['coef_'][1])\n", (5924, 5941), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, division, print_function
from abc import abstractmethod, abstractproperty
import numpy as np
from keras import __version__ as __keras_version__
from keras import backend as K
from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed
from keras.models import Model
from keras.regularizers import l2
from energyflow.archs.archbase import NNBase, _get_act_layer
from energyflow.utils import iter_or_rep
__all__ = [
# input constructor functions
#'construct_efn_input', 'construct_pfn_input',
# weight mask constructor functions
#'construct_efn_weight_mask', 'construct_pfn_weight_mask',
    # network constructor functions
#'construct_distributed_dense', 'construct_latent', 'construct_dense',
# full model classes
'EFN', 'PFN'
]
###############################################################################
# Keras 2.2.5 fixes bug in 2.2.4 that affects our usage of the Dot layer
###############################################################################
keras_version_tuple = tuple(map(int, __keras_version__.split('.')))
DOT_AXIS = 0 if keras_version_tuple <= (2, 2, 4) else 1
###############################################################################
# INPUT FUNCTIONS
###############################################################################
def construct_efn_input(input_dim, zs_name=None, phats_name=None):
# construct input tensors
zs_input = Input(batch_shape=(None, None), name=zs_name)
phats_input = Input(batch_shape=(None, None, input_dim), name=phats_name)
return [zs_input, phats_input]
def construct_pfn_input(input_dim, name=None):
# construct input tensor
return [Input(batch_shape=(None, None, input_dim), name=name)]
###############################################################################
# WEIGHT MASK FUNCTIONS
###############################################################################
def construct_efn_weight_mask(input_tensor, mask_val=0., name=None):
""""""
# define a function which maps the given mask_val to zero
def efn_mask_func(X, mask_val=mask_val):
# map mask_val to zero and leave everything else alone
return X * K.cast(K.not_equal(X, mask_val), K.dtype(X))
mask_layer = Lambda(efn_mask_func, name=name)
# return as lists for consistency
return [mask_layer], [mask_layer(input_tensor)]
def construct_pfn_weight_mask(input_tensor, mask_val=0., name=None):
""""""
# define a function which maps the given mask_val to zero
def pfn_mask_func(X, mask_val=mask_val):
# map mask_val to zero and return 1 elsewhere
return K.cast(K.any(K.not_equal(X, mask_val), axis=-1), K.dtype(X))
mask_layer = Lambda(pfn_mask_func, name=name)
# return as lists for consistency
return [mask_layer], [mask_layer(input_tensor)]
###############################################################################
# NETWORK FUNCTIONS
###############################################################################
def construct_distributed_dense(input_tensor, sizes, acts='relu', k_inits='he_uniform',
names=None, l2_regs=0.):
""""""
# repeat options if singletons
acts, k_inits, names = iter_or_rep(acts), iter_or_rep(k_inits), iter_or_rep(names)
l2_regs = iter_or_rep(l2_regs)
# list of tensors
layers, tensors = [], [input_tensor]
# iterate over specified layers
for s, act, k_init, name, l2_reg in zip(sizes, acts, k_inits, names, l2_regs):
# define a dense layer that will be applied through time distributed
kwargs = {}
if l2_reg > 0.:
kwargs.update({'kernel_regularizer': l2(l2_reg), 'bias_regularizer': l2(l2_reg)})
d_layer = Dense(s, kernel_initializer=k_init, **kwargs)
# get layers and append them to list
tdist_layer = TimeDistributed(d_layer, name=name)
act_layer = _get_act_layer(act)
layers.extend([tdist_layer, act_layer])
# get tensors and append them to list
tensors.append(tdist_layer(tensors[-1]))
tensors.append(act_layer(tensors[-1]))
return layers, tensors
def construct_latent(input_tensor, weight_tensor, dropout=0., name=None):
""""""
# lists of layers and tensors
layers = [Dot(DOT_AXIS, name=name)]
tensors = [layers[-1]([weight_tensor, input_tensor])]
# apply dropout if specified
if dropout > 0.:
dr_name = None if name is None else '{}_dropout'.format(name)
layers.append(Dropout(dropout, name=dr_name))
tensors.append(layers[-1](tensors[-1]))
return layers, tensors
def construct_dense(input_tensor, sizes,
acts='relu', k_inits='he_uniform',
dropouts=0., l2_regs=0.,
names=None):
""""""
# repeat options if singletons
acts, k_inits, names = iter_or_rep(acts), iter_or_rep(k_inits), iter_or_rep(names)
dropouts, l2_regs = iter_or_rep(dropouts), iter_or_rep(l2_regs)
# lists of layers and tensors
layers, tensors = [], [input_tensor]
# iterate to make specified layers
z = zip(sizes, acts, k_inits, dropouts, l2_regs, names)
for s, act, k_init, dropout, l2_reg, name in z:
# get layers and append them to list
kwargs = ({'kernel_regularizer': l2(l2_reg), 'bias_regularizer': l2(l2_reg)}
if l2_reg > 0. else {})
dense_layer = Dense(s, kernel_initializer=k_init, name=name, **kwargs)
act_layer = _get_act_layer(act)
layers.extend([dense_layer, act_layer])
# get tensors and append them to list
tensors.append(dense_layer(tensors[-1]))
tensors.append(act_layer(tensors[-1]))
# apply dropout if specified
if dropout > 0.:
dr_name = None if name is None else '{}_dropout'.format(name)
layers.append(Dropout(dropout, name=dr_name))
tensors.append(layers[-1](tensors[-1]))
return layers, tensors
###############################################################################
# SymmetricPerParticleNN - Base class for EFN-like models
###############################################################################
class SymmetricPerParticleNN(NNBase):
# EFN(*args, **kwargs)
def _process_hps(self):
r"""See [`ArchBase`](#archbase) for how to pass in hyperparameters as
well as defaults common to all EnergyFlow neural network models.
**Required EFN Hyperparameters**
- **input_dim** : _int_
- The number of features for each particle.
- **Phi_sizes** (formerly `ppm_sizes`) : {_tuple_, _list_} of _int_
- The sizes of the dense layers in the per-particle frontend
module $\Phi$. The last element will be the number of latent
observables that the model defines.
- **F_sizes** (formerly `dense_sizes`) : {_tuple_, _list_} of _int_
- The sizes of the dense layers in the backend module $F$.
**Default EFN Hyperparameters**
- **Phi_acts**=`'relu'` (formerly `ppm_acts`) : {_tuple_, _list_} of
_str_ or Keras activation
- Activation functions(s) for the dense layers in the
per-particle frontend module $\Phi$. A single string or activation
layer will apply the same activation to all layers. Keras advanced
activation layers are also accepted, either as strings (which use
the default arguments) or as Keras `Layer` instances. If passing a
single `Layer` instance, be aware that this layer will be used for
all activations and may introduce weight sharing (such as with
`PReLU`); it is recommended in this case to pass as many activations
as there are layers in the model. See the [Keras activations
docs](https://keras.io/activations/) for more detail.
- **F_acts**=`'relu'` (formerly `dense_acts`) : {_tuple_, _list_} of
_str_ or Keras activation
- Activation functions(s) for the dense layers in the
backend module $F$. A single string or activation layer will apply
the same activation to all layers.
- **Phi_k_inits**=`'he_uniform'` (formerly `ppm_k_inits`) : {_tuple_,
_list_} of _str_ or Keras initializer
- Kernel initializers for the dense layers in the per-particle
frontend module $\Phi$. A single string will apply the same
initializer to all layers. See the [Keras initializer docs](https:
//keras.io/initializers/) for more detail.
- **F_k_inits**=`'he_uniform'` (formerly `dense_k_inits`) : {_tuple_,
_list_} of _str_ or Keras initializer
- Kernel initializers for the dense layers in the backend
module $F$. A single string will apply the same initializer
to all layers.
- **latent_dropout**=`0` : _float_
- Dropout rates for the summation layer that defines the
value of the latent observables on the inputs. See the [Keras
Dropout layer](https://keras.io/layers/core/#dropout) for more
detail.
- **F_dropouts**=`0` (formerly `dense_dropouts`) : {_tuple_, _list_}
of _float_
- Dropout rates for the dense layers in the backend module $F$.
A single float will apply the same dropout rate to all dense layers.
- **Phi_l2_regs**=`0` : {_tuple_, _list_} of _float_
            - $L_2$-regularization strength for both the weights and biases
            of the layers in the $\Phi$ network. A single float will apply the
            same $L_2$-regularization to all layers.
        - **F_l2_regs**=`0` : {_tuple_, _list_} of _float_
            - $L_2$-regularization strength for both the weights and biases
            of the layers in the $F$ network. A single float will apply the
            same $L_2$-regularization to all layers.
- **mask_val**=`0` : _float_
- The value for which particles with all features set equal to
this value will be ignored. The [Keras Masking layer](https://
keras.io/layers/core/#masking) appears to have issues masking
the biases of a network, so this has been implemented in a
custom (and correct) manner since version `0.12.0`.
"""
# process generic NN hps
super(SymmetricPerParticleNN, self)._process_hps()
# required hyperparameters
self.input_dim = self._proc_arg('input_dim')
self.Phi_sizes = self._proc_arg('Phi_sizes', old='ppm_sizes')
self.F_sizes = self._proc_arg('F_sizes', old='dense_sizes')
# activations
self.Phi_acts = iter_or_rep(self._proc_arg('Phi_acts', default='relu',
old='ppm_acts'))
self.F_acts = iter_or_rep(self._proc_arg('F_acts', default='relu',
old='dense_acts'))
# initializations
self.Phi_k_inits = iter_or_rep(self._proc_arg('Phi_k_inits', default='he_uniform',
old='ppm_k_inits'))
self.F_k_inits = iter_or_rep(self._proc_arg('F_k_inits', default='he_uniform',
old='dense_k_inits'))
# regularizations
self.latent_dropout = self._proc_arg('latent_dropout', default=0.)
self.F_dropouts = iter_or_rep(self._proc_arg('F_dropouts', default=0.,
old='dense_dropouts'))
self.Phi_l2_regs = iter_or_rep(self._proc_arg('Phi_l2_regs', default=0.))
self.F_l2_regs = iter_or_rep(self._proc_arg('F_l2_regs', default=0.))
# masking
self.mask_val = self._proc_arg('mask_val', default=0.)
self._verify_empty_hps()
def _construct_model(self):
# initialize dictionaries for holding indices of subnetworks
self._layer_inds, self._tensor_inds = {}, {}
# construct earlier parts of the model
self._construct_inputs()
self._construct_Phi()
self._construct_latent()
self._construct_F()
# get output layers
d_layer = Dense(self.output_dim, name=self._proc_name('output'))
act_layer = _get_act_layer(self.output_act)
# append output tensors
self._tensors.append(d_layer(self.tensors[-1]))
self._tensors.append(act_layer(self.tensors[-1]))
# construct a new model
self._model = Model(inputs=self.inputs, outputs=self.output)
# compile model
self._compile_model()
@abstractmethod
def _construct_inputs(self):
pass
def _construct_Phi(self):
# get names
names = [self._proc_name('tdist_{}'.format(i)) for i in range(len(self.Phi_sizes))]
# determine begin inds
layer_inds, tensor_inds = [len(self.layers)], [len(self.tensors)]
# construct Phi
Phi_layers, Phi_tensors = construct_distributed_dense(self.inputs[-1], self.Phi_sizes,
acts=self.Phi_acts,
k_inits=self.Phi_k_inits,
names=names,
l2_regs=self.Phi_l2_regs)
# add layers and tensors to internal lists
self._layers.extend(Phi_layers)
self._tensors.extend(Phi_tensors)
# determine end inds
layer_inds.append(len(self.layers))
tensor_inds.append(len(self.tensors))
# store inds
self._layer_inds['Phi'] = layer_inds
self._tensor_inds['Phi'] = tensor_inds
def _construct_latent(self):
# determine begin inds
layer_inds, tensor_inds = [len(self.layers)], [len(self.tensors)]
# construct latent tensors
latent_layers, latent_tensors = construct_latent(self._tensors[-1], self.weights,
dropout=self.latent_dropout,
name=self._proc_name('sum'))
# add layers and tensors to internal lists
self._layers.extend(latent_layers)
self._tensors.extend(latent_tensors)
# determine end inds
layer_inds.append(len(self.layers))
tensor_inds.append(len(self.tensors))
# store inds
self._layer_inds['latent'] = layer_inds
self._tensor_inds['latent'] = tensor_inds
def _construct_F(self):
# get names
names = [self._proc_name('dense_{}'.format(i)) for i in range(len(self.F_sizes))]
# determine begin inds
layer_inds, tensor_inds = [len(self.layers)], [len(self.tensors)]
# construct F
F_layers, F_tensors = construct_dense(self.latent[-1], self.F_sizes,
acts=self.F_acts, k_inits=self.F_k_inits,
dropouts=self.F_dropouts, names=names,
l2_regs=self.F_l2_regs)
# add layers and tensors to internal lists
self._layers.extend(F_layers)
self._tensors.extend(F_tensors)
# determine end inds
layer_inds.append(len(self.layers))
tensor_inds.append(len(self.tensors))
# store inds
self._layer_inds['F'] = layer_inds
self._tensor_inds['F'] = tensor_inds
@abstractproperty
def inputs(self):
pass
@abstractproperty
def weights(self):
pass
@property
def Phi(self):
r"""List of tensors corresponding to the layers in the $\Phi$ network."""
begin, end = self._tensor_inds['Phi']
return self._tensors[begin:end]
@property
def latent(self):
"""List of tensors corresponding to the summation layer in the
network, including any dropout layer if present.
"""
begin, end = self._tensor_inds['latent']
return self._tensors[begin:end]
@property
def F(self):
"""List of tensors corresponding to the layers in the $F$ network."""
begin, end = self._tensor_inds['F']
return self._tensors[begin:end]
@property
def output(self):
"""Output tensor for the model."""
return self._tensors[-1]
@property
def layers(self):
"""List of all layers in the model."""
return self._layers
@property
def tensors(self):
"""List of all tensors in the model."""
return self._tensors
###############################################################################
# EFN - Energy flow network class
###############################################################################
class EFN(SymmetricPerParticleNN):
"""Energy Flow Network (EFN) architecture."""
def _construct_inputs(self):
# construct input tensors
self._inputs = construct_efn_input(self.input_dim,
zs_name=self._proc_name('zs_input'),
phats_name=self._proc_name('phats_input'))
# construct weight tensor
mask_layers, mask_tensors = construct_efn_weight_mask(self.inputs[0],
mask_val=self.mask_val,
name=self._proc_name('mask'))
self._weights = mask_tensors[0]
# begin list of tensors with the inputs
self._tensors = [self.inputs, self.weights]
# begin list of layers with the mask layer
self._layers = [mask_layers[0]]
@property
def inputs(self):
"""List of input tensors to the model. EFNs have two input tensors:
`inputs[0]` corresponds to the `zs` input and `inputs[1]` corresponds
to the `phats` input.
"""
return self._inputs
@property
def weights(self):
"""Weight tensor for the model. This is the `zs` input where entries
equal to `mask_val` have been set to zero.
"""
return self._weights
# eval_filters(patch, n=100, prune=True)
def eval_filters(self, patch, n=100, prune=True):
"""Evaluates the latent space filters of this model on a patch of the
two-dimensional geometric input space.
**Arguments**
- **patch** : {_tuple_, _list_} of _float_
- Specifies the patch of the geometric input space to be evaluated.
            A list of length 4 is interpreted as `[xmin, ymin, xmax, ymax]`.
Passing a single float `R` is equivalent to `[-R,-R,R,R]`.
- **n** : {_tuple_, _list_} of _int_
- The number of grid points on which to evaluate the filters. A list
            of length 2 is interpreted as `[nx, ny]` where `nx` is the number of
points along the x (or first) dimension and `ny` is the number of points
along the y (or second) dimension.
- **prune** : _bool_
- Whether to remove filters that are all zero (which happens sometimes
due to dying ReLUs).
**Returns**
- (_numpy.ndarray_, _numpy.ndarray_, _numpy.ndarray_)
- Returns three arrays, `(X, Y, Z)`, where `X` and `Y` have shape `(nx, ny)`
and are arrays of the values of the geometric inputs in the specified patch.
`Z` has shape `(num_filters, nx, ny)` and is the value of the different
filters at each point.
"""
# determine patch of xy space to evaluate filters on
if isinstance(patch, (float, int)):
if patch > 0:
xmin, ymin, xmax, ymax = -patch, -patch, patch, patch
else:
                raise ValueError('patch must be positive when passing as a single number.')
else:
xmin, ymin, xmax, ymax = patch
# determine number of pixels in each dimension
if isinstance(n, int):
nx = ny = n
else:
nx, ny = n
# construct grid of inputs
xs, ys = np.linspace(xmin, xmax, nx), np.linspace(ymin, ymax, ny)
X, Y = np.meshgrid(xs, ys, indexing='ij')
XY = np.asarray([X, Y]).reshape((1, 2, nx*ny)).transpose((0, 2, 1))
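        # XY has shape (1, nx*ny, 2): a single "event" whose particles are the grid points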
# handle weirdness of Keras/tensorflow
old_keras = (keras_version_tuple <= (2, 2, 5))
s = self.Phi_sizes[-1] if len(self.Phi_sizes) else self.input_dim
in_t, out_t = self.inputs[1], self._tensors[self._tensor_inds['latent'][0] - 1]
# construct function
kf = K.function([in_t] if old_keras else in_t, [out_t] if old_keras else out_t)
# evaluate function
Z = kf([XY] if old_keras else XY)[0].reshape(nx, ny, s).transpose((2, 0, 1))
# prune filters that are off
if prune:
return X, Y, Z[[not (z == 0).all() for z in Z]]
return X, Y, Z
###############################################################################
# PFN - Particle flow network class
###############################################################################
class PFN(SymmetricPerParticleNN):
"""Particle Flow Network (PFN) architecture. Accepts the same
hyperparameters as the [`EFN`](#EFN)."""
# PFN(*args, **kwargs)
def _construct_inputs(self):
"""""" # need this for autogen docs
# construct input tensor
self._inputs = construct_pfn_input(self.input_dim, name=self._proc_name('input'))
# construct weight tensor
mask_layers, mask_tensors = construct_pfn_weight_mask(self.inputs[0],
mask_val=self.mask_val,
name=self._proc_name('mask'))
self._weights = mask_tensors[0]
# begin list of tensors with the inputs
self._tensors = [self.inputs, self.weights]
# begin list of layers with the mask layer
self._layers = [mask_layers[0]]
@property
def inputs(self):
"""List of input tensors to the model. PFNs have one input tensor
corresponding to the `ps` input.
"""
return self._inputs
@property
def weights(self):
"""Weight tensor for the model. A weight of `0` is assigned to any
particle which has all features equal to `mask_val`, and `1` is
assigned otherwise.
"""
return self._weights
|
[
"keras.backend.dtype",
"energyflow.archs.archbase._get_act_layer",
"keras.backend.function",
"keras.layers.Lambda",
"keras.__version__.split",
"keras.layers.TimeDistributed",
"keras.layers.Dot",
"keras.backend.not_equal",
"numpy.asarray",
"keras.layers.Input",
"numpy.linspace",
"energyflow.utils.iter_or_rep",
"keras.models.Model",
"keras.regularizers.l2",
"keras.layers.Dense",
"numpy.meshgrid",
"keras.layers.Dropout"
] |
[((1477, 1522), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None)', 'name': 'zs_name'}), '(batch_shape=(None, None), name=zs_name)\n', (1482, 1522), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((1541, 1600), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, input_dim)', 'name': 'phats_name'}), '(batch_shape=(None, None, input_dim), name=phats_name)\n', (1546, 1600), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((2311, 2343), 'keras.layers.Lambda', 'Lambda', (['efn_mask_func'], {'name': 'name'}), '(efn_mask_func, name=name)\n', (2317, 2343), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((2773, 2805), 'keras.layers.Lambda', 'Lambda', (['pfn_mask_func'], {'name': 'name'}), '(pfn_mask_func, name=name)\n', (2779, 2805), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((3408, 3428), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['l2_regs'], {}), '(l2_regs)\n', (3419, 3428), False, 'from energyflow.utils import iter_or_rep\n'), ((1097, 1125), 'keras.__version__.split', '__keras_version__.split', (['"""."""'], {}), "('.')\n", (1120, 1125), True, 'from keras import __version__ as __keras_version__\n'), ((1727, 1780), 'keras.layers.Input', 'Input', ([], {'batch_shape': '(None, None, input_dim)', 'name': 'name'}), '(batch_shape=(None, None, input_dim), name=name)\n', (1732, 1780), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((3334, 3351), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['acts'], {}), '(acts)\n', (3345, 3351), False, 'from energyflow.utils import iter_or_rep\n'), ((3353, 3373), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['k_inits'], {}), '(k_inits)\n', (3364, 3373), False, 'from energyflow.utils import iter_or_rep\n'), ((3375, 3393), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['names'], {}), '(names)\n', (3386, 3393), False, 'from energyflow.utils import iter_or_rep\n'), ((3860, 3905), 'keras.layers.Dense', 'Dense', (['s'], {'kernel_initializer': 'k_init'}), '(s, kernel_initializer=k_init, **kwargs)\n', (3865, 3905), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((3974, 4009), 'keras.layers.TimeDistributed', 'TimeDistributed', (['d_layer'], {'name': 'name'}), '(d_layer, name=name)\n', (3989, 4009), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((4030, 4049), 'energyflow.archs.archbase._get_act_layer', '_get_act_layer', (['act'], {}), '(act)\n', (4044, 4049), False, 'from energyflow.archs.archbase import NNBase, _get_act_layer\n'), ((4404, 4428), 'keras.layers.Dot', 'Dot', (['DOT_AXIS'], {'name': 'name'}), '(DOT_AXIS, name=name)\n', (4407, 4428), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((4999, 5016), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['acts'], {}), '(acts)\n', (5010, 5016), False, 'from energyflow.utils import iter_or_rep\n'), ((5018, 5038), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['k_inits'], {}), '(k_inits)\n', (5029, 5038), False, 'from energyflow.utils import iter_or_rep\n'), ((5040, 5058), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['names'], {}), '(names)\n', (5051, 5058), False, 'from energyflow.utils import iter_or_rep\n'), ((5083, 5104), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['dropouts'], {}), '(dropouts)\n', (5094, 5104), False, 'from 
energyflow.utils import iter_or_rep\n'), ((5106, 5126), 'energyflow.utils.iter_or_rep', 'iter_or_rep', (['l2_regs'], {}), '(l2_regs)\n', (5117, 5126), False, 'from energyflow.utils import iter_or_rep\n'), ((5551, 5607), 'keras.layers.Dense', 'Dense', (['s'], {'kernel_initializer': 'k_init', 'name': 'name'}), '(s, kernel_initializer=k_init, name=name, **kwargs)\n', (5556, 5607), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((5628, 5647), 'energyflow.archs.archbase._get_act_layer', '_get_act_layer', (['act'], {}), '(act)\n', (5642, 5647), False, 'from energyflow.archs.archbase import NNBase, _get_act_layer\n'), ((12561, 12592), 'energyflow.archs.archbase._get_act_layer', '_get_act_layer', (['self.output_act'], {}), '(self.output_act)\n', (12575, 12592), False, 'from energyflow.archs.archbase import NNBase, _get_act_layer\n'), ((12795, 12841), 'keras.models.Model', 'Model', ([], {'inputs': 'self.inputs', 'outputs': 'self.output'}), '(inputs=self.inputs, outputs=self.output)\n', (12800, 12841), False, 'from keras.models import Model\n'), ((20572, 20606), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {'indexing': '"""ij"""'}), "(xs, ys, indexing='ij')\n", (20583, 20606), True, 'import numpy as np\n'), ((20991, 21065), 'keras.backend.function', 'K.function', (['([in_t] if old_keras else in_t)', '([out_t] if old_keras else out_t)'], {}), '([in_t] if old_keras else in_t, [out_t] if old_keras else out_t)\n', (21001, 21065), True, 'from keras import backend as K\n'), ((2743, 2753), 'keras.backend.dtype', 'K.dtype', (['X'], {}), '(X)\n', (2750, 2753), True, 'from keras import backend as K\n'), ((4635, 4665), 'keras.layers.Dropout', 'Dropout', (['dropout'], {'name': 'dr_name'}), '(dropout, name=dr_name)\n', (4642, 4665), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((20500, 20527), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (20511, 20527), True, 'import numpy as np\n'), ((20529, 20556), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'ny'], {}), '(ymin, ymax, ny)\n', (20540, 20556), True, 'import numpy as np\n'), ((2255, 2279), 'keras.backend.not_equal', 'K.not_equal', (['X', 'mask_val'], {}), '(X, mask_val)\n', (2266, 2279), True, 'from keras import backend as K\n'), ((2281, 2291), 'keras.backend.dtype', 'K.dtype', (['X'], {}), '(X)\n', (2288, 2291), True, 'from keras import backend as K\n'), ((2707, 2731), 'keras.backend.not_equal', 'K.not_equal', (['X', 'mask_val'], {}), '(X, mask_val)\n', (2718, 2731), True, 'from keras import backend as K\n'), ((5442, 5452), 'keras.regularizers.l2', 'l2', (['l2_reg'], {}), '(l2_reg)\n', (5444, 5452), False, 'from keras.regularizers import l2\n'), ((5474, 5484), 'keras.regularizers.l2', 'l2', (['l2_reg'], {}), '(l2_reg)\n', (5476, 5484), False, 'from keras.regularizers import l2\n'), ((6002, 6032), 'keras.layers.Dropout', 'Dropout', (['dropout'], {'name': 'dr_name'}), '(dropout, name=dr_name)\n', (6009, 6032), False, 'from keras.layers import Dense, Dot, Dropout, Input, Lambda, TimeDistributed\n'), ((3797, 3807), 'keras.regularizers.l2', 'l2', (['l2_reg'], {}), '(l2_reg)\n', (3799, 3807), False, 'from keras.regularizers import l2\n'), ((3829, 3839), 'keras.regularizers.l2', 'l2', (['l2_reg'], {}), '(l2_reg)\n', (3831, 3839), False, 'from keras.regularizers import l2\n'), ((20620, 20638), 'numpy.asarray', 'np.asarray', (['[X, Y]'], {}), '([X, Y])\n', (20630, 20638), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
from blackbox_mpc.optimizers.optimizer_base import OptimizerBase
class PSOOptimizer(OptimizerBase):
def __init__(self, env_action_space, env_observation_space,
planning_horizon=50, max_iterations=5, population_size=500,
num_agents=5, c1=tf.constant(0.3, dtype=tf.float32),
c2=tf.constant(0.5, dtype=tf.float32), w=tf.constant(0.2, dtype=tf.float32),
initial_velocity_fraction=tf.constant(0.01, dtype=tf.float32)):
"""
This class defines the particle swarm optimizer.
(https://www.cs.tufts.edu/comp/150GA/homeworks/hw3/_reading6%201995%20particle%20swarming.pdf)
Parameters
        ----------
env_action_space: gym.ActionSpace
Defines the action space of the gym environment.
env_observation_space: gym.ObservationSpace
Defines the observation space of the gym environment.
planning_horizon: Int
Defines the planning horizon for the optimizer (how many steps to lookahead and optimize for).
max_iterations: tf.int32
            Defines the maximum number of iterations for the PSO optimizer to refine its guess for the optimal solution.
population_size: tf.int32
Defines the population size of the particles evaluated at each iteration.
num_agents: tf.int32
            Defines the number of runners running in parallel.
c1: tf.float32
Defines the fraction of the local best known position direction.
c2: tf.float32
Defines the fraction of the global best known position direction.
w: tf.float32
Defines the fraction of the current velocity to use.
initial_velocity_fraction: tf.float32
Defines the initial velocity fraction out of the action space.
"""
super(PSOOptimizer, self).__init__(name=None,
planning_horizon=planning_horizon,
max_iterations=max_iterations,
num_agents=num_agents,
env_action_space=env_action_space,
env_observation_space=
env_observation_space)
self._solution_dim = [self._num_agents, tf.constant(self._planning_horizon, dtype=tf.int32), self._dim_U]
self._solution_size = tf.reduce_prod(self._solution_dim)
self._population_size = population_size
self._particle_positions = tf.Variable(tf.zeros([self._population_size, *self._solution_dim], dtype=tf.float32))
self._particle_velocities = tf.Variable(tf.zeros([self._population_size, *self._solution_dim], dtype=tf.float32))
self._particle_best_known_position = tf.Variable(tf.zeros([self._population_size, *self._solution_dim],
dtype=tf.float32))
self._particle_best_known_reward = tf.Variable(tf.zeros([self._population_size, self._num_agents],
dtype=tf.float32))
#global
self._global_best_known_position = tf.Variable(tf.zeros([*self._solution_dim], dtype=tf.float32))
self._global_best_known_reward = tf.Variable(tf.zeros([self._num_agents], dtype=tf.float32))
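        # per-dimension sampling variance of (action range / 4)^2, tiled over agents and the planning horizon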
solution_variance_values = np.tile(np.square(self._action_lower_bound - self._action_upper_bound) / 16,
[self._planning_horizon * self._num_agents, 1])
solution_variance_values = solution_variance_values.reshape([self._num_agents, self._planning_horizon, -1])
self._solution_variance = tf.constant(solution_variance_values, dtype=tf.float32)
self._c1 = c1
self._c2 = c2
self._w = w
self._initial_velocity_fraction = initial_velocity_fraction
self._solution = tf.Variable(tf.zeros([self._num_agents, self._dim_U], dtype=tf.float32))
@tf.function
def _optimize(self, current_state, time_step):
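        """
        Run the PSO iterations from the given state: the first action of the best
        action sequence found is stored in self._solution and returned, and the swarm
        is re-initialized around the time-shifted best sequence for the next call.
        """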
def continue_condition(t, position):
result = tf.less(t, self._max_iterations)
return result
def iterate(t, position):
#evaluate each of the particles
            # evaluate the clipped particle positions, penalizing any constraint violation
feasible_particle_positions = tf.clip_by_value(self._particle_positions, self._action_lower_bound_horizon,
self._action_upper_bound_horizon)
penalty = tf.norm(tf.reshape(self._particle_positions - feasible_particle_positions, [self._population_size, self._num_agents, -1]),
axis=2) ** 2
self._particle_positions.assign(feasible_particle_positions)
rewards = self._trajectory_evaluator(current_state, self._particle_positions, time_step) - penalty
#set the best local known position
condition = tf.less(self._particle_best_known_reward, rewards)
new_particle_best_known_position = tf.where(tf.expand_dims(tf.expand_dims(condition, -1), -1), self._particle_positions,
self._particle_best_known_position)
self._particle_best_known_position.assign(new_particle_best_known_position)
new_particle_best_known_reward = tf.where(condition, rewards,
self._particle_best_known_reward)
self._particle_best_known_reward.assign(new_particle_best_known_reward)
#get the global best now
global_best_known_position_index = tf.math.argmax(self._particle_best_known_reward)
samples = tf.transpose(self._particle_best_known_position, [1, 0, 2, 3])
global_best_known_position_index = tf.cast(global_best_known_position_index, dtype=tf.int32) + tf.range(0, samples.shape[0], dtype=tf.int32) * samples.shape[1]
samples = tf.reshape(samples, [-1, *samples.shape[2:]])
self._global_best_known_position.assign(tf.gather(samples, global_best_known_position_index))
samples = tf.reshape(self._particle_best_known_reward, [-1])
self._global_best_known_reward.assign(tf.gather(samples, global_best_known_position_index))
#calculate the velocity now
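            # v <- w*v + c1*r1*(p_best - x) + c2*r2*(g_best - x); here r1 and r2 are
            # scalar draws from a normal distribution rather than the usual U(0, 1)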
adapted_particle_velocities = (self._particle_velocities * self._w) + \
(self._particle_best_known_position - self._particle_positions) * self._c1 * tf.random.normal(shape=[], dtype=tf.float32) + \
(self._global_best_known_position - self._particle_positions) * self._c2 * tf.random.normal(shape=[], dtype=tf.float32)
self._particle_velocities.assign(adapted_particle_velocities)
self._particle_positions.assign(self._particle_positions + self._particle_velocities)
return t + tf.constant(1, dtype=tf.int32), self._global_best_known_position
_ = tf.while_loop(cond=continue_condition, body=iterate, loop_vars=[tf.constant(0, dtype=tf.int32), self._global_best_known_position])
self._solution.assign(self._global_best_known_position[:, 0, :])
# update the particles position for the next iteration
lower_bound_dist = self._global_best_known_position - self._action_lower_bound_horizon
upper_bound_dist = self._action_upper_bound_horizon - self._global_best_known_position
constrained_variance = tf.minimum(tf.minimum(tf.square(lower_bound_dist / tf.constant(2, dtype=tf.float32)),
tf.square(upper_bound_dist / tf.constant(2, dtype=tf.float32))),
self._solution_variance)
samples_positions = tf.random.truncated_normal([self._population_size,
*self._solution_dim],
tf.concat([self._global_best_known_position[:, 1:],
tf.expand_dims(self._global_best_known_position[:, -1],
1)], 1),
tf.sqrt(constrained_variance),
dtype=tf.float32)
action_space = self._action_upper_bound_horizon - self._action_lower_bound_horizon
initial_velocity = self._initial_velocity_fraction * action_space
samples_velocities = tf.random.uniform([self._population_size, *self._solution_dim], -initial_velocity,
initial_velocity, dtype=tf.float32)
self._particle_positions.assign(samples_positions)
self._particle_velocities.assign(samples_velocities)
self._particle_best_known_position.assign(samples_positions)
self._particle_best_known_reward.assign(tf.fill([self._population_size, self._num_agents],
tf.constant(-np.inf, dtype=tf.float32)))
self._global_best_known_reward.assign(tf.fill([self._num_agents],
tf.constant(-np.inf, dtype=tf.float32)))
#end update particles
resulting_action = self._solution
return resulting_action
def reset(self):
"""
This method resets the optimizer to its default state at the beginning of the trajectory/episode.
"""
samples_positions = tf.random.uniform([self._population_size, *self._solution_dim], self._action_lower_bound_horizon,
self._action_upper_bound_horizon, dtype=tf.float32)
action_space = self._action_upper_bound_horizon - self._action_lower_bound_horizon
initial_velocity = self._initial_velocity_fraction * action_space
samples_velocities = tf.random.uniform([self._population_size, *self._solution_dim], -initial_velocity,
initial_velocity, dtype=tf.float32)
self._particle_positions.assign(samples_positions)
self._particle_velocities.assign(samples_velocities)
self._particle_best_known_position.assign(samples_positions)
self._particle_best_known_reward.assign(tf.fill([self._population_size, self._num_agents],
tf.constant(-np.inf, dtype=tf.float32)))
self._global_best_known_reward.assign(tf.fill([self._num_agents],
tf.constant(-np.inf, dtype=tf.float32)))
return
|
[
"tensorflow.random.uniform",
"tensorflow.math.argmax",
"tensorflow.random.normal",
"tensorflow.reduce_prod",
"tensorflow.transpose",
"numpy.square",
"tensorflow.range",
"tensorflow.where",
"tensorflow.sqrt",
"tensorflow.constant",
"tensorflow.clip_by_value",
"tensorflow.gather",
"tensorflow.reshape",
"tensorflow.less",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.zeros"
] |
[((320, 354), 'tensorflow.constant', 'tf.constant', (['(0.3)'], {'dtype': 'tf.float32'}), '(0.3, dtype=tf.float32)\n', (331, 354), True, 'import tensorflow as tf\n'), ((376, 410), 'tensorflow.constant', 'tf.constant', (['(0.5)'], {'dtype': 'tf.float32'}), '(0.5, dtype=tf.float32)\n', (387, 410), True, 'import tensorflow as tf\n'), ((414, 448), 'tensorflow.constant', 'tf.constant', (['(0.2)'], {'dtype': 'tf.float32'}), '(0.2, dtype=tf.float32)\n', (425, 448), True, 'import tensorflow as tf\n'), ((493, 528), 'tensorflow.constant', 'tf.constant', (['(0.01)'], {'dtype': 'tf.float32'}), '(0.01, dtype=tf.float32)\n', (504, 528), True, 'import tensorflow as tf\n'), ((2504, 2538), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['self._solution_dim'], {}), '(self._solution_dim)\n', (2518, 2538), True, 'import tensorflow as tf\n'), ((3794, 3849), 'tensorflow.constant', 'tf.constant', (['solution_variance_values'], {'dtype': 'tf.float32'}), '(solution_variance_values, dtype=tf.float32)\n', (3805, 3849), True, 'import tensorflow as tf\n'), ((8720, 8843), 'tensorflow.random.uniform', 'tf.random.uniform', (['[self._population_size, *self._solution_dim]', '(-initial_velocity)', 'initial_velocity'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], -\n initial_velocity, initial_velocity, dtype=tf.float32)\n', (8737, 8843), True, 'import tensorflow as tf\n'), ((9726, 9885), 'tensorflow.random.uniform', 'tf.random.uniform', (['[self._population_size, *self._solution_dim]', 'self._action_lower_bound_horizon', 'self._action_upper_bound_horizon'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], self.\n _action_lower_bound_horizon, self._action_upper_bound_horizon, dtype=tf\n .float32)\n', (9743, 9885), True, 'import tensorflow as tf\n'), ((10116, 10239), 'tensorflow.random.uniform', 'tf.random.uniform', (['[self._population_size, *self._solution_dim]', '(-initial_velocity)', 'initial_velocity'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], -\n initial_velocity, initial_velocity, dtype=tf.float32)\n', (10133, 10239), True, 'import tensorflow as tf\n'), ((2408, 2459), 'tensorflow.constant', 'tf.constant', (['self._planning_horizon'], {'dtype': 'tf.int32'}), '(self._planning_horizon, dtype=tf.int32)\n', (2419, 2459), True, 'import tensorflow as tf\n'), ((2634, 2706), 'tensorflow.zeros', 'tf.zeros', (['[self._population_size, *self._solution_dim]'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], dtype=tf.float32)\n', (2642, 2706), True, 'import tensorflow as tf\n'), ((2756, 2828), 'tensorflow.zeros', 'tf.zeros', (['[self._population_size, *self._solution_dim]'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], dtype=tf.float32)\n', (2764, 2828), True, 'import tensorflow as tf\n'), ((2887, 2959), 'tensorflow.zeros', 'tf.zeros', (['[self._population_size, *self._solution_dim]'], {'dtype': 'tf.float32'}), '([self._population_size, *self._solution_dim], dtype=tf.float32)\n', (2895, 2959), True, 'import tensorflow as tf\n'), ((3082, 3151), 'tensorflow.zeros', 'tf.zeros', (['[self._population_size, self._num_agents]'], {'dtype': 'tf.float32'}), '([self._population_size, self._num_agents], dtype=tf.float32)\n', (3090, 3151), True, 'import tensorflow as tf\n'), ((3289, 3338), 'tensorflow.zeros', 'tf.zeros', (['[*self._solution_dim]'], {'dtype': 'tf.float32'}), '([*self._solution_dim], dtype=tf.float32)\n', (3297, 3338), True, 'import tensorflow as tf\n'), ((3393, 3439), 'tensorflow.zeros', 
'tf.zeros', (['[self._num_agents]'], {'dtype': 'tf.float32'}), '([self._num_agents], dtype=tf.float32)\n', (3401, 3439), True, 'import tensorflow as tf\n'), ((4019, 4078), 'tensorflow.zeros', 'tf.zeros', (['[self._num_agents, self._dim_U]'], {'dtype': 'tf.float32'}), '([self._num_agents, self._dim_U], dtype=tf.float32)\n', (4027, 4078), True, 'import tensorflow as tf\n'), ((4215, 4247), 'tensorflow.less', 'tf.less', (['t', 'self._max_iterations'], {}), '(t, self._max_iterations)\n', (4222, 4247), True, 'import tensorflow as tf\n'), ((4437, 4551), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self._particle_positions', 'self._action_lower_bound_horizon', 'self._action_upper_bound_horizon'], {}), '(self._particle_positions, self._action_lower_bound_horizon,\n self._action_upper_bound_horizon)\n', (4453, 4551), True, 'import tensorflow as tf\n'), ((5051, 5101), 'tensorflow.less', 'tf.less', (['self._particle_best_known_reward', 'rewards'], {}), '(self._particle_best_known_reward, rewards)\n', (5058, 5101), True, 'import tensorflow as tf\n'), ((5461, 5523), 'tensorflow.where', 'tf.where', (['condition', 'rewards', 'self._particle_best_known_reward'], {}), '(condition, rewards, self._particle_best_known_reward)\n', (5469, 5523), True, 'import tensorflow as tf\n'), ((5747, 5795), 'tensorflow.math.argmax', 'tf.math.argmax', (['self._particle_best_known_reward'], {}), '(self._particle_best_known_reward)\n', (5761, 5795), True, 'import tensorflow as tf\n'), ((5818, 5880), 'tensorflow.transpose', 'tf.transpose', (['self._particle_best_known_position', '[1, 0, 2, 3]'], {}), '(self._particle_best_known_position, [1, 0, 2, 3])\n', (5830, 5880), True, 'import tensorflow as tf\n'), ((6075, 6120), 'tensorflow.reshape', 'tf.reshape', (['samples', '[-1, *samples.shape[2:]]'], {}), '(samples, [-1, *samples.shape[2:]])\n', (6085, 6120), True, 'import tensorflow as tf\n'), ((6249, 6299), 'tensorflow.reshape', 'tf.reshape', (['self._particle_best_known_reward', '[-1]'], {}), '(self._particle_best_known_reward, [-1])\n', (6259, 6299), True, 'import tensorflow as tf\n'), ((8422, 8451), 'tensorflow.sqrt', 'tf.sqrt', (['constrained_variance'], {}), '(constrained_variance)\n', (8429, 8451), True, 'import tensorflow as tf\n'), ((3484, 3546), 'numpy.square', 'np.square', (['(self._action_lower_bound - self._action_upper_bound)'], {}), '(self._action_lower_bound - self._action_upper_bound)\n', (3493, 3546), True, 'import numpy as np\n'), ((5928, 5985), 'tensorflow.cast', 'tf.cast', (['global_best_known_position_index'], {'dtype': 'tf.int32'}), '(global_best_known_position_index, dtype=tf.int32)\n', (5935, 5985), True, 'import tensorflow as tf\n'), ((6173, 6225), 'tensorflow.gather', 'tf.gather', (['samples', 'global_best_known_position_index'], {}), '(samples, global_best_known_position_index)\n', (6182, 6225), True, 'import tensorflow as tf\n'), ((6350, 6402), 'tensorflow.gather', 'tf.gather', (['samples', 'global_best_known_position_index'], {}), '(samples, global_best_known_position_index)\n', (6359, 6402), True, 'import tensorflow as tf\n'), ((9230, 9268), 'tensorflow.constant', 'tf.constant', (['(-np.inf)'], {'dtype': 'tf.float32'}), '(-np.inf, dtype=tf.float32)\n', (9241, 9268), True, 'import tensorflow as tf\n'), ((9399, 9437), 'tensorflow.constant', 'tf.constant', (['(-np.inf)'], {'dtype': 'tf.float32'}), '(-np.inf, dtype=tf.float32)\n', (9410, 9437), True, 'import tensorflow as tf\n'), ((10626, 10664), 'tensorflow.constant', 'tf.constant', (['(-np.inf)'], {'dtype': 'tf.float32'}), '(-np.inf, 
dtype=tf.float32)\n', (10637, 10664), True, 'import tensorflow as tf\n'), ((10795, 10833), 'tensorflow.constant', 'tf.constant', (['(-np.inf)'], {'dtype': 'tf.float32'}), '(-np.inf, dtype=tf.float32)\n', (10806, 10833), True, 'import tensorflow as tf\n'), ((4637, 4755), 'tensorflow.reshape', 'tf.reshape', (['(self._particle_positions - feasible_particle_positions)', '[self._population_size, self._num_agents, -1]'], {}), '(self._particle_positions - feasible_particle_positions, [self.\n _population_size, self._num_agents, -1])\n', (4647, 4755), True, 'import tensorflow as tf\n'), ((5174, 5203), 'tensorflow.expand_dims', 'tf.expand_dims', (['condition', '(-1)'], {}), '(condition, -1)\n', (5188, 5203), True, 'import tensorflow as tf\n'), ((5988, 6033), 'tensorflow.range', 'tf.range', (['(0)', 'samples.shape[0]'], {'dtype': 'tf.int32'}), '(0, samples.shape[0], dtype=tf.int32)\n', (5996, 6033), True, 'import tensorflow as tf\n'), ((6815, 6859), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[]', 'dtype': 'tf.float32'}), '(shape=[], dtype=tf.float32)\n', (6831, 6859), True, 'import tensorflow as tf\n'), ((7055, 7085), 'tensorflow.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.int32'}), '(1, dtype=tf.int32)\n', (7066, 7085), True, 'import tensorflow as tf\n'), ((7196, 7226), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (7207, 7226), True, 'import tensorflow as tf\n'), ((8221, 8279), 'tensorflow.expand_dims', 'tf.expand_dims', (['self._global_best_known_position[:, -1]', '(1)'], {}), '(self._global_best_known_position[:, -1], 1)\n', (8235, 8279), True, 'import tensorflow as tf\n'), ((6649, 6693), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '[]', 'dtype': 'tf.float32'}), '(shape=[], dtype=tf.float32)\n', (6665, 6693), True, 'import tensorflow as tf\n'), ((7671, 7703), 'tensorflow.constant', 'tf.constant', (['(2)'], {'dtype': 'tf.float32'}), '(2, dtype=tf.float32)\n', (7682, 7703), True, 'import tensorflow as tf\n'), ((7788, 7820), 'tensorflow.constant', 'tf.constant', (['(2)'], {'dtype': 'tf.float32'}), '(2, dtype=tf.float32)\n', (7799, 7820), True, 'import tensorflow as tf\n')]
|
import numpy as np
import porespy as ps
import matplotlib.pyplot as plt
import openpnm as op
np.random.seed(0)
def test_snow_example_script():
plot = False
im1 = ps.generators.blobs(shape=[600, 400], porosity=None, blobiness=1) < 0.4
im2 = ps.generators.blobs(shape=[600, 400], porosity=None, blobiness=1) < 0.7
phases = im1 + (im2 * ~im1)*2
# phases = phases > 0
snow_n = ps.networks.snow2(phases,
phase_alias={1: 'solid', 2: 'void'},
boundary_width=5,
accuracy='high',
parallelization=None)
assert snow_n.regions.max() == 211
# Remove all but 1 pixel-width of boundary regions
temp = ps.tools.extract_subsection(im=snow_n.regions,
shape=np.array(snow_n.regions.shape)-8)
assert temp.max() == 211
# Remove complete boundary region
temp = ps.tools.extract_subsection(im=snow_n.regions,
shape=np.array(snow_n.regions.shape)-10)
assert temp.max() == 164
    # %% Plot the final extraction overlaid with snow segmentation
if plot:
fig, ax = plt.subplots(1, 1)
ax.imshow(ps.tools.randomize_colors(snow_n.regions.T))
proj = op.io.from_porespy(snow_n.network)
op.topotools.plot_connections(network=proj.network, ax=ax)
op.topotools.plot_coordinates(network=proj.network, ax=ax)
plt.axis('off')
|
[
"openpnm.topotools.plot_coordinates",
"porespy.generators.blobs",
"porespy.networks.snow2",
"openpnm.io.from_porespy",
"numpy.array",
"openpnm.topotools.plot_connections",
"numpy.random.seed",
"matplotlib.pyplot.axis",
"porespy.tools.randomize_colors",
"matplotlib.pyplot.subplots"
] |
[((93, 110), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (107, 110), True, 'import numpy as np\n'), ((401, 528), 'porespy.networks.snow2', 'ps.networks.snow2', (['phases'], {'phase_alias': "{(1): 'solid', (2): 'void'}", 'boundary_width': '(5)', 'accuracy': '"""high"""', 'parallelization': 'None'}), "(phases, phase_alias={(1): 'solid', (2): 'void'},\n boundary_width=5, accuracy='high', parallelization=None)\n", (418, 528), True, 'import porespy as ps\n'), ((173, 238), 'porespy.generators.blobs', 'ps.generators.blobs', ([], {'shape': '[600, 400]', 'porosity': 'None', 'blobiness': '(1)'}), '(shape=[600, 400], porosity=None, blobiness=1)\n', (192, 238), True, 'import porespy as ps\n'), ((255, 320), 'porespy.generators.blobs', 'ps.generators.blobs', ([], {'shape': '[600, 400]', 'porosity': 'None', 'blobiness': '(1)'}), '(shape=[600, 400], porosity=None, blobiness=1)\n', (274, 320), True, 'import porespy as ps\n'), ((1193, 1211), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1205, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1291, 1325), 'openpnm.io.from_porespy', 'op.io.from_porespy', (['snow_n.network'], {}), '(snow_n.network)\n', (1309, 1325), True, 'import openpnm as op\n'), ((1334, 1392), 'openpnm.topotools.plot_connections', 'op.topotools.plot_connections', ([], {'network': 'proj.network', 'ax': 'ax'}), '(network=proj.network, ax=ax)\n', (1363, 1392), True, 'import openpnm as op\n'), ((1401, 1459), 'openpnm.topotools.plot_coordinates', 'op.topotools.plot_coordinates', ([], {'network': 'proj.network', 'ax': 'ax'}), '(network=proj.network, ax=ax)\n', (1430, 1459), True, 'import openpnm as op\n'), ((1468, 1483), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1476, 1483), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1273), 'porespy.tools.randomize_colors', 'ps.tools.randomize_colors', (['snow_n.regions.T'], {}), '(snow_n.regions.T)\n', (1255, 1273), True, 'import porespy as ps\n'), ((828, 858), 'numpy.array', 'np.array', (['snow_n.regions.shape'], {}), '(snow_n.regions.shape)\n', (836, 858), True, 'import numpy as np\n'), ((1029, 1059), 'numpy.array', 'np.array', (['snow_n.regions.shape'], {}), '(snow_n.regions.shape)\n', (1037, 1059), True, 'import numpy as np\n')]
|
from bagua.torch_api.contrib.cached_dataset import CachedDataset
from torch.utils.data.dataset import Dataset
import numpy as np
import logging
import unittest
from tests import skip_if_cuda_available
logging.basicConfig(level=logging.DEBUG)
class MyDataset(Dataset):
def __init__(self, size):
self.size = size
self.dataset = [(np.random.rand(5, 2), np.random.rand(1)) for _ in range(size)]
def __getitem__(self, item):
return self.dataset[item]
def __len__(self):
return self.size
class TestCacheDataset(unittest.TestCase):
def check_dataset(self, dataset, cache_dataset):
for _ in range(10):
for _, _ in enumerate(cache_dataset):
pass
for i in range(len(dataset)):
self.assertTrue((dataset[i][0] == cache_dataset[i][0]).all())
self.assertTrue((dataset[i][1] == cache_dataset[i][1]).all())
@skip_if_cuda_available()
def test_redis(self):
dataset1 = MyDataset(102)
dataset2 = MyDataset(102)
cache_dataset1 = CachedDataset(
dataset1,
backend="redis",
dataset_name="d1",
)
cache_dataset2 = CachedDataset(
dataset2,
backend="redis",
dataset_name="d2",
)
cache_dataset1.cache_loader.store.clear()
self.check_dataset(dataset1, cache_dataset1)
self.assertEqual(cache_dataset1.cache_loader.num_keys(), len(dataset1))
self.check_dataset(dataset2, cache_dataset2)
self.assertEqual(
cache_dataset2.cache_loader.num_keys(), len(dataset1) + len(dataset2)
)
if __name__ == "__main__":
unittest.main()
|
[
"logging.basicConfig",
"numpy.random.rand",
"bagua.torch_api.contrib.cached_dataset.CachedDataset",
"tests.skip_if_cuda_available",
"unittest.main"
] |
[((202, 242), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (221, 242), False, 'import logging\n'), ((921, 945), 'tests.skip_if_cuda_available', 'skip_if_cuda_available', ([], {}), '()\n', (943, 945), False, 'from tests import skip_if_cuda_available\n'), ((1694, 1709), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1707, 1709), False, 'import unittest\n'), ((1065, 1124), 'bagua.torch_api.contrib.cached_dataset.CachedDataset', 'CachedDataset', (['dataset1'], {'backend': '"""redis"""', 'dataset_name': '"""d1"""'}), "(dataset1, backend='redis', dataset_name='d1')\n", (1078, 1124), False, 'from bagua.torch_api.contrib.cached_dataset import CachedDataset\n'), ((1197, 1256), 'bagua.torch_api.contrib.cached_dataset.CachedDataset', 'CachedDataset', (['dataset2'], {'backend': '"""redis"""', 'dataset_name': '"""d2"""'}), "(dataset2, backend='redis', dataset_name='d2')\n", (1210, 1256), False, 'from bagua.torch_api.contrib.cached_dataset import CachedDataset\n'), ((351, 371), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (365, 371), True, 'import numpy as np\n'), ((373, 390), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (387, 390), True, 'import numpy as np\n')]
|
import numpy as np
import argparse
from simple_algo_utils import *
if __name__=='__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,description=None)
parser.add_argument('--example_n', default=1, type=int, help=None)
parser.add_argument('--verbose', default=0, type=int, help=None)
args = vars(parser.parse_args())
example_n = args['example_n']
verbose = args['verbose']
AVAILABLE_EXAMPLES = [1,2]
SECTION_LABELS = [None,1,1]
PAGES = [None, 223, 225]
T_MAX_LIST = [None,3,9]
try:
print('Example from <NAME>\' textbook section 4.2.3.%s page %s'%(
str(SECTION_LABELS[example_n]),str(PAGES[example_n])))
print('example_n : %s'%(str(example_n)))
except:
print('EXAMPLE NOT FOUND.')
print('The available example numbers for --example_n are %s'%(str(AVAILABLE_EXAMPLES)))
exit()
print("==== SETTOPOLOGY ====")
Nj = 4 # no. of neighbors
J = np.array([ [1,0], [0,1], [-1,0], [0,-1]]) # (X,Y) coordinates of neighbors
assert(J.shape[0]==Nj)
print(J, '\n')
print("==== SETBSG ====")
# the group used was \mathbb{Z}_4
if example_n in [1]:
L = 10
elif example_n in [2]:
L = 20
nBSG = 4
BSG = setBSG(nBSG, Nj)
print('BSG:\n',BSG,'\n')
print("==== SETG0 ====")
age = np.zeros(shape=(L,L))
if example_n in [1]:
G0 = np.array([[0,0,0,0],[3,3,3,3],[0,0,3,3],[0,0,0,0]])
elif example_n in [2]:
G0 = [[0,0,0,0], [2,0,0,0],]
for i in range(3,1+9): G0.append([i,0,i,0])
G0.append([0,0,10,0])
G0 = np.array(G0)
    nG0 = G0.shape[0]-1 # no. of generators, excluding empty generator
alpha = range(nG0+1)
print('G0:\n',G0,'\n')
print('alpha:\n',alpha,'\n')
print("==== SETEXTG0 =====")
GE = setEXTG0(nBSG, G0, Nj, BSG)
print('GE:\n',GE,'\n')
print("==== SETCINIT ====")
CE = np.ones(shape=(L,L))
if example_n in [1]:
CE[4,4] = 5
elif example_n in [2]:
CE[10,4:9] = 37
print('CE:\n',CE.astype(int),'\n')
print('==== DEVELOP ===')
T_MAX = T_MAX_LIST[example_n]
P = 1
if example_n in [1,2]:
split_mode = None
if example_n==2: split_mode = 'split2'
CE, age = develop(T_MAX, Nj, nBSG, L, J, GE, CE, age, P, verbose=verbose,
growth_mode='growth3', split_mode=split_mode)
if example_n == 2:
print('\nAPPLY SURGERY HERE')
CE[2:7,4:9] = 1
CE[7,4:9] = 21
# print(CE.astype(int))
print_in_alphabet(CE, nBSG, ALPH=None)
CE, age = develop(2, Nj, nBSG, L, J, GE, CE, age, P, verbose=verbose,
growth_mode='growth3', split_mode=split_mode)
print('\nAPPLY MORE SURGERY HERE')
CE[4:6,4:9] = 1
CE[9:12,4:9] = 1
CE[8,4:9] = 29
# print(CE.astype(int))
print_in_alphabet(CE, nBSG, ALPH=None)
CE, age = develop(2, Nj, nBSG, L, J, GE, CE, age, P, verbose=verbose,
growth_mode='growth3', split_mode=split_mode)
print('\nLAST PART')
CE = np.ones(shape=(20,20))
CE[4,4:10] = [13,13,14,13,13,13]
CE[5,4:10] = 19
CE[6,4:10] = 21
CE[7,4:10] = 27
CE[8,4:10] = 29
CE[6,8] = 29
CE[8,5] = 13
print_in_alphabet(CE, nBSG, ALPH=None)
print(CE.astype(int))
CE, age = develop(1, Nj, nBSG, L, J, GE, CE, age, P, verbose=verbose,
growth_mode='growth3', split_mode=split_mode)
|
[
"numpy.array",
"numpy.zeros",
"numpy.ones",
"argparse.ArgumentParser"
] |
[((107, 207), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'description': 'None'}), '(formatter_class=argparse.\n RawDescriptionHelpFormatter, description=None)\n', (130, 207), False, 'import argparse\n'), ((1010, 1054), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [-1, 0], [0, -1]]'], {}), '([[1, 0], [0, 1], [-1, 0], [0, -1]])\n', (1018, 1054), True, 'import numpy as np\n'), ((1394, 1416), 'numpy.zeros', 'np.zeros', ([], {'shape': '(L, L)'}), '(shape=(L, L))\n', (1402, 1416), True, 'import numpy as np\n'), ((1976, 1997), 'numpy.ones', 'np.ones', ([], {'shape': '(L, L)'}), '(shape=(L, L))\n', (1983, 1997), True, 'import numpy as np\n'), ((1454, 1520), 'numpy.array', 'np.array', (['[[0, 0, 0, 0], [3, 3, 3, 3], [0, 0, 3, 3], [0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0], [3, 3, 3, 3], [0, 0, 3, 3], [0, 0, 0, 0]])\n', (1462, 1520), True, 'import numpy as np\n'), ((1665, 1677), 'numpy.array', 'np.array', (['G0'], {}), '(G0)\n', (1673, 1677), True, 'import numpy as np\n'), ((3220, 3243), 'numpy.ones', 'np.ones', ([], {'shape': '(20, 20)'}), '(shape=(20, 20))\n', (3227, 3243), True, 'import numpy as np\n')]
|
import logging
from vespid import setup_logger
logger = setup_logger(__name__)
import pandas as pd
import numpy as np
from tqdm import tqdm
def calculate_interdisciplinarity_score(
membership_vectors
):
'''
Given a set of entities and
one vector for each representing the (ordered) strength
of membership of that entity across a set of clusters,
calculate the level of interdisciplinarity for each entity.
NOTE: length of membership_vectors should be the same for
all entities for an accurate calculation.
Parameters
----------
membership_vectors: numpy array of shape (n_samples, n_clusters)
that indicates how strongly each sample/entity belongs to
a cluster (e.g. membership_vectors[0] = [0.1, 0.2, 0.3, 0.4]
would indicate the strongest association for sample 0 with
cluster 3 and the weakest with cluster 0).
Returns
-------
numpy array of float scores of shape (n_samples,) in the range
[0.0, 1.0].
'''
num_clusters = membership_vectors.shape[1]
# (N / N-1) * (1 - max(P)) * (1 - stdev(P))
id_scores = (num_clusters / (num_clusters - 1)) * \
(1 - membership_vectors.max(axis=1)) * \
(1 - membership_vectors.std(axis=1))
# In some instances, the score can go higher than 1.0
# Make sure that doesn't happen but we alert on it
    over_max = (id_scores > 1.0).sum()
    if over_max > 0:
        logger.warning(f"Found {over_max} instances in which score is above 1.0. "
"Forcing these to be 1.0...")
id_scores[id_scores > 1.0] = 1.0
return id_scores
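# Quick sanity check of the formula above (hypothetical membership vectors, not from
# the original source): a paper spread evenly over four clusters scores the maximum,
# while a paper fully committed to one cluster scores zero.
#   calculate_interdisciplinarity_score(np.array([[0.25, 0.25, 0.25, 0.25]]))
#   -> approximately array([1.0])   # (4/3) * (1 - 0.25) * (1 - 0.0)
#   calculate_interdisciplinarity_score(np.array([[1.0, 0.0, 0.0, 0.0]]))
#   -> array([0.0])                  # the (1 - max(P)) factor is zero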
def interdisciplinarity_from_citation_clusters(
graph,
year,
cluster_attribute='clusterID'
):
'''
Uses Cypher query with Neo4j instance (enriched with paper cluster labels
e.g. from HDBSCAN clustering) to determine how interdisciplinary
papers' references and citations are. Uses a similar scoring
logic as what is used in vespid.models.clustering with
HDBSCAN soft clustering probabilities.
Parameters
----------
graph: Neo4jConnectionHandler object. Used for querying the
graph for citation information.
year: int. Indicates the maximum year of publication of
interest.
cluster_attribute: str. Indicates the node attribute to use
for determining the cluster membership of the node
(e.g. 'cluster_id_2019').
Returns
-------
pandas DataFrame with columns ['paperID', 'id_score'] of
length n_nodes, with id_score being interdisciplinarity
scores of shape (n_nodes,)
'''
def fill_out_vector(cluster_identifiers, cluster_values, num_total_clusters):
'''
Takes a partial membership vector and fills out the missing
elements with zeros, placing the nonzero elements properly.
Parameters
----------
cluster_identifiers: numpy array of ints. Indicates which clusters
map to the values given in ``cluster_values`` (and thus must
be the same length as ``cluster_values``) for the node
in question.
cluster_values: numpy array of float. Indicates the strength
of membership the entity has to each cluster for the node
in question.
num_total_clusters: int. Indicates how many clusters there
are in the total solution. Must be greater than or
equal to the values provided in ``cluster_identifiers``.
Returns
-------
numpy array of shape (num_total_clusters,) representing
the cluster membership strengths/probabilities of the
node.
'''
if len(cluster_identifiers) != len(cluster_values):
raise ValueError("cluster_identifiers and cluster_values "
f"must be of the same length, but got {len(cluster_identifiers)} "
f"and {len(cluster_values)}, resp.")
if num_total_clusters < np.max(cluster_identifiers):
raise ValueError(f"num_total_clusters ({num_total_clusters}) "
"must not be less than the maximum "
f"cluster_identifiers value ({np.max(cluster_identifiers)})")
if len(cluster_identifiers) > len(np.unique(cluster_identifiers)):
raise ValueError("cluster_identifiers contains duplicate values")
# Build out an all-zeros vector of the proper length
cluster_vector = np.zeros(num_total_clusters)
# Fill in the right zeros to reflect cluster membership values
cluster_vector[cluster_identifiers] = cluster_values
return cluster_vector
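    # Worked example for the helper above (hypothetical values):
    #   fill_out_vector([1, 3], [0.6, 0.4], num_total_clusters=5)
    #   -> array([0. , 0.6, 0. , 0.4, 0. ])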
# Query in the same fashion as what is used to generate BW centrality scores
    # Effectively ensures that all papers are either published in `year` or
# are referenced by ones published in `year`
# also ignores publications that lack a cluster ID or are noise (clusterID = -1)
query = f"""
MATCH (p:Publication)<-[c:CITED_BY]-(m:Publication)
WHERE c.publicationDate.year = {year}
AND m.publicationDate.year <= {year}
AND p.{cluster_attribute} IS NOT NULL
AND toInteger(p.{cluster_attribute}) > -1
AND m.{cluster_attribute} IS NOT NULL
AND toInteger(m.{cluster_attribute}) > -1
WITH DISTINCT p AS p, COUNT(c) AS NumTotalCitations
MATCH (p)<-[c:CITED_BY]-(m:Publication)
WHERE c.publicationDate.year = {year}
AND m.publicationDate.year <= {year}
AND m.{cluster_attribute} IS NOT NULL
AND toInteger(m.{cluster_attribute}) > -1
WITH p,
NumTotalCitations,
toInteger(m.{cluster_attribute}) AS CitationClusterLabel,
COUNT(m) AS NumCitationsInCluster
RETURN p.id AS paperID,
p.publicationDate.year AS Year,
toInteger(p.{cluster_attribute}) AS PrimaryClusterLabel,
CitationClusterLabel,
toFloat(NumCitationsInCluster) / NumTotalCitations AS FractionalMembership
"""
df = graph.cypher_query_to_dataframe(query, verbose=False)
logger.debug(f"Years covered by network-ID-scoring query are {df['Year'].min()} to {df['Year'].max()}")
# Which papers didn't have a membership value for the cluster they're assigned to?
# AKA which ones failed to have any citations/references from within their own cluster?
df['PrimaryLabelMatchesCitation'] = df['PrimaryClusterLabel'] == df['CitationClusterLabel']
num_zero_primary_membership = \
df['paperID'].nunique() - df.loc[df['PrimaryLabelMatchesCitation'], 'paperID'].nunique()
fraction_zero_primary_membership = round(num_zero_primary_membership / df['paperID'].nunique() * 100, 2)
if num_zero_primary_membership > 0:
logger.warn(f"No citations from host cluster found for "
f"{num_zero_primary_membership} ({fraction_zero_primary_membership}%) papers! "
"This suggests that the clustering solution may not be very good or "
"that the citation network was undersampled")
query = f"""
MATCH (p:Publication)
WHERE p.{cluster_attribute} IS NOT NULL
AND p.publicationDate.year = {year}
RETURN MAX(toInteger(p.{cluster_attribute}))
"""
# cluster labels are zero-indexed, so need +1
num_clusters = graph.cypher_query_to_dataframe(query, verbose=False).iloc[0,0] + 1
tqdm.pandas(desc="Building full cluster membership vectors from citation-based membership per paper")
# Group membership into list for each paper
cluster_vectors = df.groupby('paperID', sort=False).agg(list).progress_apply(
lambda row: fill_out_vector(
row['CitationClusterLabel'],
row['FractionalMembership'],
num_clusters
),
axis=1
)
id_scores = calculate_interdisciplinarity_score(
np.array(cluster_vectors.tolist())
)
output = pd.DataFrame({
'paperID': df['paperID'].unique(),
'scoreInterDNetwork': id_scores
})
#TODO: maybe additional weighting from dendrogram distance/cluster exemplar-exemplar distance?
return output
|
[
"numpy.unique",
"vespid.setup_logger",
"numpy.max",
"numpy.zeros",
"tqdm.tqdm.pandas"
] |
[((56, 78), 'vespid.setup_logger', 'setup_logger', (['__name__'], {}), '(__name__)\n', (68, 78), False, 'from vespid import setup_logger\n'), ((7366, 7477), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {'desc': '"""Building full cluster membership vectors from citation-based membership per paper"""'}), "(desc=\n 'Building full cluster membership vectors from citation-based membership per paper'\n )\n", (7377, 7477), False, 'from tqdm import tqdm\n'), ((4473, 4501), 'numpy.zeros', 'np.zeros', (['num_total_clusters'], {}), '(num_total_clusters)\n', (4481, 4501), True, 'import numpy as np\n'), ((3997, 4024), 'numpy.max', 'np.max', (['cluster_identifiers'], {}), '(cluster_identifiers)\n', (4003, 4024), True, 'import numpy as np\n'), ((4267, 4297), 'numpy.unique', 'np.unique', (['cluster_identifiers'], {}), '(cluster_identifiers)\n', (4276, 4297), True, 'import numpy as np\n'), ((4192, 4219), 'numpy.max', 'np.max', (['cluster_identifiers'], {}), '(cluster_identifiers)\n', (4198, 4219), True, 'import numpy as np\n')]
|
import numpy as np
def Linear_Fit(array_A, array_B):
"""
Returns slope and y-intercept of the line of best fit
"""
array_A = np.array(array_A)
array_B = np.array(array_B)
    # Drop pairs where either value is NaN so the two arrays stay aligned,
    # then pair and sort them for an easier fit
    mask = ~(np.isnan(array_A) | np.isnan(array_B))
    zipped_list = zip(array_A[mask], array_B[mask])
sorted_list = sorted(zipped_list)
sorted_a, sorted_b = zip(*sorted_list)
m, b = np.polyfit(sorted_a, sorted_b, 1)
return m, b
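# Worked example (hypothetical data): NaNs are dropped pairwise before fitting, so
#   Linear_Fit([0, 1, 2, np.nan], [1, 3, 5, 7])
#   -> approximately (2.0, 1.0), i.e. slope 2 and intercept 1 from (0,1), (1,3), (2,5)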
|
[
"numpy.array",
"numpy.isnan",
"numpy.polyfit"
] |
[((142, 159), 'numpy.array', 'np.array', (['array_A'], {}), '(array_A)\n', (150, 159), True, 'import numpy as np\n'), ((174, 191), 'numpy.array', 'np.array', (['array_B'], {}), '(array_B)\n', (182, 191), True, 'import numpy as np\n'), ((421, 454), 'numpy.polyfit', 'np.polyfit', (['sorted_a', 'sorted_b', '(1)'], {}), '(sorted_a, sorted_b, 1)\n', (431, 454), True, 'import numpy as np\n'), ((275, 292), 'numpy.isnan', 'np.isnan', (['array_A'], {}), '(array_A)\n', (283, 292), True, 'import numpy as np\n'), ((304, 321), 'numpy.isnan', 'np.isnan', (['array_B'], {}), '(array_B)\n', (312, 321), True, 'import numpy as np\n')]
|
# OpenCV: Image processing
import cv2
import time
import popupWindow as detectionWindow
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
# numpy: numerical computation
import numpy as np
import core.utils as utils
# Tensorflow: deep learning
import tensorflow as tf
# Allow for GPU memory growth to prevent "Out of memory" errors
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
import sma as sma
# YOLOV3 itself
from core.yolov3 import YOLOv3, decode
def startDetection(window, minConfidence, videoPath):
popUpFlag = False
video_path = videoPath
# Number of classes, one class for each element
num_classes = 80
input_size = 704
min_confidence = minConfidence / 100
# Layer to be used as an entry point into a Network (a graph of layers).
# Tuple with height, width and depth used to reshape arrays.
# This is used for reshaping in Keras.
input_layer = tf.keras.layers.Input([input_size, input_size, 3])
# (TO DO: see how it does it)
feature_maps = YOLOv3(input_layer)
bbox_tensors = []
for i, fm in enumerate(feature_maps):
bbox_tensor = decode(fm, i)
bbox_tensors.append(bbox_tensor)
# Model groups layers into an object with training and inference features.
# input: input_layer
# output: bbox_tensors
model = tf.keras.Model(input_layer, bbox_tensors)
# load weights from file
utils.load_weights(model, "./data/weights/handgun.weights")
# Prints a string summary of the network.
# model.summary()
# Load video from file with openCV
vid = cv2.VideoCapture(video_path)
runFlag = True
while runFlag:
# Get a frame from the video
# Returns a bool (True/False).
# If frame is read correctly, it will be True.
# So you can check end of the video by checking this return value.
return_value, frame = vid.read()
if not return_value:
raise ValueError("No image!")
# thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
# print(thistuple[:2]) => ('apple', 'banana')
# shape holds heigth, width and number of channels
# Gets width and height of the frame
frame_size = frame.shape[:2]
# np.copy(frame) => Return an array copy of the given object.
# Resizes frame to network input size => def image_preporcess(image, target_size, gt_boxes=None):
image_data = utils.image_preporcess(np.copy(frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
# Performs the prediction on the frame (TO DO: see how it does it)
pred_bbox = model.predict_on_batch(image_data)
# Changes tensor shape, similar to transposing a matrix
# href: https://www.tensorflow.org/api_docs/python/tf/reshape
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
# Concatenates tensors along one dimension axis = 0 => axis = y
pred_bbox = tf.concat(pred_bbox, axis=0)
# (TO DO: see how it does it)
bboxes = utils.postprocess_boxes(pred_bbox, frame_size, input_size, min_confidence)
# (TO DO: see how it does it)
bboxes = utils.nms(bboxes, 0.45, method='nms')
        # Draw the bounding boxes on the image
# (TO DO: see how it does it)
image = utils.draw_bbox(frame, bboxes)
window.imageDisplay.setPixmap(QtGui.QPixmap(utils.convert_cv_qt(image.image)))
# HERE check if detected class is handgun
if(image.classDetected == 'handgun' and image.calculatedSma >= 0.95):
if (popUpFlag == False):
popUpFlag = True
popUpFlag = callPopUpWindow(window, image.image)
if popUpFlag == "Alarm":
popUpFlag = True
window.title.setText("Alarm triggered - Detection saved to PC")
window.title.setStyleSheet("color : red")
# window.title.setPointSize(25)
window.setStyleSheet("""QMainWindow{border: 6px solid red;}""")
# Breaks while loop on 'q' press
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
def callPopUpWindow(self, detection):
dialog = detectionWindow.DetectionWindow(self)
dialog.setImage(detection)
dialog.show()
if dialog.exec_() == QtWidgets.QDialog.Accepted:
return dialog.returnValue
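# Illustrative call (hypothetical window object and path, not from the original
# project): `window` is expected to expose the `imageDisplay`, `title` and
# `setStyleSheet` widgets/methods used in startDetection above.
#   startDetection(window, minConfidence=50, videoPath="./data/videos/sample.mp4")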
|
[
"core.utils.load_weights",
"tensorflow.keras.layers.Input",
"core.yolov3.YOLOv3",
"core.utils.draw_bbox",
"numpy.copy",
"popupWindow.DetectionWindow",
"tensorflow.shape",
"tensorflow.config.experimental.set_memory_growth",
"core.yolov3.decode",
"tensorflow.concat",
"core.utils.postprocess_boxes",
"core.utils.nms",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"tensorflow.keras.Model",
"cv2.waitKey",
"core.utils.convert_cv_qt",
"tensorflow.config.experimental.list_physical_devices"
] |
[((494, 545), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (538, 545), True, 'import tensorflow as tf\n'), ((1219, 1269), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['[input_size, input_size, 3]'], {}), '([input_size, input_size, 3])\n', (1240, 1269), True, 'import tensorflow as tf\n'), ((1324, 1343), 'core.yolov3.YOLOv3', 'YOLOv3', (['input_layer'], {}), '(input_layer)\n', (1330, 1343), False, 'from core.yolov3 import YOLOv3, decode\n'), ((1631, 1672), 'tensorflow.keras.Model', 'tf.keras.Model', (['input_layer', 'bbox_tensors'], {}), '(input_layer, bbox_tensors)\n', (1645, 1672), True, 'import tensorflow as tf\n'), ((1707, 1766), 'core.utils.load_weights', 'utils.load_weights', (['model', '"""./data/weights/handgun.weights"""'], {}), "(model, './data/weights/handgun.weights')\n", (1725, 1766), True, 'import core.utils as utils\n'), ((1887, 1915), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (1903, 1915), False, 'import cv2\n'), ((4629, 4666), 'popupWindow.DetectionWindow', 'detectionWindow.DetectionWindow', (['self'], {}), '(self)\n', (4660, 4666), True, 'import popupWindow as detectionWindow\n'), ((1432, 1445), 'core.yolov3.decode', 'decode', (['fm', 'i'], {}), '(fm, i)\n', (1438, 1445), False, 'from core.yolov3 import YOLOv3, decode\n'), ((3326, 3354), 'tensorflow.concat', 'tf.concat', (['pred_bbox'], {'axis': '(0)'}), '(pred_bbox, axis=0)\n', (3335, 3354), True, 'import tensorflow as tf\n'), ((3411, 3485), 'core.utils.postprocess_boxes', 'utils.postprocess_boxes', (['pred_bbox', 'frame_size', 'input_size', 'min_confidence'], {}), '(pred_bbox, frame_size, input_size, min_confidence)\n', (3434, 3485), True, 'import core.utils as utils\n'), ((3542, 3579), 'core.utils.nms', 'utils.nms', (['bboxes', '(0.45)'], {'method': '"""nms"""'}), "(bboxes, 0.45, method='nms')\n", (3551, 3579), True, 'import core.utils as utils\n'), ((3672, 3702), 'core.utils.draw_bbox', 'utils.draw_bbox', (['frame', 'bboxes'], {}), '(frame, bboxes)\n', (3687, 3702), True, 'import core.utils as utils\n'), ((589, 640), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (629, 640), True, 'import tensorflow as tf\n'), ((2779, 2793), 'numpy.copy', 'np.copy', (['frame'], {}), '(frame)\n', (2786, 2793), True, 'import numpy as np\n'), ((4530, 4553), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4551, 4553), False, 'import cv2\n'), ((3760, 3792), 'core.utils.convert_cv_qt', 'utils.convert_cv_qt', (['image.image'], {}), '(image.image)\n', (3779, 3792), True, 'import core.utils as utils\n'), ((4482, 4496), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4493, 4496), False, 'import cv2\n'), ((3196, 3207), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3204, 3207), True, 'import tensorflow as tf\n')]
|
from __future__ import print_function
import os
import keras
from keras.layers import Dense,Flatten,Conv2D,MaxPooling2D,Activation,Input,Concatenate,Dropout,GlobalAveragePooling2D
from keras.models import Model
import time
from keras.datasets import cifar10
from keras.optimizers import SGD
from keras.utils import get_file
from keras.preprocessing import image
import numpy as np
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
#SqueezeNet
#backend=tf channels_last (rows,cols,channels)
def fire_module(input,squeeze_filters,expand_filters):
squeeze=Conv2D(squeeze_filters,
kernel_size=(1,1),
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
data_format='channels_last'
)(input)
relu_squeeze=Activation('relu')(squeeze)
expand1=Conv2D(expand_filters,
kernel_size=(1,1),
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
data_format='channels_last'
)(relu_squeeze)
relu_expand1=Activation('relu')(expand1)
expand2=Conv2D(expand_filters,
kernel_size=(3,3),
strides=1,
padding='same',
kernel_initializer='glorot_uniform',
data_format='channels_last'
)(relu_squeeze)
relu_expand2=Activation('relu')(expand2)
merge=Concatenate(axis=3)([relu_expand1,relu_expand2])
output=merge
return output
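# Channel bookkeeping for the fire module above: fire_module(x, 16, 64) squeezes the
# input to 16 channels with 1x1 convolutions, expands it in parallel with 64 1x1 and
# 64 3x3 filters, and concatenates both branches, so the output has 64 + 64 = 128 channels.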
def SqueezeNet(input_shape,num_classes,weight=None):
input=Input(shape=input_shape)
conv_1=Conv2D(96,
kernel_size=(7,7),
strides=2,
padding='same',
kernel_initializer='glorot_uniform'
)(input)
pool_1=MaxPooling2D(pool_size=(3,3),
strides=2)(conv_1)
fire_2=fire_module(pool_1,16,64)
fire_3=fire_module(fire_2,16,64)
fire_4=fire_module(fire_3,32,128)
pool_4=MaxPooling2D(pool_size=(3,3),
strides=2)(fire_4)
fire_5=fire_module(pool_4,32,128)
fire_6=fire_module(fire_5,48,192)
fire_7=fire_module(fire_6,48,192)
fire_8=fire_module(fire_7,64,256)
pool_8=MaxPooling2D(pool_size=(3,3),
strides=2)(fire_8)
fire_9=fire_module(pool_8,64,256)
drop=Dropout(0.5)(fire_9)
conv_10=Conv2D(num_classes,
kernel_size=(1,1),
strides=1,
padding='same',
kernel_initializer='glorot_uniform'
)(drop)
relu_11=Activation('relu')(conv_10)
avgpool=GlobalAveragePooling2D()(relu_11)
flatten=Flatten()(relu_11)
dense=Dense(64)(flatten)
relu_dense=Activation('relu')(dense)
dense=Dense(2)(relu_dense)
softmax1=Activation('softmax')(dense)
softmax=Activation('softmax')(avgpool)
print(softmax)
output=softmax
model=Model(input=input,output=output)
return model
def main():
t0=time.time()
batch_size = 32
num_classes = 10
epochs = 20
data_augmentation = True
print('start')
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape)
x_train_n = np.zeros((x_train.shape[0], 224, 224, 3),dtype = 'float16')
x_test_n = np.zeros((x_test.shape[0], 224, 224, 3),dtype = 'float16')
for i in range(x_train.shape[0]):
if i%5000==0:
print(i)
data=x_train[i]
img=image.array_to_img(data)
img2=img.resize((224,224))
data2=image.img_to_array(img2)
x_train_n[i,:]=data2
for i in range(x_test.shape[0]):
if i%2000==0:
print(i)
data=x_test[i]
img=image.array_to_img(data)
img2=img.resize((224,224))
data2=image.img_to_array(img2)
x_test_n[i,:]=data2
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train_n /= 255.0
x_test_n /= 255.0
model=SqueezeNet((224,224,3),10)
model.summary()
print('wow')
print(time.time()-t0)
sgd=SGD(lr=0.01,decay=0.0002,momentum=0.9)
model.compile(optimizer=sgd,
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train_n, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test_n, y_test),
shuffle=True)
if __name__=='__main__':
main()
|
[
"keras.preprocessing.image.img_to_array",
"keras.layers.Conv2D",
"keras.layers.Flatten",
"keras.datasets.cifar10.load_data",
"keras.layers.MaxPooling2D",
"keras.layers.Concatenate",
"keras.utils.to_categorical",
"numpy.zeros",
"keras.layers.Input",
"keras.optimizers.SGD",
"keras.models.Model",
"keras.layers.Activation",
"keras.layers.Dropout",
"keras.layers.Dense",
"keras.layers.GlobalAveragePooling2D",
"time.time",
"keras.preprocessing.image.array_to_img"
] |
[((1747, 1771), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1752, 1771), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3171, 3204), 'keras.models.Model', 'Model', ([], {'input': 'input', 'output': 'output'}), '(input=input, output=output)\n', (3176, 3204), False, 'from keras.models import Model\n'), ((3262, 3273), 'time.time', 'time.time', ([], {}), '()\n', (3271, 3273), False, 'import time\n'), ((3434, 3453), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (3451, 3453), False, 'from keras.datasets import cifar10\n'), ((3499, 3557), 'numpy.zeros', 'np.zeros', (['(x_train.shape[0], 224, 224, 3)'], {'dtype': '"""float16"""'}), "((x_train.shape[0], 224, 224, 3), dtype='float16')\n", (3507, 3557), True, 'import numpy as np\n'), ((3575, 3632), 'numpy.zeros', 'np.zeros', (['(x_test.shape[0], 224, 224, 3)'], {'dtype': '"""float16"""'}), "((x_test.shape[0], 224, 224, 3), dtype='float16')\n", (3583, 3632), True, 'import numpy as np\n'), ((4156, 4204), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (4182, 4204), False, 'import keras\n'), ((4219, 4266), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (4245, 4266), False, 'import keras\n'), ((4431, 4471), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(0.0002)', 'momentum': '(0.9)'}), '(lr=0.01, decay=0.0002, momentum=0.9)\n', (4434, 4471), False, 'from keras.optimizers import SGD\n'), ((615, 755), 'keras.layers.Conv2D', 'Conv2D', (['squeeze_filters'], {'kernel_size': '(1, 1)', 'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""', 'data_format': '"""channels_last"""'}), "(squeeze_filters, kernel_size=(1, 1), strides=1, padding='same',\n kernel_initializer='glorot_uniform', data_format='channels_last')\n", (621, 755), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((897, 915), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (907, 915), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((938, 1077), 'keras.layers.Conv2D', 'Conv2D', (['expand_filters'], {'kernel_size': '(1, 1)', 'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""', 'data_format': '"""channels_last"""'}), "(expand_filters, kernel_size=(1, 1), strides=1, padding='same',\n kernel_initializer='glorot_uniform', data_format='channels_last')\n", (944, 1077), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1226, 1244), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1236, 1244), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1267, 1406), 'keras.layers.Conv2D', 'Conv2D', (['expand_filters'], {'kernel_size': '(3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""', 'data_format': '"""channels_last"""'}), "(expand_filters, kernel_size=(3, 3), strides=1, padding='same',\n kernel_initializer='glorot_uniform', data_format='channels_last')\n", 
(1273, 1406), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1555, 1573), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1565, 1573), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1594, 1613), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(3)'}), '(axis=3)\n', (1605, 1613), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1784, 1882), 'keras.layers.Conv2D', 'Conv2D', (['(96)'], {'kernel_size': '(7, 7)', 'strides': '(2)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""'}), "(96, kernel_size=(7, 7), strides=2, padding='same',\n kernel_initializer='glorot_uniform')\n", (1790, 1882), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((1993, 2034), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)'}), '(pool_size=(3, 3), strides=2)\n', (2005, 2034), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2194, 2235), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)'}), '(pool_size=(3, 3), strides=2)\n', (2206, 2235), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2436, 2477), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)'}), '(pool_size=(3, 3), strides=2)\n', (2448, 2477), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2559, 2571), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2566, 2571), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2593, 2700), 'keras.layers.Conv2D', 'Conv2D', (['num_classes'], {'kernel_size': '(1, 1)', 'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""glorot_uniform"""'}), "(num_classes, kernel_size=(1, 1), strides=1, padding='same',\n kernel_initializer='glorot_uniform')\n", (2599, 2700), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2816, 2834), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2826, 2834), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2857, 2881), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (2879, 2881), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2908, 2917), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2915, 2917), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((2938, 2947), 'keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (2943, 2947), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, 
Dropout, GlobalAveragePooling2D\n'), ((2973, 2991), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2983, 2991), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3010, 3018), 'keras.layers.Dense', 'Dense', (['(2)'], {}), '(2)\n', (3015, 3018), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3045, 3066), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3055, 3066), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3089, 3110), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3099, 3110), False, 'from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Activation, Input, Concatenate, Dropout, GlobalAveragePooling2D\n'), ((3758, 3782), 'keras.preprocessing.image.array_to_img', 'image.array_to_img', (['data'], {}), '(data)\n', (3776, 3782), False, 'from keras.preprocessing import image\n'), ((3834, 3858), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img2'], {}), '(img2)\n', (3852, 3858), False, 'from keras.preprocessing import image\n'), ((4009, 4033), 'keras.preprocessing.image.array_to_img', 'image.array_to_img', (['data'], {}), '(data)\n', (4027, 4033), False, 'from keras.preprocessing import image\n'), ((4085, 4109), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img2'], {}), '(img2)\n', (4103, 4109), False, 'from keras.preprocessing import image\n'), ((4406, 4417), 'time.time', 'time.time', ([], {}), '()\n', (4415, 4417), False, 'import time\n')]
|
import numpy as np
from scipy.stats import binom
# import modules needed for logging
import logging
import os
logger = logging.getLogger(__name__) # module logger
def cummin(x):
"""A python implementation of the cummin function in R"""
for i in range(1, len(x)):
if x[i-1] < x[i]:
x[i] = x[i-1]
return x
def bh_fdr(pval):
"""A python implementation of the Benjamani-Hochberg FDR method.
This code should always give precisely the same answer as using
p.adjust(pval, method="BH") in R.
Parameters
----------
pval : list or array
list/array of p-values
Returns
-------
pval_adj : np.array
adjusted p-values according the benjamani-hochberg method
"""
pval_array = np.array(pval)
sorted_order = np.argsort(pval_array)
original_order = np.argsort(sorted_order)
pval_array = pval_array[sorted_order]
# calculate the needed alpha
n = float(len(pval))
pval_adj = np.zeros(int(n))
i = np.arange(1, n+1, dtype=float)[::-1] # largest to smallest
pval_adj = np.minimum(1, cummin(n/i * pval_array[::-1]))[::-1]
return pval_adj[original_order]
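# Worked example (hypothetical p-values), matching R's p.adjust(..., method="BH"):
#   bh_fdr([0.01, 0.04, 0.03, 0.005])
#   -> approximately array([0.02, 0.04, 0.04, 0.02])
# i.e. each sorted p-value is scaled by n/rank and then made monotone via cummin.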
def frequency_test(mut_of_interest,
total_mut,
residues_of_interest,
residues_at_risk):
"""Perform a binomial test on the frequency of missense mutations within
given pre-defined residues within the gene.
Parameters
----------
mut_of_interest : {list, np.array}
number of mutations that are deemed "of interest"
total_mut : {list, np.array}
total number of mutations
residues_of_interest : {list, np.array}
contains the number of residues of interest for a mutation.
residues_at_risk : {list, np.array}
contains the number of residues at risk for a mutation.
Returns
-------
p_values : np.array
p-value for each gene for binomial test
"""
# initialize input
p_values = np.zeros(len(mut_of_interest))
mut = np.asarray(mut_of_interest)
N = np.asarray(total_mut)
residues_of_interest = np.asarray(residues_of_interest)
residues_at_risk = np.asarray(residues_at_risk, dtype=float)
residues_at_risk[residues_at_risk==0] = np.nan # fill zeros to avoid divide by zero
# calculate the background probability of mutation occurring at
# the residues of interest
P = residues_of_interest.astype(float) / residues_at_risk
# iterate through each gene to calculate p-value
logger.info('Calculating binomial test p-values . . .')
for k in range(len(mut)):
if not np.isnan(P[k]):
p_val = binomial_test(mut[k], N[k], P[k])
else:
# catch case for nan element
p_val = 1.0
p_values[k] = p_val
logger.info('Finished calculating binomial test p-values.')
return p_values
def binomial_test(n, N, P):
"""Perform binomial test on the observed n being higher than expected.
    Specifically, N residues are at risk and, of those, n mutations
occurred at the Np residues of interest. Given the background probability of
a mutation at a specific residue, the p-value is calculated as the probability
of observing n or greater mutations. Since N is large and n is small,
it is computationally more efficient to take 1 - Pr(i<=n-1).
Parameters
----------
n : int
number of observed mutations
N : int
number of residues at risk
P : float
background probability that a mutation would occur at a single residue
Returns
-------
pval : np.array
p-value for binomial test
"""
if n <= 0:
return 1.0
pval = binom.sf(n-1, N, P)
return pval
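# Worked example (hypothetical counts): the probability of observing 2 or more
# successes in 10 trials when each trial succeeds with probability 0.1 is
#   binomial_test(2, 10, 0.1)  # = binom.sf(1, 10, 0.1), roughly 0.264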
|
[
"logging.getLogger",
"numpy.asarray",
"scipy.stats.binom.sf",
"numpy.argsort",
"numpy.array",
"numpy.isnan",
"numpy.arange"
] |
[((121, 148), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (138, 148), False, 'import logging\n'), ((763, 777), 'numpy.array', 'np.array', (['pval'], {}), '(pval)\n', (771, 777), True, 'import numpy as np\n'), ((797, 819), 'numpy.argsort', 'np.argsort', (['pval_array'], {}), '(pval_array)\n', (807, 819), True, 'import numpy as np\n'), ((841, 865), 'numpy.argsort', 'np.argsort', (['sorted_order'], {}), '(sorted_order)\n', (851, 865), True, 'import numpy as np\n'), ((2037, 2064), 'numpy.asarray', 'np.asarray', (['mut_of_interest'], {}), '(mut_of_interest)\n', (2047, 2064), True, 'import numpy as np\n'), ((2073, 2094), 'numpy.asarray', 'np.asarray', (['total_mut'], {}), '(total_mut)\n', (2083, 2094), True, 'import numpy as np\n'), ((2122, 2154), 'numpy.asarray', 'np.asarray', (['residues_of_interest'], {}), '(residues_of_interest)\n', (2132, 2154), True, 'import numpy as np\n'), ((2178, 2219), 'numpy.asarray', 'np.asarray', (['residues_at_risk'], {'dtype': 'float'}), '(residues_at_risk, dtype=float)\n', (2188, 2219), True, 'import numpy as np\n'), ((3728, 3749), 'scipy.stats.binom.sf', 'binom.sf', (['(n - 1)', 'N', 'P'], {}), '(n - 1, N, P)\n', (3736, 3749), False, 'from scipy.stats import binom\n'), ((1007, 1039), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {'dtype': 'float'}), '(1, n + 1, dtype=float)\n', (1016, 1039), True, 'import numpy as np\n'), ((2630, 2644), 'numpy.isnan', 'np.isnan', (['P[k]'], {}), '(P[k])\n', (2638, 2644), True, 'import numpy as np\n')]
|
import io
from flask import (
Blueprint,
render_template,
abort,
current_app,
make_response
)
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
client = Blueprint('client', __name__, template_folder='templates', static_url_path='/static')
@client.route('/<int:points>', methods=['GET'])
def home(points):
title = current_app.config['TITLE']
plot = plot_points(points)
return render_template('index.html', title=title, plot=plot)
def plot_points(points):
"""Generate a plot with a varying number of randomly generated points
Args:
points (int): a number of points to plot
Returns: An svg plot with <points> data points
"""
    # data for plotting: <points> random (x, y) pairs
    data = np.random.rand(points, 2)
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.scatter(data[:,0], data[:,1])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title(f'There are {points} data points!')
ax.grid(True)
img = io.StringIO()
fig.savefig(img, format='svg')
    # Clip off the XML header that matplotlib writes before the <svg> element
svg_img = '<svg' + img.getvalue().split('<svg')[1]
return svg_img
|
[
"flask.render_template",
"numpy.random.rand",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"io.StringIO",
"flask.Blueprint"
] |
[((261, 351), 'flask.Blueprint', 'Blueprint', (['"""client"""', '__name__'], {'template_folder': '"""templates"""', 'static_url_path': '"""/static"""'}), "('client', __name__, template_folder='templates', static_url_path=\n '/static')\n", (270, 351), False, 'from flask import Blueprint, render_template, abort, current_app, make_response\n'), ((496, 549), 'flask.render_template', 'render_template', (['"""index.html"""'], {'title': 'title', 'plot': 'plot'}), "('index.html', title=title, plot=plot)\n", (511, 549), False, 'from flask import Blueprint, render_template, abort, current_app, make_response\n'), ((824, 849), 'numpy.random.rand', 'np.random.rand', (['points', '(2)'], {}), '(points, 2)\n', (838, 849), True, 'import numpy as np\n'), ((861, 869), 'matplotlib.figure.Figure', 'Figure', ([], {}), '()\n', (867, 869), False, 'from matplotlib.figure import Figure\n'), ((874, 891), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (886, 891), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((1090, 1103), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1101, 1103), False, 'import io\n')]
|
"""
Requirements:
-----------
pyngrok==5.0.5
mlflow==1.15.0
pandas==1.2.3
numpy==1.19.3
scikit-learn==0.24.1
Examples of usage can be found in the URL below:
https://nbviewer.jupyter.org/github/abreukuse/ml_utilities/blob/master/examples/experiments_management.ipynb
"""
import os
import mlflow
from pyngrok import ngrok
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
def generate_mlflow_ui():
"""
Creates a remote mlflow user interface with ngrok.
"""
get_ipython().system_raw("mlflow ui --port 5000 &")
ngrok.kill()
ngrok_tunnel = ngrok.connect(addr="5000", proto="http", bind_tls=True)
print("MLflow Tracking UI:", ngrok_tunnel.public_url, end='\n\n')
def __log_metrics(metrics,
metric_name,
y_train,
y_test,
y_estimate_train,
y_estimate_test):
"""
Record a particular metric score in mlflow.
It will be called from the __logging function.
---------------------------------------
Parameters
    metrics: Dictionary containing the metrics names as keys and the metrics functions as values.
    metric_name: String representing the name of the metric.
y_train and y_test: A numpy array or pandas series with the true train and test target values.
y_estimate_train and y_estimate_test: A numpy array or pandas series with the predicted train and test target values.
Return four variables representing the metrics names and values.
"""
# metric name
score_name_train = f'train_{metric_name}'
score_name_test = f'test_{metric_name}'
# metric score
score_train = metrics[metric_name](y_train, y_estimate_train)
score_test = metrics[metric_name](y_test, y_estimate_test)
if metric_name == 'rmse':
score_train = np.sqrt(score_train)
score_test = np.sqrt(score_test)
# metric log
mlflow.log_metric(score_name_train, score_train)
mlflow.log_metric(score_name_test, score_test)
return score_name_train, score_train, score_name_test, score_test
def __logging(metrics,
y_train,
y_test,
y_pred_train,
y_pred_test,
y_proba_train=None,
y_proba_test=None):
"""
Creates a dictionary with all the metrics from train and test.
It will be called from the simple_split function.
--------------------------------------------------------
Parameters
    metrics: Dictionary containing the metrics names as keys and the metrics functions as values.
y_train and y_test: The true target values from train and test.
    y_pred_train and y_pred_test: Array with the estimated results from the algorithms.
y_proba_train and y_proba_test: An array with the probability results from the algorithms.
Returns a dictionary with metrics names and metrics results.
"""
metrics_scores = {}
log_metrics_results = None
for metric_name in metrics.keys():
args = [metrics, metric_name, y_train, y_test]
if metric_name not in ['auc', 'log_loss']:
log_metrics_results = __log_metrics(*args, y_pred_train, y_pred_test)
else:
log_metrics_results = __log_metrics(*args, y_proba_train, y_proba_test)
# Unpacking
score_name_train = log_metrics_results[0]
score_train = log_metrics_results[1]
score_name_test = log_metrics_results[2]
score_test = log_metrics_results[3]
# Store the scores in a dictionary
metrics_scores.setdefault(score_name_train, score_train)
metrics_scores.setdefault(score_name_test, score_test)
return metrics_scores
def data_artifacts(X_train):
"""
Creates and stores data artifacts like a sample of the data, the features and indices.
---------------------------------------------------
Parameter
    X_train: The pandas data frame right before it enters the algorithm in the last-but-one step of the pipeline.
"""
os.makedirs('artifacts_temp', exist_ok=True)
features = list(X_train.columns)
indices = list(X_train.index)
with open('artifacts_temp/features.txt', 'w') as features_txt:
features_txt.write(str(features))
with open('artifacts_temp/indices.txt', 'w') as indices_txt:
indices_txt.write(str(indices))
X_train.head(10).to_csv('artifacts_temp/X_train_sample.csv', index=False)
mlflow.log_artifacts('artifacts_temp')
def simple_split(*, task,
pipeline,
X,
y,
test_size,
metrics,
random_state,
inverse=None):
"""
Split the data in train and test sets.
-------------------------------------
Parameters
task: String indicating if it is a 'classification' or 'regression' task.
pipeline: The sklearn pipeline to run.
X: Dataframe with all the variables.
y: Target.
    test_size: Size of the test data. It can be a float representing a percentage or an integer.
    metrics: Dictionary containing the metrics names as keys and the metrics functions as values.
random_state: Random number generator for the split in data.
inverse: A function with the inverse transformation applied in the target.
Returns a dictionary with metrics names and metrics results.
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
# Get the last but one state of the training data.
if len(pipeline) > 1:
X_train = pipeline[:-1].fit_transform(X_train)
pipeline[-1].fit(X_train, y_train)
else:
pipeline.fit(X_train, y_train)
# Collect data artifacts
data_artifacts(X_train)
if task == 'classification':
y_pred_train, y_pred_test, y_proba_train, y_proba_test = None, None, None, None
allowed_metrics = ['precision','recall','f1_score','accuracy','auc','log_loss']
if not set(metrics.keys()).issubset(allowed_metrics):
raise ValueError(f'Only these metrics are valid: {allowed_metrics}.')
if any(item in allowed_metrics[-2:] for item in metrics.keys()):
y_proba_train = pipeline[-1].predict_proba(X_train)[:,1]
y_proba_test = pipeline.predict_proba(X_test)[:,1]
if any(item in allowed_metrics[:-2] for item in metrics.keys()):
y_pred_train = pipeline[-1].predict(X_train)
y_pred_test = pipeline.predict(X_test)
metrics_scores = __logging(metrics,
y_train,
y_test,
y_pred_train,
y_pred_test,
y_proba_train,
y_proba_test)
elif task == 'regression':
y_pred_train = pipeline[-1].predict(X_train)
y_pred_test = pipeline.predict(X_test)
if inverse:
targets = [y_train, y_test, y_pred_train, y_pred_test]
y_train, y_test, y_pred_train, y_pred_test = [inverse(target) for target in targets]
allowed_metrics = ['rmse','mae','mape','msle','r2']
if not set(metrics.keys()).issubset(allowed_metrics):
raise ValueError(f'Only these metrics are valid: {allowed_metrics}.')
metrics_scores = __logging(metrics,
y_train,
y_test,
y_pred_train,
y_pred_test)
return metrics_scores
def cross_validation(*, task,
pipeline,
X,
y,
cv_method,
metrics,
inverse=None):
"""
Performs cross validation.
-------------------------
Parameters
pipeline: The sklearn pipeline to run.
X: Dataframe with all the variables.
y: target.
    cv_method: When 'cross_validation' is chosen. A callable from sklearn, e.g. {KFold, StratifiedKFold}
    metrics: Dictionary containing the metrics names as keys and the metrics functions as values.
inverse: A function with the inverse transformation applied in the target.
Returns a dictionary with metrics names and metrics results.
"""
metrics_scores = {}
X_train = None
splits = cv_method
for metric_name, metric in metrics.items():
for train_index, test_index in splits.split(X, y):
# split
X_train, y_train = X.loc[train_index], y.loc[train_index]
X_test, y_test = X.loc[test_index], y.loc[test_index]
# training
if len(pipeline) > 1:
X_train = pipeline[:-1].fit_transform(X_train)
pipeline[-1].fit(X_train, y_train)
else:
pipeline.fit(X_train, y_train)
# predict
if metric_name in ['auc', 'log_loss']:
y_pred_train = pipeline[-1].predict_proba(X_train)[:,1]
y_pred_test = pipeline.predict_proba(X_test)[:,1]
else:
y_pred_train = pipeline[-1].predict(X_train)
y_pred_test = pipeline.predict(X_test)
# inverse tranformation for target if needed
if task == 'regression' and inverse:
targets = [y_train, y_test, y_pred_train, y_pred_test]
y_train, y_test, y_pred_train, y_pred_test = [inverse(target) for target in targets]
# compute score
score_train = metrics[metric_name](y_train, y_pred_train)
score_test = metrics[metric_name](y_test, y_pred_test)
if metric_name == 'rmse':
score_train = np.sqrt(score_train)
score_test = np.sqrt(score_test)
metrics_scores.setdefault(f'train_{metric_name}', []).append(score_train)
metrics_scores.setdefault(f'test_{metric_name}', []).append(score_test)
# log
for metric_name, scores in metrics_scores.items():
mlflow.log_metric(metric_name, np.mean(scores))
metrics_scores[metric_name] = np.mean(scores)
# Collect data artifacts from the last fold
data_artifacts(X_train)
return metrics_scores
def experiment_manager(task,
pipeline, X, y,
runs,
validation,
hyperparameters,
metrics,
random_state=0,
remote_ui=False,
**kwargs):
"""
This function runs experiments and records the results.
-----------------------------------------------------
Parameters
task: String indicating if it is a 'classification' or 'regression' task.
pipeline: The sklearn pipeline to run.
X: Dataframe with all the variables.
y: Target.
validation: 'simple_split' or 'cross_validation'.
hyperparameters: A function returning a dictionary with all the hyperparameters names as keys
and range values to be tested in each algorithm as values.
    metrics: Dictionary containing the metric names as keys and the metric functions as values.
Metrics names allowed are: For classification {'precision', 'recall', 'f1_score', 'accuracy', 'auc', 'log_loss'}.
For regression {'rmse', 'mae', 'mape', 'msle', 'r2'}.
random_state: Random number generator for the split in data.
    remote_ui: Interact with the mlflow interface remotely or locally. Set 'True' if you are using Google Colab or another remote platform.
    available kwargs: run_label -> For optional labelling in the run name.
                      test_size -> Used when 'simple_split' is chosen. Float for the size of the test set.
                      cv_method -> Used when 'cross_validation' is chosen. A cross-validation splitter from sklearn, e.g. KFold or StratifiedKFold.
                      inverse -> A function with the inverse transformation applied to the target.
"""
experiment_name = pipeline[-1].__class__.__name__
mlflow.set_experiment(experiment_name=experiment_name)
experiment = mlflow.get_experiment_by_name(experiment_name)
print(f"Experiment Name: {experiment.name}")
print(f"Experiment_id: {experiment.experiment_id}", end='\n\n')
for run in range(runs):
        optional_run_label = kwargs.get('run_label') if kwargs.get('run_label') is not None else ''
with mlflow.start_run(run_name=f'Run: {run+1}{optional_run_label}'):
            # log hyperparameters
            for hyperparameter_name, hyperparameter in hyperparameters().items():
                mlflow.log_param(hyperparameter_name.split('__')[1], hyperparameter)
# training
pipeline.set_params(**hyperparameters())
# simple split
if validation == 'simple_split':
mlflow.set_tag('random_state_split', random_state)
mlflow.set_tag('test_size', kwargs['test_size'])
metrics_scores = simple_split(task=task,
pipeline=pipeline,
X=X,
y=y,
test_size=kwargs['test_size'],
metrics=metrics,
random_state=random_state,
inverse=kwargs.get('inverse'))
# cross validation
elif validation == 'cross_validation':
mlflow.set_tag('cv', kwargs['cv_method'])
metrics_scores = cross_validation(task=task,
pipeline=pipeline,
X=X,
y=y,
cv_method=kwargs['cv_method'],
metrics=metrics,
inverse=kwargs.get('inverse'))
# Print results
print(f'Run {run+1}', end='\n\n')
print('HYPERPARAMETERS')
for key, value in hyperparameters().items():
print(f'{key[key.find("__")+2:]}: {value}')
print()
print('SCORES')
for key, value in metrics_scores.items():
print(f'{key}: {np.round(value, 3)}')
print()
# mlflow user interface
if remote_ui == True:
return generate_mlflow_ui()
elif remote_ui == False:
print('Type "mlflow ui" in your terminal in order to interact with mlflow user interface.', end='\n\n')
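# Hedged usage sketch (added for illustration; not part of the original module).
# It shows how experiment_manager might be called for a classification task with
# a simple train/test split. The pipeline, data, metrics, and hyperparameter
# lambda below are illustrative placeholders, and running this will create a
# local mlflow tracking directory (mlruns/).
if __name__ == '__main__':
    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import accuracy_score, roc_auc_score
    from sklearn.pipeline import Pipeline
    X_demo, y_demo = make_classification(n_samples=200, random_state=0)
    X_demo, y_demo = pd.DataFrame(X_demo), pd.Series(y_demo)
    demo_pipeline = Pipeline([('model', LogisticRegression(max_iter=1000))])
    experiment_manager(task='classification',
                       pipeline=demo_pipeline,
                       X=X_demo, y=y_demo,
                       runs=1,
                       validation='simple_split',
                       hyperparameters=lambda: {'model__C': 1.0},
                       metrics={'accuracy': accuracy_score, 'auc': roc_auc_score},
                       test_size=0.3)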
|
[
"numpy.mean",
"mlflow.set_tag",
"numpy.sqrt",
"os.makedirs",
"pyngrok.ngrok.kill",
"sklearn.model_selection.train_test_split",
"mlflow.log_metric",
"mlflow.set_experiment",
"mlflow.get_experiment_by_name",
"mlflow.log_artifacts",
"pyngrok.ngrok.connect",
"mlflow.start_run",
"numpy.round"
] |
[((574, 586), 'pyngrok.ngrok.kill', 'ngrok.kill', ([], {}), '()\n', (584, 586), False, 'from pyngrok import ngrok\n'), ((606, 661), 'pyngrok.ngrok.connect', 'ngrok.connect', ([], {'addr': '"""5000"""', 'proto': '"""http"""', 'bind_tls': '(True)'}), "(addr='5000', proto='http', bind_tls=True)\n", (619, 661), False, 'from pyngrok import ngrok\n'), ((1942, 1990), 'mlflow.log_metric', 'mlflow.log_metric', (['score_name_train', 'score_train'], {}), '(score_name_train, score_train)\n', (1959, 1990), False, 'import mlflow\n'), ((1995, 2041), 'mlflow.log_metric', 'mlflow.log_metric', (['score_name_test', 'score_test'], {}), '(score_name_test, score_test)\n', (2012, 2041), False, 'import mlflow\n'), ((4069, 4113), 'os.makedirs', 'os.makedirs', (['"""artifacts_temp"""'], {'exist_ok': '(True)'}), "('artifacts_temp', exist_ok=True)\n", (4080, 4113), False, 'import os\n'), ((4510, 4548), 'mlflow.log_artifacts', 'mlflow.log_artifacts', (['"""artifacts_temp"""'], {}), "('artifacts_temp')\n", (4530, 4548), False, 'import mlflow\n'), ((5528, 5598), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_size', 'random_state': 'random_state'}), '(X, y, test_size=test_size, random_state=random_state)\n', (5544, 5598), False, 'from sklearn.model_selection import train_test_split\n'), ((12318, 12372), 'mlflow.set_experiment', 'mlflow.set_experiment', ([], {'experiment_name': 'experiment_name'}), '(experiment_name=experiment_name)\n', (12339, 12372), False, 'import mlflow\n'), ((12390, 12436), 'mlflow.get_experiment_by_name', 'mlflow.get_experiment_by_name', (['experiment_name'], {}), '(experiment_name)\n', (12419, 12436), False, 'import mlflow\n'), ((1858, 1878), 'numpy.sqrt', 'np.sqrt', (['score_train'], {}), '(score_train)\n', (1865, 1878), True, 'import numpy as np\n'), ((1900, 1919), 'numpy.sqrt', 'np.sqrt', (['score_test'], {}), '(score_test)\n', (1907, 1919), True, 'import numpy as np\n'), ((10356, 10371), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (10363, 10371), True, 'import numpy as np\n'), ((10301, 10316), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (10308, 10316), True, 'import numpy as np\n'), ((12693, 12757), 'mlflow.start_run', 'mlflow.start_run', ([], {'run_name': 'f"""Run: {run + 1}{optional_run_label}"""'}), "(run_name=f'Run: {run + 1}{optional_run_label}')\n", (12709, 12757), False, 'import mlflow\n'), ((9955, 9975), 'numpy.sqrt', 'np.sqrt', (['score_train'], {}), '(score_train)\n', (9962, 9975), True, 'import numpy as np\n'), ((10005, 10024), 'numpy.sqrt', 'np.sqrt', (['score_test'], {}), '(score_test)\n', (10012, 10024), True, 'import numpy as np\n'), ((13137, 13187), 'mlflow.set_tag', 'mlflow.set_tag', (['"""random_state_split"""', 'random_state'], {}), "('random_state_split', random_state)\n", (13151, 13187), False, 'import mlflow\n'), ((13204, 13252), 'mlflow.set_tag', 'mlflow.set_tag', (['"""test_size"""', "kwargs['test_size']"], {}), "('test_size', kwargs['test_size'])\n", (13218, 13252), False, 'import mlflow\n'), ((13872, 13913), 'mlflow.set_tag', 'mlflow.set_tag', (['"""cv"""', "kwargs['cv_method']"], {}), "('cv', kwargs['cv_method'])\n", (13886, 13913), False, 'import mlflow\n'), ((14713, 14731), 'numpy.round', 'np.round', (['value', '(3)'], {}), '(value, 3)\n', (14721, 14731), True, 'import numpy as np\n')]
|
#//////////////#####///////////////
#
# ANU u6325688 <NAME>
# Supervisor: Dr.<NAME>
#//////////////#####///////////////
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.utils.data
import numpy as np
from GAIL.Discriminator import Discriminator1D
from GAIL.Generator import Generator1D
from GAIL.PPO import PPO
from commons.DataInfo import DataInfo
import gym
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize
cudnn.benchmark = True
if torch.cuda.is_available():
map_location=lambda storage, loc: storage.cuda()
else:
map_location='cpu'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class GAIL():
def __init__(self,dataInfo:DataInfo, resultPath)-> None:
self.learnRate = 0.0005
self.entropyBeta = 0.001
self.lossCriterion = nn.BCELoss()
self.dataInfo = dataInfo
self.resultPath = resultPath
self.generator = None
self.generatorOptim = None
self.discriminator = None
self.discriminatorOptim = None
self.datatype = 0
self.lastActions = []
self.env = gym.make(dataInfo.gameName)
self.ppo = None
self.ppoExp = None
#Graphs
self.rwdCounter = []
self.genCounter = []
self.disCounter = []
self.entCounter = []
self.enableOnPolicy = True
def setUpGail(self):
self.generator = Generator1D(self.dataInfo).to(device)
self.generatorOptim = torch.optim.Adam(self.generator.parameters(), lr=self.learnRate)
self.discriminator = Discriminator1D(self.dataInfo).to(device)
self.discriminatorOptim = torch.optim.Adam(self.discriminator.parameters(), lr=self.learnRate)
self.ppoExp = PPO(self.generator,self.learnRate)
def getAction(self,state):
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
return self.generator(state).cpu().data.numpy().flatten()
def makeDisInput(self, state, action):
output = action.view(action.shape[0],1)
output = output.type(torch.FloatTensor).to(device)
return torch.cat((state,output),1)
def getGraph(self):
if len(self.rwdCounter) > 0:
plt.plot(range(len(self.rwdCounter)), self.rwdCounter, linestyle='-', marker="X")
plt.xlabel("Iteration")
plt.ylabel("Rewards")
plt.title("GAIL for {}-{} AverageReward={}[{},{}]".format(self.dataInfo.gameName, "LocState", \
str(sum(self.rwdCounter) / len(self.rwdCounter)), \
str(min(self.rwdCounter)),
str(max(self.rwdCounter))))
plt.savefig(self.resultPath + "/" + str(self.enableOnPolicy)+"LoctrainRwd.png")
plt.close("all")
plt.plot(range(len(self.genCounter)), self.genCounter, linestyle='-')
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.title("GAIL-Generator Loss for {}-{}[{},{}]".format(self.dataInfo.gameName, \
"LocState", \
str(round(min(self.genCounter).item(), 5)), \
str(round(max(self.genCounter).item(), 5))))
plt.savefig(self.resultPath + "/" + str(self.enableOnPolicy)+"LoctrainGenLoss.png")
plt.close("all")
plt.plot(range(len(self.disCounter)), self.disCounter, linestyle='-')
plt.xlabel("Batch")
plt.ylabel("Loss")
plt.title("GAIL-Discriminator Loss for {}-{}[{},{}]".format(self.dataInfo.gameName, \
"LocState",
str(round(min(self.disCounter).item(), 5)), \
str(round(max(self.disCounter).item(), 5))))
plt.savefig(self.resultPath + "/" + str(self.enableOnPolicy)+"LoctrainDisLoss.png")
plt.close("all")
plt.plot(range(len(self.entCounter)), self.entCounter, linestyle='-')
plt.xlabel("Batch")
plt.ylabel("Entropy")
plt.title("GAIL Entropy for {}-{}[{},{}]".format(self.dataInfo.gameName, "LocState", \
str(round(min(self.entCounter).item(), 5)), \
str(round(max(self.entCounter).item(), 5))))
plt.savefig(self.resultPath + "/" + str(self.enableOnPolicy)+"LoctrainEntropy.png")
plt.close("all")
def updateModel(self):
for batchIndex in range(len(self.dataInfo.expertState)):
#read experts' state
batch = self.dataInfo.expertState[batchIndex].size
exp_action = np.zeros((batch, 1))
exp_reward = np.zeros((batch,1))
            exp_done = np.zeros((batch,1)) # assume all "not done"
            exp_done = (exp_done==0) # elementwise comparison with 0, gives an all-True boolean array
exp_state = np.zeros((batch, self.dataInfo.locateShape)) #Location
for j in range(batch):
exp_state[j] = self.dataInfo.expertLocation[batchIndex][j] #Location
exp_action[j] = self.dataInfo.expertAction[batchIndex][j]
exp_state = normalize(exp_state)
exp_state = (torch.from_numpy(exp_state)).type(torch.FloatTensor).to(device)
# exp_state = torch.unsqueeze(exp_state, 0)
exp_action = (torch.from_numpy(exp_action)).type(torch.FloatTensor).to(device)
print("Batch: {}\t generating {} fake data...".format(str(batchIndex), str(batch)))
#Generate action
fake_actionDis, fake_action, fake_entroP = self.generator(exp_state)
exp_score = (self.generator.criticScore).detach()
# Initialise Discriminator
self.discriminatorOptim.zero_grad()
#Train Discriminator with fake(s,a) & expert(s,a)
detach_fake_action = fake_action.detach()
fake_input = self.makeDisInput(exp_state, detach_fake_action)
exp_input = self.makeDisInput(exp_state, exp_action)
print("Calculating loss...")
fake_label = torch.full((batch, 1), 0, device=device)
exp_label = torch.full((batch, 1), 1, device=device)
fake_loss = self.discriminator(fake_input)
fake_loss = self.lossCriterion(fake_loss, fake_label)
exp_loss = self.discriminator(exp_input)
exp_loss = self.lossCriterion(exp_loss, exp_label)
#Update Discriminator based on loss gradient
loss = (fake_loss+exp_loss)-self.entropyBeta*fake_entroP.detach().mean()
loss.backward()
self.discriminatorOptim.step()
#Get PPO Loss
print("PPO....")
exp_state = (Variable(exp_state).data).cpu().numpy() #convert to numpy
exp_action = (Variable(exp_action).data).cpu().numpy()
exp_score = (Variable(exp_score).data).cpu().numpy()
self.ppoExp = PPO(self.generator, self.generatorOptim)
self.ppoExp.importExpertData(exp_state,exp_action,exp_reward,exp_score,exp_done,fake_actionDis)
state, generatorLoss, entropy = self.ppoExp.optimiseGenerator1D()
if torch.isnan(entropy) or loss==0:
break
self.generator.load_state_dict(state)
self.genCounter.append(generatorLoss)
self.disCounter.append(loss)
self.entCounter.append(entropy)
print("--DisLoss {}-- --GenLoss {} --Entropy {}".format(str(loss.detach()), \
str(generatorLoss), str(entropy)))
del self.ppoExp
def train(self, numIteration, enableOnPolicy):
self.enableOnPolicy = str(enableOnPolicy)
for i in range(numIteration):
print("-----------------------Iteration {}------------------------------".format(str(i)))
# GAIL
self.dataInfo.shuffle()
self.dataInfo.sampleData()
self.updateModel()
self.ppo = PPO(self.generator, self.generatorOptim)
self.ppo.tryEnvironment1D()
self.rwdCounter.append(self.ppo.totalReward)
if enableOnPolicy == True:
#PPO
state, loss, entropy = self.ppo.optimiseGenerator1D()
if torch.isnan(entropy) or loss==0:
del self.ppo
continue
else:
self.generator.load_state_dict(state)
del self.ppo
self.getGraph()
def save(self, path, type):
torch.save(self.generator.state_dict(), '{}/{}_generator.pth'.format(path,type))
torch.save(self.discriminator.state_dict(), '{}/{}_discriminator.pth'.format(path,type))
def load(self, path, type):
self.generator.load_state_dict(torch.load('{}/{}_generator.pth'.format(path,type),map_location=map_location))
self.discriminator.load_state_dict(torch.load('{}/{}_discriminator.pth'.format(path,type),map_location=map_location))
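# Hedged usage sketch (added for illustration; not part of the original file).
# It outlines the intended call order for the GAIL class above. It is left
# commented out because the DataInfo construction below is hypothetical -- the
# real constructor and loading methods live in commons.DataInfo, which is not
# shown here -- and because training needs the expert demonstration data on disk.
# if __name__ == "__main__":
#     data_info = DataInfo("IceHockey-v0")   # hypothetical constructor arguments
#     data_info.loadData()                   # hypothetical: fills expertState/expertAction/expertLocation
#     gail = GAIL(data_info, resultPath="./results")
#     gail.setUpGail()
#     gail.train(numIteration=10, enableOnPolicy=True)
#     gail.save("./results", "location")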
|
[
"matplotlib.pyplot.ylabel",
"torch.full",
"matplotlib.pyplot.xlabel",
"GAIL.Generator.Generator1D",
"torch.from_numpy",
"matplotlib.pyplot.close",
"torch.nn.BCELoss",
"torch.cuda.is_available",
"numpy.zeros",
"GAIL.PPO.PPO",
"GAIL.Discriminator.Discriminator1D",
"sklearn.preprocessing.normalize",
"torch.autograd.Variable",
"torch.isnan",
"gym.make",
"torch.cat"
] |
[((598, 623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (621, 623), False, 'import torch\n'), ((741, 766), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (764, 766), False, 'import torch\n'), ((950, 962), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (960, 962), True, 'import torch.nn as nn\n'), ((1252, 1279), 'gym.make', 'gym.make', (['dataInfo.gameName'], {}), '(dataInfo.gameName)\n', (1260, 1279), False, 'import gym\n'), ((1882, 1917), 'GAIL.PPO.PPO', 'PPO', (['self.generator', 'self.learnRate'], {}), '(self.generator, self.learnRate)\n', (1885, 1917), False, 'from GAIL.PPO import PPO\n'), ((2248, 2277), 'torch.cat', 'torch.cat', (['(state, output)', '(1)'], {}), '((state, output), 1)\n', (2257, 2277), False, 'import torch\n'), ((3135, 3154), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (3145, 3154), True, 'import matplotlib.pyplot as plt\n'), ((3163, 3181), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3173, 3181), True, 'import matplotlib.pyplot as plt\n'), ((3669, 3685), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3678, 3685), True, 'import matplotlib.pyplot as plt\n'), ((3773, 3792), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (3783, 3792), True, 'import matplotlib.pyplot as plt\n'), ((3801, 3819), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (3811, 3819), True, 'import matplotlib.pyplot as plt\n'), ((4321, 4337), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4330, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4425, 4444), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (4435, 4444), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4474), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Entropy"""'], {}), "('Entropy')\n", (4463, 4474), True, 'import matplotlib.pyplot as plt\n'), ((4875, 4891), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4884, 4891), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2467), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (2454, 2467), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2501), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rewards"""'], {}), "('Rewards')\n", (2490, 2501), True, 'import matplotlib.pyplot as plt\n'), ((3031, 3047), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3040, 3047), True, 'import matplotlib.pyplot as plt\n'), ((5107, 5127), 'numpy.zeros', 'np.zeros', (['(batch, 1)'], {}), '((batch, 1))\n', (5115, 5127), True, 'import numpy as np\n'), ((5153, 5173), 'numpy.zeros', 'np.zeros', (['(batch, 1)'], {}), '((batch, 1))\n', (5161, 5173), True, 'import numpy as np\n'), ((5196, 5216), 'numpy.zeros', 'np.zeros', (['(batch, 1)'], {}), '((batch, 1))\n', (5204, 5216), True, 'import numpy as np\n'), ((5322, 5366), 'numpy.zeros', 'np.zeros', (['(batch, self.dataInfo.locateShape)'], {}), '((batch, self.dataInfo.locateShape))\n', (5330, 5366), True, 'import numpy as np\n'), ((5597, 5617), 'sklearn.preprocessing.normalize', 'normalize', (['exp_state'], {}), '(exp_state)\n', (5606, 5617), False, 'from sklearn.preprocessing import normalize\n'), ((6533, 6573), 'torch.full', 'torch.full', (['(batch, 1)', '(0)'], {'device': 'device'}), '((batch, 1), 0, device=device)\n', (6543, 6573), False, 'import torch\n'), ((6598, 6638), 'torch.full', 
'torch.full', (['(batch, 1)', '(1)'], {'device': 'device'}), '((batch, 1), 1, device=device)\n', (6608, 6638), False, 'import torch\n'), ((7387, 7427), 'GAIL.PPO.PPO', 'PPO', (['self.generator', 'self.generatorOptim'], {}), '(self.generator, self.generatorOptim)\n', (7390, 7427), False, 'from GAIL.PPO import PPO\n'), ((8481, 8521), 'GAIL.PPO.PPO', 'PPO', (['self.generator', 'self.generatorOptim'], {}), '(self.generator, self.generatorOptim)\n', (8484, 8521), False, 'from GAIL.PPO import PPO\n'), ((1551, 1577), 'GAIL.Generator.Generator1D', 'Generator1D', (['self.dataInfo'], {}), '(self.dataInfo)\n', (1562, 1577), False, 'from GAIL.Generator import Generator1D\n'), ((1714, 1744), 'GAIL.Discriminator.Discriminator1D', 'Discriminator1D', (['self.dataInfo'], {}), '(self.dataInfo)\n', (1729, 1744), False, 'from GAIL.Discriminator import Discriminator1D\n'), ((7629, 7649), 'torch.isnan', 'torch.isnan', (['entropy'], {}), '(entropy)\n', (7640, 7649), False, 'import torch\n'), ((8770, 8790), 'torch.isnan', 'torch.isnan', (['entropy'], {}), '(entropy)\n', (8781, 8790), False, 'import torch\n'), ((5643, 5670), 'torch.from_numpy', 'torch.from_numpy', (['exp_state'], {}), '(exp_state)\n', (5659, 5670), False, 'import torch\n'), ((5788, 5816), 'torch.from_numpy', 'torch.from_numpy', (['exp_action'], {}), '(exp_action)\n', (5804, 5816), False, 'import torch\n'), ((7171, 7190), 'torch.autograd.Variable', 'Variable', (['exp_state'], {}), '(exp_state)\n', (7179, 7190), False, 'from torch.autograd import Variable\n'), ((7255, 7275), 'torch.autograd.Variable', 'Variable', (['exp_action'], {}), '(exp_action)\n', (7263, 7275), False, 'from torch.autograd import Variable\n'), ((7321, 7340), 'torch.autograd.Variable', 'Variable', (['exp_score'], {}), '(exp_score)\n', (7329, 7340), False, 'from torch.autograd import Variable\n')]
|
from __future__ import print_function, division
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
try:
    from itertools import ifilterfalse
except ImportError: # py3k
    from itertools import filterfalse as ifilterfalse
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
class StableBCELoss(torch.nn.modules.Module):
def __init__(self):
super(StableBCELoss, self).__init__()
def forward(self, input, target):
neg_abs = - input.abs()
loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
return loss.mean()
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, ignore_index=255, reduction='mean'):
super(CrossEntropyLoss2d, self).__init__()
self.CE = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)
def forward(self, output, target):
loss = self.CE(output, target)
return loss
class DiceLoss(nn.Module):
def __init__(self, smooth=1., ignore_index=None):
super(DiceLoss, self).__init__()
self.ignore_index = ignore_index
self.smooth = smooth
def forward(self, output, target):
if self.ignore_index is not None and self.ignore_index not in range(target.min(), target.max()):
if (target == self.ignore_index).sum() > 0:
target[target == self.ignore_index] = target.min()
target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
output = F.softmax(output, dim=1)
output_flat = output.contiguous().view(-1)
target_flat = target.contiguous().view(-1)
intersection = (output_flat * target_flat).sum()
loss = 1 - ((2. * intersection + self.smooth) /
(output_flat.sum() + target_flat.sum() + self.smooth))
return loss
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.size_average = size_average
self.CE_loss = nn.CrossEntropyLoss(reduce=False, weight=alpha)
def forward(self, output, target):
logpt = self.CE_loss(output, target)
pt = torch.exp(-logpt)
loss = ((1-pt)**self.gamma) * logpt
if self.size_average:
return loss.mean()
return loss.sum()
"""
====================
Focal Loss
code reference: https://github.com/clcarwin/focal_loss_pytorch
====================
"""
# class FocalLoss(nn.Module):
# def __init__(self, gamma=0, alpha=None, size_average=True):
# super(FocalLoss, self).__init__()
# self.gamma = gamma
# self.alpha = alpha
# if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
# if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
# self.size_average = size_average
# def forward(self, input, target):
# if input.dim()>2:
# input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
# input = input.transpose(1,2) # N,C,H*W => N,H*W,C
# input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
# target = target.view(-1,1)
# logpt = F.log_softmax(input)
# logpt = logpt.gather(1,target)
# logpt = logpt.view(-1)
# pt = Variable(logpt.data.exp())
# if self.alpha is not None:
# if self.alpha.type()!=input.data.type():
# self.alpha = self.alpha.type_as(input.data)
# at = self.alpha.gather(0,target.data.view(-1))
# logpt = logpt * Variable(at)
# loss = -1 * (1-pt)**self.gamma * logpt
# if self.size_average: return loss.mean()
# else: return loss.sum()
class CE_DiceLoss(nn.Module):
def __init__(self, smooth=1, reduction='mean', ignore_index=None, weight=None):
super(CE_DiceLoss, self).__init__()
self.smooth = smooth
self.dice = DiceLoss()
if ignore_index is not None:
self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)
else:
self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction)
def forward(self, output, target):
CE_loss = self.cross_entropy(output, target)
dice_loss = self.dice(output, target)
return CE_loss + dice_loss
class LovaszSoftmax(nn.Module):
def __init__(self, classes='all', per_image=True, ignore_index=None):
super(LovaszSoftmax, self).__init__()
self.smooth = classes
self.per_image = per_image
self.ignore_index = ignore_index
def forward(self, output, target):
logits = F.softmax(output, dim=1)
loss = lovasz_softmax(logits, target, ignore=self.ignore_index)
return loss
"""
Lovasz-Softmax and Jaccard hinge loss in PyTorch
<NAME> 2018 ESAT-PSI KU Leuven (MIT License)
https://github.com/bermanmaxim/LovaszSoftmax/blob/master/pytorch/lovasz_losses.py
"""
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / float(union)
ious.append(iou)
    iou = mean(ious)    # mean across images if per_image
return 100 * iou
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / float(union))
ious.append(iou)
    ious = [mean(iou) for iou in zip(*ious)] # mean across images if per_image
return 100 * np.array(ious)
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
for log, lab in zip(logits, labels))
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
signs = 2. * labels.float() - 1.
errors = (1. - logits * Variable(signs))
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = (labels != ignore)
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def binary_xloss(logits, labels, ignore=None):
"""
Binary Cross entropy loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
ignore: void class id
"""
logits, labels = flatten_binary_scores(logits, labels, ignore)
loss = StableBCELoss()(logits, Variable(labels.float()))
return loss
# --------------------------- MULTICLASS LOSSES ---------------------------
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
for prob, lab in zip(probas, labels))
else:
loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
return loss
def lovasz_softmax_flat(probas, labels, classes='present'):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
        if (classes == 'present' and fg.sum() == 0):
continue
if C == 1:
if len(classes) > 1:
raise ValueError('Sigmoid output possible only with 1 class')
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
return mean(losses)
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = (labels != ignore)
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels
def xloss(logits, labels, ignore=None):
"""
Cross entropy loss
"""
return F.cross_entropy(logits, Variable(labels), ignore_index=255)
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
return x != x
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
### data processing ###
def toOneHot(mask, nb_class=10):
"""
Convert label image to onehot encoding
Args:
mask (Image): mask containing pixels labels
nb_class (int): number of class
"""
categorical = torch.from_numpy(np.array(mask)).long()
categorical = F.one_hot(categorical, nb_class)
return categorical.permute(2,0,1).float()
### losses & accuracy ###
def dice_loss(yhat, ytrue, epsilon=1e-6):
"""
Computes a soft Dice Loss
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
epsilon (Float): smoothing value to avoid division by 0
output:
DL value with `mean` reduction
"""
# compute Dice components
intersection = torch.sum(yhat * ytrue, (1,2,3))
cardinal = torch.sum(yhat + ytrue, (1,2,3))
return torch.mean(1. - (2 * intersection / (cardinal + epsilon)))
def tversky_index(yhat, ytrue, alpha=0.3, beta=0.7, epsilon=1e-6):
"""
Computes Tversky index
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
alpha (Float): weight for False positive
beta (Float): weight for False negative
`` alpha and beta control the magnitude of penalties and should sum to 1``
epsilon (Float): smoothing value to avoid division by 0
output:
tversky index value
"""
TP = torch.sum(yhat * ytrue)
FP = torch.sum((1. - ytrue) * yhat)
FN = torch.sum((1. - yhat) * ytrue)
return TP/(TP + alpha * FP + beta * FN + epsilon)
def tversky_loss(yhat, ytrue):
"""
Computes tversky loss given tversky index
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
output:
tversky loss value with `mean` reduction
"""
return torch.mean(1 - tversky_index(yhat, ytrue))
def tversky_focal_loss(yhat, ytrue, alpha=0.7, beta=0.3, gamma=0.75):
"""
Computes tversky focal loss for highly umbalanced data
https://arxiv.org/pdf/1810.07842.pdf
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
alpha (Float): weight for False positive
beta (Float): weight for False negative
`` alpha and beta control the magnitude of penalties and should sum to 1``
gamma (Float): focal parameter
``control the balance between easy background and hard ROI training examples``
output:
tversky focal loss value with `mean` reduction
"""
return torch.mean(torch.pow(1 - tversky_index(yhat, ytrue, alpha, beta), gamma))
def focal_loss(yhat, ytrue, alpha=0.75, gamma=2):
"""
Computes α-balanced focal loss from FAIR
https://arxiv.org/pdf/1708.02002v2.pdf
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
alpha (Float): weight to balance Cross entropy value
gamma (Float): focal parameter
output:
loss value with `mean` reduction
"""
# compute the actual focal loss
focal = -alpha * torch.pow(1. - yhat, gamma) * torch.log(yhat)
f_loss = torch.sum(ytrue * focal, dim=1)
return torch.mean(f_loss)
def iou_accuracy(yhat, ytrue, threshold=0.5, epsilon=1e-6):
"""
Computes Intersection over Union metric
Args:
yhat (Tensor): predicted masks
ytrue (Tensor): targets masks
threshold (Float): threshold for pixel classification
epsilon (Float): smoothing parameter for numerical stability
output:
iou value with `mean` reduction
"""
intersection = ((yhat>threshold).long() & ytrue.long()).float().sum((1,2,3))
union = ((yhat>threshold).long() | ytrue.long()).float().sum((1,2,3))
return torch.mean(intersection/(union + epsilon)).item()
def make_one_hot(labels, classes):
one_hot = torch.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_().to(labels.device)
target = one_hot.scatter_(1, labels.data, 1)
return target
def get_weights(target):
t_np = target.view(-1).data.cpu().numpy()
classes, counts = np.unique(t_np, return_counts=True)
# cls_w = np.median(counts) / counts
cls_w = class_weight.compute_class_weight(class_weight='balanced', classes=classes, y=t_np)
weights = np.ones(7)
weights[classes] = cls_w
return torch.from_numpy(weights).float().cuda()
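# Hedged usage sketch (added for illustration; not part of the original module):
# exercising the functional losses defined above on random tensors. Shapes follow
# the docstrings -- yhat and ytrue are (N, C, H, W) masks with values in [0, 1] --
# and yhat is clamped away from 0 so the log() inside focal_loss stays finite.
if __name__ == "__main__":
    torch.manual_seed(0)
    yhat = torch.rand(2, 3, 8, 8).clamp(1e-6, 1 - 1e-6)  # predicted probabilities
    ytrue = (torch.rand(2, 3, 8, 8) > 0.5).float()        # binary target masks
    print("dice loss:", dice_loss(yhat, ytrue).item())
    print("tversky loss:", tversky_loss(yhat, ytrue).item())
    print("tversky focal loss:", tversky_focal_loss(yhat, ytrue).item())
    print("focal loss:", focal_loss(yhat, ytrue).item())
    print("IoU accuracy:", iou_accuracy(yhat, ytrue))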
|
[
"torch.sort",
"torch.log",
"numpy.unique",
"numpy.ones",
"torch.nn.CrossEntropyLoss",
"torch.mean",
"sklearn.utils.class_weight.compute_class_weight",
"torch.exp",
"itertools.filterfalse",
"torch.pow",
"numpy.array",
"torch.from_numpy",
"torch.sum",
"torch.nn.functional.one_hot",
"torch.nn.functional.relu",
"torch.autograd.Variable",
"torch.nn.functional.softmax"
] |
[((8712, 8754), 'torch.sort', 'torch.sort', (['errors'], {'dim': '(0)', 'descending': '(True)'}), '(errors, dim=0, descending=True)\n', (8722, 8754), False, 'import torch\n'), ((13562, 13594), 'torch.nn.functional.one_hot', 'F.one_hot', (['categorical', 'nb_class'], {}), '(categorical, nb_class)\n', (13571, 13594), True, 'import torch.nn.functional as F\n'), ((14010, 14044), 'torch.sum', 'torch.sum', (['(yhat * ytrue)', '(1, 2, 3)'], {}), '(yhat * ytrue, (1, 2, 3))\n', (14019, 14044), False, 'import torch\n'), ((14058, 14092), 'torch.sum', 'torch.sum', (['(yhat + ytrue)', '(1, 2, 3)'], {}), '(yhat + ytrue, (1, 2, 3))\n', (14067, 14092), False, 'import torch\n'), ((14103, 14160), 'torch.mean', 'torch.mean', (['(1.0 - 2 * intersection / (cardinal + epsilon))'], {}), '(1.0 - 2 * intersection / (cardinal + epsilon))\n', (14113, 14160), False, 'import torch\n'), ((14665, 14688), 'torch.sum', 'torch.sum', (['(yhat * ytrue)'], {}), '(yhat * ytrue)\n', (14674, 14688), False, 'import torch\n'), ((14698, 14729), 'torch.sum', 'torch.sum', (['((1.0 - ytrue) * yhat)'], {}), '((1.0 - ytrue) * yhat)\n', (14707, 14729), False, 'import torch\n'), ((14738, 14769), 'torch.sum', 'torch.sum', (['((1.0 - yhat) * ytrue)'], {}), '((1.0 - yhat) * ytrue)\n', (14747, 14769), False, 'import torch\n'), ((16394, 16425), 'torch.sum', 'torch.sum', (['(ytrue * focal)'], {'dim': '(1)'}), '(ytrue * focal, dim=1)\n', (16403, 16425), False, 'import torch\n'), ((16438, 16456), 'torch.mean', 'torch.mean', (['f_loss'], {}), '(f_loss)\n', (16448, 16456), False, 'import torch\n'), ((17385, 17420), 'numpy.unique', 'np.unique', (['t_np'], {'return_counts': '(True)'}), '(t_np, return_counts=True)\n', (17394, 17420), True, 'import numpy as np\n'), ((17474, 17561), 'sklearn.utils.class_weight.compute_class_weight', 'class_weight.compute_class_weight', ([], {'class_weight': '"""balanced"""', 'classes': 'classes', 'y': 't_np'}), "(class_weight='balanced', classes=classes,\n y=t_np)\n", (17507, 17561), False, 'from sklearn.utils import class_weight\n'), ((17573, 17583), 'numpy.ones', 'np.ones', (['(7)'], {}), '(7)\n', (17580, 17583), True, 'import numpy as np\n'), ((1000, 1087), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weight', 'ignore_index': 'ignore_index', 'reduction': 'reduction'}), '(weight=weight, ignore_index=ignore_index, reduction=\n reduction)\n', (1019, 1087), True, 'import torch.nn as nn\n'), ((1741, 1765), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (1750, 1765), True, 'import torch.nn.functional as F\n'), ((2302, 2349), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduce': '(False)', 'weight': 'alpha'}), '(reduce=False, weight=alpha)\n', (2321, 2349), True, 'import torch.nn as nn\n'), ((2448, 2465), 'torch.exp', 'torch.exp', (['(-logpt)'], {}), '(-logpt)\n', (2457, 2465), False, 'import torch\n'), ((4994, 5018), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (5003, 5018), True, 'import torch.nn.functional as F\n'), ((7493, 7507), 'numpy.array', 'np.array', (['ious'], {}), '(ious)\n', (7501, 7507), True, 'import numpy as np\n'), ((8860, 8881), 'torch.nn.functional.relu', 'F.relu', (['errors_sorted'], {}), '(errors_sorted)\n', (8866, 8881), True, 'import torch.nn.functional as F\n'), ((8883, 8897), 'torch.autograd.Variable', 'Variable', (['grad'], {}), '(grad)\n', (8891, 8897), False, 'from torch.autograd import Variable\n'), ((11779, 11817), 'torch.sort', 'torch.sort', (['errors', 
'(0)'], {'descending': '(True)'}), '(errors, 0, descending=True)\n', (11789, 11817), False, 'import torch\n'), ((12677, 12693), 'torch.autograd.Variable', 'Variable', (['labels'], {}), '(labels)\n', (12685, 12693), False, 'from torch.autograd import Variable\n'), ((12975, 12997), 'itertools.filterfalse', 'ifilterfalse', (['isnan', 'l'], {}), '(isnan, l)\n', (12987, 12997), True, 'from itertools import filterfalse as ifilterfalse\n'), ((16365, 16380), 'torch.log', 'torch.log', (['yhat'], {}), '(yhat)\n', (16374, 16380), False, 'import torch\n'), ((4309, 4396), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weight', 'reduction': 'reduction', 'ignore_index': 'ignore_index'}), '(weight=weight, reduction=reduction, ignore_index=\n ignore_index)\n', (4328, 4396), True, 'import torch.nn as nn\n'), ((4439, 4494), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'weight', 'reduction': 'reduction'}), '(weight=weight, reduction=reduction)\n', (4458, 4494), True, 'import torch.nn as nn\n'), ((8669, 8684), 'torch.autograd.Variable', 'Variable', (['signs'], {}), '(signs)\n', (8677, 8684), False, 'from torch.autograd import Variable\n'), ((16335, 16363), 'torch.pow', 'torch.pow', (['(1.0 - yhat)', 'gamma'], {}), '(1.0 - yhat, gamma)\n', (16344, 16363), False, 'import torch\n'), ((17015, 17059), 'torch.mean', 'torch.mean', (['(intersection / (union + epsilon))'], {}), '(intersection / (union + epsilon))\n', (17025, 17059), False, 'import torch\n'), ((13521, 13535), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (13529, 13535), True, 'import numpy as np\n'), ((11716, 11728), 'torch.autograd.Variable', 'Variable', (['fg'], {}), '(fg)\n', (11724, 11728), False, 'from torch.autograd import Variable\n'), ((17624, 17649), 'torch.from_numpy', 'torch.from_numpy', (['weights'], {}), '(weights)\n', (17640, 17649), False, 'import torch\n')]
|
# imports
import numpy as np
from rubin_sim.maf.metrics.baseMetric import BaseMetric
# constants
__all__ = ["UseMetric"]
# exception classes
# interface functions
# classes
class UseMetric(BaseMetric): # pylint: disable=too-few-public-methods
"""Metric to classify visits by type of visits"""
def __init__(self, noteCol="note", **kwargs):
self.noteCol = noteCol
super().__init__(col=[noteCol], metricDtype="object", **kwargs)
def run(self, dataSlice, slicePoint=None): # pylint: disable=invalid-name
"""Run the metric.
Parameters
----------
        dataSlice : numpy.ndarray
Values passed to metric by the slicer, which the metric will use to calculate
metric values at each slicePoint.
slicePoint : Dict
Dictionary of slicePoint metadata passed to each metric.
E.g. the ra/dec of the healpix pixel or opsim fieldId.
Returns
-------
str
use at each slicePoint.
"""
use_name = None
visible_bands = ("u", "g", "r")
notes = dataSlice[self.noteCol]
if len(notes.shape) == 0:
note = notes
else:
note = notes[0]
assert np.all(notes == note)
note_elems = note.replace(":", ", ").split(", ")
if note_elems[0] == "greedy":
use_name = note_elems[0]
if note_elems[0] == "DD":
use_name = note_elems[1]
if note_elems[0] == "blob":
use_name = "wide with only IR"
for band in visible_bands:
if band in note_elems[1]:
use_name = "wide with u, g, or r"
assert use_name is not None, f"Unrecognized note: {note}"
return use_name
# internal functions & classes
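# Hedged usage sketch (added for illustration; not part of the original module).
# Running it requires rubin_sim to be installed, and the note string below is a
# made-up example that merely matches the format the parser above expects
# ("<survey type>, <bands>..."), not an actual scheduler note.
if __name__ == "__main__":
    metric = UseMetric()
    demo_slice = np.array([("blob, gr",), ("blob, gr",)], dtype=[("note", "U40")])
    print(metric.run(demo_slice))  # expected: "wide with u, g, or r"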
|
[
"numpy.all"
] |
[((1251, 1272), 'numpy.all', 'np.all', (['(notes == note)'], {}), '(notes == note)\n', (1257, 1272), True, 'import numpy as np\n')]
|
import time
import numpy as np
from tqdm import tqdm
from sklearn.decomposition import MiniBatchDictionaryLearning
from .metrics import distance_between_atoms
from .visualizations import show_dictionary_atoms_img
from .plots import plot_reconstruction_error_and_dictionary_distances
def loader(X, batch_size):
for j, i in enumerate(range(0, len(X), batch_size)):
try:
yield j, X[i: i + batch_size]
except IndexError:
yield j, X[i:]
def study_dictionary_convergence_and_reconstruction_for_images(
X: np.ndarray, X_test: np.ndarray, n_atoms=10, batch_size=30, data_nature_changes=[], compute_atoms_distance_every=10, color=True, atom_h=32, atom_w=32, display_intermediate=True):
"""
X: array of shape (num_samples, feature_size)
X_test: array of shape (num_samples, feature_size)
"""
# Initializations
times, reconstruction_errors = [], []
dictionary_atoms_distances, batches_seen = [], []
data_nature_changes_batches = [
size // batch_size for size in data_nature_changes]
data_nature_changes_time = []
# Define an online dictionary learning
clf = MiniBatchDictionaryLearning(n_components=n_atoms,
batch_size=batch_size,
transform_algorithm='lasso_lars',
verbose=False)
former_atoms = np.zeros((n_atoms, X_test.shape[1]))
start = time.time()
# For every batch of image, compute a partial fit of the dictionary
for i, sample in tqdm(loader(X, batch_size), total=X.shape[0] // batch_size):
clf.partial_fit(sample)
# We then measure the reconstruction error
reconstruction_error = np.array([np.linalg.norm(
test_x - clf.transform(test_x).dot(clf.components_)) for test_x in X_test])
reconstruction_errors.append(reconstruction_error)
times.append(time.time() - start)
# We compute the data nature change time if there is any
nb_of_current_changes = len(data_nature_changes_time)
if nb_of_current_changes < len(data_nature_changes):
# Data nature change at current batch
if data_nature_changes[nb_of_current_changes] <= i * batch_size:
data_nature_changes_time.append(time.time() - start)
atoms_distance_computation_cond = i % compute_atoms_distance_every == compute_atoms_distance_every - 1
if atoms_distance_computation_cond:
# We occasionally compute the atoms distances between iterations
distance_from_prev_dict = distance_between_atoms(
former_atoms, clf.components_)
former_atoms = np.copy(clf.components_)
dictionary_atoms_distances.append(distance_from_prev_dict)
batches_seen.append(i)
# We optionally display the learnt atoms
if display_intermediate:
print("=" * 20, "\n", "Batch", i)
print("Distance between current and previous atoms:",
distance_from_prev_dict)
show_dictionary_atoms_img(
clf, color=color, atom_h=atom_h, atom_w=atom_w)
# We eventually plot the reconstruction error and the evolution of atoms distances
reconstruction_errors = np.array(reconstruction_errors).T
dictionary_atoms_distances = np.array(dictionary_atoms_distances)
plot_reconstruction_error_and_dictionary_distances(
times, reconstruction_errors, batches_seen, dictionary_atoms_distances, compute_atoms_distance_every, data_nature_changes_time, data_nature_changes_batches)
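# Hedged usage sketch (added for illustration; not part of the original module).
# It is left commented out because this file uses package-relative imports
# (.metrics, .visualizations, .plots) and so cannot be run as a standalone
# script. X would hold flattened image patches, one row per patch, sized
# atom_h * atom_w * 3 when color=True; data_nature_changes lists the sample
# indices where the underlying image distribution switches.
# X_patches = np.random.rand(300, 32 * 32 * 3)
# X_test_patches = np.random.rand(20, 32 * 32 * 3)
# study_dictionary_convergence_and_reconstruction_for_images(
#     X_patches, X_test_patches,
#     n_atoms=5, batch_size=30,
#     data_nature_changes=[150],
#     display_intermediate=False)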
|
[
"numpy.copy",
"sklearn.decomposition.MiniBatchDictionaryLearning",
"numpy.array",
"numpy.zeros",
"time.time"
] |
[((1165, 1290), 'sklearn.decomposition.MiniBatchDictionaryLearning', 'MiniBatchDictionaryLearning', ([], {'n_components': 'n_atoms', 'batch_size': 'batch_size', 'transform_algorithm': '"""lasso_lars"""', 'verbose': '(False)'}), "(n_components=n_atoms, batch_size=batch_size,\n transform_algorithm='lasso_lars', verbose=False)\n", (1192, 1290), False, 'from sklearn.decomposition import MiniBatchDictionaryLearning\n'), ((1421, 1457), 'numpy.zeros', 'np.zeros', (['(n_atoms, X_test.shape[1])'], {}), '((n_atoms, X_test.shape[1]))\n', (1429, 1457), True, 'import numpy as np\n'), ((1471, 1482), 'time.time', 'time.time', ([], {}), '()\n', (1480, 1482), False, 'import time\n'), ((3405, 3441), 'numpy.array', 'np.array', (['dictionary_atoms_distances'], {}), '(dictionary_atoms_distances)\n', (3413, 3441), True, 'import numpy as np\n'), ((3338, 3369), 'numpy.array', 'np.array', (['reconstruction_errors'], {}), '(reconstruction_errors)\n', (3346, 3369), True, 'import numpy as np\n'), ((2721, 2745), 'numpy.copy', 'np.copy', (['clf.components_'], {}), '(clf.components_)\n', (2728, 2745), True, 'import numpy as np\n'), ((1946, 1957), 'time.time', 'time.time', ([], {}), '()\n', (1955, 1957), False, 'import time\n'), ((2331, 2342), 'time.time', 'time.time', ([], {}), '()\n', (2340, 2342), False, 'import time\n')]
|
import sys
import os
# Add basedir to path
script_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(script_dir + "/../")
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from data.utils import read_pickle_from_file
from dscribe.descriptors import ACSF
from dscribe.core.system import System
from data.data import MolecularGraph, load_csv, make_graph
from time import time
DATA_DIR = script_dir + "/../../data/"
SPLIT_DIR = script_dir + "./split/"
GRAPH_DIR = script_dir + "./graph/"
class MolecularGraphDataset(Dataset):
def __init__(self, split,
csv,
mode,
augment=None):
"""Set Dataset for molecular graph
Arguments:
            split {str} -- path to a numpy split file (.npy) listing molecule names
csv {str} -- 'train' or 'test'
mode {str} -- train
Keyword Arguments:
augment {[type]} -- [description] (default: {None})
"""
self.split = split
self.csv = csv
self.mode = mode
self.augment = augment
self.df = pd.read_csv(DATA_DIR + '/%s.csv'%csv)
if split is not None:
self.id = np.load(split,allow_pickle=True)
else:
self.id = self.df.molecule_name.unique()
def __str__(self):
string = ''\
+ '\tmode = %s\n'%self.mode \
+ '\tsplit = %s\n'%self.split \
+ '\tcsv = %s\n'%self.csv \
+ '\tlen = %d\n'%len(self)
return string
def __len__(self):
return len(self.id)
def __getitem__(self, index):
molecule_name = self.id[index]
graph_file = f'{GRAPH_DIR}/{molecule_name}.pickle'
graph = read_pickle_from_file(graph_file)
if 0:
# 1JHC, 2JHC, 3JHC, 1JHN, 2JHN, 3JHN, 2JHH, 3JHH
mask = np.zeros(len(graph['coupling'].type),np.bool)
for t in ['2JHC' , '2JHN', '2JHH']:
mask += (graph['coupling'].type == COUPLING_TYPE.index(t))
graph['coupling'].id = graph['coupling'].id[mask]
graph['coupling'].contribution = graph['coupling'].contribution[mask]
graph['coupling'].index = graph['coupling'].index[mask]
graph['coupling'].type = graph['coupling'].type[mask]
graph['coupling'].value = graph['coupling'].value[mask]
return graph
def _collate_fn(batch):
    """Merge a list of molecular graphs into flat batched tensors plus coupling indices."""
graphs = []
targets = []
batch_size = len(batch)
offset = 0
coupling_value = []
coupling_atom_index = []
coupling_type_index = []
coupling_batch_index = []
infor = []
for b in range(batch_size):
graph = batch[b]
graphs.append(graph)
num_coupling = len(graph['coupling'].value)
coupling_value.append(graph['coupling'].value)
coupling_atom_index.append(graph['coupling'].index+offset)
coupling_type_index.append (graph['coupling'].type)
coupling_batch_index.append(np.array([b]*num_coupling))
infor.append(graph['coupling'].id)
offset += len(graph['atom'])
    train_input = MolecularGraph().get_flat_data(graphs)
gnode = []
for i, j in enumerate(train_input[0]):
gnode += [i] * len(j)
gbond = []
for i, j in enumerate(train_input[1]):
gbond += [i] * len(j)
gnode = torch.from_numpy(np.ravel(gnode))
gbond = torch.from_numpy(np.ravel(gbond))
node = torch.from_numpy(np.concatenate(train_input[0])).float()
edge = torch.from_numpy(np.concatenate(train_input[1])).float()
state = torch.from_numpy(np.concatenate(train_input[2])).float()
index1_temp = train_input[3]
index2_temp = train_input[4]
index1 = []
index2 = []
offset_ind = 0
for ind1, ind2 in zip(index1_temp, index2_temp):
index1 += [i + offset_ind for i in ind1]
index2 += [i + offset_ind for i in ind2]
offset_ind += (max(ind1) + 1)
index1 = torch.from_numpy(np.ravel(index1)).long()
index2 = torch.from_numpy(np.ravel(index2)).long()
coupling_value = torch.from_numpy(np.concatenate(coupling_value)).float()
targets = coupling_value
coupling_index = np.concatenate([
np.concatenate(coupling_atom_index),
np.concatenate(coupling_type_index).reshape(-1,1),
np.concatenate(coupling_batch_index).reshape(-1,1),
],-1)
coupling_index = torch.from_numpy(coupling_index).long()
inputs = [node, edge, state, index1, index2, gnode, gbond, coupling_index, infor]
return inputs, targets
if __name__ == "__main__":
dataset = MolecularGraphDataset(split='debug_split_by_mol.1000.npy',
mode = 'train',
csv = 'train',
)
train_dl = DataLoader(dataset, batch_size=16,
shuffle=False, collate_fn=_collate_fn,
num_workers=0)
# print(dataset[0])
start = time()
for inputs, targets in train_dl:
print(time() - start)
start = time()
    print('done')
|
[
"data.utils.read_pickle_from_file",
"pandas.read_csv",
"torch.from_numpy",
"os.path.dirname",
"numpy.array",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"numpy.ravel",
"sys.path.append",
"numpy.load",
"time.time"
] |
[((100, 136), 'sys.path.append', 'sys.path.append', (["(script_dir + '/../')"], {}), "(script_dir + '/../')\n", (115, 136), False, 'import sys\n'), ((73, 98), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (88, 98), False, 'import os\n'), ((4884, 4976), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(16)', 'shuffle': '(False)', 'collate_fn': '_collate_fn', 'num_workers': '(0)'}), '(dataset, batch_size=16, shuffle=False, collate_fn=_collate_fn,\n num_workers=0)\n', (4894, 4976), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5062, 5068), 'time.time', 'time', ([], {}), '()\n', (5066, 5068), False, 'from time import time\n'), ((1152, 1191), 'pandas.read_csv', 'pd.read_csv', (["(DATA_DIR + '/%s.csv' % csv)"], {}), "(DATA_DIR + '/%s.csv' % csv)\n", (1163, 1191), True, 'import pandas as pd\n'), ((1769, 1802), 'data.utils.read_pickle_from_file', 'read_pickle_from_file', (['graph_file'], {}), '(graph_file)\n', (1790, 1802), False, 'from data.utils import read_pickle_from_file\n'), ((3446, 3461), 'numpy.ravel', 'np.ravel', (['gnode'], {}), '(gnode)\n', (3454, 3461), True, 'import numpy as np\n'), ((3492, 3507), 'numpy.ravel', 'np.ravel', (['gbond'], {}), '(gbond)\n', (3500, 3507), True, 'import numpy as np\n'), ((5152, 5158), 'time.time', 'time', ([], {}), '()\n', (5156, 5158), False, 'from time import time\n'), ((1242, 1275), 'numpy.load', 'np.load', (['split'], {'allow_pickle': '(True)'}), '(split, allow_pickle=True)\n', (1249, 1275), True, 'import numpy as np\n'), ((3071, 3099), 'numpy.array', 'np.array', (['([b] * num_coupling)'], {}), '([b] * num_coupling)\n', (3079, 3099), True, 'import numpy as np\n'), ((4286, 4321), 'numpy.concatenate', 'np.concatenate', (['coupling_atom_index'], {}), '(coupling_atom_index)\n', (4300, 4321), True, 'import numpy as np\n'), ((4473, 4505), 'torch.from_numpy', 'torch.from_numpy', (['coupling_index'], {}), '(coupling_index)\n', (4489, 4505), False, 'import torch\n'), ((3537, 3567), 'numpy.concatenate', 'np.concatenate', (['train_input[0]'], {}), '(train_input[0])\n', (3551, 3567), True, 'import numpy as np\n'), ((3605, 3635), 'numpy.concatenate', 'np.concatenate', (['train_input[1]'], {}), '(train_input[1])\n', (3619, 3635), True, 'import numpy as np\n'), ((3674, 3704), 'numpy.concatenate', 'np.concatenate', (['train_input[2]'], {}), '(train_input[2])\n', (3688, 3704), True, 'import numpy as np\n'), ((4051, 4067), 'numpy.ravel', 'np.ravel', (['index1'], {}), '(index1)\n', (4059, 4067), True, 'import numpy as np\n'), ((4106, 4122), 'numpy.ravel', 'np.ravel', (['index2'], {}), '(index2)\n', (4114, 4122), True, 'import numpy as np\n'), ((4169, 4199), 'numpy.concatenate', 'np.concatenate', (['coupling_value'], {}), '(coupling_value)\n', (4183, 4199), True, 'import numpy as np\n'), ((5120, 5126), 'time.time', 'time', ([], {}), '()\n', (5124, 5126), False, 'from time import time\n'), ((4331, 4366), 'numpy.concatenate', 'np.concatenate', (['coupling_type_index'], {}), '(coupling_type_index)\n', (4345, 4366), True, 'import numpy as np\n'), ((4390, 4426), 'numpy.concatenate', 'np.concatenate', (['coupling_batch_index'], {}), '(coupling_batch_index)\n', (4404, 4426), True, 'import numpy as np\n')]
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: pysemeels.tools.generate_hdf5_file
.. moduleauthor:: <NAME> <<EMAIL>>
Generate HDF5 file from Hitachi EELS data.
"""
###############################################################################
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
import os.path
import logging
# Third party modules.
import numpy as np
# Local modules.
# Project modules.
from pysemeels.hitachi.eels_su.elv_text_file import ElvTextParameters
from pysemeels.hitachi.eels_su.elv_file import ElvFile
from pysemeels.tools.hdf5_file_labels import *
# Globals and constants variables.
class GenerateHdf5File(object):
def __init__(self, hdf5_file):
self.hdf5_file = hdf5_file
def add_spectrum(self, file_path, name=None):
if name is None:
basename, _extension = os.path.splitext(os.path.basename(file_path))
name = basename
spectrum_group = self.hdf5_file.create_group(name)
elv_text_file_path, _extension = os.path.splitext(file_path)
elv_text_file_path += '.txt'
with open(elv_text_file_path, 'r', encoding="UTF-16", errors='ignore') as elv_text_file:
elv_text_parameters = ElvTextParameters()
elv_text_parameters.read(elv_text_file)
spectrum_group.attrs[HDF5_MODEL] = elv_text_parameters.model
spectrum_group.attrs[HDF5_SAMPLE_HEIGHT] = elv_text_parameters.sample_height_mm
spectrum_group.attrs[HDF5_FILE_PATH] = elv_text_parameters.file_name
spectrum_group.attrs[HDF5_COMMENT] = elv_text_parameters.comment
spectrum_group.attrs[HDF5_DATE] = elv_text_parameters.date
spectrum_group.attrs[HDF5_TIME] = elv_text_parameters.time
spectrum_group.attrs[HDF5_ACCELERATING_VOLTAGE_V] = elv_text_parameters.accelerating_voltage_V
spectrum_group.attrs[HDF5_ENERGY_WIDTH_eV] = elv_text_parameters.energy_width_eV
spectrum_group.attrs[HDF5_ENERGY_LOSS] = elv_text_parameters.energy_loss_eV
spectrum_group.attrs[HDF5_ACQUISITION_SPEED] = elv_text_parameters.speed_us
with open(file_path, 'r', encoding="ANSI", errors='ignore') as elv_text_file:
elv_file = ElvFile()
elv_file.read(elv_text_file)
self.compare_attribute(spectrum_group, HDF5_DATE, elv_file.date)
self.compare_attribute(spectrum_group, HDF5_TIME, elv_file.time)
self.compare_attribute(spectrum_group, HDF5_COMMENT, elv_file.comment)
self.compare_attribute(spectrum_group, HDF5_ACQUISITION_SPEED, elv_file.dose)
self.compare_attribute(spectrum_group, HDF5_ENERGY_LOSS, elv_file.le)
spectrum_group.attrs[HDF5_RAW] = elv_file.raw
self.compare_attribute(spectrum_group, HDF5_ENERGY_WIDTH_eV, elv_file.energy_width)
spectrum_group.attrs[HDF5_DUAL_DET_POSITION] = elv_file.dual_det_position
spectrum_group.attrs[HDF5_DUAL_DET_POST] = elv_file.dual_det_post
spectrum_group.attrs[HDF5_DUAL_DET_CENTER] = elv_file.dual_det_center
spectrum_group.attrs[HDF5_Q1] = elv_file.q1
spectrum_group.attrs[HDF5_Q1S] = elv_file.q1s
spectrum_group.attrs[HDF5_Q2] = elv_file.q2
spectrum_group.attrs[HDF5_Q2S] = elv_file.q2s
spectrum_group.attrs[HDF5_Q3] = elv_file.q3
spectrum_group.attrs[HDF5_H1] = elv_file.h1
spectrum_group.attrs[HDF5_H1S] = elv_file.h1s
spectrum_group.attrs[HDF5_H2] = elv_file.h2
spectrum_group.attrs[HDF5_H2S] = elv_file.h2s
spectrum_group.attrs[HDF5_H4] = elv_file.h4
spectrum_group.attrs[HDF5_ELV_X] = elv_file.elv_x
spectrum_group.attrs[HDF5_ELV_Y] = elv_file.elv_y
spectrum_group.attrs[HDF5_SPECTRUM_ALIGNMENT_X] = elv_file.spectrum_alignment_x
spectrum_group.attrs[HDF5_SPECTRUM_ALIGNMENT_Y] = elv_file.spectrum_alignment_y
spectrum_group.attrs[HDF5_DET_SPEC_ALIGNMENT_X] = elv_file.det_spec_alignment_x
spectrum_group.attrs[HDF5_DET_SPEC_ALIGNMENT_Y] = elv_file.det_spec_alignment_y
spectrum_group.attrs[HDF5_DET_MAP_ALIGNMENT_X] = elv_file.det_map_alignment_x
spectrum_group.attrs[HDF5_DET_MAP_ALIGNMENT_Y] = elv_file.det_map_alignment_y
spectrum_group.attrs[HDF5_MAGNIFICATION] = elv_file.mag
data = np.zeros((1023, 5))
data[:, 0] = elv_file.energies_eV[:-1]
data[:, 1] = elv_file.counts[:-1]
data[:, 2] = elv_file.raw_counts[:-1]
data[:, 3] = elv_file.gain_corrections[:-1]
data[:, 4] = elv_file.dark_currents[:-1]
spectrum_data_set = spectrum_group.create_dataset(HDF5_SPECTRUM, data=data)
data = np.arange(1, 1023+1)
spectrum_channel_data_set = spectrum_group.create_dataset(HDF5_SPECTRUM_CHANNELS, data=data)
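            # Attach h5py dimension scales so that axis 0 of the spectrum is
            # labelled with the channel numbers and axis 1 with the data-type
            # names defined below.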
spectrum_data_set.dims.create_scale(spectrum_channel_data_set, HDF5_SPECTRUM_CHANNEL)
spectrum_data_set.dims[0].attach_scale(spectrum_channel_data_set)
data_types = [HDF5_SPECTRUM_ENERGIES_eV, HDF5_SPECTRUM_COUNTS, HDF5_SPECTRUM_RAW_COUNTS,
HDF5_SPECTRUM_GAIN_CORRECTIONS, HDF5_SPECTRUM_DARK_CURRENTS]
max_size = max([len(data_type) for data_type in data_types])
data = np.array(data_types, dtype="S{}".format(max_size+1))
spectrum_types_data_set = spectrum_group.create_dataset(HDF5_SPECTRUM_DATA_TYPES, data=data)
spectrum_data_set.dims.create_scale(spectrum_types_data_set, HDF5_SPECTRUM_DATA_TYPE)
spectrum_data_set.dims[1].attach_scale(spectrum_types_data_set)
def compare_attribute(self, spectrum_group, attribute_name, attribute_value):
if attribute_name in spectrum_group.attrs:
if attribute_value != spectrum_group.attrs[attribute_name]:
logging.error("{} is not the same in .txt and .elv files".format(attribute_name))
else:
spectrum_group.attrs[attribute_name] = attribute_value
|
[
"numpy.zeros",
"pysemeels.hitachi.eels_su.elv_file.ElvFile",
"pysemeels.hitachi.eels_su.elv_text_file.ElvTextParameters",
"numpy.arange"
] |
[((1859, 1878), 'pysemeels.hitachi.eels_su.elv_text_file.ElvTextParameters', 'ElvTextParameters', ([], {}), '()\n', (1876, 1878), False, 'from pysemeels.hitachi.eels_su.elv_text_file import ElvTextParameters\n'), ((2883, 2892), 'pysemeels.hitachi.eels_su.elv_file.ElvFile', 'ElvFile', ([], {}), '()\n', (2890, 2892), False, 'from pysemeels.hitachi.eels_su.elv_file import ElvFile\n'), ((5076, 5095), 'numpy.zeros', 'np.zeros', (['(1023, 5)'], {}), '((1023, 5))\n', (5084, 5095), True, 'import numpy as np\n'), ((5461, 5483), 'numpy.arange', 'np.arange', (['(1)', '(1023 + 1)'], {}), '(1, 1023 + 1)\n', (5470, 5483), True, 'import numpy as np\n')]
|
import sys
import click
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_probability as tfp
import statsmodels.api as sm
import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
from abc import ABC, abstractmethod
from pathlib import Path
from tensorflow.keras.losses import BinaryCrossentropy
from bore.models import DenseMaximizableSequential
from bore_experiments.datasets import make_classification_dataset
from bore_experiments.plotting.utils import GOLDEN_RATIO, WIDTH, pt_to_in
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
K.set_floatx("float64")
# shortcuts
tfd = tfp.distributions
OUTPUT_DIR = "figures/"
class DensityRatioBase(ABC):
def __call__(self, X, y=None):
return self.ratio(X, y)
@abstractmethod
def logit(self, X, y=None):
pass
def ratio(self, X, y=None):
return tf.exp(self.logit(X, y))
def prob(self, X, y=None):
"""
Probability of sample being from P_{top}(x) vs. P_{bot}(x).
"""
return tf.sigmoid(self.logit(X, y))
class DensityRatioMarginals(DensityRatioBase):
def __init__(self, top, bot):
self.top = top
self.bot = bot
def logit(self, X, y=None):
return self.top.log_prob(X) - self.bot.log_prob(X)
def make_dataset(self, num_samples, rate=0.5, dtype=tf.float64, seed=None):
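        # rate sets the fraction of samples drawn from the top distribution;
        # the remaining samples are drawn from the bottom distribution.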
num_top = int(num_samples * rate)
num_bot = num_samples - num_top
_X_top = self.top.sample(sample_shape=(num_top, 1), seed=seed)
_X_bot = self.bot.sample(sample_shape=(num_bot, 1), seed=seed)
X_top = tf.cast(_X_top, dtype=dtype).numpy()
X_bot = tf.cast(_X_bot, dtype=dtype).numpy()
return X_top, X_bot
class MLPDensityRatioEstimator(DensityRatioBase):
def __init__(self, num_layers=2, num_units=32, activation="tanh",
seed=None, *args, **kwargs):
self.model = DenseMaximizableSequential(input_dim=1, output_dim=1,
num_layers=num_layers,
num_units=num_units,
layer_kws=dict(activation=activation))
def logit(self, X, y=None):
# TODO: time will tell whether squeezing the final axis
# makes things easier.
return K.squeeze(self.model(X), axis=-1)
def compile(self, optimizer, metrics=["accuracy"], *args, **kwargs):
self.model.compile(optimizer=optimizer,
loss=BinaryCrossentropy(from_logits=True),
metrics=metrics, *args, **kwargs)
def fit(self, X_top, X_bot, *args, **kwargs):
X, y = make_classification_dataset(X_top, X_bot)
return self.model.fit(X, y, *args, **kwargs)
def evaluate(self, X_top, X_bot, *args, **kwargs):
X, y = make_classification_dataset(X_top, X_bot)
return self.model.evaluate(X, y, *args, **kwargs)
def gamma_relative_density_ratio(ratio, gamma):
denom = gamma + (1-gamma) / ratio
return 1 / denom
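# Writing r = l(x)/g(x) for the densities plotted below, the helper above computes
#   r_gamma(x) = l(x) / (gamma*l(x) + (1-gamma)*g(x)) = 1 / (gamma + (1-gamma)/r),
# which is bounded above by 1/gamma (the value approached as r grows large).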
@click.command()
@click.argument("name")
@click.option('--gamma', '-g', type=float, default=1/3)
@click.option("--output-dir", default=OUTPUT_DIR,
type=click.Path(file_okay=False, dir_okay=True),
help="Output directory.")
@click.option('--transparent', is_flag=True)
@click.option('--context', default="paper")
@click.option('--style', default="ticks")
@click.option('--palette', default="deep")
@click.option('--width', '-w', type=float, default=pt_to_in(WIDTH))
@click.option('--height', '-h', type=float)
@click.option('--aspect', '-a', type=float, default=GOLDEN_RATIO)
@click.option('--dpi', type=float)
@click.option('--extension', '-e', multiple=True, default=["png"])
@click.option("--seed", default=8888)
def main(name, gamma, output_dir, transparent, context, style, palette,
width, height, aspect, dpi, extension, seed):
num_features = 1 # dimensionality
num_train = 1000 # nbr training points in synthetic dataset
# x_min, x_max = -6.0, 6.0
x_min, x_max = -5.0, 5.0
num_index_points = 512 # nbr of index points
if height is None:
height = width / aspect
# figsize = size(width, aspect)
figsize = (width, height)
suffix = f"{width*dpi:.0f}x{height*dpi:.0f}"
rc = {
"figure.figsize": figsize,
"font.serif": ["Times New Roman"],
"text.usetex": True,
}
sns.set(context=context, style=style, palette=palette, font="serif", rc=rc)
output_path = Path(output_dir).joinpath(name)
output_path.mkdir(parents=True, exist_ok=True)
random_state = np.random.RandomState(seed)
# /preamble
X_grid = np.linspace(x_min, x_max, num_index_points) \
.reshape(-1, num_features)
p = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=[0.3, 0.7]),
components_distribution=tfd.Normal(loc=[2.0, -3.0], scale=[1.0, 0.5]))
q = tfd.Normal(loc=0.0, scale=2.0)
r = DensityRatioMarginals(top=p, bot=q)
X_p, X_q = r.make_dataset(num_train, rate=gamma, seed=seed)
X_train, y_train = make_classification_dataset(X_p, X_q)
kde_lesser = sm.nonparametric.KDEUnivariate(X_p)
kde_lesser.fit(bw="normal_reference")
kde_greater = sm.nonparametric.KDEUnivariate(X_q)
kde_greater.fit(bw="normal_reference")
# Build DataFrame
rows = []
rows.append(dict(x=X_grid.squeeze(axis=-1),
y=r.top.prob(X_grid).numpy().squeeze(axis=-1),
density=r"$\ell(x)$", kind=r"$\textsc{exact}$"))
rows.append(dict(x=X_grid.squeeze(axis=-1),
y=r.bot.prob(X_grid).numpy().squeeze(axis=-1),
density=r"$g(x)$", kind=r"$\textsc{exact}$"))
rows.append(dict(x=X_grid.squeeze(axis=-1),
y=kde_lesser.evaluate(X_grid.ravel()),
density=r"$\ell(x)$", kind=r"$\textsc{kde}$"))
rows.append(dict(x=X_grid.squeeze(axis=-1),
y=kde_greater.evaluate(X_grid.ravel()),
density=r"$g(x)$", kind=r"$\textsc{kde}$"))
frames = map(pd.DataFrame, rows)
data = pd.concat(frames, axis="index", ignore_index=True, sort=True)
fig, ax = plt.subplots()
sns.lineplot(x='x', y='y', hue="density", style="kind", data=data, ax=ax)
ax.set_prop_cycle(None)
ax.set_ylim(-0.025, None)
ax.set_xlim(1.1*X_grid.min(), 1.1*X_grid.max())
sns.rugplot(X_p.squeeze(), height=0.02, alpha=0.2, ax=ax)
sns.rugplot(X_q.squeeze(), height=0.02, alpha=0.2, ax=ax)
ax.set_xlabel(r'$x$')
ax.set_ylabel('density')
plt.tight_layout()
for ext in extension:
fig.savefig(output_path.joinpath(f"densities_{context}_{suffix}.{ext}"),
dpi=dpi, transparent=transparent)
plt.show()
classifiers = dict(
svm=SVC(C=10.0, kernel="rbf", probability=True, tol=1e-9),
rf=RandomForestClassifier(n_estimators=16, max_depth=3, random_state=random_state),
xgb=xgb.XGBClassifier(n_estimators=16, max_depth=3, use_label_encoder=False, random_state=random_state)
# mlp=
)
# base_clf = RandomForestClassifier(random_state=random_state)
# clf = CalibratedClassifierCV(base_estimator=base_clf, method="isotonic") \
# .fit(X_train, y_train)
r_mlp = MLPDensityRatioEstimator(num_layers=3, num_units=32, activation="elu")
r_mlp.compile(optimizer="adam", metrics=["accuracy"])
r_mlp.fit(X_p, X_q, epochs=500, batch_size=64)
# Build DataFrame
# rows = []
# # exact
# # rows.append({'x': X_grid.squeeze(axis=-1),
# # 'y': r.ratio(X_grid).numpy().squeeze(axis=-1),
# # 'kind': r"$\textsc{exact}$", r'$\gamma$': r"$0$"})
# rows.append({'x': X_grid.squeeze(axis=-1),
# 'y': gamma_relative_density_ratio(r.ratio(X_grid), gamma=gamma) \
# .numpy().squeeze(axis=-1),
# 'kind': r"$\textsc{exact}$", r'$\gamma$': r"$\frac{1}{4}$", "exact": True})
# # kde
# # rows.append({'x': X_grid.squeeze(axis=-1),
# # 'y': kde_lesser.evaluate(X_grid.ravel()) / kde_greater.evaluate(X_grid.ravel()),
# # 'kind': r"$\textsc{kde}$", r'$\gamma$': r"$0$"})
# rows.append({'x': X_grid.squeeze(axis=-1),
# 'y': gamma_relative_density_ratio(kde_lesser.evaluate(X_grid.ravel()) / kde_greater.evaluate(X_grid.ravel()), gamma),
# 'kind': r"$\textsc{kde}$", r'$\gamma$': r"$\frac{1}{4}$", "exact": False})
# # cpe
# for clf_name, clf in classifiers.items():
# clf = clf.fit(X_train, y_train)
# rows.append({'x': X_grid.squeeze(axis=-1),
# 'y': clf.predict_proba(X_grid).T[1] / gamma,
# 'kind': rf"$\textsc{{cpe}}$ (\textsc{{{clf_name}}})",
# r'$\gamma$': r"$\frac{1}{3}$", "exact": False})
# data = pd.concat(map(pd.DataFrame, rows), axis="index", ignore_index=True,
# sort=True)
fig, ax = plt.subplots()
ax.plot(X_grid.squeeze(axis=-1),
gamma_relative_density_ratio(r.ratio(X_grid), gamma=gamma).numpy().squeeze(axis=-1),
label=r"$\textsc{exact}$")
ax.plot(X_grid.squeeze(axis=-1),
gamma_relative_density_ratio(kde_lesser.evaluate(X_grid.ravel()) /
kde_greater.evaluate(X_grid.ravel()),
gamma=gamma),
alpha=0.8, label=r"$\textsc{kde}$")
ax.plot(X_grid.squeeze(axis=-1), r_mlp.prob(X_grid) / gamma,
alpha=0.8, label=r"$\textsc{{cpe}}$ (\textsc{mlp})")
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$r_{\gamma}(x)$")
ax.set_xlim(1.1*X_grid.min(), 1.1*X_grid.max())
ax.legend()
plt.tight_layout()
for ext in extension:
fig.savefig(output_path.joinpath(f"ratios_mlp_{context}_{suffix}.{ext}"),
dpi=dpi, transparent=transparent)
plt.show()
for clf_name, clf in classifiers.items():
clf = clf.fit(X_train, y_train)
fig, ax = plt.subplots()
ax.plot(X_grid.squeeze(axis=-1),
gamma_relative_density_ratio(r.ratio(X_grid), gamma=gamma).numpy().squeeze(axis=-1),
label=r"$\textsc{exact}$")
ax.plot(X_grid.squeeze(axis=-1),
gamma_relative_density_ratio(kde_lesser.evaluate(X_grid.ravel()) /
kde_greater.evaluate(X_grid.ravel()),
gamma=gamma),
alpha=0.8, label=r"$\textsc{kde}$")
ax.plot(X_grid.squeeze(axis=-1), clf.predict_proba(X_grid).T[1] / gamma,
alpha=0.8, label=rf"$\textsc{{cpe}}$ (\textsc{{{clf_name}}})")
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$r_{\gamma}(x)$")
ax.set_xlim(1.1*X_grid.min(), 1.1*X_grid.max())
ax.legend()
plt.tight_layout()
for ext in extension:
fig.savefig(output_path.joinpath(f"ratios_{clf_name}_{context}_{suffix}.{ext}"),
dpi=dpi, transparent=transparent)
plt.show()
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
|
[
"tensorflow.cast",
"numpy.random.RandomState",
"seaborn.set",
"pathlib.Path",
"click.option",
"bore_experiments.datasets.make_classification_dataset",
"numpy.linspace",
"click.command",
"click.argument",
"statsmodels.api.nonparametric.KDEUnivariate",
"tensorflow.keras.losses.BinaryCrossentropy",
"bore_experiments.plotting.utils.pt_to_in",
"sklearn.ensemble.RandomForestClassifier",
"tensorflow.keras.backend.set_floatx",
"seaborn.lineplot",
"xgboost.XGBClassifier",
"sklearn.svm.SVC",
"matplotlib.pyplot.show",
"click.Path",
"matplotlib.pyplot.tight_layout",
"pandas.concat",
"matplotlib.pyplot.subplots"
] |
[((717, 740), 'tensorflow.keras.backend.set_floatx', 'K.set_floatx', (['"""float64"""'], {}), "('float64')\n", (729, 740), True, 'import tensorflow.keras.backend as K\n'), ((3229, 3244), 'click.command', 'click.command', ([], {}), '()\n', (3242, 3244), False, 'import click\n'), ((3246, 3268), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (3260, 3268), False, 'import click\n'), ((3270, 3326), 'click.option', 'click.option', (['"""--gamma"""', '"""-g"""'], {'type': 'float', 'default': '(1 / 3)'}), "('--gamma', '-g', type=float, default=1 / 3)\n", (3282, 3326), False, 'import click\n'), ((3479, 3522), 'click.option', 'click.option', (['"""--transparent"""'], {'is_flag': '(True)'}), "('--transparent', is_flag=True)\n", (3491, 3522), False, 'import click\n'), ((3524, 3566), 'click.option', 'click.option', (['"""--context"""'], {'default': '"""paper"""'}), "('--context', default='paper')\n", (3536, 3566), False, 'import click\n'), ((3568, 3608), 'click.option', 'click.option', (['"""--style"""'], {'default': '"""ticks"""'}), "('--style', default='ticks')\n", (3580, 3608), False, 'import click\n'), ((3610, 3651), 'click.option', 'click.option', (['"""--palette"""'], {'default': '"""deep"""'}), "('--palette', default='deep')\n", (3622, 3651), False, 'import click\n'), ((3721, 3763), 'click.option', 'click.option', (['"""--height"""', '"""-h"""'], {'type': 'float'}), "('--height', '-h', type=float)\n", (3733, 3763), False, 'import click\n'), ((3765, 3829), 'click.option', 'click.option', (['"""--aspect"""', '"""-a"""'], {'type': 'float', 'default': 'GOLDEN_RATIO'}), "('--aspect', '-a', type=float, default=GOLDEN_RATIO)\n", (3777, 3829), False, 'import click\n'), ((3831, 3864), 'click.option', 'click.option', (['"""--dpi"""'], {'type': 'float'}), "('--dpi', type=float)\n", (3843, 3864), False, 'import click\n'), ((3866, 3931), 'click.option', 'click.option', (['"""--extension"""', '"""-e"""'], {'multiple': '(True)', 'default': "['png']"}), "('--extension', '-e', multiple=True, default=['png'])\n", (3878, 3931), False, 'import click\n'), ((3933, 3969), 'click.option', 'click.option', (['"""--seed"""'], {'default': '(8888)'}), "('--seed', default=8888)\n", (3945, 3969), False, 'import click\n'), ((4612, 4687), 'seaborn.set', 'sns.set', ([], {'context': 'context', 'style': 'style', 'palette': 'palette', 'font': '"""serif"""', 'rc': 'rc'}), "(context=context, style=style, palette=palette, font='serif', rc=rc)\n", (4619, 4687), True, 'import seaborn as sns\n'), ((4810, 4837), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4831, 4837), True, 'import numpy as np\n'), ((5303, 5340), 'bore_experiments.datasets.make_classification_dataset', 'make_classification_dataset', (['X_p', 'X_q'], {}), '(X_p, X_q)\n', (5330, 5340), False, 'from bore_experiments.datasets import make_classification_dataset\n'), ((5359, 5394), 'statsmodels.api.nonparametric.KDEUnivariate', 'sm.nonparametric.KDEUnivariate', (['X_p'], {}), '(X_p)\n', (5389, 5394), True, 'import statsmodels.api as sm\n'), ((5456, 5491), 'statsmodels.api.nonparametric.KDEUnivariate', 'sm.nonparametric.KDEUnivariate', (['X_q'], {}), '(X_q)\n', (5486, 5491), True, 'import statsmodels.api as sm\n'), ((6340, 6401), 'pandas.concat', 'pd.concat', (['frames'], {'axis': '"""index"""', 'ignore_index': '(True)', 'sort': '(True)'}), "(frames, axis='index', ignore_index=True, sort=True)\n", (6349, 6401), True, 'import pandas as pd\n'), ((6417, 6431), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), 
'()\n', (6429, 6431), True, 'import matplotlib.pyplot as plt\n'), ((6437, 6510), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""x"""', 'y': '"""y"""', 'hue': '"""density"""', 'style': '"""kind"""', 'data': 'data', 'ax': 'ax'}), "(x='x', y='y', hue='density', style='kind', data=data, ax=ax)\n", (6449, 6510), True, 'import seaborn as sns\n'), ((6808, 6826), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6824, 6826), True, 'import matplotlib.pyplot as plt\n'), ((6994, 7004), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7002, 7004), True, 'import matplotlib.pyplot as plt\n'), ((9248, 9262), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9260, 9262), True, 'import matplotlib.pyplot as plt\n'), ((10007, 10025), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10023, 10025), True, 'import matplotlib.pyplot as plt\n'), ((10194, 10204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10202, 10204), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2890), 'bore_experiments.datasets.make_classification_dataset', 'make_classification_dataset', (['X_top', 'X_bot'], {}), '(X_top, X_bot)\n', (2876, 2890), False, 'from bore_experiments.datasets import make_classification_dataset\n'), ((3016, 3057), 'bore_experiments.datasets.make_classification_dataset', 'make_classification_dataset', (['X_top', 'X_bot'], {}), '(X_top, X_bot)\n', (3043, 3057), False, 'from bore_experiments.datasets import make_classification_dataset\n'), ((10312, 10326), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10324, 10326), True, 'import matplotlib.pyplot as plt\n'), ((11153, 11171), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11169, 11171), True, 'import matplotlib.pyplot as plt\n'), ((11363, 11373), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11371, 11373), True, 'import matplotlib.pyplot as plt\n'), ((3394, 3436), 'click.Path', 'click.Path', ([], {'file_okay': '(False)', 'dir_okay': '(True)'}), '(file_okay=False, dir_okay=True)\n', (3404, 3436), False, 'import click\n'), ((3703, 3718), 'bore_experiments.plotting.utils.pt_to_in', 'pt_to_in', (['WIDTH'], {}), '(WIDTH)\n', (3711, 3718), False, 'from bore_experiments.plotting.utils import GOLDEN_RATIO, WIDTH, pt_to_in\n'), ((4707, 4723), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (4711, 4723), False, 'from pathlib import Path\n'), ((4868, 4911), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'num_index_points'], {}), '(x_min, x_max, num_index_points)\n', (4879, 4911), True, 'import numpy as np\n'), ((7042, 7096), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(10.0)', 'kernel': '"""rbf"""', 'probability': '(True)', 'tol': '(1e-09)'}), "(C=10.0, kernel='rbf', probability=True, tol=1e-09)\n", (7045, 7096), False, 'from sklearn.svm import SVC\n'), ((7108, 7187), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(16)', 'max_depth': '(3)', 'random_state': 'random_state'}), '(n_estimators=16, max_depth=3, random_state=random_state)\n', (7130, 7187), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7201, 7304), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'n_estimators': '(16)', 'max_depth': '(3)', 'use_label_encoder': '(False)', 'random_state': 'random_state'}), '(n_estimators=16, max_depth=3, use_label_encoder=False,\n random_state=random_state)\n', (7218, 7304), True, 'import xgboost as xgb\n'), ((1759, 1787), 'tensorflow.cast', 'tf.cast', 
(['_X_top'], {'dtype': 'dtype'}), '(_X_top, dtype=dtype)\n', (1766, 1787), True, 'import tensorflow as tf\n'), ((1812, 1840), 'tensorflow.cast', 'tf.cast', (['_X_bot'], {'dtype': 'dtype'}), '(_X_bot, dtype=dtype)\n', (1819, 1840), True, 'import tensorflow as tf\n'), ((2683, 2719), 'tensorflow.keras.losses.BinaryCrossentropy', 'BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2701, 2719), False, 'from tensorflow.keras.losses import BinaryCrossentropy\n')]
|
"""
imgLog.py - experimental log for imgFolder
initial: 2019-10-04
"""
import os
import pandas as pd
if ('np' not in dir()): import numpy as np
from imlib.imgfolder import ImgFolder
__author__ = '<NAME> <<EMAIL>>'
__version__ = '1.0.0'
class ImgLog(ImgFolder):
""" imgFolder for channel experiment images """
def __init__(self, dirname, sort=False, debug=False):
""" initialization """
super().__init__(dirname, sort=sort, debug=debug)
self._logfname = dirname + '/log.xlsx'
if not self.loadLog():
self._data['wall1'] = self._image._meta['wall1']
self._data['wall2'] = self._image._meta['wall2']
self._data['isChannel'] = False
self._data['range1'] = self._image._meta['range1']
self._data['range2'] = self._image._meta['range2']
self._data['hasRange'] = False
self._data['fangle'] = 0. # frame angle
self._data['mangle'] = 0. # migration angle
self._data['p'] = 0. # pressure
self._data['u'] = 0. # longitudinal velocity
self._data['mag'] = '40x' # magnification
self._data['filter'] = '' # filter
self._data['exp'] = 0. # exposure
self._data['location'] = '' # location
self._data['D'] = 0. # diffusion constant
self._data['Pe'] = 0. # Peclet number
self._data = self._data.astype(
{'D':'float', 'Pe':'float', 'mangle':'float',
'hasRange':'bool', 'isChannel':'bool',
'exp':'float', 'range1':'int', 'range2':'int',
'wall1':'int', 'wall2':'int'})
def __repr__(self):
""" show print out message """
msg = super().__repr__()
return msg
def __getitem__(self, fileID):
self._image = super().__getitem__(fileID)
p = self._data.at[fileID, 'p']
if isinstance(p, str) and (p.find(',') > -1):
p = float(p.replace(',', '.'))
self._data.at[fileID, 'p'] = p
if isinstance(p, float) or isinstance(p, np.int64):
u = 143.9518*p + 44.0784 # TODO in case of condenser chip
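            # empirical linear calibration from the applied pressure p to the
            # longitudinal velocity u; the constants are presumably specific to
            # the chip in use (cf. the TODO on the line above)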
self._data.at[fileID, 'u'] = u
if self._debug: print('... (p, u) = {}, {}'.format(p, u))
self._data.at[fileID, 'exp'] = self._image._meta['exposuretime']
self._image.set_expInfo(magnification=self._data.at[fileID, 'mag'],
velocity=self._data.at[fileID, 'u'], p=p,
fangle=self._data.at[fileID, 'fangle'])
return self._image
# manage log excel sheet
def saveLog(self):
""" save log sheet """
with pd.ExcelWriter(self._logfname) as writer:
if self._debug: print('... save to {}'.format(self._logfname))
self._data.to_excel(writer)
def loadLog(self):
""" load log sheet """
if os.path.isfile(self._logfname):
if self._debug: print('... load from {}'.format(self._logfname))
self._data = pd.read_excel(self._logfname, index_col=0)
return True
else:
return False
# image analysis
def set_log(self, colname, values, ranges=[]):
""" set log values for specific colname """
if len(ranges) == 0:
ranges = range(len(self._data))
for i in ranges:
self._data.at[i, colname] = values
if self._debug: print('{}: [{}] - {}'.format(i, colname, values))
def detect_channel(self, fileID=-1, show=True):
""" find wall information and save in object """
if fileID > -1:
self._image = self.getfile(fileID)
res = self._image.detect_channel(show=show)
if len(res) > 3:
self._data.at[self._curidx, 'range1'] = res[2]
self._data.at[self._curidx, 'range2'] = res[3]
self._data.at[self._curidx, 'hasRange'] = True
if len(res) > 1:
self._data.at[self._curidx, 'wall1'] = res[0]
self._data.at[self._curidx, 'wall2'] = res[1]
self._data.at[self._curidx, 'isChannel'] = True
if len(res) == 1:
self._data.at[self._curidx, 'isChannel'] = False
return res
def analysis_10x(self, fileID, bfileID=-1, wallinfo=[], p=-1, method='gaussian', update=True, padding=0):
""" find angle and diffusion constant in 10x flu. and bright field images """
angle = 0.0
if p > -1:
self._data.at[self._curidx, 'p'] = p
if len(wallinfo) == 4:
self._data.loc[fileID, ['wall1', 'wall2', 'range1', 'range2']] = wallinfo
print('... fileID: [{}] use wallinfo: {}, ranges: {}'.format(fileID, wallinfo[:2], wallinfo[2:]))
else:
if bfileID > -1:
wallinfo = self.detect_channel(fileID=bfileID)
wallinfo[0] = wallinfo[0] + padding
wallinfo[1] = wallinfo[1] - padding
if len(wallinfo) == 3:
self._data.loc[fileID, ['wall1', 'wall2', 'range1']] = wallinfo
elif len(wallinfo) == 4:
self._data.loc[fileID, ['wall1', 'wall2', 'range1', 'range2']] = wallinfo
else:
print('... no wall. Is this [{}] correct image?'.format(bfileID))
return
img = self.__getitem__(bfileID)
angle = img.detect_angles(show=False)
print('... fileID: [{}] use wallinfo: {}, ranges: {}, frame angle: {}'.format(fileID, wallinfo[:2], wallinfo[2:], angle))
# set image information
self._image = self.__getitem__(fileID)
self._image.set_wallinfo(self._data.loc[fileID, ['wall1', 'wall2', 'range1', 'range2']])
self._image.set_expInfo(magnification=self._data.at[fileID, 'mag'],
velocity=self._data.at[fileID, 'u'], p=self._data.at[fileID, 'p'],
fangle=self._data.at[fileID, 'fangle'])
# calculate peak positions
self._image.fitlines_x(method=method, update=update)
self._image.showfit_sigmas()
self._image.showfit_angles()
# save results
self._data.at[fileID, 'mangle'] = self._image._meta['mangle']
self._data.at[fileID, 'D'] = self._image._meta['D']
self._data.at[fileID, 'Pe'] = self._image._meta['Pe']
if angle > 0:
self._data.at[fileID, 'fangle'] = angle
def analysis_all(self, blist, flist, method='gaussian', update=False):
""" analaysis migration angle of files in flist with wall info from
blist """
if isinstance(blist, int):
blist = np.zeros_like(np.array(flist)) + blist
for i in range(len(flist)):
self.analysis_10x(flist[i], bfileID=blist[i], padding=5,
update=update, method=method)
self.saveLog()
def showinfo(self, colname='mag', condition='10x'):
""" show panda data with condition """
return self._data[self._data[colname] == condition]
# vim:foldmethod=indent:foldlevel=0
|
[
"os.path.isfile",
"numpy.array",
"pandas.ExcelWriter",
"pandas.read_excel"
] |
[((2968, 2998), 'os.path.isfile', 'os.path.isfile', (['self._logfname'], {}), '(self._logfname)\n', (2982, 2998), False, 'import os\n'), ((2744, 2774), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['self._logfname'], {}), '(self._logfname)\n', (2758, 2774), True, 'import pandas as pd\n'), ((3102, 3144), 'pandas.read_excel', 'pd.read_excel', (['self._logfname'], {'index_col': '(0)'}), '(self._logfname, index_col=0)\n', (3115, 3144), True, 'import pandas as pd\n'), ((6749, 6764), 'numpy.array', 'np.array', (['flist'], {}), '(flist)\n', (6757, 6764), True, 'import numpy as np\n')]
|
from mcc_libusb import *
import datetime
import time
import numpy as np
mcc = USB1208FS()
mcc.usbOpen()
#mcc.usbDConfigPort(DIO_PORTA, DIO_DIR_OUT)
#mcc.usbDConfigPort(DIO_PORTB, DIO_DIR_IN)
#mcc.usbDOut(DIO_PORTA, 0)
#num = mcc.usbAIn(1, BP_1_00V)
#print(str(mcc.volts_FS(BP_1_00V, num)))
#channel = np.array([1, 2, 3, 7])
#gain = np.array([SE_10_00V, BP_10_00V, BP_20_00V, BP_1_25V])
#mcc.usbALoadQueue(4, channel, gain)
#mcc.usbReset()
#mcc.usbAIn_Stop()
options = AIN_EXECUTION | AIN_GAIN_QUEUE
sdata = mcc.usbAIn_Scan_SE(0, 0, 50, 1000, options)
print(sdata)
print(mcc.volts_SE(np.average(sdata)))
#mcc.usbALoadQueue(1, np.array([1]), np.array([BP_10_00V]))
#sdata1 = mcc.usbAIn_Scan(1,1,50,1000, AIN_EXECUTION)
#print(sdata1)
#print(mcc.volts_FS(BP_10_00V, np.average(sdata1)))
mcc.usbClose()
'''
while 1:
print("\nUSB 1208FS Testing")
print("----------------")
print("Hit 'b' to blink LED")
print("Hit 'c' to test counter")
print("Hit 'e' to exit")
print("Hit 'd' to test digital I/O");
print("Hit 'g' to test analog input scan (differential).")
print("Hit 'j' to test analog input scan (single ended).")
print("Hit 'i' to test analog input (differential mode)")
print("Hit 'h' to test analog input (single ended)")
print("Hit 'o' to test analog output")
print("Hit 'O' to test analog output scan")
print("Hit 'r' to reset")
print("Hit 'S' to get status")
print("Hit 's' to get serial number")
i = input(">> ")
if i == 'b': #test to see if led blinks
mcc.usbBlink()
elif i == 'e':
mcc.close()
exit(1)
elif i == 'd':
print("\nTesting Digital I/O....")
print("connect pins 21 through 28 <=> 32 through 39")
temp = int(input("Enter a byte number [0-0xff]: "))
mcc.usbDOut(DIO_PORTA, temp)
din = mcc.usbDIn(DIO_PORTB)
print("The number you entered = " + hex(din & 0xff))
elif i == 'i':
print("Testing the analog input differential...")
gain = int(input("Enter gain: "))
channel = int(input("Enter channel [0-7]: "))
value = mcc.usbAIn(channel, gain)
print("Channel: " + str(channel) + ": value = " + str(value))
elif i == 'h':
print("Testing the analog input single ended...")
#channel = input("Entner channel [0-7]: ")
for i in range(0, 100):
start = datetime.datetime.now()
for j in range(0,8):
value = mcc.usbAIn(j, SE_10_00V)
print("Channel: %d: Value = 0x%04X, %.2fV" % (j%8 ,value, mcc.volts_SE(value)))
delta = datetime.datetime.now() - start;
print("%d" % (delta.microseconds))
time.sleep(0.1)
elif i == 'o': #test the analog output
print("Testing the analog output...")
channel = int(input("Enter channel [0-1] => (pin 13-14): "))
value = int(input("Enter a value: "))
mcc.usbAOut(channel, value)
else:
continue
'''
|
[
"numpy.average"
] |
[((585, 602), 'numpy.average', 'np.average', (['sdata'], {}), '(sdata)\n', (595, 602), True, 'import numpy as np\n')]
|
import sys
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch.backends import cudnn
from utils.utils import cast
from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_
from .dpcnn import dpcnn
from .prep_text import TextData_Uni, TextData_Lab, TextDataBatches, gen_uni_name, gen_lab_name
from .prep_text_n import TextData_N, gen_n_name
from .prep_text import main as prep_text_main
from .text_utils import get_dlist, load_x_emb, match_vocab
from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index
cudnn.benchmark = True
#--------------------------------------------------------------------
# For a dataset "dataname", the following input files are required.
# dataname-train.tok.txt, dataname-train.cat
# dataname-test.tok.txt, dataname-test.cat
# dataname.catdic
#
# *.tok.txt: tokens delimited by white space. one document per line.
# *.cat: class labels.
# *.catdic: class names used in *.cat. one name per line.
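#
# For illustration only (hypothetical contents, not part of the distribution):
#   dataname-train.tok.txt : "this movie was great ..."   <- one tokenized document per line
#   dataname-train.cat     : "pos"                         <- label of the document on the same line
#   dataname.catdic        : "pos" / "neg" on separate lines <- one class name per line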
#--------------------------------------------------------------------
#--------------------------------------------------------------------
def prep_data(opt, types):
missing = ''
for type in types:
ds_path = gen_uni_name(opt.dataroot, opt.dataset, type)
ls_path = gen_lab_name(opt.dataroot, opt.dataset, type)
if not os.path.exists(ds_path):
missing += ' %s' % ds_path
if not os.path.exists(ls_path):
missing += ' %s' % ls_path
if len(missing) > 0:
timeLog('----------------------------------------------------------------------')
if opt.dont_write_to_dataroot:
raise Exception("The following files are missing: %s\nTo generate them, turn off 'dont_write_to_dataroot'." % missing)
timeLog('Calling prep_text_main for creating the following files: %s' % missing)
prep_text_main('prep', ['--dataset', opt.dataset, '--dataroot', opt.dataroot ])
timeLog('Done with prep text main ------------------------------')
else:
timeLog('Using existing data files ... ')
#----------------------------------------------------------
def check_opt_(opt):
#--- required attributes
names = [ 'dataroot','dataset','num_dev','x_emb','seed','batch_unit','batch_size','depth','width','dropout','top_dropout','ker_size']
raise_if_absent(opt, names, who='dpcnn_train')
#--- optional attributes
add_if_absent_(opt, ['dont_write_to_dataroot'], False)
add_if_absent_(opt, ['num_train','req_max_len'], -1)
add_if_absent_(opt, ['train_dlist_path','dev_dlist_path'], None)
add_if_absent_(opt, ['csv_fn'], '')
#********************************************************************
def main(opt):
timeLog("dpcnn_train(opt) begins ...")
check_opt_(opt)
logging('Using %s ... ' % ('GPU(s)' if torch.cuda.is_available() else 'CPU'))
reset_logging(opt.csv_fn)
torch.manual_seed(opt.seed)
np.random.seed(opt.seed)
#--- load external embeddings
x_embed,x_n_maxes = load_x_emb(opt.x_emb)
#--- prepare data
prep_data(opt, ['train', 'test'])
rs = np.random.get_state()
def prep_uni(type):
return TextData_Uni(pathname=gen_uni_name(opt.dataroot, opt.dataset, type))
def prep_lab(type):
return TextData_Lab(pathname=gen_lab_name(opt.dataroot, opt.dataset, type))
def prep_x_dss(type): # 'x' for extra
if x_n_maxes is None:
return None
return [ TextData_N(gen_n_name(opt.dataroot, opt.dataset, n_max, type)) for n_max in x_n_maxes ]
trn_dlist,dev_dlist = get_dlist(opt.seed, opt.num_train, opt.num_dev, opt.train_dlist_path, opt.dev_dlist_path,
len(prep_uni('train')))
def read_ds_ls_x(dlist): # read data and labels
type='train'
ds = prep_uni(type); ds.shuffle(dlist)
ls = prep_lab(type); ls.shuffle(dlist)
x_dss = prep_x_dss(type)
if x_dss is not None:
for xds in x_dss:
xds.shuffle(dlist)
return ds, ls, x_dss
td_ds,td_ls,x_td = read_ds_ls_x(trn_dlist) # training data
dv_ds,dv_ls,x_dv = read_ds_ls_x(dev_dlist) # validation data
match_vocab(x_embed, td_ds, x_td)
type = 'test'
ts_ds = prep_uni(type); ts_ls = prep_lab(type); x_ts = prep_x_dss(type) # test data
bch_param = {'req_max_len':opt.req_max_len, 'batch_unit':opt.batch_unit, 'batch_size':opt.batch_size}
trn_data = TextDataBatches(td_ds, td_ls, **bch_param, do_shuffle=True, x_dss=x_td)
dev_data = TextDataBatches(dv_ds, dv_ls, **bch_param, do_shuffle=False, x_dss=x_dv)
tst_data = TextDataBatches(ts_ds, ts_ls, **bch_param, do_shuffle=False, x_dss=x_ts)
np.random.set_state(rs)
test_dss = [ {'name':'dev', 'data':dev_data}, {'name':'test', 'data':tst_data} ]
num_classes = td_ls.num_class()
logging('#classes=%d' % num_classes)
if num_classes != dv_ls.num_class() or num_classes != ts_ls.num_class():
raise Exception('Conflict in # of classes: ' +str(num_classes)+','+str(dv_ls.num_class())+','+str(ts_ls.num_class()))
vocab_size = td_ds.vocab_size()
logging('#vocab=%d' % vocab_size)
if vocab_size != dv_ds.vocab_size() or vocab_size != ts_ds.vocab_size():
raise Exception('Conflict in vocabulary sizes: '+str(vocab_size)+','+str(dv_ds.vocab_size())+','+str(ts_ds.vocab_size()))
#--- prepare a model
def initialize_model():
return dpcnn(opt.depth, opt.width, num_classes, vocab_size,
top_dropout=opt.top_dropout, dropout=opt.dropout,
ker_size=opt.ker_size,
x_embed=x_embed) # external embedding
func, params = initialize_model()
#--- training ...
loss_function = F.cross_entropy
def net(sample, is_train=False):
if sample is None:
return loss_function
inputs = cast(sample[0], 'long')
x_inputs = [ cast(data, 'long') for data in sample[2] ] if len(sample) >= 3 else None
output = func(inputs, params, is_train, extra_input=x_inputs)
targets = cast(sample[Target_index], 'long')
return loss_function(output, targets), output
if not is_gulf(opt):
train_base_model(opt, net, params, trn_data, test_dss)
else:
i_func, i_params = initialize_model()
copy_params(src=params, dst=i_params)
def i_net(sample):
is_train = False
inputs = cast(sample[0], 'long')
x_inputs = [ cast(data, 'long') for data in sample[2] ] if len(sample) >= 3 else None
return i_func(inputs, i_params, is_train, extra_input=x_inputs)
train_gulf_model(opt, i_net, i_params, net, params, trn_data, test_dss)
timeLog("dpcnn_train(opt) ends ...")
|
[
"utils.utils0.raise_if_absent",
"torch.manual_seed",
"numpy.random.get_state",
"utils.utils0.add_if_absent_",
"numpy.random.set_state",
"utils.utils.cast",
"os.path.exists",
"utils.utils0.timeLog",
"gulf.train_base_model",
"gulf.train_gulf_model",
"gulf.copy_params",
"torch.cuda.is_available",
"utils.utils0.reset_logging",
"numpy.random.seed",
"gulf.is_gulf",
"utils.utils0.logging"
] |
[((2349, 2395), 'utils.utils0.raise_if_absent', 'raise_if_absent', (['opt', 'names'], {'who': '"""dpcnn_train"""'}), "(opt, names, who='dpcnn_train')\n", (2364, 2395), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2429, 2483), 'utils.utils0.add_if_absent_', 'add_if_absent_', (['opt', "['dont_write_to_dataroot']", '(False)'], {}), "(opt, ['dont_write_to_dataroot'], False)\n", (2443, 2483), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2487, 2540), 'utils.utils0.add_if_absent_', 'add_if_absent_', (['opt', "['num_train', 'req_max_len']", '(-1)'], {}), "(opt, ['num_train', 'req_max_len'], -1)\n", (2501, 2540), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2543, 2608), 'utils.utils0.add_if_absent_', 'add_if_absent_', (['opt', "['train_dlist_path', 'dev_dlist_path']", 'None'], {}), "(opt, ['train_dlist_path', 'dev_dlist_path'], None)\n", (2557, 2608), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2611, 2646), 'utils.utils0.add_if_absent_', 'add_if_absent_', (['opt', "['csv_fn']", '""""""'], {}), "(opt, ['csv_fn'], '')\n", (2625, 2646), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2736, 2774), 'utils.utils0.timeLog', 'timeLog', (['"""dpcnn_train(opt) begins ..."""'], {}), "('dpcnn_train(opt) begins ...')\n", (2743, 2774), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2879, 2904), 'utils.utils0.reset_logging', 'reset_logging', (['opt.csv_fn'], {}), '(opt.csv_fn)\n', (2892, 2904), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2909, 2936), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (2926, 2936), False, 'import torch\n'), ((2940, 2964), 'numpy.random.seed', 'np.random.seed', (['opt.seed'], {}), '(opt.seed)\n', (2954, 2964), True, 'import numpy as np\n'), ((3113, 3134), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (3132, 3134), True, 'import numpy as np\n'), ((4659, 4682), 'numpy.random.set_state', 'np.random.set_state', (['rs'], {}), '(rs)\n', (4678, 4682), True, 'import numpy as np\n'), ((4807, 4843), 'utils.utils0.logging', 'logging', (["('#classes=%d' % num_classes)"], {}), "('#classes=%d' % num_classes)\n", (4814, 4843), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((5082, 5115), 'utils.utils0.logging', 'logging', (["('#vocab=%d' % vocab_size)"], {}), "('#vocab=%d' % vocab_size)\n", (5089, 5115), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((6624, 6660), 'utils.utils0.timeLog', 'timeLog', (['"""dpcnn_train(opt) ends ..."""'], {}), "('dpcnn_train(opt) ends ...')\n", (6631, 6660), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((1547, 1633), 'utils.utils0.timeLog', 'timeLog', (['"""----------------------------------------------------------------------"""'], {}), "(\n '----------------------------------------------------------------------')\n", (1554, 1633), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((1800, 1885), 'utils.utils0.timeLog', 'timeLog', (["('Calling prep_text_main for creating the following 
files: %s' % missing)"], {}), "('Calling prep_text_main for creating the following files: %s' % missing\n )\n", (1807, 1885), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((1973, 2039), 'utils.utils0.timeLog', 'timeLog', (['"""Done with prep text main ------------------------------"""'], {}), "('Done with prep text main ------------------------------')\n", (1980, 2039), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((2055, 2096), 'utils.utils0.timeLog', 'timeLog', (['"""Using existing data files ... """'], {}), "('Using existing data files ... ')\n", (2062, 2096), False, 'from utils.utils0 import logging, reset_logging, timeLog, raise_if_absent, add_if_absent_\n'), ((5809, 5832), 'utils.utils.cast', 'cast', (['sample[0]', '"""long"""'], {}), "(sample[0], 'long')\n", (5813, 5832), False, 'from utils.utils import cast\n'), ((6009, 6043), 'utils.utils.cast', 'cast', (['sample[Target_index]', '"""long"""'], {}), "(sample[Target_index], 'long')\n", (6013, 6043), False, 'from utils.utils import cast\n'), ((6107, 6119), 'gulf.is_gulf', 'is_gulf', (['opt'], {}), '(opt)\n', (6114, 6119), False, 'from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index\n'), ((6127, 6181), 'gulf.train_base_model', 'train_base_model', (['opt', 'net', 'params', 'trn_data', 'test_dss'], {}), '(opt, net, params, trn_data, test_dss)\n', (6143, 6181), False, 'from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index\n'), ((6241, 6278), 'gulf.copy_params', 'copy_params', ([], {'src': 'params', 'dst': 'i_params'}), '(src=params, dst=i_params)\n', (6252, 6278), False, 'from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index\n'), ((6548, 6619), 'gulf.train_gulf_model', 'train_gulf_model', (['opt', 'i_net', 'i_params', 'net', 'params', 'trn_data', 'test_dss'], {}), '(opt, i_net, i_params, net, params, trn_data, test_dss)\n', (6564, 6619), False, 'from gulf import is_gulf, train_base_model, train_gulf_model, copy_params, Target_index\n'), ((1382, 1405), 'os.path.exists', 'os.path.exists', (['ds_path'], {}), '(ds_path)\n', (1396, 1405), False, 'import os\n'), ((1456, 1479), 'os.path.exists', 'os.path.exists', (['ls_path'], {}), '(ls_path)\n', (1470, 1479), False, 'import os\n'), ((6349, 6372), 'utils.utils.cast', 'cast', (['sample[0]', '"""long"""'], {}), "(sample[0], 'long')\n", (6353, 6372), False, 'from utils.utils import cast\n'), ((2836, 2861), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2859, 2861), False, 'import torch\n'), ((5852, 5870), 'utils.utils.cast', 'cast', (['data', '"""long"""'], {}), "(data, 'long')\n", (5856, 5870), False, 'from utils.utils import cast\n'), ((6395, 6413), 'utils.utils.cast', 'cast', (['data', '"""long"""'], {}), "(data, 'long')\n", (6399, 6413), False, 'from utils.utils import cast\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import random
import psutil
import logging
import pandas as pd
import numpy as np
from io import open
from collections import Counter
from multiprocessing import cpu_count
from concurrent.futures import ProcessPoolExecutor
from scipy.sparse import csr_matrix, save_npz, load_npz, lil_matrix
from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
from six import text_type as unicode
from six import iteritems
from six.moves import range
from fastGraph.graph import Graph
from fastGraph import ngram
import pdb
p = psutil.Process(os.getpid())
try:
p.cpu_affinity(list(range(cpu_count())))
except AttributeError:
pass
LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s Line %(lineno)s: %(message)s"
logging.basicConfig(format=LOGFORMAT)
logger = logging.getLogger("fastGraph")
logger.setLevel(logging.INFO)
DTYPE = np.float64
def debug(type_, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
sys.__excepthook__(type_, value, tb)
else:
import traceback
import pdb
traceback.print_exception(type_, value, tb)
print(u"\n")
pdb.pm()
def load_matrix(args):
logger.info("Reading from "+str(args.input))
if "wiki-Vote.csv" in args.input:
df = pd.read_csv(args.input, sep=',', comment='#')
max_node = max(max(df['FromNodeId'].unique()), max(df['ToNodeId'].unique()))
total_len = max_node + 1
matrix = lil_matrix(np.zeros((total_len, total_len), dtype=DTYPE))
for row in df.itertuples():
matrix[row.FromNodeId, row.ToNodeId] = matrix[row.FromNodeId, row.ToNodeId] + 1 # Each edge is binary
return csr_matrix(matrix)
elif "weighted_directed.csv" in args.input:
df = pd.read_csv(args.input, sep=',', comment='#')
max_node = max(max(df['SOURCE'].unique()), max(df['TARGET'].unique()))
total_len = max_node + 1
matrix = lil_matrix(np.zeros((total_len, total_len), dtype=DTYPE))
for row in df.itertuples():
matrix[row.SOURCE, row.TARGET] = matrix[row.SOURCE, row.TARGET] + row.RATING # Each edge has different weights
return csr_matrix(matrix)
elif ".npz" in args.input or ".npy" in args.input:
logger.info("Load matrix directly")
matrix = np.load(args.input)
return csr_matrix(matrix)
else:
# Implement parsing here to transform into matrix form.
raise NotImplementedError("Implement customized parsing here.")
def fastGraph_flow(args):
# Read and process different input
matrix = load_matrix(args)
logger.info("Matrix loaded.")
graph = Graph()
graph.build_graph_from_matrix(matrix, is_directed=True, remove_self_loops=False,
normalized_edge=True, outward_prob_check=True)
# Generate walks, select which walk to use by de-comment
if args.walk_type == "likely":
# walks = graph.build_likely_walk_corpus_multiprocess(args.number_paths, args.path_length,
# rand=random.Random(0), shuffle=True, deduplicate=False)
walks = graph.build_likely_walk_corpus(args.number_paths, args.path_length, rand=random.Random(0),
shuffle=True, deduplicate=False)
elif args.walk_type == "node2vec":
graph.preprocess_node2vec_walk(args.p, args.q)
walks = graph.build_node2vec_walk_corpus(args.number_paths, args.path_length, rand=random.Random(0),
shuffle=True, deduplicate=False)
elif args.walk_type == "deep":
walks = graph.build_deepwalk_corpus(args.number_paths, args.path_length, rand=random.Random(0),
shuffle=True, deduplicate=False)
else:
raise ValueError("--walk-type must be either 'likely', 'node2vec' or 'deep'.")
# Save walks to storage, enabling gensim's iterator ability.
walks_file = ''.join(str(args.input).split('.')[:-1])+'.walks'
with open(walks_file, 'w') as fout:
for walk in walks:
fout.write(' '.join(walk)+'\n')
logger.info("Walks saved to "+walks_file)
    walks = LineSentence(walks_file)
# Phrases
if args.ngram > 1:
logger.info("Building n-gram with n="+str(args.ngram)+"...")
walks, ngram_phrasers = ngram.build_ngram(walks, args.ngram)
# Word2Vec
logger.info("Training ...")
w2v = Word2Vec(walks, size=args.embed_size, window=args.window_size, min_count=0,
sg=1, hs=0, negative=10, workers=args.workers)
# Save model
w2v.save(args.output)
def main():
parser = ArgumentParser("fastGraph", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("-l", "--log", dest="log", default="INFO",
help="log verbosity level")
parser.add_argument('--input', nargs='?', required=True,
help='Input matrix')
parser.add_argument('--max-memory-data-size', default=1000000000, type=int,
help='Size to start dumping walks to disk, instead of keeping them in memory.')
parser.add_argument('--number-paths', default=5, type=int,
help='Number of random walks to start at each node')
parser.add_argument('--output', required=True,
help='Output representation file')
parser.add_argument('--embed-size', default=64, type=int,
help='Dimension of the latent vector as embedding.')
parser.add_argument('--seed', default=0, type=int,
help='Seed for random walk generator.')
parser.add_argument('--directed', default=True, type=bool,
help='Treat the graph as directed.')
parser.add_argument('--path-length', default=40, type=int,
help='Length of the random walk started at each node')
parser.add_argument('--window-size', default=5, type=int,
help='Window size of skipgram model.')
parser.add_argument('--walk-type', default="likely", type=str,
                        help='Which walk method to use: likely, node2vec or deep.')
parser.add_argument('--p', default=5, type=int,
help="p value, refer to original paper: https://cs.stanford.edu/~jure/pubs/node2vec-kdd16.pdf ")
parser.add_argument('--q', default=3, type=int,
help="q value, refer to original paper: https://cs.stanford.edu/~jure/pubs/node2vec-kdd16.pdf ")
parser.add_argument('--workers', default=cpu_count(), type=int,
help='Number of parallel processes.')
parser.add_argument('--ngram', default=1, type=int,
help='N of n-grams, e.g.: set 2 for bigrams, 3 for trigrams, etc.')
args = parser.parse_args()
numeric_level = getattr(logging, args.log.upper(), None)
logging.basicConfig(format=LOGFORMAT)
logger.setLevel(numeric_level)
fastGraph_flow(args)
if __name__ == "__main":
sys.exit(main())
|
[
"logging.getLogger",
"pdb.pm",
"pandas.read_csv",
"io.open",
"sys.stderr.isatty",
"multiprocessing.cpu_count",
"sys.__excepthook__",
"argparse.ArgumentParser",
"fastGraph.ngram.build_ngram",
"random.Random",
"fastGraph.graph.Graph",
"traceback.print_exception",
"gensim.models.Word2Vec",
"gensim.models.word2vec.LineSentence",
"os.getpid",
"scipy.sparse.csr_matrix",
"logging.basicConfig",
"numpy.zeros",
"numpy.load"
] |
[((882, 919), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'LOGFORMAT'}), '(format=LOGFORMAT)\n', (901, 919), False, 'import logging\n'), ((930, 960), 'logging.getLogger', 'logging.getLogger', (['"""fastGraph"""'], {}), "('fastGraph')\n", (947, 960), False, 'import logging\n'), ((706, 717), 'os.getpid', 'os.getpid', ([], {}), '()\n', (715, 717), False, 'import os\n'), ((2590, 2597), 'fastGraph.graph.Graph', 'Graph', ([], {}), '()\n', (2595, 2597), False, 'from fastGraph.graph import Graph\n'), ((3895, 3919), 'gensim.models.word2vec.LineSentence', 'LineSentence', (['args.input'], {}), '(args.input)\n', (3907, 3919), False, 'from gensim.models.word2vec import LineSentence\n'), ((4127, 4253), 'gensim.models.Word2Vec', 'Word2Vec', (['walks'], {'size': 'args.embed_size', 'window': 'args.window_size', 'min_count': '(0)', 'sg': '(1)', 'hs': '(0)', 'negative': '(10)', 'workers': 'args.workers'}), '(walks, size=args.embed_size, window=args.window_size, min_count=0,\n sg=1, hs=0, negative=10, workers=args.workers)\n', (4135, 4253), False, 'from gensim.models import Word2Vec\n'), ((4318, 4424), 'argparse.ArgumentParser', 'ArgumentParser', (['"""fastGraph"""'], {'formatter_class': 'ArgumentDefaultsHelpFormatter', 'conflict_handler': '"""resolve"""'}), "('fastGraph', formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n", (4332, 4424), False, 'from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter\n'), ((6293, 6330), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'LOGFORMAT'}), '(format=LOGFORMAT)\n', (6312, 6330), False, 'import logging\n'), ((1095, 1131), 'sys.__excepthook__', 'sys.__excepthook__', (['type_', 'value', 'tb'], {}), '(type_, value, tb)\n', (1113, 1131), False, 'import sys\n'), ((1173, 1216), 'traceback.print_exception', 'traceback.print_exception', (['type_', 'value', 'tb'], {}), '(type_, value, tb)\n', (1198, 1216), False, 'import traceback\n'), ((1234, 1242), 'pdb.pm', 'pdb.pm', ([], {}), '()\n', (1240, 1242), False, 'import pdb\n'), ((1355, 1400), 'pandas.read_csv', 'pd.read_csv', (['args.input'], {'sep': '""","""', 'comment': '"""#"""'}), "(args.input, sep=',', comment='#')\n", (1366, 1400), True, 'import pandas as pd\n'), ((1720, 1738), 'scipy.sparse.csr_matrix', 'csr_matrix', (['matrix'], {}), '(matrix)\n', (1730, 1738), False, 'from scipy.sparse import csr_matrix, save_npz, load_npz, lil_matrix\n'), ((3756, 3777), 'io.open', 'open', (['walks_file', '"""w"""'], {}), "(walks_file, 'w')\n", (3760, 3777), False, 'from io import open\n'), ((4041, 4077), 'fastGraph.ngram.build_ngram', 'ngram.build_ngram', (['walks', 'args.ngram'], {}), '(walks, args.ngram)\n', (4058, 4077), False, 'from fastGraph import ngram\n'), ((1072, 1091), 'sys.stderr.isatty', 'sys.stderr.isatty', ([], {}), '()\n', (1089, 1091), False, 'import sys\n'), ((1529, 1574), 'numpy.zeros', 'np.zeros', (['(total_len, total_len)'], {'dtype': 'DTYPE'}), '((total_len, total_len), dtype=DTYPE)\n', (1537, 1574), True, 'import numpy as np\n'), ((1791, 1836), 'pandas.read_csv', 'pd.read_csv', (['args.input'], {'sep': '""","""', 'comment': '"""#"""'}), "(args.input, sep=',', comment='#')\n", (1802, 1836), True, 'import pandas as pd\n'), ((2159, 2177), 'scipy.sparse.csr_matrix', 'csr_matrix', (['matrix'], {}), '(matrix)\n', (2169, 2177), False, 'from scipy.sparse import csr_matrix, save_npz, load_npz, lil_matrix\n'), ((6009, 6020), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (6018, 6020), False, 'from multiprocessing import 
cpu_count\n'), ((751, 762), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (760, 762), False, 'from multiprocessing import cpu_count\n'), ((1959, 2004), 'numpy.zeros', 'np.zeros', (['(total_len, total_len)'], {'dtype': 'DTYPE'}), '((total_len, total_len), dtype=DTYPE)\n', (1967, 2004), True, 'import numpy as np\n'), ((2279, 2298), 'numpy.load', 'np.load', (['args.input'], {}), '(args.input)\n', (2286, 2298), True, 'import numpy as np\n'), ((2308, 2326), 'scipy.sparse.csr_matrix', 'csr_matrix', (['matrix'], {}), '(matrix)\n', (2318, 2326), False, 'from scipy.sparse import csr_matrix, save_npz, load_npz, lil_matrix\n'), ((3064, 3080), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (3077, 3080), False, 'import random\n'), ((3297, 3313), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (3310, 3313), False, 'import random\n'), ((3473, 3489), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (3486, 3489), False, 'import random\n')]
|
# coding=utf-8
import sys
import os
import csv
from datetime import datetime, timedelta
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import drange
from matplotlib.patches import Rectangle
import scenario_factory
# http://www.javascripter.net/faq/hextorgb.htm
PRIMA = (148/256, 164/256, 182/256)
PRIMB = (101/256, 129/256, 164/256)
PRIM = ( 31/256, 74/256, 125/256)
PRIMC = ( 41/256, 65/256, 94/256)
PRIMD = ( 10/256, 42/256, 81/256)
EC = (1, 1, 1, 0)
GRAY = (0.5, 0.5, 0.5)
WHITE = (1, 1, 1)
def load(f):
with np.load(f) as npz:
data = np.array([npz[k] for k in sorted(npz.keys())])
return data
def plot_aggregated(sc, bd, unctrl, ctrl, ctrl_sched, res=1):
t_day_start = sc.t_block_start - timedelta(hours=sc.t_block_start.hour,
minutes=sc.t_block_start.minute)
t = drange(t_day_start, sc.t_end, timedelta(minutes=res))
skip = (t_day_start - sc.t_start).total_seconds() / 60 / res
i_block_start = (sc.t_block_start - t_day_start).total_seconds() / 60 / res
i_block_end = (sc.t_block_end - t_day_start).total_seconds() / 60 / res
P_el_unctrl = unctrl[:,0,skip:].sum(0)
P_el_ctrl = ctrl[:,0,skip:].sum(0)
P_el_sched = ctrl_sched[:,skip:].sum(0)
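    # In unctrl/ctrl, axis 0 indexes the individual units, so the sums above
    # aggregate the electrical power of all units from the day start onwards.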
P_el_target = np.ma.array(P_el_sched)
block = np.array(sc.block)
if block.shape == (1,):
block = block.repeat(P_el_target[~P_el_target.mask].shape[0])
elif block.shape[0] == P_el_target[~P_el_target.mask].shape[0] / 15:
block = block.repeat(15)
P_el_target[~P_el_target.mask] = block
T_storage_ctrl = ctrl[:,2,skip:]
ft = np.array([t[0]] + list(np.repeat(t[1:-1], 2)) + [t[-1]])
P_el_ctrl_fill = np.repeat(P_el_ctrl[:-1], 2)
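    # ft doubles the interior time stamps and P_el_ctrl_fill the power values,
    # so the fill_between() call below draws a steps-post style area matching
    # the step plots.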
fig, ax = plt.subplots(2, sharex=True)
fig.subplots_adjust(left=0.105, right=0.998, hspace=0.3, top=0.975, bottom=0.2)
for a in ax:
plt.setp(list(a.spines.values()), color='k')
plt.setp([a.get_xticklines(), a.get_yticklines()], color='k')
ax[0].set_ylabel('P$_{\mathrm{el}}$ [kW]')
ymax = max(P_el_unctrl.max(), P_el_ctrl_fill.max(), P_el_sched.max(), 0) / 1000.0
ymin = min(P_el_unctrl.min(), P_el_ctrl_fill.min(), P_el_sched.min(), 0) / 1000.0
ax[0].set_ylim(ymin - abs(ymin * 0.1), ymax + abs(ymax * 0.1))
xspace = (t[-1] - t[-2])
# ax[0].set_xlim(t[0], t[-1] + xspace)
ax[0].set_xlim(t[0], t[len(t)/2])
# ax[0].axvline(t[i_block_start], ls='--', color='0.5')
# ax[0].axvline(t[i_block_end], ls='--', color='0.5')
ax[0].axvspan(t[i_block_start], t[i_block_end], fc=GRAY+(0.2,), ec=EC)
# ax[0].axvline(t[0], ls='-', color=GRAY, lw=0.5)
# ax[0].axvline(t[len(t)/2], ls='-', color=GRAY, lw=0.5)
l_unctrl, = ax[0].plot_date(t, P_el_unctrl / 1000.0, fmt=':', color='k', drawstyle='steps-post', lw=0.75)
l_unctrl.set_dashes([1.0, 1.0])
# add lw=0.0 due to bug in mpl (will show as hairline in pdf though...)
l_ctrl = ax[0].fill_between(ft, P_el_ctrl_fill / 1000.0, facecolors=GRAY+(0.75,), edgecolors=EC, lw=0.0)
# Create proxy artist as l_ctrl legend handle
l_ctrl_proxy = Rectangle((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.5)
# l_sched, = ax[0].plot_date(t, P_el_sched / 1000.0, fmt='-', color=GRAY, drawstyle='steps-post', lw=0.75)
l_target, = ax[0].plot_date(t, P_el_target / 1000.0, fmt='-', color='k', drawstyle='steps-post', lw=0.75)
# colors = [
# '#348ABD', # blue
# '#7A68A6', # purple
# '#A60628', # red
# '#467821', # green
# '#CF4457', # pink
# '#188487', # turqoise
# '#E24A33', # orange
# '#1F4A7D', # primary
# '#BF9D23', # secondary
# '#BF5B23', # complementary
# '#94A4B6', # primaryA
# '#6581A4', # primaryB
# '#29415E', # primaryC
# '#0A2A51', # primaryD
# ][:len(unctrl)]
# for (c, P_el_unctrl, P_el_ctrl, P_el_sched) in zip(colors, unctrl[:,0,:], ctrl[:,0,:], ctrl_sched):
# ax[0].plot_date(t, P_el_unctrl / 1000.0, fmt='-', color=c, lw=1, label='unctrl')
# ax[0].plot_date(t, P_el_ctrl / 1000.0, fmt=':', color=c, lw=1, label='ctrl')
# ax[0].plot_date(t, P_el_sched / 1000.0, fmt='--x', color=c, lw=1, label='sched')
ymax = T_storage_ctrl.max() - 273
ymin = T_storage_ctrl.min() - 273
ax[1].set_ylim(ymin - abs(ymin * 0.01), ymax + abs(ymax * 0.01))
ax[1].set_ylabel('T$_{\mathrm{storage}}\;[^{\circ}\mathrm{C}]$', labelpad=9)
ax[1].axvspan(t[i_block_start], t[i_block_end], fc=GRAY+(0.1,), ec=EC)
# ax[1].axvline(t[0], ls='-', color=GRAY, lw=0.5)
# ax[1].axvline(t[len(t)/2], ls='-', color=GRAY, lw=0.5)
for v in T_storage_ctrl:
ax[1].plot_date(t, v - 273.0, fmt='-', color=GRAY, alpha=0.25, lw=0.5)
# HP and CHP have different temperature ranges (HP: 40-50, CHP: 50-70)
crit = (T_storage_ctrl - 273 >= 50).all(axis=1)
T_CHP = T_storage_ctrl[crit]
T_HP = T_storage_ctrl[~crit]
l_T_med_CHP, = ax[1].plot_date(t, T_CHP.mean(0) - 273.0, fmt='-', color=GRAY, alpha=0.75, lw=1.5)
l_T_med_HP, = ax[1].plot_date(t, T_HP.mean(0) - 273.0, fmt='-', color=GRAY, alpha=0.75, lw=1.5)
ax[0].xaxis.get_major_formatter().scaled[1/24.] = '%H:%M'
ax[-1].set_xlabel('Time of day')
fig.autofmt_xdate()
ax[1].legend([l_target, l_unctrl, l_ctrl_proxy, l_T_med_CHP],
['target', 'original', 'scheduled', 'storage temperatures (mean)'],
bbox_to_anchor=(0., 1.03, 1., .103), loc=8, ncol=4,
handletextpad=0.2, mode='expand', handlelength=3,
borderaxespad=0.25, fancybox=False, fontsize='x-small')
# import pdb
# pdb.set_trace()
return fig
def plot_aggregated_SLP(sc, bd, unctrl, ctrl, ctrl_sched, res=1):
assert hasattr(sc, 'slp_file')
t_day_start = sc.t_block_start - timedelta(hours=sc.t_block_start.hour,
minutes=sc.t_block_start.minute)
    skip = int((t_day_start - sc.t_start).total_seconds() / 60 / res)
    i_block_start = int((sc.t_block_start - t_day_start).total_seconds() / 60 / res)
    i_block_end = int((sc.t_block_end - t_day_start).total_seconds() / 60 / res)
t = drange(sc.t_block_start, sc.t_block_end, timedelta(minutes=res))
P_el_unctrl = unctrl[:,0,skip + i_block_start:skip + i_block_end].sum(0)
P_el_ctrl = ctrl[:,0,skip + i_block_start:skip + i_block_end].sum(0)
# ctrl correction
P_el_ctrl = np.roll(P_el_ctrl, -1, axis=0)
P_el_sched = ctrl_sched[:,skip + i_block_start:skip + i_block_end].sum(0)
T_storage_ctrl = ctrl[:,2,skip + i_block_start:skip + i_block_end]
slp = _read_slp(sc, bd)[skip + i_block_start:skip + i_block_end]
# diff_ctrl = (P_el_ctrl - P_el_unctrl) / 1000.0
diff_ctrl = (P_el_sched - P_el_unctrl) / 1000.0
diff_ctrl_fill = np.repeat((slp + diff_ctrl)[:-1], 2)
slp_fill = np.repeat(slp[:-1], 2)
ft = np.array([t[0]] + list(np.repeat(t[1:-1], 2)) + [t[-1]])
# P_el_ctrl_fill = np.repeat(P_el_ctrl[:-1], 2)
P_el_ctrl_fill = np.repeat(P_el_sched[:-1], 2)
fig, ax = plt.subplots(2, sharex=True)
fig.subplots_adjust(left=0.11, right=0.998, hspace=0.2, top=0.95)
for a in ax:
plt.setp(list(a.spines.values()), color='k')
plt.setp([a.get_xticklines(), a.get_yticklines()], color='k')
ax[0].set_ylabel('P$_{\mathrm{el}}$ [kW]')
ymax = max(P_el_unctrl.max(), P_el_ctrl_fill.max(), P_el_sched.max(), 0) / 1000.0
ymin = min(P_el_unctrl.min(), P_el_ctrl_fill.min(), P_el_sched.min(), 0) / 1000.0
ax[0].set_ylim(ymin - abs(ymin * 0.1), ymax + abs(ymax * 0.1))
xspace = (t[-1] - t[-2])
ax[0].set_xlim(t[0], t[-1] + xspace)
l_unctrl, = ax[0].plot_date(t, P_el_unctrl / 1000.0, fmt=':', color='k', drawstyle='steps-post', lw=0.75, label='original')
l_unctrl.set_dashes([1.0, 1.0])
# add lw=0.0 due to bug in mpl (will show as hairline in pdf though...)
l_ctrl = ax[0].fill_between(ft, P_el_ctrl_fill / 1000.0, facecolors=GRAY+(0.75,), edgecolors=EC, lw=0.0)
# Create proxy artist as l_ctrl legend handle
l_ctrl_proxy = Rectangle((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.5)
# l_sched, = ax[0].plot_date(t, P_el_sched / 1000.0, fmt='-', color=PRIM, drawstyle='steps-post', lw=0.75, label='gesteuert')
# colors = [
# '#348ABD', # blue
# '#7A68A6', # purple
# '#A60628', # red
# '#467821', # green
# '#CF4457', # pink
# '#188487', # turqoise
# '#E24A33', # orange
# '#1F4A7D', # primary
# '#BF9D23', # secondary
# '#BF5B23', # complementary
# '#94A4B6', # primaryA
# '#6581A4', # primaryB
# '#29415E', # primaryC
# '#0A2A51', # primaryD
# ][:len(unctrl)]
# for (c, P_el_unctrl, P_el_ctrl, P_el_sched) in zip(colors, unctrl[:,0,:], ctrl[:,0,:], ctrl_sched):
# ax[0].plot_date(t, P_el_unctrl / 1000.0, fmt='-', color=c, lw=1, label='unctrl')
# ax[0].plot_date(t, P_el_ctrl / 1000.0, fmt=':', color=c, lw=1, label='ctrl')
# ax[0].plot_date(t, P_el_sched / 1000.0, fmt='--x', color=c, lw=1, label='sched')
ax[1].set_ylabel('P$_{el}$ [kW]')
ax[1].set_xlabel('Time of day')
ymin = min(slp.min(), (slp + diff_ctrl).min())
ax[1].set_ylim(ymin + (ymin * 0.1), 0)
l_unctrl_slp, = ax[1].plot_date(t, slp, fmt=':', color='k', drawstyle='steps-post', lw=0.75, label='original')
l_unctrl_slp.set_dashes([1.0, 1.0])
ax[1].fill_between(ft, diff_ctrl_fill, slp_fill, where=diff_ctrl_fill>=slp_fill, facecolors=GRAY+(0.3,), edgecolors=EC, lw=0.0)
l_diff_slp = ax[1].fill_between(ft, diff_ctrl_fill, slp_fill, where=diff_ctrl_fill<slp_fill, facecolors=GRAY+(0.3,), edgecolors=EC, lw=0.0)
# Create proxy artist as l_diff_slp legend handle
l_diff_slp_proxy = Rectangle((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.3)
l_ctrl_slp, = ax[1].plot_date(t, slp + diff_ctrl, fmt='-', color='k', drawstyle='steps-post', lw=0.75, label='scheduled')
# ax[0].legend([l_sched, l_unctrl, l_T_med],
# ['Verbundfahrplan', 'ungesteuert', 'Speichertemperaturen (Median)'],
# bbox_to_anchor=(0., 1.05, 1., .105), loc=8, ncol=4,
# handletextpad=0.2, mode='expand', handlelength=3,
# borderaxespad=0.25, fancybox=False, fontsize='x-small')
ax[0].text(0.5, 1.05, 'Profile of the units under control', ha='center', va='center',
fontsize='small', transform=ax[0].transAxes)
ax[1].text(0.5, 1.05, 'Profile of the medium-voltage node', ha='center', va='center',
fontsize='small', transform=ax[1].transAxes)
ax[0].legend([l_unctrl, l_ctrl_proxy], ['original', 'scheduled'], loc='upper right', fancybox=False, fontsize='x-small')
ax[1].legend([l_unctrl_slp, l_ctrl_slp, l_diff_slp_proxy], ['original', 'scheduled', 'difference'], loc='upper right', fancybox=False, fontsize='x-small')
fig.autofmt_xdate()
ax[0].xaxis.get_major_formatter().scaled[1/24.] = '%H:%M'
return fig
def norm(minimum, maximum, value):
# return value
if maximum == minimum:
return maximum
return (value - minimum) / (maximum - minimum)
def _read_slp(sc, bd):
# Read csv data
slp = []
found = False
with open(sc.slp_file, 'r', encoding='latin-1') as f:
reader = csv.reader(f, delimiter=';')
for row in reader:
if not row:
continue
if not found and row[0] == 'Datum':
found = True
elif found:
date = datetime.strptime('_'.join(row[:2]), '%d.%m.%Y_%H:%M:%S')
if date < sc.t_start:
continue
elif date >= sc.t_end:
break
# This is a demand, so negate the values
slp.append(-1.0 * float(row[2].replace(',', '.')))
slp = np.array(slp)
# Scale values
# if hasattr(sc, 'run_unctrl_datafile'):
# slp_norm = norm(slp.min(), slp.max(), slp)
# unctrl = load(p(bd, sc.run_unctrl_datafile)).sum(0) / 1000
# slp = slp_norm * (unctrl.max() - unctrl.min()) + unctrl.min()
MS_day_mean = 13600 # kWh, derived from SmartNord Scenario document
MS_15_mean = MS_day_mean / 96
slp = slp / np.abs(slp.mean()) * MS_15_mean
return slp
# return np.array(np.roll(slp, 224, axis=0))
def p(basedir, fn):
return os.path.join(basedir, fn)
def resample(d, resolution):
# resample the innermost axis to 'resolution'
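    # e.g. an array of shape (n, 1440) with resolution=15 becomes (n, 96),
    # each entry being the mean of 15 consecutive samples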
shape = tuple(d.shape[:-1]) + (int(d.shape[-1]/resolution), resolution)
return d.reshape(shape).sum(-1)/resolution
def run(sc_file):
print()
bd = os.path.dirname(sc_file)
sc = scenario_factory.Scenario()
sc.load_JSON(sc_file)
print(sc.title)
# # plot_samples(sc, bd)
# plot_samples_carpet(sc, bd)
# plt.show()
# sys.exit(0)
unctrl = load(p(bd, sc.run_unctrl_datafile))
block = load(p(bd, sc.run_ctrl_datafile))
post = load(p(bd, sc.run_post_datafile))
sched = load(p(bd, sc.sched_file))
ctrl = np.zeros(unctrl.shape)
idx = 0
for l in (block, post):
ctrl[:,:,idx:idx + l.shape[-1]] = l
idx += l.shape[-1]
if sched.shape[-1] == unctrl.shape[-1] / 15:
print('Extending schedules shape by factor 15')
sched = sched.repeat(15, axis=1)
t_start, b_start, b_end = sc.t_start, sc.t_block_start, sc.t_block_end
div = 1
if (b_end - t_start).total_seconds() / 60 == sched.shape[-1] * 15:
div = 15
elif (b_end - t_start).total_seconds() / 60 == sched.shape[-1] * 60:
div = 60
    b_s = int((b_start - sc.t_start).total_seconds() / 60 / div)
    b_e = int((b_end - sc.t_start).total_seconds() / 60 / div)
ctrl_sched = np.zeros((unctrl.shape[0], unctrl.shape[-1]))
ctrl_sched = np.ma.array(ctrl_sched)
ctrl_sched[:,:b_s] = np.ma.masked
ctrl_sched[:,b_s:b_e] = sched[:,b_s:b_e]
ctrl_sched[:,b_e:] = np.ma.masked
# plot_each_device(sc, unctrl, ctrl, sched)
minutes = (sc.t_end - sc.t_start).total_seconds() / 60
assert unctrl.shape[-1] == ctrl.shape[-1] == ctrl_sched.shape[-1]
shape = unctrl.shape[-1]
if hasattr(sc, 'slp_file'):
if minutes == shape:
print('data is 1-minute resolution, will be resampled by 15')
res = 15
elif minutes == shape * 15:
print('data is 15-minute resolution, all fine')
res = 1
else:
raise RuntimeError('unsupported data resolution: %.2f' % (minutes / shape))
unctrl = resample(unctrl, res)
ctrl = resample(ctrl, res)
ctrl_sched = resample(ctrl_sched, res)
fig = plot_aggregated_SLP(sc, bd, unctrl, ctrl, ctrl_sched, res=15)
else:
if minutes == shape:
print('data is 1-minute resolution, will be resampled by 60')
res = 60
elif minutes == shape * 15:
print('data is 15-minute resolution, will be resampled by 4')
res = 4
elif minutes == shape * 60:
print('data is 60-minute resolution, all fine')
res = 1
else:
raise RuntimeError('unsupported data resolution: %.2f' % (minutes / shape))
unctrl = resample(unctrl, res)
ctrl = resample(ctrl, res)
ctrl_sched = resample(ctrl_sched, res)
fig = plot_aggregated(sc, bd, unctrl, ctrl, ctrl_sched, res=60)
fig.savefig(p(bd, sc.title) + '.pdf')
fig.savefig(p(bd, sc.title) + '.png', dpi=300)
plt.show()
if __name__ == '__main__':
for n in sys.argv[1:]:
if os.path.isdir(n):
run(p(n, '0.json'))
else:
run(n)
|
[
"matplotlib.patches.Rectangle",
"numpy.repeat",
"numpy.roll",
"numpy.ma.array",
"os.path.join",
"numpy.array",
"os.path.dirname",
"numpy.zeros",
"os.path.isdir",
"datetime.timedelta",
"numpy.load",
"csv.reader",
"matplotlib.pyplot.subplots",
"scenario_factory.Scenario",
"matplotlib.pyplot.show"
] |
[((1295, 1318), 'numpy.ma.array', 'np.ma.array', (['P_el_sched'], {}), '(P_el_sched)\n', (1306, 1318), True, 'import numpy as np\n'), ((1331, 1349), 'numpy.array', 'np.array', (['sc.block'], {}), '(sc.block)\n', (1339, 1349), True, 'import numpy as np\n'), ((1723, 1751), 'numpy.repeat', 'np.repeat', (['P_el_ctrl[:-1]', '(2)'], {}), '(P_el_ctrl[:-1], 2)\n', (1732, 1751), True, 'import numpy as np\n'), ((1767, 1795), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)'}), '(2, sharex=True)\n', (1779, 1795), True, 'import matplotlib.pyplot as plt\n'), ((3124, 3185), 'matplotlib.patches.Rectangle', 'Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': 'GRAY', 'ec': 'WHITE', 'lw': '(0.0)', 'alpha': '(0.5)'}), '((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.5)\n', (3133, 3185), False, 'from matplotlib.patches import Rectangle\n'), ((6634, 6664), 'numpy.roll', 'np.roll', (['P_el_ctrl', '(-1)'], {'axis': '(0)'}), '(P_el_ctrl, -1, axis=0)\n', (6641, 6664), True, 'import numpy as np\n'), ((7010, 7046), 'numpy.repeat', 'np.repeat', (['(slp + diff_ctrl)[:-1]', '(2)'], {}), '((slp + diff_ctrl)[:-1], 2)\n', (7019, 7046), True, 'import numpy as np\n'), ((7062, 7084), 'numpy.repeat', 'np.repeat', (['slp[:-1]', '(2)'], {}), '(slp[:-1], 2)\n', (7071, 7084), True, 'import numpy as np\n'), ((7225, 7254), 'numpy.repeat', 'np.repeat', (['P_el_sched[:-1]', '(2)'], {}), '(P_el_sched[:-1], 2)\n', (7234, 7254), True, 'import numpy as np\n'), ((7270, 7298), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)'}), '(2, sharex=True)\n', (7282, 7298), True, 'import matplotlib.pyplot as plt\n'), ((8285, 8346), 'matplotlib.patches.Rectangle', 'Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': 'GRAY', 'ec': 'WHITE', 'lw': '(0.0)', 'alpha': '(0.5)'}), '((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.5)\n', (8294, 8346), False, 'from matplotlib.patches import Rectangle\n'), ((10185, 10246), 'matplotlib.patches.Rectangle', 'Rectangle', (['(0, 0)', '(1)', '(1)'], {'fc': 'GRAY', 'ec': 'WHITE', 'lw': '(0.0)', 'alpha': '(0.3)'}), '((0, 0), 1, 1, fc=GRAY, ec=WHITE, lw=0.0, alpha=0.3)\n', (10194, 10246), False, 'from matplotlib.patches import Rectangle\n'), ((12270, 12283), 'numpy.array', 'np.array', (['slp'], {}), '(slp)\n', (12278, 12283), True, 'import numpy as np\n'), ((12793, 12818), 'os.path.join', 'os.path.join', (['basedir', 'fn'], {}), '(basedir, fn)\n', (12805, 12818), False, 'import os\n'), ((13064, 13088), 'os.path.dirname', 'os.path.dirname', (['sc_file'], {}), '(sc_file)\n', (13079, 13088), False, 'import os\n'), ((13098, 13125), 'scenario_factory.Scenario', 'scenario_factory.Scenario', ([], {}), '()\n', (13123, 13125), False, 'import scenario_factory\n'), ((13463, 13485), 'numpy.zeros', 'np.zeros', (['unctrl.shape'], {}), '(unctrl.shape)\n', (13471, 13485), True, 'import numpy as np\n'), ((14144, 14189), 'numpy.zeros', 'np.zeros', (['(unctrl.shape[0], unctrl.shape[-1])'], {}), '((unctrl.shape[0], unctrl.shape[-1]))\n', (14152, 14189), True, 'import numpy as np\n'), ((14207, 14230), 'numpy.ma.array', 'np.ma.array', (['ctrl_sched'], {}), '(ctrl_sched)\n', (14218, 14230), True, 'import numpy as np\n'), ((15903, 15913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15911, 15913), True, 'import matplotlib.pyplot as plt\n'), ((555, 565), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (562, 565), True, 'import numpy as np\n'), ((753, 824), 'datetime.timedelta', 'timedelta', ([], {'hours': 'sc.t_block_start.hour', 'minutes': 'sc.t_block_start.minute'}), 
'(hours=sc.t_block_start.hour, minutes=sc.t_block_start.minute)\n', (762, 824), False, 'from datetime import datetime, timedelta\n'), ((904, 926), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'res'}), '(minutes=res)\n', (913, 926), False, 'from datetime import datetime, timedelta\n'), ((6038, 6109), 'datetime.timedelta', 'timedelta', ([], {'hours': 'sc.t_block_start.hour', 'minutes': 'sc.t_block_start.minute'}), '(hours=sc.t_block_start.hour, minutes=sc.t_block_start.minute)\n', (6047, 6109), False, 'from datetime import datetime, timedelta\n'), ((6421, 6443), 'datetime.timedelta', 'timedelta', ([], {'minutes': 'res'}), '(minutes=res)\n', (6430, 6443), False, 'from datetime import datetime, timedelta\n'), ((11717, 11745), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""";"""'}), "(f, delimiter=';')\n", (11727, 11745), False, 'import csv\n'), ((15981, 15997), 'os.path.isdir', 'os.path.isdir', (['n'], {}), '(n)\n', (15994, 15997), False, 'import os\n'), ((1668, 1689), 'numpy.repeat', 'np.repeat', (['t[1:-1]', '(2)'], {}), '(t[1:-1], 2)\n', (1677, 1689), True, 'import numpy as np\n'), ((7118, 7139), 'numpy.repeat', 'np.repeat', (['t[1:-1]', '(2)'], {}), '(t[1:-1], 2)\n', (7127, 7139), True, 'import numpy as np\n')]
|
"""
This module tests nipy's uses of aliased sympy expressions.
That is, sympy.Function's whose value is an arbitrary callable.
In these tests, the callable's are scipy.interpolate.interp1d instances
representing approximations to Brownian Motions.
"""
import numpy as np
import scipy.interpolate
import pylab
import sympy
from nipy.modalities.fmri import formula, aliased
def gen_BrownianMotion():
X = np.arange(0,5,0.01)
y = np.random.standard_normal((500,))
Y = np.cumsum(y)*np.sqrt(0.01)
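    # cumulative sum of N(0, 1) draws scaled by sqrt(dt), with dt = 0.01,
    # approximates a standard Brownian motion sampled on the grid X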
B = scipy.interpolate.interp1d(X, Y, bounds_error=0)
return B
def test_1d():
B = gen_BrownianMotion()
Bs = formula.aliased_function("B", B)
t = sympy.DeferredVector('t')
n={}; aliased._add_aliases_to_namespace(n, Bs)
expr = 3*sympy.exp(Bs(t)) + 4
ee = sympy.lambdify(t, expr, (n, 'numpy'))
np.testing.assert_almost_equal(ee(B.x), 3*np.exp(B.y)+4)
def test_2d():
B1, B2 = [gen_BrownianMotion() for _ in range(2)]
B1s = formula.aliased_function("B1", B1)
B2s = formula.aliased_function("B2", B2)
t = sympy.DeferredVector('t')
s = sympy.DeferredVector('s')
e = B1s(s)+B2s(t)
n={}; aliased._add_aliases_to_namespace(n, e)
ee = sympy.lambdify((s,t), e, (n, 'numpy'))
np.testing.assert_almost_equal(ee(B1.x, B2.x), B1.y + B2.y)
|
[
"numpy.random.standard_normal",
"numpy.sqrt",
"numpy.arange",
"nipy.modalities.fmri.aliased._add_aliases_to_namespace",
"sympy.lambdify",
"numpy.exp",
"nipy.modalities.fmri.formula.aliased_function",
"numpy.cumsum",
"sympy.DeferredVector"
] |
[((411, 432), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(0.01)'], {}), '(0, 5, 0.01)\n', (420, 432), True, 'import numpy as np\n'), ((439, 472), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(500,)'], {}), '((500,))\n', (464, 472), True, 'import numpy as np\n'), ((633, 665), 'nipy.modalities.fmri.formula.aliased_function', 'formula.aliased_function', (['"""B"""', 'B'], {}), "('B', B)\n", (657, 665), False, 'from nipy.modalities.fmri import formula, aliased\n'), ((674, 699), 'sympy.DeferredVector', 'sympy.DeferredVector', (['"""t"""'], {}), "('t')\n", (694, 699), False, 'import sympy\n'), ((711, 751), 'nipy.modalities.fmri.aliased._add_aliases_to_namespace', 'aliased._add_aliases_to_namespace', (['n', 'Bs'], {}), '(n, Bs)\n', (744, 751), False, 'from nipy.modalities.fmri import formula, aliased\n'), ((796, 833), 'sympy.lambdify', 'sympy.lambdify', (['t', 'expr', "(n, 'numpy')"], {}), "(t, expr, (n, 'numpy'))\n", (810, 833), False, 'import sympy\n'), ((977, 1011), 'nipy.modalities.fmri.formula.aliased_function', 'formula.aliased_function', (['"""B1"""', 'B1'], {}), "('B1', B1)\n", (1001, 1011), False, 'from nipy.modalities.fmri import formula, aliased\n'), ((1022, 1056), 'nipy.modalities.fmri.formula.aliased_function', 'formula.aliased_function', (['"""B2"""', 'B2'], {}), "('B2', B2)\n", (1046, 1056), False, 'from nipy.modalities.fmri import formula, aliased\n'), ((1066, 1091), 'sympy.DeferredVector', 'sympy.DeferredVector', (['"""t"""'], {}), "('t')\n", (1086, 1091), False, 'import sympy\n'), ((1100, 1125), 'sympy.DeferredVector', 'sympy.DeferredVector', (['"""s"""'], {}), "('s')\n", (1120, 1125), False, 'import sympy\n'), ((1159, 1198), 'nipy.modalities.fmri.aliased._add_aliases_to_namespace', 'aliased._add_aliases_to_namespace', (['n', 'e'], {}), '(n, e)\n', (1192, 1198), False, 'from nipy.modalities.fmri import formula, aliased\n'), ((1209, 1248), 'sympy.lambdify', 'sympy.lambdify', (['(s, t)', 'e', "(n, 'numpy')"], {}), "((s, t), e, (n, 'numpy'))\n", (1223, 1248), False, 'import sympy\n'), ((481, 493), 'numpy.cumsum', 'np.cumsum', (['y'], {}), '(y)\n', (490, 493), True, 'import numpy as np\n'), ((494, 507), 'numpy.sqrt', 'np.sqrt', (['(0.01)'], {}), '(0.01)\n', (501, 507), True, 'import numpy as np\n'), ((881, 892), 'numpy.exp', 'np.exp', (['B.y'], {}), '(B.y)\n', (887, 892), True, 'import numpy as np\n')]
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*(testing.product({
'batchsize': [1, 5],
'size': [10, 20],
'dtype': [numpy.float32],
'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestLayerNormalization(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def generate_inputs(self):
shape = self.batchsize, self.size
size = numpy.prod(shape) // shape[0]
x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
gamma = numpy.random.uniform(-1, 1, size).astype(self.dtype)
beta = numpy.random.uniform(-1, 1, size).astype(self.dtype)
return x, gamma, beta
def forward_expected(self, inputs):
x, gamma, beta = inputs
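        # reference computation: y = gamma * (x - mean) / sqrt(var + eps) + beta,
        # with mean and var taken over the feature axis of each sample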
mean = numpy.mean(x, axis=1, keepdims=True)
var = numpy.mean(numpy.square(x - mean), axis=1, keepdims=True)
std = numpy.sqrt(var + self.eps)
y_expected = (
numpy.expand_dims(gamma, axis=0) * (x - mean) / std
+ numpy.expand_dims(beta, axis=0))
return y_expected,
def forward(self, inputs, device):
x, gamma, beta = inputs
y = functions.layer_normalization(x, gamma, beta, eps=self.eps)
return y,
testing.run_module(__name__, __file__)
|
[
"numpy.mean",
"numpy.prod",
"numpy.sqrt",
"chainer.functions.layer_normalization",
"chainer.testing.run_module",
"numpy.square",
"chainer.testing.product",
"numpy.expand_dims",
"numpy.random.uniform"
] |
[((2133, 2171), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (2151, 2171), False, 'from chainer import testing\n'), ((1658, 1694), 'numpy.mean', 'numpy.mean', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (1668, 1694), False, 'import numpy\n'), ((1781, 1807), 'numpy.sqrt', 'numpy.sqrt', (['(var + self.eps)'], {}), '(var + self.eps)\n', (1791, 1807), False, 'import numpy\n'), ((2053, 2112), 'chainer.functions.layer_normalization', 'functions.layer_normalization', (['x', 'gamma', 'beta'], {'eps': 'self.eps'}), '(x, gamma, beta, eps=self.eps)\n', (2082, 2112), False, 'from chainer import functions\n'), ((98, 206), 'chainer.testing.product', 'testing.product', (["{'batchsize': [1, 5], 'size': [10, 20], 'dtype': [numpy.float32], 'eps': [\n 1e-05, 0.1]}"], {}), "({'batchsize': [1, 5], 'size': [10, 20], 'dtype': [numpy.\n float32], 'eps': [1e-05, 0.1]})\n", (113, 206), False, 'from chainer import testing\n'), ((1307, 1324), 'numpy.prod', 'numpy.prod', (['shape'], {}), '(shape)\n', (1317, 1324), False, 'import numpy\n'), ((1720, 1742), 'numpy.square', 'numpy.square', (['(x - mean)'], {}), '(x - mean)\n', (1732, 1742), False, 'import numpy\n'), ((1909, 1940), 'numpy.expand_dims', 'numpy.expand_dims', (['beta'], {'axis': '(0)'}), '(beta, axis=0)\n', (1926, 1940), False, 'import numpy\n'), ((326, 424), 'chainer.testing.product', 'testing.product', (["{'use_cuda': [True], 'use_cudnn': ['never', 'always'], 'cuda_device': [0, 1]}"], {}), "({'use_cuda': [True], 'use_cudnn': ['never', 'always'],\n 'cuda_device': [0, 1]})\n", (341, 424), False, 'from chainer import testing\n'), ((1349, 1383), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'shape'], {}), '(-1, 1, shape)\n', (1369, 1383), False, 'import numpy\n'), ((1419, 1452), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'size'], {}), '(-1, 1, size)\n', (1439, 1452), False, 'import numpy\n'), ((1487, 1520), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'size'], {}), '(-1, 1, size)\n', (1507, 1520), False, 'import numpy\n'), ((1843, 1875), 'numpy.expand_dims', 'numpy.expand_dims', (['gamma'], {'axis': '(0)'}), '(gamma, axis=0)\n', (1860, 1875), False, 'import numpy\n')]
|
import numpy as np
import pandas as pd
array = [1,3,4,7,8,10,15]
np_array = np.array(array)
print("Arranjo NumPy")
print(np_array)
print("Convertendo para serie Pandas")
ds_array = pd.Series(np_array)
print("Serie Pandas")
print(ds_array)
|
[
"pandas.Series",
"numpy.array"
] |
[((78, 93), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (86, 93), True, 'import numpy as np\n'), ((186, 205), 'pandas.Series', 'pd.Series', (['np_array'], {}), '(np_array)\n', (195, 205), True, 'import pandas as pd\n')]
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import numpy as np
import pytest
import cunumeric as cn
from legate.core import LEGATE_MAX_DIM
@pytest.mark.parametrize("ndim", range(0, LEGATE_MAX_DIM))
def test_indices(ndim):
dimensions = tuple(random.randint(2, 5) for i in range(ndim))
np_res = np.indices(dimensions)
cn_res = cn.indices(dimensions)
assert np.array_equal(np_res, cn_res)
np_res = np.indices(dimensions, dtype=float)
cn_res = cn.indices(dimensions, dtype=float)
assert np.array_equal(np_res, cn_res)
np_res = np.indices(dimensions, sparse=True)
cn_res = cn.indices(dimensions, sparse=True)
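    # with sparse=True, indices() returns one grid vector per dimension
    # instead of a single dense array, so the results are compared element-wise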
for i in range(len(np_res)):
assert np.array_equal(np_res[i], cn_res[i])
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
[
"cunumeric.indices",
"numpy.indices",
"pytest.main",
"numpy.array_equal",
"random.randint"
] |
[((861, 883), 'numpy.indices', 'np.indices', (['dimensions'], {}), '(dimensions)\n', (871, 883), True, 'import numpy as np\n'), ((897, 919), 'cunumeric.indices', 'cn.indices', (['dimensions'], {}), '(dimensions)\n', (907, 919), True, 'import cunumeric as cn\n'), ((931, 961), 'numpy.array_equal', 'np.array_equal', (['np_res', 'cn_res'], {}), '(np_res, cn_res)\n', (945, 961), True, 'import numpy as np\n'), ((976, 1011), 'numpy.indices', 'np.indices', (['dimensions'], {'dtype': 'float'}), '(dimensions, dtype=float)\n', (986, 1011), True, 'import numpy as np\n'), ((1025, 1060), 'cunumeric.indices', 'cn.indices', (['dimensions'], {'dtype': 'float'}), '(dimensions, dtype=float)\n', (1035, 1060), True, 'import cunumeric as cn\n'), ((1072, 1102), 'numpy.array_equal', 'np.array_equal', (['np_res', 'cn_res'], {}), '(np_res, cn_res)\n', (1086, 1102), True, 'import numpy as np\n'), ((1117, 1152), 'numpy.indices', 'np.indices', (['dimensions'], {'sparse': '(True)'}), '(dimensions, sparse=True)\n', (1127, 1152), True, 'import numpy as np\n'), ((1166, 1201), 'cunumeric.indices', 'cn.indices', (['dimensions'], {'sparse': '(True)'}), '(dimensions, sparse=True)\n', (1176, 1201), True, 'import cunumeric as cn\n'), ((1250, 1286), 'numpy.array_equal', 'np.array_equal', (['np_res[i]', 'cn_res[i]'], {}), '(np_res[i], cn_res[i])\n', (1264, 1286), True, 'import numpy as np\n'), ((1345, 1366), 'pytest.main', 'pytest.main', (['sys.argv'], {}), '(sys.argv)\n', (1356, 1366), False, 'import pytest\n'), ((804, 824), 'random.randint', 'random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (818, 824), False, 'import random\n')]
|
import tensorflow as tf
import numpy as np
import os
import imageio
from utils import get_shardsize, get_zeros_array
def resize(image, target_shape):
imdtype = image.dtype
with tf.device('/CPU:0'):
image = tf.image.resize(image, target_shape[:2]).numpy()
assert image.shape == target_shape
return image.astype(imdtype)
def fit_image(image, target_shape, fit_method):
assert isinstance(image, np.ndarray), "Image must be numpy array"
assert len(image.shape) == 3 and image.shape[-1] == 3, "Original Image shape must be of shape (H, W, 3)."
assert len(target_shape) == 3 and target_shape[-1] == 3, "Desired Image shape must be of shape (H, W, 3)."
assert fit_method in ['resize', 'center_crop', 'random_crop'], "Crop method must be one of 'resize', 'center_crop' or 'random_crop' "
(h, w, _), (htar, wtar, _) = image.shape, target_shape
if image.shape == target_shape:
return image
if h < htar or w < wtar:
if fit_method != 'resize':
print("Your selected fit method is {} but your desired image shape is larger than the given image's shape. Using resize instead - note that this may change the image aspect ratio.".format(fit_method), end="\r")
return resize(image, target_shape)
if fit_method == 'resize':
return resize(image, target_shape)
elif fit_method == 'center_crop':
trim_h = int((h - htar)/2)
trim_w = int((w - wtar)/2)
image = image[trim_h:h-trim_h, trim_w:w-trim_w]
if image.shape[0] != htar:
image = image[:-1]
if image.shape[1] != wtar:
image = image[:, :-1]
assert image.shape == target_shape, image.shape
return image
elif fit_method == 'random_crop':
imdtype = image.dtype
with tf.device('/CPU:0'):
image = tf.image.random_crop(tf.constant(image), target_shape).numpy()
assert image.shape == target_shape
return image.astype(imdtype)
def images_to_train_dataset(writedir, datadir, target_shape, fit_method='resize'):
'''
writedir: specifies the folder where numpy arrays are created
datadir: specifies the folder where the jpg/png files in the dataset are located
target_shape: the desired shape of the images
fit_method: how to adjust images such that they are of the same shape as target_shape. must be 'resize', 'center_crop' or 'random_crop'
remove_datadir: whether or not to delete the original dataset
returns: the number of training examples.
'''
    if os.path.isfile(datadir):
        raise RuntimeError("data directory should not be a file, it should be a folder. You may have to unzip your files to a new folder.")
    elif len(os.listdir(datadir)) == 0:
        raise RuntimeError("No training images were found. Data directory should not be empty.")
if os.path.isfile(writedir):
raise RuntimeError("The directory you want to write to is an existing file.")
elif writedir == datadir:
raise RuntimeError("The numpy arrays should be written to a different directory than the original.")
elif os.path.isdir(writedir):
if len(os.listdir(writedir)) != 0:
print("Files already exist in this directory. Will use these for training.")
return len(os.listdir(writedir))
else:
os.mkdir(writedir)
shard_size = get_shardsize(target_shape)
numpy_dataset = get_zeros_array(target_shape)
    tmp_numpy = get_zeros_array(target_shape) # appends to numpy_dataset in groups of 64 images; batching the concatenation is faster
count = 0
files_written = 0
for impath in sorted(os.listdir(datadir)):
impath = os.path.join(datadir, impath)
try:
image = imageio.imread(impath)
except:
continue #cant be converted to numpy array.
image = fit_image(image, target_shape, fit_method)
assert len(image.shape) == 3
image = np.expand_dims(image, axis=0)
count += 1
tmp_numpy = np.concatenate((tmp_numpy, image), axis=0)
if tmp_numpy.shape[0]%64 == 0:
numpy_dataset = np.concatenate((numpy_dataset, tmp_numpy))
tmp_numpy = get_zeros_array(target_shape)
if numpy_dataset.shape[0] >= shard_size:
data_to_write, remaining_data = numpy_dataset[:shard_size], numpy_dataset[shard_size:]
print(data_to_write.shape, remaining_data.shape)
writepath = os.path.join(writedir, 'data_{}.npy'.format(files_written))
np.save(writepath, data_to_write)
files_written += 1
numpy_dataset = remaining_data
numpy_dataset = np.concatenate((numpy_dataset, tmp_numpy))
writepath = os.path.join(writedir, 'data_{}.npy'.format(files_written))
if numpy_dataset.shape[0] != 0:
np.save(writepath, numpy_dataset)
files_written += 1
print("A maximum of %d images will be used in training." % count)
return count
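# Minimal usage sketch (the paths and image shape below are placeholders, not
# taken from the original code):
#   n_images = images_to_train_dataset('./npy_shards', './raw_images',
#                                       (256, 256, 3), fit_method='center_crop')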
|
[
"tensorflow.device",
"os.listdir",
"imageio.imread",
"tensorflow.image.resize",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"tensorflow.constant",
"os.mkdir",
"numpy.concatenate",
"numpy.expand_dims",
"utils.get_zeros_array",
"utils.get_shardsize",
"numpy.save"
] |
[((2888, 2912), 'os.path.isfile', 'os.path.isfile', (['writedir'], {}), '(writedir)\n', (2902, 2912), False, 'import os\n'), ((3406, 3433), 'utils.get_shardsize', 'get_shardsize', (['target_shape'], {}), '(target_shape)\n', (3419, 3433), False, 'from utils import get_shardsize, get_zeros_array\n'), ((3454, 3483), 'utils.get_zeros_array', 'get_zeros_array', (['target_shape'], {}), '(target_shape)\n', (3469, 3483), False, 'from utils import get_shardsize, get_zeros_array\n'), ((3500, 3529), 'utils.get_zeros_array', 'get_zeros_array', (['target_shape'], {}), '(target_shape)\n', (3515, 3529), False, 'from utils import get_shardsize, get_zeros_array\n'), ((4678, 4720), 'numpy.concatenate', 'np.concatenate', (['(numpy_dataset, tmp_numpy)'], {}), '((numpy_dataset, tmp_numpy))\n', (4692, 4720), True, 'import numpy as np\n'), ((186, 205), 'tensorflow.device', 'tf.device', (['"""/CPU:0"""'], {}), "('/CPU:0')\n", (195, 205), True, 'import tensorflow as tf\n'), ((2715, 2738), 'os.path.isfile', 'os.path.isfile', (['datadir'], {}), '(datadir)\n', (2729, 2738), False, 'import os\n'), ((3657, 3676), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (3667, 3676), False, 'import os\n'), ((3696, 3725), 'os.path.join', 'os.path.join', (['datadir', 'impath'], {}), '(datadir, impath)\n', (3708, 3725), False, 'import os\n'), ((3966, 3995), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (3980, 3995), True, 'import numpy as np\n'), ((4036, 4078), 'numpy.concatenate', 'np.concatenate', (['(tmp_numpy, image)'], {'axis': '(0)'}), '((tmp_numpy, image), axis=0)\n', (4050, 4078), True, 'import numpy as np\n'), ((4850, 4883), 'numpy.save', 'np.save', (['writepath', 'numpy_dataset'], {}), '(writepath, numpy_dataset)\n', (4857, 4883), True, 'import numpy as np\n'), ((2581, 2600), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (2591, 2600), False, 'import os\n'), ((3148, 3171), 'os.path.isdir', 'os.path.isdir', (['writedir'], {}), '(writedir)\n', (3161, 3171), False, 'import os\n'), ((3759, 3781), 'imageio.imread', 'imageio.imread', (['impath'], {}), '(impath)\n', (3773, 3781), False, 'import imageio\n'), ((4146, 4188), 'numpy.concatenate', 'np.concatenate', (['(numpy_dataset, tmp_numpy)'], {}), '((numpy_dataset, tmp_numpy))\n', (4160, 4188), True, 'import numpy as np\n'), ((4213, 4242), 'utils.get_zeros_array', 'get_zeros_array', (['target_shape'], {}), '(target_shape)\n', (4228, 4242), False, 'from utils import get_shardsize, get_zeros_array\n'), ((4549, 4582), 'numpy.save', 'np.save', (['writepath', 'data_to_write'], {}), '(writepath, data_to_write)\n', (4556, 4582), True, 'import numpy as np\n'), ((223, 263), 'tensorflow.image.resize', 'tf.image.resize', (['image', 'target_shape[:2]'], {}), '(image, target_shape[:2])\n', (238, 263), True, 'import tensorflow as tf\n'), ((3369, 3387), 'os.mkdir', 'os.mkdir', (['writedir'], {}), '(writedir)\n', (3377, 3387), False, 'import os\n'), ((1814, 1833), 'tensorflow.device', 'tf.device', (['"""/CPU:0"""'], {}), "('/CPU:0')\n", (1823, 1833), True, 'import tensorflow as tf\n'), ((3188, 3208), 'os.listdir', 'os.listdir', (['writedir'], {}), '(writedir)\n', (3198, 3208), False, 'import os\n'), ((3328, 3348), 'os.listdir', 'os.listdir', (['writedir'], {}), '(writedir)\n', (3338, 3348), False, 'import os\n'), ((1876, 1894), 'tensorflow.constant', 'tf.constant', (['image'], {}), '(image)\n', (1887, 1894), True, 'import tensorflow as tf\n')]
|
import params
import numpy as np
def calc_min_delta_t(delta_x, alpha, v_max) -> float:
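    # explicit-scheme stability: 2-D diffusion requires delta_t <= delta_x**2 / (4 * alpha)
    # and advection requires the CFL condition delta_t <= delta_x / v_max; capped at 1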
return min(1, 1 / 4 * delta_x ** 2 / alpha, delta_x / v_max)
def adjust_boundary(T, v_x, v_y):
T[0, :] = params.T_h
T[-1, :] = T[-2, :]
T[:, 0] = T[:, 1]
T[:, -1] = T[:, -2]
v_y[0, :] = 0
v_y[-1, :] = v_y[-2, :]
v_y[:, 0] = v_y[:, 1]
v_y[:, -1] = v_y[:, -2]
def diffusion_x_op(T, alpha, delta_t, delta_x):
T[1:-1, 1:-1] = T[1:-1, 1:-1] + alpha * delta_t / \
pow(delta_x, 2) * (T[1:-1, 0:-2]-2*T[1:-1, 1:-1]+T[1:-1, 2:])
def diffusion_y_op(T, alpha, delta_t, delta_x):
T_cen = T[1:-1, 1:-1]
T_down = T[0:-2, 1:-1]
T_up = T[2:, 1:-1]
T[1:-1, 1:-1] = T_cen + alpha * delta_t / \
pow(delta_x, 2) * (T_down-2*T_cen+T_up)
def heat_convection_y_op(T, v_y, delta_t, delta_x):
T_cen = T[1:-1, 1:-1]
T_down = T[0:-2, 1:-1]
v_y_cen = v_y[1:-1, 1:-1]
T[1:-1, 1:-1] = T_cen - delta_t / delta_x * v_y_cen * (T_cen-T_down)
def mom_convection_y_op(T, v_y, delta_t, delta_x):
T_cen = T[1:-1, 1:-1]
v_y_cen = v_y[1:-1, 1:-1]
v_y_down = v_y[0:-2, 1:-1]
T_up = T[2:, 1:-1]
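    # buoyancy term: only a positive temperature excess relative to the cell
    # above (T_cen > T_up) accelerates the flow; otherwise b is clamped to zero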
b = params.g * np.maximum(np.zeros(T_cen.shape),
(T_cen-T_up)/T_up)
v_y[1:-1, 1:-1] = v_y_cen + delta_t * b - \
delta_t / delta_x * v_y_cen * (v_y_cen-v_y_down)
|
[
"numpy.zeros"
] |
[((1184, 1205), 'numpy.zeros', 'np.zeros', (['T_cen.shape'], {}), '(T_cen.shape)\n', (1192, 1205), True, 'import numpy as np\n')]
|
import warnings
import numpy as np
import xgboost
from tabulate import tabulate
from collections import Counter
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import precision_score, recall_score, f1_score
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def get_sentiment(documents, document_ids):
"""
Input - A document_df that basically has documents
and their IDs [tweetid/message hash etc]
Returns - A dictionary mapping document ID to sentiment
from Vader
This function is basically as a function that is generic
to the documents which at the moment are tweets & news
articles.
"""
# Vader
vader = SentimentIntensityAnalyzer()
# Create & populating dict mapping document_id
# to sentiment dict
sentiment_dict = {}
for i, document in enumerate(documents):
if document_ids[i] not in sentiment_dict:
sentiment_dict[document_ids[i]] = vader.polarity_scores(document)
return sentiment_dict
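# Minimal usage sketch (hypothetical inputs, not part of the original pipeline):
#   scores = get_sentiment(["great news today", "what a disaster"], ["id1", "id2"])
#   scores["id1"]  # -> {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...}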
def make_predictions(location_features_dict, labels, model=None, permute=False, lead_days=2, days_window=5):
"""
Input -
location_features_dict - The dict mapping from location to features
labels - Label dict generated from process_acled_csv(..)
model - Specific sklearn model to evaluate/benchmark performance
permute - Permute the data before train-test split
Returns - None
"""
# Table for presenting on tabulate
result_table = []
# Suppress warnings for divide-by-zero error
warnings.filterwarnings("ignore")
# Compute intersection for locations present on both dicts
common_locations = set(location_features_dict.keys()) & set(labels.keys())
# Sorted for clarity
common_locations = sorted(list(common_locations))
for common_location in common_locations:
# Get data and labels
X, y = location_features_dict[common_location], labels[common_location]
X, y = np.array(X), np.array(y)
# Eliminate last days to match labels.shape
X = X[:-(lead_days + days_window)]
# Permute randomly if specified
if permute:
p = np.random.permutation(len(X))
X, y = X[p], y[p]
# Split data into train & test - 75% & 25%
split = int(0.75 * len(X))
xtrain, ytrain = X[:split], y[:split]
xtest, ytest = X[split:], y[split:]
# Default model
if model is None:
model = xgboost.XGBClassifier(n_estimators=200, n_jobs=-1)
# Fit the train data
model.fit(xtrain, ytrain)
# Make predictions
ypred = model.predict(xtest)
# Compute metrics
train_acc = model.score(xtrain, ytrain)
test_acc = model.score(xtest, ytest)
precision = precision_score(ytest, ypred)
recall = recall_score(ytest, ypred)
f1 = f1_score(ytest, ypred)
# Add row to result_table
result_row = [
common_location,
np.round(train_acc, 2), np.round(test_acc, 2),
np.round(precision, 2), np.round(recall, 2),
np.round(f1, 2), np.round(np.sum(y) / len(y), 2)
]
result_table.append(result_row)
# Average stats
# Turns out median is kind of useless
result_table_copy = (np.array(result_table)[:, 1:]).astype(np.float32)
averages = np.round(np.mean(result_table_copy, axis=0), 2)
# Sort by test accuracy
result_table = sorted(result_table, key=lambda x: -x[-2])
# Add them to the existing result table
result_table.append(["Average"] + averages.tolist())
# Header for table
header = ["Location", "Train Accuracy", "Test Accuracy",
"Precision", "Recall", "F1 Score", "+'s in data"]
# Print tabulated result
print(tabulate(result_table,
tablefmt="pipe",
stralign="center",
headers=header))
# Unsuppress warning
warnings.filterwarnings("default")
return
def get_features(date_dict):
"""
Input: date_dict to compute features for each date
Returns: Features for each date
"""
# Initialize list for features
features = []
# Iterate through dates
for date in date_dict:
feature_row = []
docs = date_dict[date]
# If no rows are present, add zero-row
if docs is None:
feature_row = [0] * 6
else:
# Compute features
feature_row.append(len(docs))
mean = docs.mean()
feature_row.extend(
[mean['pos'], mean['neg'], mean['neu'], mean['compound']])
feature_row.append(len(docs[docs['neg'] > 0]))
# Add feature_row to above list
features.append(feature_row)
return features
|
[
"vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"numpy.mean",
"sklearn.metrics.f1_score",
"tabulate.tabulate",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"numpy.sum",
"warnings.filterwarnings",
"numpy.round"
] |
[((699, 727), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (725, 727), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((1592, 1625), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1615, 1625), False, 'import warnings\n'), ((4108, 4142), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""default"""'], {}), "('default')\n", (4131, 4142), False, 'import warnings\n'), ((2846, 2875), 'sklearn.metrics.precision_score', 'precision_score', (['ytest', 'ypred'], {}), '(ytest, ypred)\n', (2861, 2875), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((2893, 2919), 'sklearn.metrics.recall_score', 'recall_score', (['ytest', 'ypred'], {}), '(ytest, ypred)\n', (2905, 2919), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((2933, 2955), 'sklearn.metrics.f1_score', 'f1_score', (['ytest', 'ypred'], {}), '(ytest, ypred)\n', (2941, 2955), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((3508, 3542), 'numpy.mean', 'np.mean', (['result_table_copy'], {'axis': '(0)'}), '(result_table_copy, axis=0)\n', (3515, 3542), True, 'import numpy as np\n'), ((3938, 4012), 'tabulate.tabulate', 'tabulate', (['result_table'], {'tablefmt': '"""pipe"""', 'stralign': '"""center"""', 'headers': 'header'}), "(result_table, tablefmt='pipe', stralign='center', headers=header)\n", (3946, 4012), False, 'from tabulate import tabulate\n'), ((2020, 2031), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2028, 2031), True, 'import numpy as np\n'), ((2033, 2044), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2041, 2044), True, 'import numpy as np\n'), ((3075, 3097), 'numpy.round', 'np.round', (['train_acc', '(2)'], {}), '(train_acc, 2)\n', (3083, 3097), True, 'import numpy as np\n'), ((3099, 3120), 'numpy.round', 'np.round', (['test_acc', '(2)'], {}), '(test_acc, 2)\n', (3107, 3120), True, 'import numpy as np\n'), ((3144, 3166), 'numpy.round', 'np.round', (['precision', '(2)'], {}), '(precision, 2)\n', (3152, 3166), True, 'import numpy as np\n'), ((3168, 3187), 'numpy.round', 'np.round', (['recall', '(2)'], {}), '(recall, 2)\n', (3176, 3187), True, 'import numpy as np\n'), ((3211, 3226), 'numpy.round', 'np.round', (['f1', '(2)'], {}), '(f1, 2)\n', (3219, 3226), True, 'import numpy as np\n'), ((3434, 3456), 'numpy.array', 'np.array', (['result_table'], {}), '(result_table)\n', (3442, 3456), True, 'import numpy as np\n'), ((3237, 3246), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (3243, 3246), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
with open('input.txt', 'r') as f:
stream = f.readline()
image = np.array(tuple(map(int, stream))).reshape(-1, 6, 25)
nonzero_counts = np.sum(np.count_nonzero(image, axis=2), axis=1)
fewest_zeros_layer = np.argsort(nonzero_counts)[-1]
unique_values, counts = np.unique(image[fewest_zeros_layer], return_counts=True)
value_counts = dict(zip(unique_values, counts))
output_image = np.zeros((6, 25))
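# Layer pixel codes: 0 = black, 1 = white, 2 = transparent. Shifting every value
# by -2 maps transparent pixels to 0, so np.where below keeps the first
# non-transparent pixel found when scanning from the top layer down.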
image -= 2
for layer in image:
output_image = np.where(output_image != 0, output_image, layer)
print(f'{(value_counts[1] * value_counts[2])=}')
plt.imshow(output_image)
plt.show()
|
[
"matplotlib.pyplot.imshow",
"numpy.unique",
"numpy.where",
"numpy.count_nonzero",
"numpy.argsort",
"numpy.zeros",
"matplotlib.pyplot.show"
] |
[((316, 372), 'numpy.unique', 'np.unique', (['image[fewest_zeros_layer]'], {'return_counts': '(True)'}), '(image[fewest_zeros_layer], return_counts=True)\n', (325, 372), True, 'import numpy as np\n'), ((437, 454), 'numpy.zeros', 'np.zeros', (['(6, 25)'], {}), '((6, 25))\n', (445, 454), True, 'import numpy as np\n'), ((604, 628), 'matplotlib.pyplot.imshow', 'plt.imshow', (['output_image'], {}), '(output_image)\n', (614, 628), True, 'import matplotlib.pyplot as plt\n'), ((629, 639), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (637, 639), True, 'import matplotlib.pyplot as plt\n'), ((199, 230), 'numpy.count_nonzero', 'np.count_nonzero', (['image'], {'axis': '(2)'}), '(image, axis=2)\n', (215, 230), True, 'import numpy as np\n'), ((261, 287), 'numpy.argsort', 'np.argsort', (['nonzero_counts'], {}), '(nonzero_counts)\n', (271, 287), True, 'import numpy as np\n'), ((505, 553), 'numpy.where', 'np.where', (['(output_image != 0)', 'output_image', 'layer'], {}), '(output_image != 0, output_image, layer)\n', (513, 553), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import glob,os
import numpy as np
from bunch import Bunch
import mygis as io
import load_data
def adjust_p(p,h,dz):
'''Convert p [Pa] at elevation h [m] by shifting its elevation by dz [m]'''
# p in pascals
# h,dz in meters
# slp = p/(1 - 2.25577E-5*h)**5.25588
# p=slp*(1 - 2.25577E-5*(h+dz))**5.25588
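    # as written, the in-place update applies only the (h+dz) factor, which
    # matches the commented two-step formula when h = 0 (how main() calls it);
    # e.g. p = 100000 Pa shifted up by dz = 100 m becomes roughly 98.8 kPa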
p*=(1 - 2.25577E-5*(h+dz))**5.25588
def update_base(base,filename,nz):
data=load_data.cols(filename)
nz=min(data.shape[0]-1,nz)
base.z=data[:nz,0]
base.dz=np.diff(data[:nz+1,0]).reshape((1,nz,1,1))
base.th=data[:nz,1].reshape((1,nz,1,1))
base.qv=data[:nz,2].reshape((1,nz,1,1))/1000.0
def main():
filename="bc"
    nx,ny,nz,nt=(20,20,10,24)
dims=[nt,nz,ny,nx]
lonmin=-110.0; lonmax=-100.0; dlon=(lonmax-lonmin)/nx
latmin=35.0; latmax=45.0; dlat=(latmax-latmin)/ny
base=Bunch(u=10.0,w=0.0,v=0.0,
qv=0.0013,qc=0.0,
p=100000.0,
th=np.arange(273.0,300,(300-273.0)/nz).reshape((1,nz,1,1)),
dz=400.0)
base.z=np.arange(0,nz*base.dz,base.dz)
if glob.glob("sounding.txt"):
update_base(base,"sounding.txt",nz)
nz=base.th.size
dims=[nt,nz,ny,nx]
u=np.zeros(dims,dtype="f")+base.u
w=np.zeros(dims,dtype="f")+base.w
v=np.zeros(dims,dtype="f")+base.v
qv=np.zeros(dims,dtype="f")+base.qv
qc=np.zeros(dims,dtype="f")+base.qc
coscurve=np.cos(np.arange(dims[2])/dims[2]*2*np.pi+np.pi)+1
hgt=(coscurve*1000).reshape((1,nx)).repeat(ny,axis=0)
lon=np.arange(lonmin,lonmax,dlon)
lat=np.arange(latmin,latmax,dlat)
lon,lat=np.meshgrid(lon,lat)
dz=np.zeros(dims)+base.dz
z=np.zeros(dims,dtype="f")+base.z.reshape((1,nz,1,1))+hgt.reshape((1,1,ny,nx))
layer1=(dz[0,0,:,:]/2)
z[0,0,:,:]+=layer1
for i in range(1,int(nz)):
z[:,i,:,:]=z[:,i-1,:,:]+(dz[:,i-1,:,:]+dz[:,i,:,:])/2.0
p=np.zeros(dims,dtype="f")+base.p
adjust_p(p,0.0,z)
th=np.zeros(dims,dtype="f")+base.th
d4dname=("t","z","y","x")
d3dname=("z","y","x")
d2dname=("y","x")
othervars=[Bunch(data=v, name="V", dims=d4dname,dtype="f",attributes=dict(units="m/s", description="Horizontal (y) wind speed")),
Bunch(data=w, name="W", dims=d4dname,dtype="f",attributes=dict(units="m/s", description="Vertical wind speed")),
Bunch(data=qv, name="QVAPOR",dims=d4dname,dtype="f",attributes=dict(units="kg/kg",description="Water vapor mixing ratio")),
Bunch(data=qc, name="QCLOUD",dims=d4dname,dtype="f",attributes=dict(units="kg/kg",description="Cloud water mixing ratio")),
Bunch(data=p, name="P", dims=d4dname,dtype="f",attributes=dict(units="Pa", description="Pressure")),
Bunch(data=th, name="T", dims=d4dname,dtype="f",attributes=dict(units="K", description="Potential temperature")),
Bunch(data=dz, name="dz", dims=d4dname,dtype="f",attributes=dict(units="m", description="Layer thickness")),
Bunch(data=z, name="Z", dims=d4dname,dtype="f",attributes=dict(units="m", description="Layer Height AGL")),
Bunch(data=lat,name="XLAT", dims=d2dname,dtype="f",attributes=dict(units="deg", description="Latitude")),
Bunch(data=lon,name="XLONG", dims=d2dname,dtype="f",attributes=dict(units="deg", description="Longitude")),
Bunch(data=hgt,name="HGT", dims=d2dname,dtype="f",attributes=dict(units="m", description="Terrain Elevation"))
]
fileexists=glob.glob(filename) or glob.glob(filename+".nc")
if fileexists:
print("Removing : "+fileexists[0])
os.remove(fileexists[0])
io.write(filename, u,varname="U", dims=d4dname,dtype="f",attributes=dict(units="m/s",description="Horizontal (x) wind speed"),
extravars=othervars)
if __name__ == '__main__':
main()
|
[
"numpy.arange",
"numpy.diff",
"numpy.zeros",
"load_data.cols",
"numpy.meshgrid",
"glob.glob",
"os.remove"
] |
[((436, 460), 'load_data.cols', 'load_data.cols', (['filename'], {}), '(filename)\n', (450, 460), False, 'import load_data\n'), ((1074, 1109), 'numpy.arange', 'np.arange', (['(0)', '(nz * base.dz)', 'base.dz'], {}), '(0, nz * base.dz, base.dz)\n', (1083, 1109), True, 'import numpy as np\n'), ((1113, 1138), 'glob.glob', 'glob.glob', (['"""sounding.txt"""'], {}), "('sounding.txt')\n", (1122, 1138), False, 'import glob, os\n'), ((1569, 1600), 'numpy.arange', 'np.arange', (['lonmin', 'lonmax', 'dlon'], {}), '(lonmin, lonmax, dlon)\n', (1578, 1600), True, 'import numpy as np\n'), ((1607, 1638), 'numpy.arange', 'np.arange', (['latmin', 'latmax', 'dlat'], {}), '(latmin, latmax, dlat)\n', (1616, 1638), True, 'import numpy as np\n'), ((1649, 1670), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (1660, 1670), True, 'import numpy as np\n'), ((1246, 1271), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': '"""f"""'}), "(dims, dtype='f')\n", (1254, 1271), True, 'import numpy as np\n'), ((1284, 1309), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': '"""f"""'}), "(dims, dtype='f')\n", (1292, 1309), True, 'import numpy as np\n'), ((1322, 1347), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': '"""f"""'}), "(dims, dtype='f')\n", (1330, 1347), True, 'import numpy as np\n'), ((1361, 1386), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': '"""f"""'}), "(dims, dtype='f')\n", (1369, 1386), True, 'import numpy as np\n'), ((1401, 1426), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': '"""f"""'}), "(dims, dtype='f')\n", (1409, 1426), True, 'import numpy as np\n'), ((1682, 1696), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (1690, 1696), True, 'import numpy as np\n'), ((1949, 1974), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': '"""f"""'}), "(dims, dtype='f')\n", (1957, 1974), True, 'import numpy as np\n'), ((2010, 2035), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': '"""f"""'}), "(dims, dtype='f')\n", (2018, 2035), True, 'import numpy as np\n'), ((3613, 3632), 'glob.glob', 'glob.glob', (['filename'], {}), '(filename)\n', (3622, 3632), False, 'import glob, os\n'), ((3636, 3663), 'glob.glob', 'glob.glob', (["(filename + '.nc')"], {}), "(filename + '.nc')\n", (3645, 3663), False, 'import glob, os\n'), ((3732, 3756), 'os.remove', 'os.remove', (['fileexists[0]'], {}), '(fileexists[0])\n', (3741, 3756), False, 'import glob, os\n'), ((527, 552), 'numpy.diff', 'np.diff', (['data[:nz + 1, 0]'], {}), '(data[:nz + 1, 0])\n', (534, 552), True, 'import numpy as np\n'), ((1711, 1736), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': '"""f"""'}), "(dims, dtype='f')\n", (1719, 1736), True, 'import numpy as np\n'), ((981, 1022), 'numpy.arange', 'np.arange', (['(273.0)', '(300)', '((300 - 273.0) / nz)'], {}), '(273.0, 300, (300 - 273.0) / nz)\n', (990, 1022), True, 'import numpy as np\n'), ((1454, 1472), 'numpy.arange', 'np.arange', (['dims[2]'], {}), '(dims[2])\n', (1463, 1472), True, 'import numpy as np\n')]
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def run(raven, inputs):
"""
Run method.
@ In, raven, object, RAVEN object
@ In, inputs, dict, input dictionary
@ Out, None
"""
# inputs: a, b, c
# outputs: d, e, f
# indices: d(), e(x), f(x, y)
a = raven.a
b = raven.b
c = raven.c
nx = 5
ny = 3
x = np.arange(nx) * 0.1
y = np.arange(ny) * 10
d = a*a
e = x * b
f = np.arange(nx*ny).reshape(nx, ny) * c
# save
raven.x = x
raven.y = y
raven.d = d
raven.e = e
raven.f = f
raven._indexMap = {'e': ['x'],
'f': ['x', 'y']
}
|
[
"numpy.arange"
] |
[((896, 909), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (905, 909), True, 'import numpy as np\n'), ((922, 935), 'numpy.arange', 'np.arange', (['ny'], {}), '(ny)\n', (931, 935), True, 'import numpy as np\n'), ((970, 988), 'numpy.arange', 'np.arange', (['(nx * ny)'], {}), '(nx * ny)\n', (979, 988), True, 'import numpy as np\n')]
|
""""
STRIP Scanning Strategy Tools test module.
"""
import unittest
import healpy as hp
import numpy as np
from ScanningTools import ScanningTools as st
from astropy.time import Time
from astropy.coordinates import SkyCoord, AltAz
from ScanningTools.Quaternions import Quaternion as q
angles = np.array([[-10, 45, 59],
[30, 35, 15],
[-180, 25, 20],
[3, 4, 5]])
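# sexagesimal (degrees, arcminutes, arcseconds) triples; e.g. [-10, 45, 59]
# corresponds to -(10 + 45/60 + 59/3600) ~ -10.7664 degrees in decimal form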
hours = np.array([[23, 59, 16],
[7, 56, 59]])
t = np.array([1.546585, -0.56, 0.3333333333333333333, -1.001])
### INSTRUMENT CHARACTERISTICS ###
pointing_accuracy = np.array([0, 0, 25]) #deg (arcsec)
###
### LOCATION INFORMATION ###
LAT = np.array([28, 16, 24]) #deg
LONG = np.array([-16, 38, 32]) #deg
Height = 2400 #m
loc = st.get_location(LAT, LONG, Height)
###
### TIME INFORMATION ###
LCT_start = (0, 0, 0) #h, m, s
LCD_start = (1, 1, 2015) #g, m, y
UTC, DST = (0 , 0) #h
###
### ENGINE ROTATION INFORMATION ###
zenith_distance = 31 #deg
polarization_angle = 60 #deg
###
class TestScanningTools(unittest.TestCase):
def test_period2sec(self):
one_sidereal_year = st.period2sec(years=1, days=0, hours=0, min=0, sec=0, sidereal=True)
one_solar_year = st.period2sec(years=1, days=0, hours=0, min=0, sec=0)
one_sidereal_day = st.period2sec(years=0, days=1, hours=0, min=0, sec=0, sidereal=True)
one_solar_day = st.period2sec(years=0, days=1, hours=0, min=0, sec=0)
period_0 = st.period2sec(years=1, days=1, hours=0, min=0, sec=0, sidereal=True)
period_1 = st.period2sec(years=5, days=30, hours=0, min=0, sec=0, sidereal=True)
period_2 = st.period2sec(years=2, days=17, hours=0, min=0, sec=0, sidereal=True)
period_3 = st.period2sec(years=10, days=21, hours=15, min=3, sec=25, sidereal=True)
self.assertEqual(one_sidereal_year, 31558145)
self.assertEqual(one_solar_year, 31536000)
self.assertEqual(one_sidereal_day, 86164)
self.assertEqual(one_solar_day, 86400)
self.assertEqual(period_0, 31644309)
self.assertEqual(period_1, 160375649)
self.assertEqual(period_2, 64581080)
self.assertEqual(period_3, 317445103)
def test_sex2dec(self):
ang0 = st.sex2dec(angles)
ang1 = st.sex2dec(angles[0], radians=True)
self.assertTrue(np.allclose(ang0, np.array([-10.76638889, 30.587500, -180.422222,
3.06805556])))
self.assertEqual(ang1, np.radians(ang0[0]))
def test_dec2sex(self):
t0 = st.dec2sex(t)
t00 = st.dec2sex(t[0])
self.assertTrue(np.allclose(t0, np.array([[1, 32, 47.706], [-0, 33, 36], [0, 20, 0],
[-1, 0, 3.6]])))
self.assertTrue(np.allclose(t00, np.array([1, 32, 47.706])))
def test_degrees2hours(self):
ang0 = st.degrees2hours(angles)
ang1 = st.degrees2hours(angles[2], decimal=True)
self.assertTrue(np.allclose(ang0, st.dec2sex(st.sex2dec(angles) / 15)))
self.assertTrue(np.allclose(ang1, st.sex2dec(angles)[2] / 15))
def test_hours2degrees(self):
ang0 = st.hours2degrees(hours[1])
ang1 = st.hours2degrees(hours, decimal=True)
self.assertTrue(np.allclose(ang0, st.dec2sex(st.sex2dec(hours[1]) * 15)))
self.assertTrue(np.allclose(ang1, st.sex2dec(hours) * 15))
def test_LocalCivilTime2JulianDay(self):
"Integrated Test: it includes also the LCT2GCD and GCD2JD function conversion"
Jul_1_2013 = st.LocalCivilTime2JulianDay((3, 37, 0), (1, 7, 2013), UTC=4, DST=1)
Jun_19_2009 = st.LocalCivilTime2JulianDay((18, 0, 0), (19, 6, 2009), UTC=0, DST=0)
self.assertTrue(np.allclose(Jul_1_2013, 2456474.442))
self.assertTrue(np.allclose(Jun_19_2009, 2455002.25))
t = Time(['2015-1-1 00:00:10', '2018-1-3 5:15:24.3', '1980-4-22 19:30:2']).jd
T = np.array([st.LocalCivilTime2JulianDay((0, 0, 10), (1, 1, 2015), UTC=0, DST=0),
st.LocalCivilTime2JulianDay((5, 15, 24.3), (3, 1, 2018), UTC=0, DST=0),
st.LocalCivilTime2JulianDay((19, 30, 2), (22, 4, 1980), UTC=0, DST=0)])
self.assertTrue(np.allclose(t, T))
def test_LocalCivilTime2LocalSiderealTime(self):
LONG = st.dec2sex(0.1)
Jun_19_2009 = st.LocalCivilTime2LocalSiderealTime((18, 0, 0),
(19, 6, 2009),
LONG, UTC=0, DST=0)
self.assertTrue(np.allclose(Jun_19_2009, np.array([11, 52, 46.843])))
def test_get_nside_eff(self):
fwhm_beam0 = np.array([0, 5, 0]) #deg (arcmin)
fwhm_beam1 = np.array([0, 21, 0]) #deg (arcmin)
fwhm_beam2 = np.array([0, 32, 0]) #deg (arcmin)
self.assertEqual(st.get_nside_eff(fwhm_beam0), 1024)
self.assertEqual(st.get_nside_eff(fwhm_beam1), 256)
self.assertEqual(st.get_nside_eff(fwhm_beam2), 128)
def test_get_full_fp(self):
def general_test(x_fp, i, j):
self.assertTrue(np.allclose(x_fp[i, 0], x_fp[j, 0]))
self.assertTrue(np.allclose(x_fp[i, 1], -x_fp[j, 1]))
self.assertTrue(np.allclose(x_fp[i, 2], x_fp[j, 2]))
x_fp, n_horns = st.get_full_fp('./ScanningTools/fp_data/fp_theta.txt',
'./ScanningTools/fp_data/fp_phi.txt')
self.assertTrue(np.allclose(np.sum(x_fp**2, axis=1), 1))
self.assertEqual(n_horns, 49)
general_test(x_fp, 7, 42)
general_test(x_fp, 8, 47)
general_test(x_fp, 9, 46)
general_test(x_fp, 10, 45)
general_test(x_fp, 11, 44)
general_test(x_fp, 12, 43)
general_test(x_fp, 13, 48)
general_test(x_fp, 14, 35)
general_test(x_fp, 15, 40)
general_test(x_fp, 16, 39)
general_test(x_fp, 17, 38)
general_test(x_fp, 18, 37)
general_test(x_fp, 19, 36)
general_test(x_fp, 20, 41)
general_test(x_fp, 21, 28)
general_test(x_fp, 22, 33)
general_test(x_fp, 23, 32)
general_test(x_fp, 24, 31)
general_test(x_fp, 25, 30)
general_test(x_fp, 26, 29)
general_test(x_fp, 27, 34)
def test_get_full_fp_polarization_angles(self):
def general_test(x_fp, i, j):
self.assertTrue(np.allclose(x_fp[i, 0], x_fp[j, 0]))
self.assertTrue(np.allclose(x_fp[i, 1], -x_fp[j, 1]))
self.assertTrue(np.allclose(x_fp[i, 2], x_fp[j, 2]))
full_psi, polarization_versor = st.get_full_fp_polarization_angles(
'./ScanningTools/fp_data/fp_psi.txt')
self.assertTrue(np.allclose(np.sum(polarization_versor**2, axis=1), 1))
self.assertEqual(len(full_psi), 49)
self.assertEqual(len(polarization_versor), 49)
general_test(polarization_versor, 7, 42)
general_test(polarization_versor, 8, 47)
general_test(polarization_versor, 9, 46)
general_test(polarization_versor, 10, 45)
general_test(polarization_versor, 11, 44)
general_test(polarization_versor, 12, 43)
general_test(polarization_versor, 13, 48)
general_test(polarization_versor, 14, 35)
general_test(polarization_versor, 15, 40)
general_test(polarization_versor, 16, 39)
general_test(polarization_versor, 17, 38)
general_test(polarization_versor, 18, 37)
general_test(polarization_versor, 19, 36)
general_test(polarization_versor, 20, 41)
general_test(polarization_versor, 21, 28)
general_test(polarization_versor, 22, 33)
general_test(polarization_versor, 23, 32)
general_test(polarization_versor, 24, 31)
general_test(polarization_versor, 25, 30)
general_test(polarization_versor, 26, 29)
general_test(polarization_versor, 27, 34)
def test_get_timeJD(self):
def general_tests(time, sampling_rate, JD, JD_step, t0, t1):
self.assertTrue(np.allclose(time[1:] - time[0:-1], 1 / sampling_rate))
self.assertEqual(len(JD), len(time))
self.assertEqual(np.sum(np.diff(JD_step)), 0)
self.assertTrue(np.allclose((t1-t0).sec, 1 / sampling_rate, rtol=1e-3))
def tests_1h(obs_t, time, sampling_rate, JD, JD_step, t0, t1):
self.assertEqual(obs_t, 3600)
self.assertEqual(len(time), obs_t * sampling_rate)
general_tests(time, sampling_rate, JD, JD_step, t0, t1)
def tests_1d(LCT_start, LCD_start, obs_t, time, sampling_rate, JD, JD_step, t0, t1, UTC=UTC,
DST=DST):
general_tests(time, sampling_rate, JD, JD_step, t0, t1)
obs_t0, time0, JD0 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time,
UTC=UTC, DST=DST, day=None)
self.assertEqual(obs_t, 86400)
self.assertEqual(obs_t, obs_t0)
self.assertEqual(len(time), obs_t * sampling_rate)
self.assertTrue(len(time), len(time0))
self.assertTrue(len(JD), len(JD0))
def tests_1y(LCT_start, LCD_start, obs_t, time, sampling_rate, JD, JD_step, t0, t1, UTC=UTC,
DST=DST, day=None):
general_tests(time, sampling_rate, JD, JD_step, t0, t1)
self.assertEqual(obs_t, 86400 * 365)
if day:
self.assertEqual(len(time), 86400 * sampling_rate)
if day > 1:
self.assertTrue(time[0] != 0)
else:
self.assertTrue(time[0] == 0)
else:
self.assertEqual(len(time), obs_t * sampling_rate)
sampling_rate = 50 #Hz
obs_time = (0, 0, 1, 0, 0) #y, d, h, m, s
day = None
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day)
JD_step = JD[1:] - JD[:-1]
t0 = Time(JD[0], format='jd', location=loc)
t1 = Time(JD[0] + JD_step[0], format='jd', location=loc)
tests_1h(obs_t, time, sampling_rate, JD, JD_step, t0, t1)
sampling_rate = 5 #Hz
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day)
JD_step = JD[1:] - JD[:-1]
t0 = Time(JD[0], format='jd', location=loc)
t1 = Time(JD[0] + JD_step[0], format='jd', location=loc)
tests_1h(obs_t, time, sampling_rate, JD, JD_step, t0, t1)
sampling_rate = 3 #Hz
obs_time = (0, 1, 0, 0, 0) #y, d, h, m, s
day = 1
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day)
JD_step = JD[1:] - JD[:-1]
t0 = Time(JD[0], format='jd', location=loc)
t1 = Time(JD[0] + JD_step[0], format='jd', location=loc)
tests_1d(LCT_start, LCD_start, obs_t, time, sampling_rate, JD, JD_step, t0, t1, UTC=UTC,
DST=DST)
sampling_rate = 1 #Hz
obs_time = (1, 0, 0, 0, 0) #y, d, h, m, s
day0, day1, day2, day3, day4 = (1, 5, 364, None, None)
obs_t0, time0, JD0 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day0)
JD_step0 = JD0[1:] - JD0[:-1]
t00 = Time(JD0[0], format='jd', location=loc)
t10 = Time(JD0[0] + JD_step0[0], format='jd', location=loc)
tests_1y(LCT_start, LCD_start, obs_t0, time0, sampling_rate, JD0, JD_step0, t00, t10,
UTC=UTC, DST=DST, day=day0)
obs_t1, time1, JD1 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day1)
JD_step1 = JD1[1:] - JD1[:-1]
t01 = Time(JD1[0], format='jd', location=loc)
t11 = Time(JD1[0] + JD_step1[0], format='jd', location=loc)
tests_1y(LCT_start, LCD_start, obs_t1, time1, sampling_rate, JD1, JD_step1, t01, t11,
UTC=UTC, DST=DST, day=day1)
obs_t2, time2, JD2 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day2)
JD_step2 = JD2[1:] - JD2[:-1]
t02 = Time(JD2[0], format='jd', location=loc)
t12 = Time(JD2[0] + JD_step2[0], format='jd', location=loc)
tests_1y(LCT_start, LCD_start, obs_t2, time2, sampling_rate, JD2, JD_step2, t02, t12,
UTC=UTC, DST=DST, day=day2)
obs_t3, time3, JD3 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day3)
JD_step3 = JD3[1:] - JD3[:-1]
t03 = Time(JD3[0], format='jd', location=loc)
t13 = Time(JD3[0] + JD_step3[0], format='jd', location=loc)
tests_1y(LCT_start, LCD_start, obs_t3, time3, sampling_rate, JD3, JD_step3, t03, t13,
UTC=UTC, DST=DST, day=day3)
LCT_start4 = (12, 0, 0)
obs_t4, time4, JD4 = st.get_timeJD(LCT_start4, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day4)
JD_step4 = JD4[1:] - JD4[:-1]
t04 = Time(JD4[0], format='jd', location=loc)
t14 = Time(JD4[0] + JD_step4[0], format='jd', location=loc)
tests_1y(LCT_start4, LCD_start, obs_t4, time4, sampling_rate, JD4, JD_step4, t04, t14,
UTC=UTC, DST=DST, day=day4)
def test_spin_generator(self):
def general_spin_tests(phi, obs_time, time, sampling_rate, rpm, day=None):
if day:
self.assertEqual(len(phi), 86400 * sampling_rate)
else:
self.assertEqual(len(phi), obs_time * sampling_rate)
self.assertEqual(
np.sum(np.r_[True, phi[1:] > phi[:-1]] & np.r_[phi[:-1] > phi[1:], True]),
rpm * len(phi) / sampling_rate / 60)
self.assertEqual(phi.min(), 0)
self.assertTrue(phi.max() < 2 * np.pi)
obs_time1, obs_time2, obs_time3 = ((0, 30, 0, 0, 0), (0, 1, 0, 0, 0), (0, 0, 1, 0, 0))
sampling_rate1, sampling_rate2, sampling_rate3 = (1, 3, 50)
rpm1, rpm2, rpm3 = (13, 1, 5)
day1, day2, day3 = (2, None, None)
obs_t1, time1, JD1 = st.get_timeJD(LCT_start, LCD_start, sampling_rate1, obs_time1, UTC=UTC,
DST=DST, day=day1)
phi1 = st.spin_generator(time1, rpm1)
general_spin_tests(phi1, obs_t1, time1, sampling_rate1, rpm1, day=day1)
obs_t2, time2, JD2 = st.get_timeJD(LCT_start, LCD_start, sampling_rate2, obs_time2, UTC=UTC,
DST=DST, day=day2)
phi2 = st.spin_generator(time2, rpm2)
general_spin_tests(phi2, obs_t2, time2, sampling_rate2, rpm2, day=day2)
obs_t3, time3, JD3 = st.get_timeJD(LCT_start, LCD_start, sampling_rate3, obs_time3, UTC=UTC,
DST=DST, day=day3)
phi3 = st.spin_generator(time3, rpm3)
general_spin_tests(phi3, obs_t3, time3, sampling_rate3, rpm3, day=day3)
def test_euler_rotation_matrix(self):
phi1, theta1, psi1 = np.radians(([10, 10, 10], [30, 30, 30], [0, 0, 0]))
m1 = st.euler_rotation_matrix(phi1, theta1, psi1)
M1 = np.array([[0.98480775301220802, -0.1503837331804353, 0.086824088833465152],
[0.17364817766693033, 0.85286853195244328, -0.49240387650610395],
[0, 0.49999999999999994, 0.86602540378443871]])
phi2, theta2, psi2 = np.radians(([10, 10, 10], [30, 30, 30], [45, 45, 45]))
m2 = st.euler_rotation_matrix(phi2, theta2, psi2)
M2 = np.array([[0.59002688280798476, -0.80270159783205308, 0.086824088833465152],
[0.72585692637316113, 0.4802813184352156, -0.49240387650610395],
[0.35355339059327368, 0.35355339059327373, 0.86602540378443871]])
phi3, theta3, psi3 = np.radians(([10, 10, 10], [30, 30, 30], [45, 0, 45]))
m3 = st.euler_rotation_matrix(phi3, theta3, psi3)
M3 = np.array([[[0.59002688280798476, -0.80270159783205308, 0.086824088833465152],
[0.72585692637316113, 0.4802813184352156, -0.49240387650610395],
[0.35355339059327368, 0.35355339059327373, 0.86602540378443871]],
[[0.98480775301220802, -0.1503837331804353, 0.086824088833465152],
[0.17364817766693033, 0.85286853195244328, -0.49240387650610395],
[0, 0.49999999999999994, 0.86602540378443871]],
[[0.59002688280798476, -0.80270159783205308, 0.086824088833465152],
[0.72585692637316113, 0.4802813184352156, -0.49240387650610395],
[0.35355339059327368, 0.35355339059327373, 0.86602540378443871]]])
self.assertTrue(np.allclose(m1, np.repeat(M1[None, ...], 3, axis=0)))
self.assertTrue(np.allclose(m2, np.repeat(M2[None, ...], 3, axis=0)))
self.assertTrue(np.allclose(m3, M3))
def test_engine_rotations(self):
obs_time = (0, 0, 0, 30, 0)
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, 50, obs_time, UTC=UTC, DST=DST,
day=None)
rpm = 7
theta, phi, psi = st.get_engine_rotations(time, rpm, zenith_distance, polarization_angle)
self.assertEqual(len(theta), len(time))
self.assertEqual(len(phi), len(time))
self.assertEqual(len(psi), len(time))
self.assertTrue(np.allclose(theta[1:] - theta[:-1], 0))
self.assertTrue(np.allclose(psi[1:] - psi[:-1], 0))
def test_fp_rotations(self):
def general_tests(fp_pointings, fp_pointings_c):
self.assertTrue(np.allclose(np.diff(fp_pointings_c[..., 2], axis=-1), 0))
self.assertTrue(np.degrees(fp_pointings[..., 0]).max() <= 365)
self.assertTrue(np.degrees(fp_pointings[..., 1]).max() <= 365)
self.assertTrue(np.allclose(np.sum(fp_pointings_c**2, axis=-1), 1))
x_fp, n_horns = st.get_full_fp('./ScanningTools/fp_data/fp_theta.txt',
'./ScanningTools/fp_data/fp_phi.txt')
obs_time1, obs_time2 = ((0, 0, 0, 30, 0), (0, 90, 0, 0, 0))
rpm1, rpm2 = (7, 2)
day1, day2 = (None, 10)
sampling_rate1, sampling_rate2 = (50, 1)
obs_t1, time1, JD1 = st.get_timeJD(LCT_start, LCD_start, sampling_rate1, obs_time1, UTC=UTC,
DST=DST, day=day1)
theta1, phi1, psi1 = st.get_engine_rotations(time1, rpm1, zenith_distance,
polarization_angle)
obs_t2, time2, JD2 = st.get_timeJD(LCT_start, LCD_start, sampling_rate2, obs_time2, UTC=UTC,
DST=DST, day=day2)
theta2, phi2, psi2 = st.get_engine_rotations(time2, rpm2, zenith_distance,
polarization_angle)
n1 = 30
n2 = None
fp_rot1 = st.euler_rotation_matrix(phi1, theta1, psi1)
fp_rot2 = st.euler_rotation_matrix(phi2, theta2, psi2)
fp_pointings1 = st.get_fp_rotations(phi1, theta1, psi1, x_fp, n_horns, time1, n=n1,
cartesian=False)
fp_pointings1_c = st.get_fp_rotations(phi1, theta1, psi1, x_fp, n_horns, time1, n=n1,
cartesian=True)
fp_pointings2 = st.get_fp_rotations(phi2, theta2, psi2, x_fp, n_horns, time2, n=n2,
cartesian=False)
fp_pointings2_c = st.get_fp_rotations(phi2, theta2, psi2, x_fp, n_horns, time2, n=n2,
cartesian=True)
i = np.random.randint(0, len(time1))
self.assertTrue(np.allclose(np.dot(fp_rot1[i], x_fp[n1]), fp_pointings1_c[i]))
rot1 = q.get_quaternion_from_euler(phi1[i], theta1[i], psi1[i])
self.assertTrue(np.allclose(rot1.rotate_vector_by_quaternion(x_fp[n1]).get_versor(),
fp_pointings1_c[i, :]))
general_tests(fp_pointings1, fp_pointings1_c)
j = np.random.randint(0, len(time2))
p = np.random.randint(0, n_horns)
self.assertTrue(np.allclose(np.dot(fp_rot2[j], x_fp[p]), fp_pointings2_c[p][j]))
rot2 = q.get_quaternion_from_euler(phi2[j], theta2[j], psi2[j])
self.assertTrue(np.allclose(rot2.rotate_vector_by_quaternion(x_fp[p]).get_versor(),
fp_pointings2_c[p, j, :]))
general_tests(fp_pointings2, fp_pointings2_c)
def test_get_horizon_coordinates(self):
def general_tests(Alt, Az):
self.assertTrue(np.degrees(Alt.max()) <= 90)
self.assertTrue(np.degrees(Alt.min()) >= 0)
self.assertTrue(np.degrees(Az.max()) <= 360)
self.assertTrue(np.degrees(Az.min()) >= 0)
x_fp, n_horns = st.get_full_fp('./ScanningTools/fp_data/fp_theta.txt',
'./ScanningTools/fp_data/fp_phi.txt')
obs_time = (0, 2, 0, 0, 0)
sampling_rate = 1
rpm = 4
day = 1
n1, n2 = (0, 15)
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day)
theta, phi, psi = st.get_engine_rotations(time, rpm, zenith_distance, polarization_angle)
fpp = st.get_fp_rotations(phi, theta, psi, x_fp, n_horns, time, n=None, cartesian=False)
fpp1 = st.get_fp_rotations(phi, theta, psi, x_fp, n_horns, time, n=n1, cartesian=False)
fpp2 = st.get_fp_rotations(phi, theta, psi, x_fp, n_horns, time, n=n2, cartesian=False)
Alt, Az = st.get_horizon_coordinates(fpp)
Alt1, Az1 = st.get_horizon_coordinates(fpp1)
Alt2, Az2 = st.get_horizon_coordinates(fpp2)
general_tests(Alt, Az)
general_tests(Alt1, Az1)
general_tests(Alt2, Az2)
def test_get_icrs_coordinates(self):
x_fp, n_horns = st.get_full_fp('./ScanningTools/fp_data/fp_theta.txt',
'./ScanningTools/fp_data/fp_phi.txt')
obs_time = (0, 2, 0, 0, 0)
sampling_rate = 0.001
rpm = 3
day1, day2 = (2, None)
n1, n2 = (48, None)
obs_t1, time1, JD1 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day1)
obs_t2, time2, JD2 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day2)
theta1, phi1, psi1 = st.get_engine_rotations(time1, rpm, zenith_distance,
polarization_angle)
theta2, phi2, psi2 = st.get_engine_rotations(time2, rpm, zenith_distance,
polarization_angle)
fpp1 = st.get_fp_rotations(phi1, theta1, psi1, x_fp, n_horns, time1, n=n1, cartesian=False)
fpp2 = st.get_fp_rotations(phi2, theta2, psi2, x_fp, n_horns, time2, n=n1, cartesian=False)
fpp3 = st.get_fp_rotations(phi1, theta1, psi1, x_fp, n_horns, time1, n=n2, cartesian=False)
fpp4 = st.get_fp_rotations(phi2, theta2, psi2, x_fp, n_horns, time2, n=n2, cartesian=False)
Alt1, Az1 = st.get_horizon_coordinates(fpp1)
Alt2, Az2 = st.get_horizon_coordinates(fpp2)
Alt3, Az3 = st.get_horizon_coordinates(fpp3)
Alt4, Az4 = st.get_horizon_coordinates(fpp4)
Dec1, Ra1 = st.get_icrs_coordinates(JD1, loc, Alt1, Az1) #1 day 2; n = 48
Dec2, Ra2 = st.get_icrs_coordinates(JD2, loc, Alt2, Az2) #2 day all; n = 48
Dec3, Ra3 = st.get_icrs_coordinates(JD1, loc, Alt3, Az3) #3 day 2; n = all
Dec4, Ra4 = st.get_icrs_coordinates(JD2, loc, Alt4, Az4) #4 day all; n = all
Dec5, Ra5 = st.get_icrs_coordinates(JD1[0], loc, Alt1, Az1) #5 day 2 [t=0]; n = 48
self.assertTrue(np.allclose(Dec1, Dec3[n1]))
self.assertTrue(np.allclose(Ra1, Ra3[n1]))
self.assertTrue(np.allclose(Dec1, Dec2[len(Dec1):]))
self.assertTrue(np.allclose(Ra1, Ra2[len(Ra1):]))
self.assertTrue(np.allclose(Dec1, Dec4[n1, len(Dec1):]))
self.assertTrue(np.allclose(Ra1, Ra4[n1, len(Dec1):]))
self.assertTrue(np.allclose(Dec1[0], Dec5[0]))
self.assertTrue(np.allclose(Ra1[0], Ra5[0]))
self.assertFalse(np.allclose(Dec1[1:], Dec5[1:]))
self.assertFalse(np.allclose(Ra1[1:], Ra5[1:]))
def pointing_test(name, JD, loc):
object = SkyCoord.from_name(name)
object_AltAz = object.transform_to(AltAz(obstime=Time(JD, format='jd'), location=loc))
object_Alt = object_AltAz.alt.rad
object_Az = object_AltAz.az.rad
object_Dec, object_Ra = st.get_icrs_coordinates(JD1, loc, object_Alt, object_Az)
self.assertTrue(np.allclose(object.dec.rad, object_Dec))
self.assertTrue(np.allclose(object.ra.rad, object_Ra))
name1, name2, name3 = ('M33', 'crab', 'NGC67')
pointing_test(name1, JD1, loc)
pointing_test(name2, JD1, loc)
pointing_test(name3, JD1, loc)
def test_get_practical_icrs_coordinates(self):
def general_tests(Dec, Ra, PDec, PRa, accuracy):
self.assertTrue((np.abs(Dec - PDec) <= accuracy).all())
diff_Ra = np.abs(Ra - PRa).ravel()
diff_Ra[diff_Ra > 6] = 2 * np.pi - diff_Ra[diff_Ra > 6]
self.assertTrue((diff_Ra <= accuracy).all())
x_fp, n_horns = st.get_full_fp('./ScanningTools/fp_data/fp_theta.txt',
'./ScanningTools/fp_data/fp_phi.txt')
obs_time = (0, 2, 0, 0, 0)
sampling_rate = 0.01
rpm = 3
day1 = 2
n1, n2 = (48, None)
obs_t1, time1, JD1 = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day1)
theta1, phi1, psi1 = st.get_engine_rotations(time1, rpm, zenith_distance,
polarization_angle)
fpp1 = st.get_fp_rotations(phi1, theta1, psi1, x_fp, n_horns, time1, n=n1, cartesian=False)
fpp3 = st.get_fp_rotations(phi1, theta1, psi1, x_fp, n_horns, time1, n=n2, cartesian=False)
Alt1, Az1 = st.get_horizon_coordinates(fpp1)
Alt3, Az3 = st.get_horizon_coordinates(fpp3)
Dec1, Ra1 = st.get_icrs_coordinates(JD1, loc, Alt1, Az1) #1 day 2; n = 48
Dec3, Ra3 = st.get_icrs_coordinates(JD1, loc, Alt3, Az3) #3 day 2; n = all
PDec1, PRa1 = st.get_practical_icrs_coordinates(JD1, loc, Alt1, Az1) #1 day 2; n = 48
PDec3, PRa3 = st.get_practical_icrs_coordinates(JD1, loc, Alt3, Az3) #3 day 2; n = all
accuracy = st.sex2dec(pointing_accuracy)
general_tests(Dec1, Ra1, PDec1, PRa1, accuracy)
general_tests(Dec3, Ra3, PDec3, PRa3, accuracy)
def test_get_polarization_angles(self):
def general_tests(x_fp_pol_versors, pol_ang_proj, fp_pol_pointings):
self.assertTrue((np.max(pol_ang_proj, axis=-1) <= np.pi).all())
self.assertTrue((np.min(pol_ang_proj, axis=-1) >= -np.pi).all())
zenith_distance, zenith_distance1 = (0, 10)
boresight_angle = 0
obs_time, obs_time1 = ((0, 0, 0, 1, 0), (0, 1, 0, 0, 0))
sampling_rate, sampling_rate1 = (50, 1)
rpm, rpm1 = (1, 5)
day, day1 = (None, 1)
n, n1 = (0, None)
obs_t, time, JD = st.get_timeJD(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC,
DST=DST, day=day)
obs_t1, time1, JD1 = st.get_timeJD(LCT_start, LCD_start, sampling_rate1, obs_time1, UTC=UTC,
DST=DST, day=day1)
theta, phi, psi = st.get_engine_rotations(time, rpm, zenith_distance, boresight_angle)
theta1, phi1, psi1 = st.get_engine_rotations(time1, rpm1, zenith_distance1, boresight_angle)
theta2, phi2, psi2 = st.get_engine_rotations(time1, rpm1, zenith_distance, boresight_angle)
x_fp_pol_angles, x_fp_pol_versors = st.get_full_fp_polarization_angles(
'./ScanningTools/fp_data/fp_psi.txt')
n_horns = len(x_fp_pol_versors)
fp_pol_pointings = st.get_fp_rotations(phi, theta, psi, x_fp_pol_versors, n_horns, time,
n=n, cartesian=True) #rad
fp_pol_pointings1 = st.get_fp_rotations(phi1, theta1, psi1, x_fp_pol_versors, n_horns,
time1, n=n1, cartesian=True) #rad
pol_ang_proj = st.get_polarization_angles(phi, theta, psi, x_fp_pol_versors, n_horns, time,
n=n)
pol_ang_proj1 = st.get_polarization_angles(phi1, theta1, psi1, x_fp_pol_versors, n_horns,
time1, n=n1)
pol_ang_proj2 = st.get_polarization_angles(phi2, theta2, psi2, x_fp_pol_versors, n_horns,
time1, n=n1)
pol_ang_proj_expected = np.concatenate((
np.linspace(0, np.pi, sampling_rate * obs_t / 2 + 1),
np.linspace(-np.pi, 0, sampling_rate * obs_t / 2 + 1)[1:-1]))
self.assertTrue(np.allclose(np.arctan2(x_fp_pol_versors[..., 1], x_fp_pol_versors[..., 0]),
x_fp_pol_angles))
self.assertTrue(np.allclose(pol_ang_proj2[..., 0], x_fp_pol_angles))
self.assertTrue(np.allclose(pol_ang_proj, pol_ang_proj_expected))
general_tests(x_fp_pol_versors, pol_ang_proj, fp_pol_pointings)
general_tests(x_fp_pol_versors, pol_ang_proj1, fp_pol_pointings1)
def test_get_scanning_strategy(self):
def general_tests(packed_values):
(x_fp, x_fp_pol_angles, n_horns, time, JD, theta, phi, psi, fp_pointings_spherical, Alt,
Az, Dec, Ra, polarization_angles) = packed_values
self.assertTrue(np.allclose(x_fp.shape, (49, 3)))
self.assertTrue(np.allclose(x_fp_pol_angles.shape, 49))
self.assertEqual(n_horns, 49)
self.assertTrue(np.allclose(time.shape, JD.shape))
self.assertTrue(np.allclose(theta.shape, JD.shape))
self.assertTrue(np.allclose(theta.shape, phi.shape))
self.assertTrue(np.allclose(psi.shape, phi.shape))
self.assertTrue(np.allclose(psi.shape, fp_pointings_spherical.shape[-2]))
self.assertTrue(np.allclose(Alt.shape, fp_pointings_spherical.shape[:-1]))
self.assertTrue(np.allclose(Alt.shape, Az.shape))
self.assertTrue(np.allclose(Dec.shape, Az.shape))
self.assertTrue(np.allclose(Dec.shape, Ra.shape))
self.assertTrue(np.allclose(polarization_angles.shape, Ra.shape))
self.assertTrue(np.allclose(time.shape, Ra.shape[-1]))
self.assertTrue(np.allclose(fp_pointings_spherical.shape[-2], time.shape))
obs_time = (0, 2, 0, 0, 0)
sampling_rate = 2
zenith_distance, polarization_angle = (10, 0)
rpm = 5
n1, n2 = (15, None)
day1, day2 = (1, None)
packed_values1 = st.get_scanning_strategy(
obs_time, sampling_rate, zenith_distance, polarization_angle, rpm, n=n1, day=day2,
LCT_start=(0, 0, 0), LCD_start=(1, 1, 2018), UTC=0, DST=0, LAT=np.array([28, 16, 24]),
LONG=np.array([-16, 38, 32]), Height=2400,
fp_theta_path='./ScanningTools/fp_data/fp_theta.txt',
fp_phi_path='./ScanningTools/fp_data/fp_phi.txt',
fp_psi_path='./ScanningTools/fp_data/fp_psi.txt')
packed_values2 = st.get_scanning_strategy(
obs_time, sampling_rate, zenith_distance, polarization_angle, rpm, n=n2, day=day1,
LCT_start=(0, 0, 0), LCD_start=(1, 1, 2018), UTC=0, DST=0, LAT=np.array([28, 16, 24]),
LONG=np.array([-16, 38, 32]), Height=2400,
fp_theta_path='./ScanningTools/fp_data/fp_theta.txt',
fp_phi_path='./ScanningTools/fp_data/fp_phi.txt',
fp_psi_path='./ScanningTools/fp_data/fp_psi.txt')
general_tests(packed_values1)
general_tests(packed_values2)
if __name__ == '__main__':
unittest.main()
|
[
"ScanningTools.ScanningTools.dec2sex",
"numpy.radians",
"ScanningTools.ScanningTools.spin_generator",
"numpy.array",
"ScanningTools.ScanningTools.get_full_fp_polarization_angles",
"numpy.arctan2",
"unittest.main",
"ScanningTools.ScanningTools.get_nside_eff",
"ScanningTools.ScanningTools.hours2degrees",
"ScanningTools.ScanningTools.euler_rotation_matrix",
"numpy.repeat",
"ScanningTools.ScanningTools.period2sec",
"numpy.diff",
"numpy.max",
"ScanningTools.ScanningTools.get_fp_rotations",
"numpy.dot",
"numpy.linspace",
"ScanningTools.ScanningTools.get_polarization_angles",
"numpy.min",
"ScanningTools.ScanningTools.sex2dec",
"numpy.degrees",
"numpy.abs",
"numpy.allclose",
"ScanningTools.ScanningTools.get_timeJD",
"astropy.coordinates.SkyCoord.from_name",
"ScanningTools.ScanningTools.get_icrs_coordinates",
"ScanningTools.ScanningTools.get_horizon_coordinates",
"ScanningTools.ScanningTools.get_full_fp",
"ScanningTools.Quaternions.Quaternion.get_quaternion_from_euler",
"ScanningTools.ScanningTools.LocalCivilTime2LocalSiderealTime",
"astropy.time.Time",
"numpy.random.randint",
"ScanningTools.ScanningTools.get_location",
"ScanningTools.ScanningTools.get_practical_icrs_coordinates",
"numpy.sum",
"ScanningTools.ScanningTools.get_engine_rotations",
"ScanningTools.ScanningTools.LocalCivilTime2JulianDay",
"ScanningTools.ScanningTools.degrees2hours"
] |
[((298, 364), 'numpy.array', 'np.array', (['[[-10, 45, 59], [30, 35, 15], [-180, 25, 20], [3, 4, 5]]'], {}), '([[-10, 45, 59], [30, 35, 15], [-180, 25, 20], [3, 4, 5]])\n', (306, 364), True, 'import numpy as np\n'), ((425, 462), 'numpy.array', 'np.array', (['[[23, 59, 16], [7, 56, 59]]'], {}), '([[23, 59, 16], [7, 56, 59]])\n', (433, 462), True, 'import numpy as np\n'), ((486, 541), 'numpy.array', 'np.array', (['[1.546585, -0.56, 0.3333333333333333, -1.001]'], {}), '([1.546585, -0.56, 0.3333333333333333, -1.001])\n', (494, 541), True, 'import numpy as np\n'), ((601, 621), 'numpy.array', 'np.array', (['[0, 0, 25]'], {}), '([0, 0, 25])\n', (609, 621), True, 'import numpy as np\n'), ((676, 698), 'numpy.array', 'np.array', (['[28, 16, 24]'], {}), '([28, 16, 24])\n', (684, 698), True, 'import numpy as np\n'), ((711, 734), 'numpy.array', 'np.array', (['[-16, 38, 32]'], {}), '([-16, 38, 32])\n', (719, 734), True, 'import numpy as np\n'), ((764, 798), 'ScanningTools.ScanningTools.get_location', 'st.get_location', (['LAT', 'LONG', 'Height'], {}), '(LAT, LONG, Height)\n', (779, 798), True, 'from ScanningTools import ScanningTools as st\n'), ((33054, 33069), 'unittest.main', 'unittest.main', ([], {}), '()\n', (33067, 33069), False, 'import unittest\n'), ((1136, 1204), 'ScanningTools.ScanningTools.period2sec', 'st.period2sec', ([], {'years': '(1)', 'days': '(0)', 'hours': '(0)', 'min': '(0)', 'sec': '(0)', 'sidereal': '(True)'}), '(years=1, days=0, hours=0, min=0, sec=0, sidereal=True)\n', (1149, 1204), True, 'from ScanningTools import ScanningTools as st\n'), ((1230, 1283), 'ScanningTools.ScanningTools.period2sec', 'st.period2sec', ([], {'years': '(1)', 'days': '(0)', 'hours': '(0)', 'min': '(0)', 'sec': '(0)'}), '(years=1, days=0, hours=0, min=0, sec=0)\n', (1243, 1283), True, 'from ScanningTools import ScanningTools as st\n'), ((1311, 1379), 'ScanningTools.ScanningTools.period2sec', 'st.period2sec', ([], {'years': '(0)', 'days': '(1)', 'hours': '(0)', 'min': '(0)', 'sec': '(0)', 'sidereal': '(True)'}), '(years=0, days=1, hours=0, min=0, sec=0, sidereal=True)\n', (1324, 1379), True, 'from ScanningTools import ScanningTools as st\n'), ((1404, 1457), 'ScanningTools.ScanningTools.period2sec', 'st.period2sec', ([], {'years': '(0)', 'days': '(1)', 'hours': '(0)', 'min': '(0)', 'sec': '(0)'}), '(years=0, days=1, hours=0, min=0, sec=0)\n', (1417, 1457), True, 'from ScanningTools import ScanningTools as st\n'), ((1477, 1545), 'ScanningTools.ScanningTools.period2sec', 'st.period2sec', ([], {'years': '(1)', 'days': '(1)', 'hours': '(0)', 'min': '(0)', 'sec': '(0)', 'sidereal': '(True)'}), '(years=1, days=1, hours=0, min=0, sec=0, sidereal=True)\n', (1490, 1545), True, 'from ScanningTools import ScanningTools as st\n'), ((1565, 1634), 'ScanningTools.ScanningTools.period2sec', 'st.period2sec', ([], {'years': '(5)', 'days': '(30)', 'hours': '(0)', 'min': '(0)', 'sec': '(0)', 'sidereal': '(True)'}), '(years=5, days=30, hours=0, min=0, sec=0, sidereal=True)\n', (1578, 1634), True, 'from ScanningTools import ScanningTools as st\n'), ((1654, 1723), 'ScanningTools.ScanningTools.period2sec', 'st.period2sec', ([], {'years': '(2)', 'days': '(17)', 'hours': '(0)', 'min': '(0)', 'sec': '(0)', 'sidereal': '(True)'}), '(years=2, days=17, hours=0, min=0, sec=0, sidereal=True)\n', (1667, 1723), True, 'from ScanningTools import ScanningTools as st\n'), ((1743, 1815), 'ScanningTools.ScanningTools.period2sec', 'st.period2sec', ([], {'years': '(10)', 'days': '(21)', 'hours': '(15)', 'min': '(3)', 'sec': '(25)', 'sidereal': 
'(True)'}), '(years=10, days=21, hours=15, min=3, sec=25, sidereal=True)\n', (1756, 1815), True, 'from ScanningTools import ScanningTools as st\n'), ((2255, 2273), 'ScanningTools.ScanningTools.sex2dec', 'st.sex2dec', (['angles'], {}), '(angles)\n', (2265, 2273), True, 'from ScanningTools import ScanningTools as st\n'), ((2289, 2324), 'ScanningTools.ScanningTools.sex2dec', 'st.sex2dec', (['angles[0]'], {'radians': '(True)'}), '(angles[0], radians=True)\n', (2299, 2324), True, 'from ScanningTools import ScanningTools as st\n'), ((2588, 2601), 'ScanningTools.ScanningTools.dec2sex', 'st.dec2sex', (['t'], {}), '(t)\n', (2598, 2601), True, 'from ScanningTools import ScanningTools as st\n'), ((2616, 2632), 'ScanningTools.ScanningTools.dec2sex', 'st.dec2sex', (['t[0]'], {}), '(t[0])\n', (2626, 2632), True, 'from ScanningTools import ScanningTools as st\n'), ((2930, 2954), 'ScanningTools.ScanningTools.degrees2hours', 'st.degrees2hours', (['angles'], {}), '(angles)\n', (2946, 2954), True, 'from ScanningTools import ScanningTools as st\n'), ((2970, 3011), 'ScanningTools.ScanningTools.degrees2hours', 'st.degrees2hours', (['angles[2]'], {'decimal': '(True)'}), '(angles[2], decimal=True)\n', (2986, 3011), True, 'from ScanningTools import ScanningTools as st\n'), ((3231, 3257), 'ScanningTools.ScanningTools.hours2degrees', 'st.hours2degrees', (['hours[1]'], {}), '(hours[1])\n', (3247, 3257), True, 'from ScanningTools import ScanningTools as st\n'), ((3273, 3310), 'ScanningTools.ScanningTools.hours2degrees', 'st.hours2degrees', (['hours'], {'decimal': '(True)'}), '(hours, decimal=True)\n', (3289, 3310), True, 'from ScanningTools import ScanningTools as st\n'), ((3632, 3699), 'ScanningTools.ScanningTools.LocalCivilTime2JulianDay', 'st.LocalCivilTime2JulianDay', (['(3, 37, 0)', '(1, 7, 2013)'], {'UTC': '(4)', 'DST': '(1)'}), '((3, 37, 0), (1, 7, 2013), UTC=4, DST=1)\n', (3659, 3699), True, 'from ScanningTools import ScanningTools as st\n'), ((3722, 3790), 'ScanningTools.ScanningTools.LocalCivilTime2JulianDay', 'st.LocalCivilTime2JulianDay', (['(18, 0, 0)', '(19, 6, 2009)'], {'UTC': '(0)', 'DST': '(0)'}), '((18, 0, 0), (19, 6, 2009), UTC=0, DST=0)\n', (3749, 3790), True, 'from ScanningTools import ScanningTools as st\n'), ((4394, 4409), 'ScanningTools.ScanningTools.dec2sex', 'st.dec2sex', (['(0.1)'], {}), '(0.1)\n', (4404, 4409), True, 'from ScanningTools import ScanningTools as st\n'), ((4434, 4520), 'ScanningTools.ScanningTools.LocalCivilTime2LocalSiderealTime', 'st.LocalCivilTime2LocalSiderealTime', (['(18, 0, 0)', '(19, 6, 2009)', 'LONG'], {'UTC': '(0)', 'DST': '(0)'}), '((18, 0, 0), (19, 6, 2009), LONG, UTC=0,\n DST=0)\n', (4469, 4520), True, 'from ScanningTools import ScanningTools as st\n'), ((4785, 4804), 'numpy.array', 'np.array', (['[0, 5, 0]'], {}), '([0, 5, 0])\n', (4793, 4804), True, 'import numpy as np\n'), ((4840, 4860), 'numpy.array', 'np.array', (['[0, 21, 0]'], {}), '([0, 21, 0])\n', (4848, 4860), True, 'import numpy as np\n'), ((4896, 4916), 'numpy.array', 'np.array', (['[0, 32, 0]'], {}), '([0, 32, 0])\n', (4904, 4916), True, 'import numpy as np\n'), ((5434, 5530), 'ScanningTools.ScanningTools.get_full_fp', 'st.get_full_fp', (['"""./ScanningTools/fp_data/fp_theta.txt"""', '"""./ScanningTools/fp_data/fp_phi.txt"""'], {}), "('./ScanningTools/fp_data/fp_theta.txt',\n './ScanningTools/fp_data/fp_phi.txt')\n", (5448, 5530), True, 'from ScanningTools import ScanningTools as st\n'), ((6770, 6842), 'ScanningTools.ScanningTools.get_full_fp_polarization_angles', 'st.get_full_fp_polarization_angles', 
(['"""./ScanningTools/fp_data/fp_psi.txt"""'], {}), "('./ScanningTools/fp_data/fp_psi.txt')\n", (6804, 6842), True, 'from ScanningTools import ScanningTools as st\n'), ((10124, 10216), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day)\n', (10137, 10216), True, 'from ScanningTools import ScanningTools as st\n'), ((10300, 10338), 'astropy.time.Time', 'Time', (['JD[0]'], {'format': '"""jd"""', 'location': 'loc'}), "(JD[0], format='jd', location=loc)\n", (10304, 10338), False, 'from astropy.time import Time\n'), ((10352, 10403), 'astropy.time.Time', 'Time', (['(JD[0] + JD_step[0])'], {'format': '"""jd"""', 'location': 'loc'}), "(JD[0] + JD_step[0], format='jd', location=loc)\n", (10356, 10403), False, 'from astropy.time import Time\n'), ((10535, 10627), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day)\n', (10548, 10627), True, 'from ScanningTools import ScanningTools as st\n'), ((10711, 10749), 'astropy.time.Time', 'Time', (['JD[0]'], {'format': '"""jd"""', 'location': 'loc'}), "(JD[0], format='jd', location=loc)\n", (10715, 10749), False, 'from astropy.time import Time\n'), ((10763, 10814), 'astropy.time.Time', 'Time', (['(JD[0] + JD_step[0])'], {'format': '"""jd"""', 'location': 'loc'}), "(JD[0] + JD_step[0], format='jd', location=loc)\n", (10767, 10814), False, 'from astropy.time import Time\n'), ((11012, 11104), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day)\n', (11025, 11104), True, 'from ScanningTools import ScanningTools as st\n'), ((11188, 11226), 'astropy.time.Time', 'Time', (['JD[0]'], {'format': '"""jd"""', 'location': 'loc'}), "(JD[0], format='jd', location=loc)\n", (11192, 11226), False, 'from astropy.time import Time\n'), ((11240, 11291), 'astropy.time.Time', 'Time', (['(JD[0] + JD_step[0])'], {'format': '"""jd"""', 'location': 'loc'}), "(JD[0] + JD_step[0], format='jd', location=loc)\n", (11244, 11291), False, 'from astropy.time import Time\n'), ((11589, 11682), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day0'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day0)\n', (11602, 11682), True, 'from ScanningTools import ScanningTools as st\n'), ((11773, 11812), 'astropy.time.Time', 'Time', (['JD0[0]'], {'format': '"""jd"""', 'location': 'loc'}), "(JD0[0], format='jd', location=loc)\n", (11777, 11812), False, 'from astropy.time import Time\n'), ((11827, 11880), 'astropy.time.Time', 'Time', (['(JD0[0] + JD_step0[0])'], {'format': '"""jd"""', 'location': 'loc'}), "(JD0[0] + JD_step0[0], format='jd', location=loc)\n", (11831, 11880), False, 'from astropy.time import Time\n'), ((12049, 12142), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day1'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day1)\n', (12062, 12142), True, 'from ScanningTools import 
ScanningTools as st\n'), ((12230, 12269), 'astropy.time.Time', 'Time', (['JD1[0]'], {'format': '"""jd"""', 'location': 'loc'}), "(JD1[0], format='jd', location=loc)\n", (12234, 12269), False, 'from astropy.time import Time\n'), ((12284, 12337), 'astropy.time.Time', 'Time', (['(JD1[0] + JD_step1[0])'], {'format': '"""jd"""', 'location': 'loc'}), "(JD1[0] + JD_step1[0], format='jd', location=loc)\n", (12288, 12337), False, 'from astropy.time import Time\n'), ((12506, 12599), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day2'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day2)\n', (12519, 12599), True, 'from ScanningTools import ScanningTools as st\n'), ((12690, 12729), 'astropy.time.Time', 'Time', (['JD2[0]'], {'format': '"""jd"""', 'location': 'loc'}), "(JD2[0], format='jd', location=loc)\n", (12694, 12729), False, 'from astropy.time import Time\n'), ((12744, 12797), 'astropy.time.Time', 'Time', (['(JD2[0] + JD_step2[0])'], {'format': '"""jd"""', 'location': 'loc'}), "(JD2[0] + JD_step2[0], format='jd', location=loc)\n", (12748, 12797), False, 'from astropy.time import Time\n'), ((12966, 13059), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day3'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day3)\n', (12979, 13059), True, 'from ScanningTools import ScanningTools as st\n'), ((13150, 13189), 'astropy.time.Time', 'Time', (['JD3[0]'], {'format': '"""jd"""', 'location': 'loc'}), "(JD3[0], format='jd', location=loc)\n", (13154, 13189), False, 'from astropy.time import Time\n'), ((13204, 13257), 'astropy.time.Time', 'Time', (['(JD3[0] + JD_step3[0])'], {'format': '"""jd"""', 'location': 'loc'}), "(JD3[0] + JD_step3[0], format='jd', location=loc)\n", (13208, 13257), False, 'from astropy.time import Time\n'), ((13458, 13552), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start4', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day4'}), '(LCT_start4, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day4)\n', (13471, 13552), True, 'from ScanningTools import ScanningTools as st\n'), ((13643, 13682), 'astropy.time.Time', 'Time', (['JD4[0]'], {'format': '"""jd"""', 'location': 'loc'}), "(JD4[0], format='jd', location=loc)\n", (13647, 13682), False, 'from astropy.time import Time\n'), ((13697, 13750), 'astropy.time.Time', 'Time', (['(JD4[0] + JD_step4[0])'], {'format': '"""jd"""', 'location': 'loc'}), "(JD4[0] + JD_step4[0], format='jd', location=loc)\n", (13701, 13750), False, 'from astropy.time import Time\n'), ((14766, 14861), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate1', 'obs_time1'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day1'}), '(LCT_start, LCD_start, sampling_rate1, obs_time1, UTC=UTC, DST\n =DST, day=day1)\n', (14779, 14861), True, 'from ScanningTools import ScanningTools as st\n'), ((14915, 14945), 'ScanningTools.ScanningTools.spin_generator', 'st.spin_generator', (['time1', 'rpm1'], {}), '(time1, rpm1)\n', (14932, 14945), True, 'from ScanningTools import ScanningTools as st\n'), ((15055, 15150), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate2', 'obs_time2'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day2'}), '(LCT_start, LCD_start, sampling_rate2, 
obs_time2, UTC=UTC, DST\n =DST, day=day2)\n', (15068, 15150), True, 'from ScanningTools import ScanningTools as st\n'), ((15204, 15234), 'ScanningTools.ScanningTools.spin_generator', 'st.spin_generator', (['time2', 'rpm2'], {}), '(time2, rpm2)\n', (15221, 15234), True, 'from ScanningTools import ScanningTools as st\n'), ((15344, 15439), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate3', 'obs_time3'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day3'}), '(LCT_start, LCD_start, sampling_rate3, obs_time3, UTC=UTC, DST\n =DST, day=day3)\n', (15357, 15439), True, 'from ScanningTools import ScanningTools as st\n'), ((15493, 15523), 'ScanningTools.ScanningTools.spin_generator', 'st.spin_generator', (['time3', 'rpm3'], {}), '(time3, rpm3)\n', (15510, 15523), True, 'from ScanningTools import ScanningTools as st\n'), ((15686, 15737), 'numpy.radians', 'np.radians', (['([10, 10, 10], [30, 30, 30], [0, 0, 0])'], {}), '(([10, 10, 10], [30, 30, 30], [0, 0, 0]))\n', (15696, 15737), True, 'import numpy as np\n'), ((15751, 15795), 'ScanningTools.ScanningTools.euler_rotation_matrix', 'st.euler_rotation_matrix', (['phi1', 'theta1', 'psi1'], {}), '(phi1, theta1, psi1)\n', (15775, 15795), True, 'from ScanningTools import ScanningTools as st\n'), ((15809, 16003), 'numpy.array', 'np.array', (['[[0.984807753012208, -0.1503837331804353, 0.08682408883346515], [\n 0.17364817766693033, 0.8528685319524433, -0.49240387650610395], [0, \n 0.49999999999999994, 0.8660254037844387]]'], {}), '([[0.984807753012208, -0.1503837331804353, 0.08682408883346515], [\n 0.17364817766693033, 0.8528685319524433, -0.49240387650610395], [0, \n 0.49999999999999994, 0.8660254037844387]])\n', (15817, 16003), True, 'import numpy as np\n'), ((16074, 16128), 'numpy.radians', 'np.radians', (['([10, 10, 10], [30, 30, 30], [45, 45, 45])'], {}), '(([10, 10, 10], [30, 30, 30], [45, 45, 45]))\n', (16084, 16128), True, 'import numpy as np\n'), ((16142, 16186), 'ScanningTools.ScanningTools.euler_rotation_matrix', 'st.euler_rotation_matrix', (['phi2', 'theta2', 'psi2'], {}), '(phi2, theta2, psi2)\n', (16166, 16186), True, 'from ScanningTools import ScanningTools as st\n'), ((16200, 16411), 'numpy.array', 'np.array', (['[[0.5900268828079848, -0.8027015978320531, 0.08682408883346515], [\n 0.7258569263731611, 0.4802813184352156, -0.49240387650610395], [\n 0.3535533905932737, 0.35355339059327373, 0.8660254037844387]]'], {}), '([[0.5900268828079848, -0.8027015978320531, 0.08682408883346515], [\n 0.7258569263731611, 0.4802813184352156, -0.49240387650610395], [\n 0.3535533905932737, 0.35355339059327373, 0.8660254037844387]])\n', (16208, 16411), True, 'import numpy as np\n'), ((16483, 16536), 'numpy.radians', 'np.radians', (['([10, 10, 10], [30, 30, 30], [45, 0, 45])'], {}), '(([10, 10, 10], [30, 30, 30], [45, 0, 45]))\n', (16493, 16536), True, 'import numpy as np\n'), ((16550, 16594), 'ScanningTools.ScanningTools.euler_rotation_matrix', 'st.euler_rotation_matrix', (['phi3', 'theta3', 'psi3'], {}), '(phi3, theta3, psi3)\n', (16574, 16594), True, 'from ScanningTools import ScanningTools as st\n'), ((16608, 17219), 'numpy.array', 'np.array', (['[[[0.5900268828079848, -0.8027015978320531, 0.08682408883346515], [\n 0.7258569263731611, 0.4802813184352156, -0.49240387650610395], [\n 0.3535533905932737, 0.35355339059327373, 0.8660254037844387]], [[\n 0.984807753012208, -0.1503837331804353, 0.08682408883346515], [\n 0.17364817766693033, 0.8528685319524433, -0.49240387650610395], [0, \n 0.49999999999999994, 
0.8660254037844387]], [[0.5900268828079848, -\n 0.8027015978320531, 0.08682408883346515], [0.7258569263731611, \n 0.4802813184352156, -0.49240387650610395], [0.3535533905932737, \n 0.35355339059327373, 0.8660254037844387]]]'], {}), '([[[0.5900268828079848, -0.8027015978320531, 0.08682408883346515],\n [0.7258569263731611, 0.4802813184352156, -0.49240387650610395], [\n 0.3535533905932737, 0.35355339059327373, 0.8660254037844387]], [[\n 0.984807753012208, -0.1503837331804353, 0.08682408883346515], [\n 0.17364817766693033, 0.8528685319524433, -0.49240387650610395], [0, \n 0.49999999999999994, 0.8660254037844387]], [[0.5900268828079848, -\n 0.8027015978320531, 0.08682408883346515], [0.7258569263731611, \n 0.4802813184352156, -0.49240387650610395], [0.3535533905932737, \n 0.35355339059327373, 0.8660254037844387]]])\n', (16616, 17219), True, 'import numpy as np\n'), ((17702, 17779), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', '(50)', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'None'}), '(LCT_start, LCD_start, 50, obs_time, UTC=UTC, DST=DST, day=None)\n', (17715, 17779), True, 'from ScanningTools import ScanningTools as st\n'), ((17862, 17933), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time', 'rpm', 'zenith_distance', 'polarization_angle'], {}), '(time, rpm, zenith_distance, polarization_angle)\n', (17885, 17933), True, 'from ScanningTools import ScanningTools as st\n'), ((18640, 18736), 'ScanningTools.ScanningTools.get_full_fp', 'st.get_full_fp', (['"""./ScanningTools/fp_data/fp_theta.txt"""', '"""./ScanningTools/fp_data/fp_phi.txt"""'], {}), "('./ScanningTools/fp_data/fp_theta.txt',\n './ScanningTools/fp_data/fp_phi.txt')\n", (18654, 18736), True, 'from ScanningTools import ScanningTools as st\n'), ((18978, 19073), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate1', 'obs_time1'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day1'}), '(LCT_start, LCD_start, sampling_rate1, obs_time1, UTC=UTC, DST\n =DST, day=day1)\n', (18991, 19073), True, 'from ScanningTools import ScanningTools as st\n'), ((19141, 19214), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time1', 'rpm1', 'zenith_distance', 'polarization_angle'], {}), '(time1, rpm1, zenith_distance, polarization_angle)\n', (19164, 19214), True, 'from ScanningTools import ScanningTools as st\n'), ((19297, 19392), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate2', 'obs_time2'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day2'}), '(LCT_start, LCD_start, sampling_rate2, obs_time2, UTC=UTC, DST\n =DST, day=day2)\n', (19310, 19392), True, 'from ScanningTools import ScanningTools as st\n'), ((19460, 19533), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time2', 'rpm2', 'zenith_distance', 'polarization_angle'], {}), '(time2, rpm2, zenith_distance, polarization_angle)\n', (19483, 19533), True, 'from ScanningTools import ScanningTools as st\n'), ((19639, 19683), 'ScanningTools.ScanningTools.euler_rotation_matrix', 'st.euler_rotation_matrix', (['phi1', 'theta1', 'psi1'], {}), '(phi1, theta1, psi1)\n', (19663, 19683), True, 'from ScanningTools import ScanningTools as st\n'), ((19702, 19746), 'ScanningTools.ScanningTools.euler_rotation_matrix', 'st.euler_rotation_matrix', (['phi2', 'theta2', 'psi2'], {}), '(phi2, theta2, psi2)\n', (19726, 19746), True, 'from ScanningTools import ScanningTools as st\n'), ((19771, 
19859), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi1', 'theta1', 'psi1', 'x_fp', 'n_horns', 'time1'], {'n': 'n1', 'cartesian': '(False)'}), '(phi1, theta1, psi1, x_fp, n_horns, time1, n=n1,\n cartesian=False)\n', (19790, 19859), True, 'from ScanningTools import ScanningTools as st\n'), ((19926, 20013), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi1', 'theta1', 'psi1', 'x_fp', 'n_horns', 'time1'], {'n': 'n1', 'cartesian': '(True)'}), '(phi1, theta1, psi1, x_fp, n_horns, time1, n=n1,\n cartesian=True)\n', (19945, 20013), True, 'from ScanningTools import ScanningTools as st\n'), ((20080, 20168), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi2', 'theta2', 'psi2', 'x_fp', 'n_horns', 'time2'], {'n': 'n2', 'cartesian': '(False)'}), '(phi2, theta2, psi2, x_fp, n_horns, time2, n=n2,\n cartesian=False)\n', (20099, 20168), True, 'from ScanningTools import ScanningTools as st\n'), ((20235, 20322), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi2', 'theta2', 'psi2', 'x_fp', 'n_horns', 'time2'], {'n': 'n2', 'cartesian': '(True)'}), '(phi2, theta2, psi2, x_fp, n_horns, time2, n=n2,\n cartesian=True)\n', (20254, 20322), True, 'from ScanningTools import ScanningTools as st\n'), ((20510, 20566), 'ScanningTools.Quaternions.Quaternion.get_quaternion_from_euler', 'q.get_quaternion_from_euler', (['phi1[i]', 'theta1[i]', 'psi1[i]'], {}), '(phi1[i], theta1[i], psi1[i])\n', (20537, 20566), True, 'from ScanningTools.Quaternions import Quaternion as q\n'), ((20831, 20860), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_horns'], {}), '(0, n_horns)\n', (20848, 20860), True, 'import numpy as np\n'), ((20965, 21021), 'ScanningTools.Quaternions.Quaternion.get_quaternion_from_euler', 'q.get_quaternion_from_euler', (['phi2[j]', 'theta2[j]', 'psi2[j]'], {}), '(phi2[j], theta2[j], psi2[j])\n', (20992, 21021), True, 'from ScanningTools.Quaternions import Quaternion as q\n'), ((21572, 21668), 'ScanningTools.ScanningTools.get_full_fp', 'st.get_full_fp', (['"""./ScanningTools/fp_data/fp_theta.txt"""', '"""./ScanningTools/fp_data/fp_phi.txt"""'], {}), "('./ScanningTools/fp_data/fp_theta.txt',\n './ScanningTools/fp_data/fp_phi.txt')\n", (21586, 21668), True, 'from ScanningTools import ScanningTools as st\n'), ((21848, 21940), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day)\n', (21861, 21940), True, 'from ScanningTools import ScanningTools as st\n'), ((22002, 22073), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time', 'rpm', 'zenith_distance', 'polarization_angle'], {}), '(time, rpm, zenith_distance, polarization_angle)\n', (22025, 22073), True, 'from ScanningTools import ScanningTools as st\n'), ((22088, 22175), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi', 'theta', 'psi', 'x_fp', 'n_horns', 'time'], {'n': 'None', 'cartesian': '(False)'}), '(phi, theta, psi, x_fp, n_horns, time, n=None, cartesian\n =False)\n', (22107, 22175), True, 'from ScanningTools import ScanningTools as st\n'), ((22186, 22271), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi', 'theta', 'psi', 'x_fp', 'n_horns', 'time'], {'n': 'n1', 'cartesian': '(False)'}), '(phi, theta, psi, x_fp, n_horns, time, n=n1, cartesian=False\n )\n', (22205, 
22271), True, 'from ScanningTools import ScanningTools as st\n'), ((22282, 22367), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi', 'theta', 'psi', 'x_fp', 'n_horns', 'time'], {'n': 'n2', 'cartesian': '(False)'}), '(phi, theta, psi, x_fp, n_horns, time, n=n2, cartesian=False\n )\n', (22301, 22367), True, 'from ScanningTools import ScanningTools as st\n'), ((22381, 22412), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp'], {}), '(fpp)\n', (22407, 22412), True, 'from ScanningTools import ScanningTools as st\n'), ((22433, 22465), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp1'], {}), '(fpp1)\n', (22459, 22465), True, 'from ScanningTools import ScanningTools as st\n'), ((22486, 22518), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp2'], {}), '(fpp2)\n', (22512, 22518), True, 'from ScanningTools import ScanningTools as st\n'), ((22595, 22691), 'ScanningTools.ScanningTools.get_full_fp', 'st.get_full_fp', (['"""./ScanningTools/fp_data/fp_theta.txt"""', '"""./ScanningTools/fp_data/fp_phi.txt"""'], {}), "('./ScanningTools/fp_data/fp_theta.txt',\n './ScanningTools/fp_data/fp_phi.txt')\n", (22609, 22691), True, 'from ScanningTools import ScanningTools as st\n'), ((22896, 22989), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day1'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day1)\n', (22909, 22989), True, 'from ScanningTools import ScanningTools as st\n'), ((23057, 23150), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day2'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day2)\n', (23070, 23150), True, 'from ScanningTools import ScanningTools as st\n'), ((23218, 23290), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time1', 'rpm', 'zenith_distance', 'polarization_angle'], {}), '(time1, rpm, zenith_distance, polarization_angle)\n', (23241, 23290), True, 'from ScanningTools import ScanningTools as st\n'), ((23373, 23445), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time2', 'rpm', 'zenith_distance', 'polarization_angle'], {}), '(time2, rpm, zenith_distance, polarization_angle)\n', (23396, 23445), True, 'from ScanningTools import ScanningTools as st\n'), ((23516, 23604), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi1', 'theta1', 'psi1', 'x_fp', 'n_horns', 'time1'], {'n': 'n1', 'cartesian': '(False)'}), '(phi1, theta1, psi1, x_fp, n_horns, time1, n=n1,\n cartesian=False)\n', (23535, 23604), True, 'from ScanningTools import ScanningTools as st\n'), ((23616, 23704), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi2', 'theta2', 'psi2', 'x_fp', 'n_horns', 'time2'], {'n': 'n1', 'cartesian': '(False)'}), '(phi2, theta2, psi2, x_fp, n_horns, time2, n=n1,\n cartesian=False)\n', (23635, 23704), True, 'from ScanningTools import ScanningTools as st\n'), ((23716, 23804), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi1', 'theta1', 'psi1', 'x_fp', 'n_horns', 'time1'], {'n': 'n2', 'cartesian': '(False)'}), '(phi1, theta1, psi1, x_fp, n_horns, time1, n=n2,\n cartesian=False)\n', (23735, 23804), True, 'from ScanningTools 
import ScanningTools as st\n'), ((23816, 23904), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi2', 'theta2', 'psi2', 'x_fp', 'n_horns', 'time2'], {'n': 'n2', 'cartesian': '(False)'}), '(phi2, theta2, psi2, x_fp, n_horns, time2, n=n2,\n cartesian=False)\n', (23835, 23904), True, 'from ScanningTools import ScanningTools as st\n'), ((23921, 23953), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp1'], {}), '(fpp1)\n', (23947, 23953), True, 'from ScanningTools import ScanningTools as st\n'), ((23974, 24006), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp2'], {}), '(fpp2)\n', (24000, 24006), True, 'from ScanningTools import ScanningTools as st\n'), ((24027, 24059), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp3'], {}), '(fpp3)\n', (24053, 24059), True, 'from ScanningTools import ScanningTools as st\n'), ((24080, 24112), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp4'], {}), '(fpp4)\n', (24106, 24112), True, 'from ScanningTools import ScanningTools as st\n'), ((24133, 24177), 'ScanningTools.ScanningTools.get_icrs_coordinates', 'st.get_icrs_coordinates', (['JD1', 'loc', 'Alt1', 'Az1'], {}), '(JD1, loc, Alt1, Az1)\n', (24156, 24177), True, 'from ScanningTools import ScanningTools as st\n'), ((24221, 24265), 'ScanningTools.ScanningTools.get_icrs_coordinates', 'st.get_icrs_coordinates', (['JD2', 'loc', 'Alt2', 'Az2'], {}), '(JD2, loc, Alt2, Az2)\n', (24244, 24265), True, 'from ScanningTools import ScanningTools as st\n'), ((24310, 24354), 'ScanningTools.ScanningTools.get_icrs_coordinates', 'st.get_icrs_coordinates', (['JD1', 'loc', 'Alt3', 'Az3'], {}), '(JD1, loc, Alt3, Az3)\n', (24333, 24354), True, 'from ScanningTools import ScanningTools as st\n'), ((24398, 24442), 'ScanningTools.ScanningTools.get_icrs_coordinates', 'st.get_icrs_coordinates', (['JD2', 'loc', 'Alt4', 'Az4'], {}), '(JD2, loc, Alt4, Az4)\n', (24421, 24442), True, 'from ScanningTools import ScanningTools as st\n'), ((24489, 24536), 'ScanningTools.ScanningTools.get_icrs_coordinates', 'st.get_icrs_coordinates', (['JD1[0]', 'loc', 'Alt1', 'Az1'], {}), '(JD1[0], loc, Alt1, Az1)\n', (24512, 24536), True, 'from ScanningTools import ScanningTools as st\n'), ((26219, 26315), 'ScanningTools.ScanningTools.get_full_fp', 'st.get_full_fp', (['"""./ScanningTools/fp_data/fp_theta.txt"""', '"""./ScanningTools/fp_data/fp_phi.txt"""'], {}), "('./ScanningTools/fp_data/fp_theta.txt',\n './ScanningTools/fp_data/fp_phi.txt')\n", (26233, 26315), True, 'from ScanningTools import ScanningTools as st\n'), ((26505, 26598), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day1'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day1)\n', (26518, 26598), True, 'from ScanningTools import ScanningTools as st\n'), ((26666, 26738), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time1', 'rpm', 'zenith_distance', 'polarization_angle'], {}), '(time1, rpm, zenith_distance, polarization_angle)\n', (26689, 26738), True, 'from ScanningTools import ScanningTools as st\n'), ((26807, 26895), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi1', 'theta1', 'psi1', 'x_fp', 'n_horns', 'time1'], {'n': 'n1', 'cartesian': '(False)'}), '(phi1, theta1, psi1, x_fp, n_horns, time1, n=n1,\n 
cartesian=False)\n', (26826, 26895), True, 'from ScanningTools import ScanningTools as st\n'), ((26907, 26995), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi1', 'theta1', 'psi1', 'x_fp', 'n_horns', 'time1'], {'n': 'n2', 'cartesian': '(False)'}), '(phi1, theta1, psi1, x_fp, n_horns, time1, n=n2,\n cartesian=False)\n', (26926, 26995), True, 'from ScanningTools import ScanningTools as st\n'), ((27012, 27044), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp1'], {}), '(fpp1)\n', (27038, 27044), True, 'from ScanningTools import ScanningTools as st\n'), ((27065, 27097), 'ScanningTools.ScanningTools.get_horizon_coordinates', 'st.get_horizon_coordinates', (['fpp3'], {}), '(fpp3)\n', (27091, 27097), True, 'from ScanningTools import ScanningTools as st\n'), ((27118, 27162), 'ScanningTools.ScanningTools.get_icrs_coordinates', 'st.get_icrs_coordinates', (['JD1', 'loc', 'Alt1', 'Az1'], {}), '(JD1, loc, Alt1, Az1)\n', (27141, 27162), True, 'from ScanningTools import ScanningTools as st\n'), ((27206, 27250), 'ScanningTools.ScanningTools.get_icrs_coordinates', 'st.get_icrs_coordinates', (['JD1', 'loc', 'Alt3', 'Az3'], {}), '(JD1, loc, Alt3, Az3)\n', (27229, 27250), True, 'from ScanningTools import ScanningTools as st\n'), ((27296, 27350), 'ScanningTools.ScanningTools.get_practical_icrs_coordinates', 'st.get_practical_icrs_coordinates', (['JD1', 'loc', 'Alt1', 'Az1'], {}), '(JD1, loc, Alt1, Az1)\n', (27329, 27350), True, 'from ScanningTools import ScanningTools as st\n'), ((27394, 27448), 'ScanningTools.ScanningTools.get_practical_icrs_coordinates', 'st.get_practical_icrs_coordinates', (['JD1', 'loc', 'Alt3', 'Az3'], {}), '(JD1, loc, Alt3, Az3)\n', (27427, 27448), True, 'from ScanningTools import ScanningTools as st\n'), ((27489, 27518), 'ScanningTools.ScanningTools.sex2dec', 'st.sex2dec', (['pointing_accuracy'], {}), '(pointing_accuracy)\n', (27499, 27518), True, 'from ScanningTools import ScanningTools as st\n'), ((28239, 28331), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day'}), '(LCT_start, LCD_start, sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=day)\n', (28252, 28331), True, 'from ScanningTools import ScanningTools as st\n'), ((28399, 28494), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate1', 'obs_time1'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'day1'}), '(LCT_start, LCD_start, sampling_rate1, obs_time1, UTC=UTC, DST\n =DST, day=day1)\n', (28412, 28494), True, 'from ScanningTools import ScanningTools as st\n'), ((28559, 28627), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time', 'rpm', 'zenith_distance', 'boresight_angle'], {}), '(time, rpm, zenith_distance, boresight_angle)\n', (28582, 28627), True, 'from ScanningTools import ScanningTools as st\n'), ((28657, 28728), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time1', 'rpm1', 'zenith_distance1', 'boresight_angle'], {}), '(time1, rpm1, zenith_distance1, boresight_angle)\n', (28680, 28728), True, 'from ScanningTools import ScanningTools as st\n'), ((28758, 28828), 'ScanningTools.ScanningTools.get_engine_rotations', 'st.get_engine_rotations', (['time1', 'rpm1', 'zenith_distance', 'boresight_angle'], {}), '(time1, rpm1, zenith_distance, boresight_angle)\n', (28781, 28828), True, 'from ScanningTools import ScanningTools as st\n'), ((28873, 
28945), 'ScanningTools.ScanningTools.get_full_fp_polarization_angles', 'st.get_full_fp_polarization_angles', (['"""./ScanningTools/fp_data/fp_psi.txt"""'], {}), "('./ScanningTools/fp_data/fp_psi.txt')\n", (28907, 28945), True, 'from ScanningTools import ScanningTools as st\n'), ((29026, 29120), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi', 'theta', 'psi', 'x_fp_pol_versors', 'n_horns', 'time'], {'n': 'n', 'cartesian': '(True)'}), '(phi, theta, psi, x_fp_pol_versors, n_horns, time, n=n,\n cartesian=True)\n', (29045, 29120), True, 'from ScanningTools import ScanningTools as st\n'), ((29197, 29297), 'ScanningTools.ScanningTools.get_fp_rotations', 'st.get_fp_rotations', (['phi1', 'theta1', 'psi1', 'x_fp_pol_versors', 'n_horns', 'time1'], {'n': 'n1', 'cartesian': '(True)'}), '(phi1, theta1, psi1, x_fp_pol_versors, n_horns, time1, n\n =n1, cartesian=True)\n', (29216, 29297), True, 'from ScanningTools import ScanningTools as st\n'), ((29369, 29454), 'ScanningTools.ScanningTools.get_polarization_angles', 'st.get_polarization_angles', (['phi', 'theta', 'psi', 'x_fp_pol_versors', 'n_horns', 'time'], {'n': 'n'}), '(phi, theta, psi, x_fp_pol_versors, n_horns, time,\n n=n)\n', (29395, 29454), True, 'from ScanningTools import ScanningTools as st\n'), ((29525, 29615), 'ScanningTools.ScanningTools.get_polarization_angles', 'st.get_polarization_angles', (['phi1', 'theta1', 'psi1', 'x_fp_pol_versors', 'n_horns', 'time1'], {'n': 'n1'}), '(phi1, theta1, psi1, x_fp_pol_versors, n_horns,\n time1, n=n1)\n', (29551, 29615), True, 'from ScanningTools import ScanningTools as st\n'), ((29687, 29777), 'ScanningTools.ScanningTools.get_polarization_angles', 'st.get_polarization_angles', (['phi2', 'theta2', 'psi2', 'x_fp_pol_versors', 'n_horns', 'time1'], {'n': 'n1'}), '(phi2, theta2, psi2, x_fp_pol_versors, n_horns,\n time1, n=n1)\n', (29713, 29777), True, 'from ScanningTools import ScanningTools as st\n'), ((2514, 2533), 'numpy.radians', 'np.radians', (['ang0[0]'], {}), '(ang0[0])\n', (2524, 2533), True, 'import numpy as np\n'), ((3815, 3851), 'numpy.allclose', 'np.allclose', (['Jul_1_2013', '(2456474.442)'], {}), '(Jul_1_2013, 2456474.442)\n', (3826, 3851), True, 'import numpy as np\n'), ((3877, 3913), 'numpy.allclose', 'np.allclose', (['Jun_19_2009', '(2455002.25)'], {}), '(Jun_19_2009, 2455002.25)\n', (3888, 3913), True, 'import numpy as np\n'), ((3927, 3997), 'astropy.time.Time', 'Time', (["['2015-1-1 00:00:10', '2018-1-3 5:15:24.3', '1980-4-22 19:30:2']"], {}), "(['2015-1-1 00:00:10', '2018-1-3 5:15:24.3', '1980-4-22 19:30:2'])\n", (3931, 3997), False, 'from astropy.time import Time\n'), ((4304, 4321), 'numpy.allclose', 'np.allclose', (['t', 'T'], {}), '(t, T)\n', (4315, 4321), True, 'import numpy as np\n'), ((4956, 4984), 'ScanningTools.ScanningTools.get_nside_eff', 'st.get_nside_eff', (['fwhm_beam0'], {}), '(fwhm_beam0)\n', (4972, 4984), True, 'from ScanningTools import ScanningTools as st\n'), ((5017, 5045), 'ScanningTools.ScanningTools.get_nside_eff', 'st.get_nside_eff', (['fwhm_beam1'], {}), '(fwhm_beam1)\n', (5033, 5045), True, 'from ScanningTools import ScanningTools as st\n'), ((5077, 5105), 'ScanningTools.ScanningTools.get_nside_eff', 'st.get_nside_eff', (['fwhm_beam2'], {}), '(fwhm_beam2)\n', (5093, 5105), True, 'from ScanningTools import ScanningTools as st\n'), ((9017, 9110), 'ScanningTools.ScanningTools.get_timeJD', 'st.get_timeJD', (['LCT_start', 'LCD_start', 'sampling_rate', 'obs_time'], {'UTC': 'UTC', 'DST': 'DST', 'day': 'None'}), '(LCT_start, LCD_start, 
sampling_rate, obs_time, UTC=UTC, DST=\n DST, day=None)\n', (9030, 9110), True, 'from ScanningTools import ScanningTools as st\n'), ((17563, 17582), 'numpy.allclose', 'np.allclose', (['m3', 'M3'], {}), '(m3, M3)\n', (17574, 17582), True, 'import numpy as np\n'), ((18098, 18136), 'numpy.allclose', 'np.allclose', (['(theta[1:] - theta[:-1])', '(0)'], {}), '(theta[1:] - theta[:-1], 0)\n', (18109, 18136), True, 'import numpy as np\n'), ((18162, 18196), 'numpy.allclose', 'np.allclose', (['(psi[1:] - psi[:-1])', '(0)'], {}), '(psi[1:] - psi[:-1], 0)\n', (18173, 18196), True, 'import numpy as np\n'), ((24586, 24613), 'numpy.allclose', 'np.allclose', (['Dec1', 'Dec3[n1]'], {}), '(Dec1, Dec3[n1])\n', (24597, 24613), True, 'import numpy as np\n'), ((24639, 24664), 'numpy.allclose', 'np.allclose', (['Ra1', 'Ra3[n1]'], {}), '(Ra1, Ra3[n1])\n', (24650, 24664), True, 'import numpy as np\n'), ((24937, 24966), 'numpy.allclose', 'np.allclose', (['Dec1[0]', 'Dec5[0]'], {}), '(Dec1[0], Dec5[0])\n', (24948, 24966), True, 'import numpy as np\n'), ((24992, 25019), 'numpy.allclose', 'np.allclose', (['Ra1[0]', 'Ra5[0]'], {}), '(Ra1[0], Ra5[0])\n', (25003, 25019), True, 'import numpy as np\n'), ((25046, 25077), 'numpy.allclose', 'np.allclose', (['Dec1[1:]', 'Dec5[1:]'], {}), '(Dec1[1:], Dec5[1:])\n', (25057, 25077), True, 'import numpy as np\n'), ((25104, 25133), 'numpy.allclose', 'np.allclose', (['Ra1[1:]', 'Ra5[1:]'], {}), '(Ra1[1:], Ra5[1:])\n', (25115, 25133), True, 'import numpy as np\n'), ((25199, 25223), 'astropy.coordinates.SkyCoord.from_name', 'SkyCoord.from_name', (['name'], {}), '(name)\n', (25217, 25223), False, 'from astropy.coordinates import SkyCoord, AltAz\n'), ((25449, 25505), 'ScanningTools.ScanningTools.get_icrs_coordinates', 'st.get_icrs_coordinates', (['JD1', 'loc', 'object_Alt', 'object_Az'], {}), '(JD1, loc, object_Alt, object_Az)\n', (25472, 25505), True, 'from ScanningTools import ScanningTools as st\n'), ((30192, 30243), 'numpy.allclose', 'np.allclose', (['pol_ang_proj2[..., 0]', 'x_fp_pol_angles'], {}), '(pol_ang_proj2[..., 0], x_fp_pol_angles)\n', (30203, 30243), True, 'import numpy as np\n'), ((30281, 30329), 'numpy.allclose', 'np.allclose', (['pol_ang_proj', 'pol_ang_proj_expected'], {}), '(pol_ang_proj, pol_ang_proj_expected)\n', (30292, 30329), True, 'import numpy as np\n'), ((2367, 2425), 'numpy.array', 'np.array', (['[-10.76638889, 30.5875, -180.422222, 3.06805556]'], {}), '([-10.76638889, 30.5875, -180.422222, 3.06805556])\n', (2375, 2425), True, 'import numpy as np\n'), ((2673, 2740), 'numpy.array', 'np.array', (['[[1, 32, 47.706], [-0, 33, 36], [0, 20, 0], [-1, 0, 3.6]]'], {}), '([[1, 32, 47.706], [-0, 33, 36], [0, 20, 0], [-1, 0, 3.6]])\n', (2681, 2740), True, 'import numpy as np\n'), ((2834, 2859), 'numpy.array', 'np.array', (['[1, 32, 47.706]'], {}), '([1, 32, 47.706])\n', (2842, 2859), True, 'import numpy as np\n'), ((4023, 4090), 'ScanningTools.ScanningTools.LocalCivilTime2JulianDay', 'st.LocalCivilTime2JulianDay', (['(0, 0, 10)', '(1, 1, 2015)'], {'UTC': '(0)', 'DST': '(0)'}), '((0, 0, 10), (1, 1, 2015), UTC=0, DST=0)\n', (4050, 4090), True, 'from ScanningTools import ScanningTools as st\n'), ((4114, 4184), 'ScanningTools.ScanningTools.LocalCivilTime2JulianDay', 'st.LocalCivilTime2JulianDay', (['(5, 15, 24.3)', '(3, 1, 2018)'], {'UTC': '(0)', 'DST': '(0)'}), '((5, 15, 24.3), (3, 1, 2018), UTC=0, DST=0)\n', (4141, 4184), True, 'from ScanningTools import ScanningTools as st\n'), ((4208, 4277), 'ScanningTools.ScanningTools.LocalCivilTime2JulianDay', 
'st.LocalCivilTime2JulianDay', (['(19, 30, 2)', '(22, 4, 1980)'], {'UTC': '(0)', 'DST': '(0)'}), '((19, 30, 2), (22, 4, 1980), UTC=0, DST=0)\n', (4235, 4277), True, 'from ScanningTools import ScanningTools as st\n'), ((4682, 4708), 'numpy.array', 'np.array', (['[11, 52, 46.843]'], {}), '([11, 52, 46.843])\n', (4690, 4708), True, 'import numpy as np\n'), ((5229, 5264), 'numpy.allclose', 'np.allclose', (['x_fp[i, 0]', 'x_fp[j, 0]'], {}), '(x_fp[i, 0], x_fp[j, 0])\n', (5240, 5264), True, 'import numpy as np\n'), ((5294, 5330), 'numpy.allclose', 'np.allclose', (['x_fp[i, 1]', '(-x_fp[j, 1])'], {}), '(x_fp[i, 1], -x_fp[j, 1])\n', (5305, 5330), True, 'import numpy as np\n'), ((5360, 5395), 'numpy.allclose', 'np.allclose', (['x_fp[i, 2]', 'x_fp[j, 2]'], {}), '(x_fp[i, 2], x_fp[j, 2])\n', (5371, 5395), True, 'import numpy as np\n'), ((5602, 5627), 'numpy.sum', 'np.sum', (['(x_fp ** 2)'], {'axis': '(1)'}), '(x_fp ** 2, axis=1)\n', (5608, 5627), True, 'import numpy as np\n'), ((6549, 6584), 'numpy.allclose', 'np.allclose', (['x_fp[i, 0]', 'x_fp[j, 0]'], {}), '(x_fp[i, 0], x_fp[j, 0])\n', (6560, 6584), True, 'import numpy as np\n'), ((6614, 6650), 'numpy.allclose', 'np.allclose', (['x_fp[i, 1]', '(-x_fp[j, 1])'], {}), '(x_fp[i, 1], -x_fp[j, 1])\n', (6625, 6650), True, 'import numpy as np\n'), ((6680, 6715), 'numpy.allclose', 'np.allclose', (['x_fp[i, 2]', 'x_fp[j, 2]'], {}), '(x_fp[i, 2], x_fp[j, 2])\n', (6691, 6715), True, 'import numpy as np\n'), ((6892, 6932), 'numpy.sum', 'np.sum', (['(polarization_versor ** 2)'], {'axis': '(1)'}), '(polarization_versor ** 2, axis=1)\n', (6898, 6932), True, 'import numpy as np\n'), ((8261, 8314), 'numpy.allclose', 'np.allclose', (['(time[1:] - time[0:-1])', '(1 / sampling_rate)'], {}), '(time[1:] - time[0:-1], 1 / sampling_rate)\n', (8272, 8314), True, 'import numpy as np\n'), ((8451, 8508), 'numpy.allclose', 'np.allclose', (['(t1 - t0).sec', '(1 / sampling_rate)'], {'rtol': '(0.001)'}), '((t1 - t0).sec, 1 / sampling_rate, rtol=0.001)\n', (8462, 8508), True, 'import numpy as np\n'), ((14255, 14328), 'numpy.sum', 'np.sum', (['(np.r_[True, phi[1:] > phi[:-1]] & np.r_[phi[:-1] > phi[1:], True])'], {}), '(np.r_[True, phi[1:] > phi[:-1]] & np.r_[phi[:-1] > phi[1:], True])\n', (14261, 14328), True, 'import numpy as np\n'), ((17423, 17458), 'numpy.repeat', 'np.repeat', (['M1[None, ...]', '(3)'], {'axis': '(0)'}), '(M1[None, ...], 3, axis=0)\n', (17432, 17458), True, 'import numpy as np\n'), ((17501, 17536), 'numpy.repeat', 'np.repeat', (['M2[None, ...]', '(3)'], {'axis': '(0)'}), '(M2[None, ...], 3, axis=0)\n', (17510, 17536), True, 'import numpy as np\n'), ((20444, 20472), 'numpy.dot', 'np.dot', (['fp_rot1[i]', 'x_fp[n1]'], {}), '(fp_rot1[i], x_fp[n1])\n', (20450, 20472), True, 'import numpy as np\n'), ((20897, 20924), 'numpy.dot', 'np.dot', (['fp_rot2[j]', 'x_fp[p]'], {}), '(fp_rot2[j], x_fp[p])\n', (20903, 20924), True, 'import numpy as np\n'), ((25534, 25573), 'numpy.allclose', 'np.allclose', (['object.dec.rad', 'object_Dec'], {}), '(object.dec.rad, object_Dec)\n', (25545, 25573), True, 'import numpy as np\n'), ((25603, 25640), 'numpy.allclose', 'np.allclose', (['object.ra.rad', 'object_Ra'], {}), '(object.ra.rad, object_Ra)\n', (25614, 25640), True, 'import numpy as np\n'), ((29886, 29938), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(sampling_rate * obs_t / 2 + 1)'], {}), '(0, np.pi, sampling_rate * obs_t / 2 + 1)\n', (29897, 29938), True, 'import numpy as np\n'), ((30050, 30112), 'numpy.arctan2', 'np.arctan2', (['x_fp_pol_versors[..., 1]', 
'x_fp_pol_versors[..., 0]'], {}), '(x_fp_pol_versors[..., 1], x_fp_pol_versors[..., 0])\n', (30060, 30112), True, 'import numpy as np\n'), ((30765, 30797), 'numpy.allclose', 'np.allclose', (['x_fp.shape', '(49, 3)'], {}), '(x_fp.shape, (49, 3))\n', (30776, 30797), True, 'import numpy as np\n'), ((30827, 30865), 'numpy.allclose', 'np.allclose', (['x_fp_pol_angles.shape', '(49)'], {}), '(x_fp_pol_angles.shape, 49)\n', (30838, 30865), True, 'import numpy as np\n'), ((30937, 30970), 'numpy.allclose', 'np.allclose', (['time.shape', 'JD.shape'], {}), '(time.shape, JD.shape)\n', (30948, 30970), True, 'import numpy as np\n'), ((31000, 31034), 'numpy.allclose', 'np.allclose', (['theta.shape', 'JD.shape'], {}), '(theta.shape, JD.shape)\n', (31011, 31034), True, 'import numpy as np\n'), ((31064, 31099), 'numpy.allclose', 'np.allclose', (['theta.shape', 'phi.shape'], {}), '(theta.shape, phi.shape)\n', (31075, 31099), True, 'import numpy as np\n'), ((31129, 31162), 'numpy.allclose', 'np.allclose', (['psi.shape', 'phi.shape'], {}), '(psi.shape, phi.shape)\n', (31140, 31162), True, 'import numpy as np\n'), ((31192, 31248), 'numpy.allclose', 'np.allclose', (['psi.shape', 'fp_pointings_spherical.shape[-2]'], {}), '(psi.shape, fp_pointings_spherical.shape[-2])\n', (31203, 31248), True, 'import numpy as np\n'), ((31278, 31335), 'numpy.allclose', 'np.allclose', (['Alt.shape', 'fp_pointings_spherical.shape[:-1]'], {}), '(Alt.shape, fp_pointings_spherical.shape[:-1])\n', (31289, 31335), True, 'import numpy as np\n'), ((31365, 31397), 'numpy.allclose', 'np.allclose', (['Alt.shape', 'Az.shape'], {}), '(Alt.shape, Az.shape)\n', (31376, 31397), True, 'import numpy as np\n'), ((31427, 31459), 'numpy.allclose', 'np.allclose', (['Dec.shape', 'Az.shape'], {}), '(Dec.shape, Az.shape)\n', (31438, 31459), True, 'import numpy as np\n'), ((31489, 31521), 'numpy.allclose', 'np.allclose', (['Dec.shape', 'Ra.shape'], {}), '(Dec.shape, Ra.shape)\n', (31500, 31521), True, 'import numpy as np\n'), ((31551, 31599), 'numpy.allclose', 'np.allclose', (['polarization_angles.shape', 'Ra.shape'], {}), '(polarization_angles.shape, Ra.shape)\n', (31562, 31599), True, 'import numpy as np\n'), ((31629, 31666), 'numpy.allclose', 'np.allclose', (['time.shape', 'Ra.shape[-1]'], {}), '(time.shape, Ra.shape[-1])\n', (31640, 31666), True, 'import numpy as np\n'), ((31696, 31753), 'numpy.allclose', 'np.allclose', (['fp_pointings_spherical.shape[-2]', 'time.shape'], {}), '(fp_pointings_spherical.shape[-2], time.shape)\n', (31707, 31753), True, 'import numpy as np\n'), ((32176, 32198), 'numpy.array', 'np.array', (['[28, 16, 24]'], {}), '([28, 16, 24])\n', (32184, 32198), True, 'import numpy as np\n'), ((32217, 32240), 'numpy.array', 'np.array', (['[-16, 38, 32]'], {}), '([-16, 38, 32])\n', (32225, 32240), True, 'import numpy as np\n'), ((32667, 32689), 'numpy.array', 'np.array', (['[28, 16, 24]'], {}), '([28, 16, 24])\n', (32675, 32689), True, 'import numpy as np\n'), ((32708, 32731), 'numpy.array', 'np.array', (['[-16, 38, 32]'], {}), '([-16, 38, 32])\n', (32716, 32731), True, 'import numpy as np\n'), ((3435, 3452), 'ScanningTools.ScanningTools.sex2dec', 'st.sex2dec', (['hours'], {}), '(hours)\n', (3445, 3452), True, 'from ScanningTools import ScanningTools as st\n'), ((8401, 8417), 'numpy.diff', 'np.diff', (['JD_step'], {}), '(JD_step)\n', (8408, 8417), True, 'import numpy as np\n'), ((18339, 18379), 'numpy.diff', 'np.diff', (['fp_pointings_c[..., 2]'], {'axis': '(-1)'}), '(fp_pointings_c[..., 2], axis=-1)\n', (18346, 18379), True, 'import numpy 
as np\n'), ((18575, 18611), 'numpy.sum', 'np.sum', (['(fp_pointings_c ** 2)'], {'axis': '(-1)'}), '(fp_pointings_c ** 2, axis=-1)\n', (18581, 18611), True, 'import numpy as np\n'), ((26032, 26048), 'numpy.abs', 'np.abs', (['(Ra - PRa)'], {}), '(Ra - PRa)\n', (26038, 26048), True, 'import numpy as np\n'), ((29952, 30005), 'numpy.linspace', 'np.linspace', (['(-np.pi)', '(0)', '(sampling_rate * obs_t / 2 + 1)'], {}), '(-np.pi, 0, sampling_rate * obs_t / 2 + 1)\n', (29963, 30005), True, 'import numpy as np\n'), ((3065, 3083), 'ScanningTools.ScanningTools.sex2dec', 'st.sex2dec', (['angles'], {}), '(angles)\n', (3075, 3083), True, 'from ScanningTools import ScanningTools as st\n'), ((3134, 3152), 'ScanningTools.ScanningTools.sex2dec', 'st.sex2dec', (['angles'], {}), '(angles)\n', (3144, 3152), True, 'from ScanningTools import ScanningTools as st\n'), ((3364, 3384), 'ScanningTools.ScanningTools.sex2dec', 'st.sex2dec', (['hours[1]'], {}), '(hours[1])\n', (3374, 3384), True, 'from ScanningTools import ScanningTools as st\n'), ((25285, 25306), 'astropy.time.Time', 'Time', (['JD'], {'format': '"""jd"""'}), "(JD, format='jd')\n", (25289, 25306), False, 'from astropy.time import Time\n'), ((18413, 18445), 'numpy.degrees', 'np.degrees', (['fp_pointings[..., 0]'], {}), '(fp_pointings[..., 0])\n', (18423, 18445), True, 'import numpy as np\n'), ((18488, 18520), 'numpy.degrees', 'np.degrees', (['fp_pointings[..., 1]'], {}), '(fp_pointings[..., 1])\n', (18498, 18520), True, 'import numpy as np\n'), ((25971, 25989), 'numpy.abs', 'np.abs', (['(Dec - PDec)'], {}), '(Dec - PDec)\n', (25977, 25989), True, 'import numpy as np\n'), ((27792, 27821), 'numpy.max', 'np.max', (['pol_ang_proj'], {'axis': '(-1)'}), '(pol_ang_proj, axis=-1)\n', (27798, 27821), True, 'import numpy as np\n'), ((27868, 27897), 'numpy.min', 'np.min', (['pol_ang_proj'], {'axis': '(-1)'}), '(pol_ang_proj, axis=-1)\n', (27874, 27897), True, 'import numpy as np\n')]
|
r"""
Mutual Coherence and Babel Function are the properties of a matrix, used to
estimate the Spark of a matrix, which in turn is used to determine the
optimality of the solution to the :math:`\text{P}_0` problem.
Babel Function gives a tighter bound on the Spark of a matrix.
Spark of a matrix :math:`\boldsymbol{A}` is the size of the smallest subset of
linearly dependent columns of :math:`\boldsymbol{A}`.
.. currentmodule:: sparse.coherence
.. autosummary::
:toctree: toctree/coherence/
mutual_coherence
babel
"""
import math
from collections import namedtuple
import numpy as np
CoherenceSpark = namedtuple("CoherenceSpark", ("coherence", "spark"))
def mutual_coherence(mat):
r"""
For an arbitrary input matrix :math:`\boldsymbol{A}` of size `N` x `M`, the
mutual coherence is the maximal absolute inner-product between its
normalized columns :math:`\{ a_i \mid i=1,2,...,M \}`:
.. math::
\mu (\boldsymbol{A}) = \max_{1 \le i < j \le M}
\frac{\mid a_i^\top a_j \mid}{\|a_i\|_2 \|a_j\|_2}
:label: coh
The mutual coherence :math:`\mu` lies in range `[0, 1]`.
At the same time, a lower bound on the Spark of the matrix is estimated as
.. math::
\text{Spark}(\boldsymbol{A}) \ge 1 + \frac{1}{\mu(\boldsymbol{A})}
:label: spark
Parameters
----------
mat : (N, M) np.ndarray
The input matrix :math:`\boldsymbol{A}`.
Returns
-------
CoherenceSpark
A namedtuple with two attributes:
`.coherence` - mutual coherence of `mat`;
`.spark` - Spark lower bound :eq:`spark` of `mat`.
"""
mat = mat / np.linalg.norm(mat, axis=0)
gram = np.abs(mat.T.dot(mat))
np.fill_diagonal(gram, 0)
mu = gram.max()
spark = math.ceil(1 + 1 / mu)
return CoherenceSpark(mu, spark)
def babel(mat):
r"""
For an arbitrary input matrix :math:`\boldsymbol{A}` of size `N` x `M` and
normalized columns :math:`\{ a_i \mid i=1,2,...,M \}`, the Babel-Function
is defined by
.. math::
\mu_1(k) = \max_{\mid \Lambda \mid = k} \left[ \max_{j \notin \Lambda}
\sum_{i \in \Lambda}{\mid a_i^\top a_j \mid} \right]
:label: babel
    If :math:`\mu_1(k-1) < 1`, this implies that any set of :math:`k` columns
    from :math:`\boldsymbol{A}` is linearly independent. In this case, the Spark
necessarily satisfies
.. math::
\text{Spark}(\boldsymbol{A}) > k = 1 + \arg \min_k
\left({\mu_1(k) > 1}\right)
:label: spark_babel
Parameters
----------
mat : (N, M) np.ndarray
The input matrix :math:`\boldsymbol{A}`.
Returns
-------
CoherenceSpark
A `namedtuple` with two attributes:
`.coherence` - a list of `M-1` elements of
:math:`\mu_1(k), \ k=1,2,...,M-1`;
`.spark` - Spark lower bound :eq:`spark_babel` of `mat`.
Notes
-----
:eq:`spark_babel` is a tighter bound on Spark than :eq:`spark`.
"""
mat = mat / np.linalg.norm(mat, axis=0)
gram = np.abs(mat.T.dot(mat))
    # Entries of the Gram matrix of an L2-normalized matrix lie in range [0, 1],
    # with 1s on the diagonal
gram.sort(axis=1) # sort rows
gram = gram[:, ::-1] # in descending order
gram = gram[:, 1:] # skip the first column of 1s (diagonal elements)
gram = gram.cumsum(axis=1) # cumsum rows
mu1 = gram.max(axis=0)
spark = np.nonzero(mu1 > 1)[0][0] + 2
return CoherenceSpark(mu1, spark)
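# A minimal sanity-check sketch, assuming only the two public functions defined
# above: mu_1(1) coincides with the mutual coherence, and for this example the
# Babel-based Spark bound is at least as tight as the coherence-based one. The
# 10 x 25 Gaussian matrix is an arbitrary choice; nothing calls this helper.
def _spark_bound_example():
    rng = np.random.RandomState(0)
    mat = rng.randn(10, 25)
    coh = mutual_coherence(mat)
    bab = babel(mat)
    # mu_1(1) is the largest single normalized inner product, i.e. the mutual coherence
    assert np.isclose(bab.coherence[0], coh.coherence)
    # the Babel-based Spark lower bound should not be looser than the coherence-based one
    assert bab.spark >= coh.spark
    print("coherence-based bound:", coh.spark, "Babel-based bound:", bab.spark)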
def _quiz4():
mat = np.reshape([16, -2, 15, 13, 5, 6, 8, 8, 9, 4, 11, 12, 4, 12, 10, 1],
(4, 4))
print(mutual_coherence(mat))
print(babel(mat))
if __name__ == '__main__':
_quiz4()
|
[
"collections.namedtuple",
"numpy.reshape",
"math.ceil",
"numpy.fill_diagonal",
"numpy.nonzero",
"numpy.linalg.norm"
] |
[((615, 667), 'collections.namedtuple', 'namedtuple', (['"""CoherenceSpark"""', "('coherence', 'spark')"], {}), "('CoherenceSpark', ('coherence', 'spark'))\n", (625, 667), False, 'from collections import namedtuple\n'), ((1712, 1737), 'numpy.fill_diagonal', 'np.fill_diagonal', (['gram', '(0)'], {}), '(gram, 0)\n', (1728, 1737), True, 'import numpy as np\n'), ((1770, 1791), 'math.ceil', 'math.ceil', (['(1 + 1 / mu)'], {}), '(1 + 1 / mu)\n', (1779, 1791), False, 'import math\n'), ((3506, 3582), 'numpy.reshape', 'np.reshape', (['[16, -2, 15, 13, 5, 6, 8, 8, 9, 4, 11, 12, 4, 12, 10, 1]', '(4, 4)'], {}), '([16, -2, 15, 13, 5, 6, 8, 8, 9, 4, 11, 12, 4, 12, 10, 1], (4, 4))\n', (3516, 3582), True, 'import numpy as np\n'), ((1646, 1673), 'numpy.linalg.norm', 'np.linalg.norm', (['mat'], {'axis': '(0)'}), '(mat, axis=0)\n', (1660, 1673), True, 'import numpy as np\n'), ((3007, 3034), 'numpy.linalg.norm', 'np.linalg.norm', (['mat'], {'axis': '(0)'}), '(mat, axis=0)\n', (3021, 3034), True, 'import numpy as np\n'), ((3412, 3431), 'numpy.nonzero', 'np.nonzero', (['(mu1 > 1)'], {}), '(mu1 > 1)\n', (3422, 3431), True, 'import numpy as np\n')]
|
"""
Created on Sun Feb 12 11:51:29 2017
@author: <NAME>
Class: Computer Architecture
Language Python 2.7
Input an array of hex-instructions, and return a list of decoded MIPS instructions (e.g. 7a078 ADD $2, $9, $8).
Instruction types de-constructed in this assignment are ADD, AND, OR, SLT, SUB, BEQ, BNE, LW, and SW.
"""
import numpy as np
hex_instructions = [0x022da822, 0x8ef30018, 0x12a70004, 0x02689820,
0xad930018, 0x02697824, 0xad8ffff4, 0x018c6020,
0x02a4a825, 0x158ffff6, 0x8ef9fff0]
# Vectorize hex_instructions as a numpy array so it can be printed in hex format
A = np.array(hex_instructions)
vhex = np.vectorize(hex)
def deconstruct(x):
# Start the address at 4 less than the actual target address, since the PC will be incremented at the begining of each loop iteration
address = int("7a05c", 16)
    print
    print
    print "The entered hex instructions are :" + str(vhex(A))
    print
    print "---------------------------------------------------------------------"
    print "The deconstructed MIPS instructions are:"
    print "---------------------------------------------------------------------"
    print
instruction = 0
for x in hex_instructions:
opcode = 0
        address += 4  # Increment the PC address for each entry (Branches are assumed to fail, i.e. not taken)
"""The for loop will pass each 32-bit instruction through a series of bitwise & maskes that
will isolate a given range. A shift will follow each range-set"""
bits1_32 = bin((x & 0b1111111111111111111111111111111))
bits1_6 = bin((x & 0b11111100000000000000000000000000) >> 26)
bits7_11 = bin((x & 0b00000011111000000000000000000000) >> 21)
bits12_16 = bin((x & 0b00000000000111110000000000000000) >> 16)
bits17_21 = bin((x & 0b00000000000000001111100000000000) >> 11)
bits22_26 = bin((x & 0b00000000000000000000011111000000) >> 6)
bits27_32 = bin((x & 0b00000000000000000000000000111111) >> 0)
bits17_32 = bin((x & 0b00000000000000001111111111111111) >> 0)
bit17 = bin((x & 0b00000000000000001000000000000000) >> 15)
"""A block of if statements conditionally identify each instruction type, by evaluating the contents
of specific bitfields. The first 5 instruction types (ADD, AND, OR, SLT AND SUB) are evaluated by """
if bits1_6 == "0b0" and bits27_32 == "0b100000":
opcode = "ADD"
elif bits1_6 == "0b0" and bits27_32 == "0b100100":
opcode = "AND"
elif bits1_6 == "0b0" and bits27_32 == "0b100101":
opcode = "OR"
elif bits1_6 == "0b0" and bits27_32 == "0b101010":
opcode = "SLT"
elif bits1_6 == "0b0" and bits27_32 == "0b100010":
opcode = "SUB"
elif bits1_6 == "0b100":
opcode = "BEQ"
elif bits1_6 == "0b101":
opcode = "BNE"
elif bits1_6 == "0b100011":
opcode = "LW"
elif bits1_6 == "0b101011":
opcode = "SW"
        else:
            print "No opcode found"
"""Once an instruction type has been identified, the bitfields will be used construct an assembly
instruction. Each instruction's rs, rt, rd and offset is isolated according to its instruction
type, and using these binary values, an instruction type will be formed using concatenated strings"""
if opcode == "ADD":
rs = bits7_11
rt = bits12_16
rd = bits17_21
x = bits22_26
offset = 0
func = bits27_32
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rd, 2)) + ", " + "$" + str(
int(rs, 2)) + ", " + "$" + str(int(rt, 2))
elif opcode == "AND":
rs = bits7_11
rt = bits12_16
rd = bits17_21
x = bits22_26
offset = 0
func = bits27_32
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rd, 2)) + ", " + "$" + str(
int(rs, 2)) + ", " + "$" + str(int(rt, 2))
elif opcode == "OR":
rs = bits7_11
rt = bits12_16
rd = bits17_21
x = bits22_26
offset = 0
func = bits27_32
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rd, 2)) + ", " + "$" + str(
int(rs, 2)) + ", " + "$" + str(int(rt, 2))
elif opcode == "SLT":
rs = bits7_11
rt = bits12_16
rd = bits17_21
x = bits22_26
offset = 0
func = bits27_32
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rd, 2)) + ", " + "$" + str(
int(rs, 2)) + ", " + "$" + str(int(rt, 2))
elif opcode == "SUB":
rs = bits7_11
rt = bits12_16
rd = bits17_21
x = bits22_26
offset = 0
func = bits27_32
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rd, 2)) + ", " + "$" + str(
int(rs, 2)) + ", " + "$" + str(int(rt, 2))
elif opcode == "BEQ":
rs = bits7_11
rt = bits12_16
            offset = bits17_32  # keep the raw binary string so the sign extension below matches the BNE/LW/SW branches
            if bit17 == '0b1':
                mask = 2 ** ((len((offset)[2:])) - 1)
                offset = -(int(offset, 2) & mask) + (int(offset, 2) & ~mask)
                new_address = (address) + (offset * 4)
                instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rs, 2)) + ", " + "$" + str(
                int(rt, 2)) + ", " + format(new_address, '02x')
            else:
                new_address = (address) + (int(offset, 2) * 4)
                instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rs, 2)) + ", " + "$" + str(
                int(rt, 2)) + ", " + format(new_address, '02x')
elif opcode == "BNE":
rs = bits7_11
rt = bits12_16
offset = bits17_32
if bit17 == '0b1':
mask = 2 ** ((len((offset)[2:])) - 1)
offset = -(int(offset, 2) & mask) + (int(offset, 2) & ~mask)
new_address = (address) + (offset * 4)
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rs, 2)) + ", " + "$" + str(
int(rt, 2)) + ", " + format(new_address, '02x')
else:
                new_address = (address) + (int(offset, 2) * 4)
                instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rs, 2)) + ", " + "$" + str(
                int(rt, 2)) + ", " + format(new_address, '02x')
elif opcode == "LW":
rs = bits7_11
rt = bits12_16
offset = bits17_32
if bit17 == '0b1':
mask = 2 ** ((len((offset)[2:])) - 1)
offset = -(int(offset, 2) & mask) + (int(offset, 2) & ~mask)
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rt, 2)) + ", " + str(
offset) + ", " + "(" + str(int(rs, 2)) + ")"
else:
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rt, 2)) + ", " + str(
int(offset, 2)) + ", " + "(" + str(int(rs, 2)) + ")"
elif opcode == "SW":
rs = bits7_11
rt = bits12_16
offset = bits17_32
if bit17 == '0b1':
mask = 2 ** ((len((offset)[2:])) - 1)
offset = -(int(offset, 2) & mask) + (int(offset, 2) & ~mask)
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rt, 2)) + ", " + str(
offset) + ", " + "(" + str(int(rs, 2)) + ")"
else:
instruction = format(address, '02x') + ": " + str(opcode) + " $" + str(int(rt, 2)) + ", " + str(
int(offset, 2)) + ", " + "(" + str(int(rs, 2)) + ")"
"""A print instruction set within the for loop will print out the assembly instruction produced by
each iteration through the hex_instructions list. As currently set, the list will only print out the
compiled instruction for each hex input, however un-quoting the
"""
        print instruction
        # Un-quote the print block below here to enable troubleshooting (showing bit fields after each instruction)
"""
print "--------------------------------------------------------------"
print "Address :" + format(address, '02x')
print "bits 1:32 = " + bits1_32
print "bits 1:6 = " + bits1_6
print "bits 7:11 = " + bits7_11
print "bits 12:16 = " + bits12_16
print "bits 17:21 = " + bits17_21
print "bits 22:26 = " + bits22_26
print "bits 27:32 = " + bits27_32
print "bits 17:32 = " + bits17_32
print "bit 17 =" + bit17
print "offset = " + str(offset)
print type(offset)
print "--------------------------------------------------------------"
"""
# Calling the function on the hex_instructions list will execute the defined program.
deconstruct(hex_instructions)
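# A hand-decoded sanity check for the bitfield boundaries used above, kept as a
# sketch and never called by the script. 0x022da822 is the first entry of
# hex_instructions; under the standard R-type layout (opcode|rs|rt|rd|shamt|funct)
# it should decode to SUB $21, $17, $13.
def _check_first_instruction():
    word = 0x022da822
    op = (word >> 26) & 0b111111   # 0 -> R-type
    rs = (word >> 21) & 0b11111    # 17
    rt = (word >> 16) & 0b11111    # 13
    rd = (word >> 11) & 0b11111    # 21
    funct = word & 0b111111        # 0b100010 -> SUB
    assert (op, rs, rt, rd, funct) == (0, 17, 13, 21, 0b100010)
    return "SUB $" + str(rd) + ", $" + str(rs) + ", $" + str(rt)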
|
[
"numpy.array",
"numpy.vectorize"
] |
[((642, 668), 'numpy.array', 'np.array', (['hex_instructions'], {}), '(hex_instructions)\n', (650, 668), True, 'import numpy as np\n'), ((677, 694), 'numpy.vectorize', 'np.vectorize', (['hex'], {}), '(hex)\n', (689, 694), True, 'import numpy as np\n')]
|
import sklearn.datasets
import sklearn.model_selection
import sklearn.linear_model
import sklearn.metrics
import numpy
import compare_auc_delong_xu
import unittest
import scipy.stats
class TestIris(unittest.TestCase):
@classmethod
def setUpClass(cls):
data = sklearn.datasets.load_iris()
x_train, x_test, y_train, cls.y_test = sklearn.model_selection.train_test_split(
data.data, (data.target == 1).astype(numpy.int), test_size=0.8, random_state=42)
cls.predictions = sklearn.linear_model.LogisticRegression(solver="lbfgs").fit(
x_train, y_train).predict_proba(x_test)[:, 1]
cls.sklearn_auc = sklearn.metrics.roc_auc_score(cls.y_test, cls.predictions)
def test_variance_const(self):
auc, variance = compare_auc_delong_xu.delong_roc_variance(self.y_test, self.predictions)
numpy.testing.assert_allclose(self.sklearn_auc, auc)
numpy.testing.assert_allclose(0.0015359814789736538, variance)
class TestGauss(unittest.TestCase):
x_distr = scipy.stats.norm(0.5, 1)
y_distr = scipy.stats.norm(-0.5, 1)
def test_variance(self):
sample_size_x = 7
sample_size_y = 14
n_trials = 50000
aucs = numpy.empty(n_trials)
variances = numpy.empty(n_trials)
numpy.random.seed(1234235)
labels = numpy.concatenate([numpy.ones(sample_size_x), numpy.zeros(sample_size_y)])
for trial in range(n_trials):
scores = numpy.concatenate([
self.x_distr.rvs(sample_size_x),
self.y_distr.rvs(sample_size_y)])
aucs[trial] = sklearn.metrics.roc_auc_score(labels, scores)
auc_delong, variances[trial] = compare_auc_delong_xu.delong_roc_variance(
labels, scores)
numpy.testing.assert_allclose(aucs[trial], auc_delong)
numpy.testing.assert_allclose(variances.mean(), aucs.var(), rtol=0.1)
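# A sketch of the typical downstream use of the (auc, variance) pair returned by
# delong_roc_variance: a normal-approximation confidence interval for the AUC.
# Only the return signature exercised by the tests above is assumed; the default
# alpha of 0.95 is an arbitrary choice, and the interval is clipped to [0, 1].
def _auc_confidence_interval(ground_truth, predictions, alpha=0.95):
    auc, variance = compare_auc_delong_xu.delong_roc_variance(ground_truth, predictions)
    std = numpy.sqrt(variance)
    lower, upper = scipy.stats.norm.ppf([(1 - alpha) / 2, (1 + alpha) / 2], loc=auc, scale=std)
    return auc, max(lower, 0.0), min(upper, 1.0)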
|
[
"numpy.ones",
"numpy.testing.assert_allclose",
"compare_auc_delong_xu.delong_roc_variance",
"numpy.zeros",
"numpy.empty",
"numpy.random.seed"
] |
[((758, 830), 'compare_auc_delong_xu.delong_roc_variance', 'compare_auc_delong_xu.delong_roc_variance', (['self.y_test', 'self.predictions'], {}), '(self.y_test, self.predictions)\n', (799, 830), False, 'import compare_auc_delong_xu\n'), ((839, 891), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['self.sklearn_auc', 'auc'], {}), '(self.sklearn_auc, auc)\n', (868, 891), False, 'import numpy\n'), ((900, 962), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['(0.0015359814789736538)', 'variance'], {}), '(0.0015359814789736538, variance)\n', (929, 962), False, 'import numpy\n'), ((1203, 1224), 'numpy.empty', 'numpy.empty', (['n_trials'], {}), '(n_trials)\n', (1214, 1224), False, 'import numpy\n'), ((1245, 1266), 'numpy.empty', 'numpy.empty', (['n_trials'], {}), '(n_trials)\n', (1256, 1266), False, 'import numpy\n'), ((1275, 1301), 'numpy.random.seed', 'numpy.random.seed', (['(1234235)'], {}), '(1234235)\n', (1292, 1301), False, 'import numpy\n'), ((1687, 1744), 'compare_auc_delong_xu.delong_roc_variance', 'compare_auc_delong_xu.delong_roc_variance', (['labels', 'scores'], {}), '(labels, scores)\n', (1728, 1744), False, 'import compare_auc_delong_xu\n'), ((1774, 1828), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['aucs[trial]', 'auc_delong'], {}), '(aucs[trial], auc_delong)\n', (1803, 1828), False, 'import numpy\n'), ((1338, 1363), 'numpy.ones', 'numpy.ones', (['sample_size_x'], {}), '(sample_size_x)\n', (1348, 1363), False, 'import numpy\n'), ((1365, 1391), 'numpy.zeros', 'numpy.zeros', (['sample_size_y'], {}), '(sample_size_y)\n', (1376, 1391), False, 'import numpy\n')]
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
"""
import pytest
import os
import tarfile
from pathlib import Path
import nibabel as nib
import numpy as np
from ....tests.resource import setup as setuptestresources
from ....resource import get as getresource
from ..flame1 import flame1
from ...fixes import FLAMEO as FSLFLAMEO
from nipype.interfaces import fsl, ants
from nipype.pipeline import engine as pe
from templateflow.api import get as get_template
from ...imagemaths.merge import _merge, _merge_mask
from ...stats.model import _group_model
from ....utils import first
@pytest.fixture(scope="module")
def wakemandg_hensonrn(tmp_path_factory):
tmp_path = tmp_path_factory.mktemp(basename="wakemandg_hensonrn")
os.chdir(str(tmp_path))
setuptestresources()
inputtarpath = getresource("wakemandg_hensonrn_statmaps.tar.gz")
with tarfile.open(inputtarpath) as fp:
fp.extractall(tmp_path)
subjects = [f"{i+1:02d}" for i in range(16)]
suffixes = ["stat-effect_statmap", "stat-variance_statmap", "mask"]
data = {
suffix: [
tmp_path / f"sub-{subject}_task-faces_feature-taskBased_taskcontrast-facesGtScrambled_model-aggregateTaskBasedAcrossRuns_contrast-intercept_{suffix}.nii.gz"
for subject in subjects
]
for suffix in suffixes
}
data.update({
"subjects": subjects,
"spreadsheet": tmp_path / "subjects_age_sex.csv",
})
return data
@pytest.fixture(scope="module")
def mni_downsampled(tmp_path_factory):
tmp_path = tmp_path_factory.mktemp(basename="mni_downsampled")
os.chdir(str(tmp_path))
tpl = get_template("MNI152NLin2009cAsym", resolution=2, desc="brain", suffix="mask")
result = ants.ResampleImageBySpacing(
dimension=3,
input_image=tpl,
out_spacing=(6, 6, 6)
).run()
return result.outputs.output_image
@pytest.fixture(scope="module")
def wakemandg_hensonrn_downsampled(tmp_path_factory, wakemandg_hensonrn, mni_downsampled):
tmp_path = tmp_path_factory.mktemp(basename="wakemandg_hensonrn_downsampled")
os.chdir(str(tmp_path))
data = dict()
def _downsample(in_file):
result = ants.ApplyTransforms(
dimension=3,
input_image_type=0,
input_image=in_file,
reference_image=mni_downsampled,
interpolation="NearestNeighbor",
transforms=["identity"]
).run()
return result.outputs.output_image
for k, v in wakemandg_hensonrn.items():
if isinstance(v, list):
data[k] = [_downsample(f) if Path(f).exists() else f for f in v]
else:
data[k] = v
return data
@pytest.mark.timeout(600)
@pytest.mark.parametrize("use_var_cope", [False, True])
def test_FLAME1(tmp_path, wakemandg_hensonrn_downsampled, use_var_cope):
os.chdir(str(tmp_path))
# prepare
data = wakemandg_hensonrn_downsampled
cope_files = data["stat-effect_statmap"]
var_cope_files = data["stat-variance_statmap"]
mask_files = data["mask"]
subjects = data["subjects"]
spreadsheet_file = data["spreadsheet"]
regressors, contrasts, _ = _group_model(
subjects=subjects,
spreadsheet=spreadsheet_file,
variabledicts=[
{"name": "Sub", "type": "id"},
{"name": "Age", "type": "continuous"},
{"name": "ReactionTime", "type": "categorical"},
],
contrastdicts=[
{"variable": ["Age"], "type": "infer"},
{"variable": ["ReactionTime"], "type": "infer"}
]
)
# run FSL
merge_cope_file = _merge(cope_files, "t")
merge_var_cope_file = _merge(var_cope_files, "t")
merge_mask_file = _merge_mask(mask_files)
workflow = pe.Workflow("comparison", base_dir=str(tmp_path))
multipleregressdesign = pe.Node(
fsl.MultipleRegressDesign(
regressors=regressors,
contrasts=contrasts,
),
name="multipleregressdesign",
)
flameo = pe.Node(
FSLFLAMEO(
run_mode="flame1",
cope_file=merge_cope_file,
mask_file=merge_mask_file,
),
name="flameo"
)
if use_var_cope:
flameo.inputs.var_cope_file = merge_var_cope_file
workflow.connect(multipleregressdesign, "design_mat", flameo, "design_file")
workflow.connect(multipleregressdesign, "design_con", flameo, "t_con_file")
workflow.connect(multipleregressdesign, "design_fts", flameo, "f_con_file")
workflow.connect(multipleregressdesign, "design_grp", flameo, "cov_split_file")
execgraph = workflow.run()
# retrieve flameo again
for node in execgraph.nodes():
if node.name == "flameo":
flameo = node
result = flameo.result
r0 = dict(
cope=result.outputs.copes[0],
tstat=result.outputs.tstats[0],
fstat=first(result.outputs.fstats),
tdof=result.outputs.tdof[0],
)
# run halfpipe
if use_var_cope:
var_cope_files_or_none = var_cope_files
else:
var_cope_files_or_none = None
result = flame1(
cope_files=cope_files,
var_cope_files=var_cope_files_or_none,
mask_files=mask_files,
regressors=regressors,
contrasts=contrasts,
num_threads=1,
)
r1 = dict(
cope=result["copes"][0],
tstat=result["tstats"][0],
fstat=result["fstats"][2],
tdof=result["tdof"][0],
)
# compare
mask = nib.load(merge_mask_file).get_fdata() > 0
for k in set(r0.keys()) & set(r1.keys()):
a0 = nib.load(r0[k]).get_fdata()[mask]
a1 = nib.load(r1[k]).get_fdata()[mask]
# weak criteria, determined post-hoc
# we don't expect exactly identical results, because FSL and numpy
# use different numerics code and we use double precision while FSL
# uses single precision floating point
# so these assertions are here to verify that the small differences
# will not get any larger with future changes or optimizations
# no more than one percent of voxels can be more than one percent different
assert np.isclose(a0, a1, rtol=1e-2).mean() > 0.99, f"Too many diverging voxels for {k}"
# mean error average needs to be below 0.05
assert np.abs(a0 - a1).mean() < 0.05, f"Too high mean error average for {k}"
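# The two agreement criteria asserted above, factored into a small helper so the
# same comparison can be re-run on other pairs of FSL / halfpipe output maps.
# This is a convenience sketch only: file paths and thresholds are whatever the
# caller supplies, and nothing in the test suite uses it.
def _agreement_summary(file_a, file_b, mask_file, rtol=1e-2):
    mask = nib.load(mask_file).get_fdata() > 0
    a = nib.load(file_a).get_fdata()[mask]
    b = nib.load(file_b).get_fdata()[mask]
    close_fraction = np.isclose(a, b, rtol=rtol).mean()  # fraction of voxels within rtol
    mean_abs_error = np.abs(a - b).mean()  # average absolute disagreement
    return close_fraction, mean_abs_error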
|
[
"nipype.interfaces.ants.ApplyTransforms",
"numpy.abs",
"tarfile.open",
"numpy.isclose",
"nibabel.load",
"pathlib.Path",
"templateflow.api.get",
"nipype.interfaces.ants.ResampleImageBySpacing",
"pytest.mark.parametrize",
"nipype.interfaces.fsl.MultipleRegressDesign",
"pytest.fixture",
"pytest.mark.timeout"
] |
[((683, 713), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (697, 713), False, 'import pytest\n'), ((1566, 1596), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1580, 1596), False, 'import pytest\n'), ((1996, 2026), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2010, 2026), False, 'import pytest\n'), ((2806, 2830), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(600)'], {}), '(600)\n', (2825, 2830), False, 'import pytest\n'), ((2832, 2886), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""use_var_cope"""', '[False, True]'], {}), "('use_var_cope', [False, True])\n", (2855, 2886), False, 'import pytest\n'), ((1743, 1821), 'templateflow.api.get', 'get_template', (['"""MNI152NLin2009cAsym"""'], {'resolution': '(2)', 'desc': '"""brain"""', 'suffix': '"""mask"""'}), "('MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask')\n", (1755, 1821), True, 'from templateflow.api import get as get_template\n'), ((960, 986), 'tarfile.open', 'tarfile.open', (['inputtarpath'], {}), '(inputtarpath)\n', (972, 986), False, 'import tarfile\n'), ((3974, 4043), 'nipype.interfaces.fsl.MultipleRegressDesign', 'fsl.MultipleRegressDesign', ([], {'regressors': 'regressors', 'contrasts': 'contrasts'}), '(regressors=regressors, contrasts=contrasts)\n', (3999, 4043), False, 'from nipype.interfaces import fsl, ants\n'), ((1836, 1921), 'nipype.interfaces.ants.ResampleImageBySpacing', 'ants.ResampleImageBySpacing', ([], {'dimension': '(3)', 'input_image': 'tpl', 'out_spacing': '(6, 6, 6)'}), '(dimension=3, input_image=tpl, out_spacing=(6, 6, 6)\n )\n', (1863, 1921), False, 'from nipype.interfaces import fsl, ants\n'), ((2296, 2469), 'nipype.interfaces.ants.ApplyTransforms', 'ants.ApplyTransforms', ([], {'dimension': '(3)', 'input_image_type': '(0)', 'input_image': 'in_file', 'reference_image': 'mni_downsampled', 'interpolation': '"""NearestNeighbor"""', 'transforms': "['identity']"}), "(dimension=3, input_image_type=0, input_image=in_file,\n reference_image=mni_downsampled, interpolation='NearestNeighbor',\n transforms=['identity'])\n", (2316, 2469), False, 'from nipype.interfaces import fsl, ants\n'), ((5625, 5650), 'nibabel.load', 'nib.load', (['merge_mask_file'], {}), '(merge_mask_file)\n', (5633, 5650), True, 'import nibabel as nib\n'), ((5727, 5742), 'nibabel.load', 'nib.load', (['r0[k]'], {}), '(r0[k])\n', (5735, 5742), True, 'import nibabel as nib\n'), ((5774, 5789), 'nibabel.load', 'nib.load', (['r1[k]'], {}), '(r1[k])\n', (5782, 5789), True, 'import nibabel as nib\n'), ((6299, 6328), 'numpy.isclose', 'np.isclose', (['a0', 'a1'], {'rtol': '(0.01)'}), '(a0, a1, rtol=0.01)\n', (6309, 6328), True, 'import numpy as np\n'), ((6449, 6464), 'numpy.abs', 'np.abs', (['(a0 - a1)'], {}), '(a0 - a1)\n', (6455, 6464), True, 'import numpy as np\n'), ((2712, 2719), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (2716, 2719), False, 'from pathlib import Path\n')]
|
from __future__ import annotations
import mmap
import threading
from enum import Enum
from itertools import product
from pathlib import Path
from typing import (
TYPE_CHECKING,
Optional,
Sequence,
Set,
Sized,
SupportsInt,
Union,
cast,
overload,
)
import numpy as np
from ._util import AXIS, VoxelSize, get_reader, is_supported_file
from .structures import Attributes, ExpLoop, FrameMetadata, Metadata, XYPosLoop
try:
from functools import cached_property
except ImportError:
cached_property = property # type: ignore
if TYPE_CHECKING:
from typing import Any, Dict, List, Tuple
import dask.array as da
import xarray as xr
from typing_extensions import Literal
Index = Union[int, slice]
class ReadMode(str, Enum):
MMAP = "mmap"
SDK = "sdk"
class ND2File:
_memmap: mmap.mmap
_is_legacy: bool
def __init__(
self,
path: Union[Path, str],
validate_frames: bool = False,
search_window: int = 100,
) -> None:
"""Open an nd2 file.
Parameters
----------
path : Union[Path, str]
Filename of an nd2 file.
validate_frames : bool
Whether to verify (and attempt to fix) frames whose positions have been
shifted relative to the predicted offset (i.e. in a corrupted file).
This comes at a slight performance penalty at file open, but may "rescue"
some corrupt files. by default False.
search_window : int
When validate_frames is true, this is the search window (in KB) that will
be used to try to find the actual chunk position. by default 100 KB
"""
self._path = str(path)
self._rdr = get_reader(
self._path, validate_frames=validate_frames, search_window=search_window
)
self._closed = False
self._is_legacy = "Legacy" in type(self._rdr).__name__
self._lock = threading.RLock()
@staticmethod
def is_supported_file(path) -> bool:
return is_supported_file(path)
@property
def path(self):
"""Path of the image."""
return self._path
@property
def is_legacy(self) -> bool:
"""Whether file is a legacy nd2 (JPEG2000) file."""
return self._is_legacy
def open(self) -> None:
"""open file for reading."""
if self.closed:
self._rdr.open()
self._closed = False
def close(self) -> None:
"""Close file (may cause segfault if read when closed in some cases)."""
if not self.closed:
self._rdr.close()
self._closed = True
@property
def closed(self) -> bool:
"""Whether the file is closed."""
return self._closed
def __enter__(self) -> ND2File:
self.open()
return self
def __exit__(self, *_) -> None:
self.close()
def __getstate__(self):
state = self.__dict__.copy()
del state["_rdr"]
del state["_lock"]
return state
def __setstate__(self, d):
self.__dict__ = d
self._lock = threading.RLock()
self._rdr = get_reader(self._path)
if self._closed:
self._rdr.close()
@cached_property
def attributes(self) -> Attributes:
"""Core image attributes"""
return self._rdr.attributes
@cached_property
def text_info(self) -> Dict[str, Any]:
"""Misc text info."""
return self._rdr.text_info()
@cached_property
def experiment(self) -> List[ExpLoop]:
"""Loop information for each nd axis"""
return self._rdr.experiment()
@cached_property
def metadata(self) -> Union[Metadata, dict]:
"""Various metadata (will be dict if legacy format)."""
return self._rdr.metadata()
def frame_metadata(
self, seq_index: Union[int, tuple]
) -> Union[FrameMetadata, dict]:
"""Metadata for specific frame.
This includes the global metadata from the metadata function.
(will be dict if legacy format).
Parameters
----------
seq_index : Union[int, tuple]
frame index
Returns
-------
Union[FrameMetadata, dict]
dict if legacy format, else FrameMetadata
"""
idx = cast(
int,
self._seq_index_from_coords(seq_index)
if isinstance(seq_index, tuple)
else seq_index,
)
return self._rdr.frame_metadata(idx)
@cached_property
def custom_data(self) -> Dict[str, Any]:
"""Dict of various unstructured custom metadata."""
return self._rdr._custom_data()
@cached_property
def ndim(self) -> int:
"""number of dimensions"""
return len(self.shape)
@cached_property
def shape(self) -> Tuple[int, ...]:
"""size of each axis"""
return self._coord_shape + self._frame_shape
@cached_property
def sizes(self) -> Dict[str, int]:
"""names and sizes for each axis"""
attrs = self.attributes
dims = {AXIS._MAP[c[1]]: c[2] for c in self._rdr._coord_info()}
dims[AXIS.CHANNEL] = (
dims.pop(AXIS.CHANNEL)
if AXIS.CHANNEL in dims
else (attrs.channelCount or 1)
)
dims[AXIS.Y] = attrs.heightPx
dims[AXIS.X] = attrs.widthPx or -1
if self.components_per_channel == 3: # rgb
dims[AXIS.RGB] = self.components_per_channel
else:
# if not exactly 3 channels, throw them all into monochrome channels
dims[AXIS.CHANNEL] = attrs.componentCount
return {k: v for k, v in dims.items() if v != 1}
@property
def is_rgb(self) -> bool:
"""Whether the image is rgb"""
return self.components_per_channel in (3, 4)
@property
def components_per_channel(self) -> int:
"""Number of components per channel (e.g. 3 for rgb)"""
attrs = cast(Attributes, self.attributes)
return attrs.componentCount // (attrs.channelCount or 1)
@property
def size(self) -> int:
"""Total number of pixels in the volume."""
return int(np.prod(self.shape))
@property
def nbytes(self) -> int:
"""Total bytes of image data."""
return self.size * self.dtype.itemsize
@cached_property
def dtype(self) -> np.dtype:
"""Image data type"""
attrs = self.attributes
d = attrs.pixelDataType[0] if attrs.pixelDataType else "u"
return np.dtype(f"{d}{attrs.bitsPerComponentInMemory // 8}")
def voxel_size(self, channel: int = 0) -> VoxelSize:
"""XYZ voxel size.
Parameters
----------
channel : int
Channel for which to retrieve voxel info, by default 0
Returns
-------
VoxelSize
Named tuple with attrs `x`, `y`, and `z`.
"""
return VoxelSize(*self._rdr.voxel_size())
def asarray(self, position: Optional[int] = None) -> np.ndarray:
"""Read image into numpy array.
Parameters
----------
position : int, optional
A specific XY position to extract, by default (None) reads all.
Returns
-------
np.ndarray
Raises
------
ValueError
if `position` is a string and is not a valid position name
IndexError
if `position` is provided and is out of range
"""
final_shape = list(self.shape)
if position is None:
seqs: Sequence[int] = range(self._frame_count)
else:
if isinstance(position, str):
try:
position = self._position_names().index(position)
except ValueError as e:
raise ValueError(
f"{position!r} is not a valid position name"
) from e
try:
pidx = list(self.sizes).index(AXIS.POSITION)
except ValueError as exc:
if position > 0:
raise IndexError(
f"Position {position} is out of range. "
f"Only 1 position available"
) from exc
seqs = range(self._frame_count)
else:
if position >= self.sizes[AXIS.POSITION]:
raise IndexError(
f"Position {position} is out of range. "
f"Only {self.sizes[AXIS.POSITION]} positions available"
)
ranges: List[Union[range, tuple]] = [
range(x) for x in self._coord_shape
]
ranges[pidx] = (position,)
coords = list(zip(*product(*ranges)))
seqs = self._seq_index_from_coords(coords) # type: ignore
final_shape[pidx] = 1
arr: np.ndarray = np.stack([self._get_frame(i) for i in seqs])
return arr.reshape(final_shape)
def __array__(self) -> np.ndarray:
"""array protocol"""
return self.asarray()
def to_dask(self, wrapper=True, copy=True) -> da.Array:
"""Create dask array (delayed reader) representing image.
This generally works well, but it remains to be seen whether performance
is optimized, or if we're duplicating safety mechanisms. You may try
various combinations of `wrapper` and `copy`, setting both to `False`
will very likely cause segmentation faults in many cases. But setting
one of them to `False`, may slightly improve read speed in certain
cases.
Parameters
----------
wrapper : bool
            If True (the default), the returned object will be a thin subclass of
            a :class:`dask.array.Array` (a
`ResourceBackedDaskArray`) that manages the opening
and closing of this file when getting chunks via compute(). If `wrapper`
is `False`, then a pure `da.Array` will be returned. However, when that
array is computed, it will incur a file open/close on *every* chunk
that is read (in the `_dask_block` method). As such `wrapper`
will generally be much faster, however, it *may* fail (i.e. result in
segmentation faults) with certain dask schedulers.
copy : bool
If `True` (the default), the dask chunk-reading function will return
an array copy. This can avoid segfaults in certain cases, though it
may also add overhead.
Returns
-------
da.Array
"""
from dask.array import map_blocks
chunks = [(1,) * x for x in self._coord_shape]
chunks += [(x,) for x in self._frame_shape]
dask_arr = map_blocks(
self._dask_block,
copy=copy,
chunks=chunks,
dtype=self.dtype,
)
if wrapper:
from resource_backed_dask_array import ResourceBackedDaskArray
# this subtype allows the dask array to re-open the underlying
# nd2 file on compute.
return ResourceBackedDaskArray.from_array(dask_arr, self)
return dask_arr
_NO_IDX = -1
def _seq_index_from_coords(
self, coords: Sequence
) -> Union[Sequence[int], SupportsInt]:
if not self._coord_shape:
return self._NO_IDX
return np.ravel_multi_index(coords, self._coord_shape)
def _dask_block(self, copy: bool, block_id: Tuple[int]) -> np.ndarray:
if isinstance(block_id, np.ndarray):
return
with self._lock:
was_closed = self.closed
if self.closed:
self.open()
try:
ncoords = len(self._coord_shape)
idx = self._seq_index_from_coords(block_id[:ncoords])
if idx == self._NO_IDX:
if any(block_id):
raise ValueError(
f"Cannot get chunk {block_id} for single frame image."
)
idx = 0
data = self._get_frame(cast(int, idx))
data = data.copy() if copy else data
return data[(np.newaxis,) * ncoords]
finally:
if was_closed:
self.close()
def to_xarray(
self,
delayed: bool = True,
squeeze: bool = True,
position: Optional[int] = None,
copy: bool = True,
) -> xr.DataArray:
"""Create labeled xarray representing image.
`array.dims` will be populated according to image metadata, and coordinates
will be populated based on pixel spacings. Additional metadata is available
in `array.attrs['metadata']`.
Parameters
----------
delayed : bool
Whether the DataArray should be backed by dask array or numpy array,
by default True (dask).
squeeze : bool
Whether to squeeze singleton dimensions, by default True
position : int, optional
A specific XY position to extract, by default (None) reads all.
copy : bool
Only applies when `delayed==True`. See `to_dask` for details.
Returns
-------
xr.DataArray
xarray with all axes labeled.
"""
import xarray as xr
data = self.to_dask(copy=copy) if delayed else self.asarray(position)
dims = list(self.sizes)
coords = self._expand_coords(squeeze)
if not squeeze:
for missing_dim in set(coords).difference(dims):
dims.insert(0, missing_dim)
missing_axes = len(dims) - data.ndim
if missing_axes > 0:
data = data[(np.newaxis,) * missing_axes]
if position is not None and not delayed and AXIS.POSITION in coords:
# if it's delayed, we do this using isel below instead.
coords[AXIS.POSITION] = [coords[AXIS.POSITION][position]]
x = xr.DataArray(
data,
dims=dims,
coords=coords,
attrs={
"metadata": {
"metadata": self.metadata,
"experiment": self.experiment,
"attributes": self.attributes,
"text_info": self.text_info,
}
},
)
if delayed and position is not None and AXIS.POSITION in coords:
x = x.isel({AXIS.POSITION: [position]})
return x.squeeze() if squeeze else x
@property
def _frame_coords(self) -> Set[str]:
return {AXIS.X, AXIS.Y, AXIS.CHANNEL, AXIS.RGB}
@property
def _raw_frame_shape(self) -> Tuple[int, int, int, int]:
"""sizes of each frame coordinate, prior to reshape"""
attr = self.attributes
return (
attr.heightPx,
attr.widthPx or -1,
attr.channelCount or 1,
self.components_per_channel,
)
@property
def _frame_shape(self) -> Tuple[int, ...]:
"""sizes of each frame coordinate, after reshape & squeeze"""
return tuple(v for k, v in self.sizes.items() if k in self._frame_coords)
@cached_property
def _coord_shape(self) -> Tuple[int, ...]:
"""sizes of each *non-frame* coordinate"""
return tuple(v for k, v in self.sizes.items() if k not in self._frame_coords)
@property
def _frame_count(self) -> int:
return int(np.prod(self._coord_shape))
def _get_frame(self, index: int) -> np.ndarray:
frame = self._rdr._read_image(index)
frame.shape = self._raw_frame_shape
return frame.transpose((2, 0, 1, 3)).squeeze()
def _expand_coords(self, squeeze: bool = True) -> dict:
"""Return a dict that can be used as the coords argument to xr.DataArray
Parameters
----------
squeeze : bool
whether to squeeze axes with length < 2, by default True
Returns
-------
dict
dict of axis name -> coordinates
"""
dx, dy, dz = self.voxel_size()
coords: Dict[str, Sized] = {
AXIS.Y: np.arange(self.attributes.heightPx) * dy,
AXIS.X: np.arange(self.attributes.widthPx or 1) * dx,
AXIS.CHANNEL: self._channel_names,
AXIS.POSITION: ["XYPos:0"], # maybe overwritten below
}
for c in self.experiment:
if squeeze and c.count <= 1:
continue
if c.type == "ZStackLoop":
coords[AXIS.Z] = np.arange(c.count) * c.parameters.stepUm
elif c.type == "TimeLoop":
coords[AXIS.TIME] = np.arange(c.count) * c.parameters.periodMs
elif c.type == "NETimeLoop":
pers = [np.arange(p.count) * p.periodMs for p in c.parameters.periods]
coords[AXIS.TIME] = np.hstack(pers)
elif c.type == "XYPosLoop":
coords[AXIS._MAP["XYPosLoop"]] = self._position_names(c)
if self.components_per_channel > 1:
coords[AXIS.RGB] = ["Red", "Green", "Blue", "alpha"][
: self.components_per_channel
]
# fix for Z axis missing from experiment:
if AXIS.Z in self.sizes and AXIS.Z not in coords:
coords[AXIS.Z] = np.arange(self.sizes[AXIS.Z]) * dz
if squeeze:
return {k: v for k, v in coords.items() if len(v) > 1}
return coords
def _position_names(self, loop: Optional[XYPosLoop] = None) -> List[str]:
if loop is None:
for c in self.experiment:
if c.type == "XYPosLoop":
loop = c
break
if loop is None:
return ["XYPos:0"]
return [p.name or f"XYPos:{i}" for i, p in enumerate(loop.parameters.points)]
@property
def _channel_names(self) -> List[str]:
return self._rdr.channel_names()
def __repr__(self) -> str:
try:
details = " (closed)" if self.closed else f" {self.dtype}: {self.sizes!r}"
extra = f": {Path(self.path).name!r}{details}"
except Exception:
extra = ""
return f"<ND2File at {hex(id(self))}{extra}>"
@overload
def imread(
file: Union[Path, str],
dask: Literal[False] = False,
xarray: Literal[False] = False,
validate_frames: bool = False,
) -> np.ndarray:
...
@overload
def imread(
file: Union[Path, str],
dask: bool = ...,
xarray: Literal[True] = True,
validate_frames: bool = False,
) -> xr.DataArray:
...
@overload
def imread(
file: Union[Path, str],
dask: Literal[True] = ...,
xarray=False,
validate_frames: bool = False,
) -> da.Array:
...
def imread(
file: Union[Path, str],
dask: bool = False,
xarray: bool = False,
validate_frames: bool = False,
):
"""Open `file`, return requested array type, and close `file`.
Parameters
----------
file : Union[Path, str]
Filepath (`str`) or `Path` object to ND2 file.
dask : bool
If `True`, returns a (delayed) `dask.array.Array`. This will avoid reading
any data from disk until specifically requested by using `.compute()` or
casting to a numpy array with `np.asarray()`. By default `False`.
xarray : bool
If `True`, returns an `xarray.DataArray`, `array.dims` will be populated
according to image metadata, and coordinates will be populated based on pixel
spacings. Additional metadata is available in `array.attrs['metadata']`.
If `dask` is also `True`, will return an xarray backed by a delayed dask array.
By default `False`.
validate_frames : bool
Whether to verify (and attempt to fix) frames whose positions have been
shifted relative to the predicted offset (i.e. in a corrupted file).
This comes at a slight performance penalty at file open, but may "rescue"
some corrupt files. by default False.
Returns
-------
Union[np.ndarray, dask.array.Array, xarray.DataArray]
Array subclass, depending on arguments used.
"""
with ND2File(file, validate_frames=validate_frames) as nd2:
if xarray:
return nd2.to_xarray(delayed=dask)
elif dask:
return nd2.to_dask()
else:
return nd2.asarray()
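# A minimal usage sketch of the public API defined above (`imread` and `ND2File`).
# The default path is a hypothetical placeholder supplied by the caller; nothing
# here is executed on import.
def _example_usage(path="example.nd2"):
    arr = imread(path)                              # whole image as a numpy array
    lazy = imread(path, dask=True)                  # delayed dask array
    labeled = imread(path, xarray=True, dask=True)  # labeled, lazily-read xarray
    with ND2File(path) as f:
        print(f.sizes, f.dtype, f.voxel_size())
        data = f.to_xarray(squeeze=True)
    return arr, lazy, labeled, data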
|
[
"numpy.prod",
"numpy.hstack",
"pathlib.Path",
"numpy.ravel_multi_index",
"dask.array.map_blocks",
"threading.RLock",
"itertools.product",
"resource_backed_dask_array.ResourceBackedDaskArray.from_array",
"xarray.DataArray",
"numpy.dtype",
"typing.cast",
"numpy.arange"
] |
[((1978, 1995), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1993, 1995), False, 'import threading\n'), ((3150, 3167), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (3165, 3167), False, 'import threading\n'), ((6028, 6061), 'typing.cast', 'cast', (['Attributes', 'self.attributes'], {}), '(Attributes, self.attributes)\n', (6032, 6061), False, 'from typing import TYPE_CHECKING, Optional, Sequence, Set, Sized, SupportsInt, Union, cast, overload\n'), ((6592, 6645), 'numpy.dtype', 'np.dtype', (['f"""{d}{attrs.bitsPerComponentInMemory // 8}"""'], {}), "(f'{d}{attrs.bitsPerComponentInMemory // 8}')\n", (6600, 6645), True, 'import numpy as np\n'), ((10911, 10983), 'dask.array.map_blocks', 'map_blocks', (['self._dask_block'], {'copy': 'copy', 'chunks': 'chunks', 'dtype': 'self.dtype'}), '(self._dask_block, copy=copy, chunks=chunks, dtype=self.dtype)\n', (10921, 10983), False, 'from dask.array import map_blocks\n'), ((11550, 11597), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['coords', 'self._coord_shape'], {}), '(coords, self._coord_shape)\n', (11570, 11597), True, 'import numpy as np\n'), ((14206, 14399), 'xarray.DataArray', 'xr.DataArray', (['data'], {'dims': 'dims', 'coords': 'coords', 'attrs': "{'metadata': {'metadata': self.metadata, 'experiment': self.experiment,\n 'attributes': self.attributes, 'text_info': self.text_info}}"}), "(data, dims=dims, coords=coords, attrs={'metadata': {'metadata':\n self.metadata, 'experiment': self.experiment, 'attributes': self.\n attributes, 'text_info': self.text_info}})\n", (14218, 14399), True, 'import xarray as xr\n'), ((6240, 6259), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (6247, 6259), True, 'import numpy as np\n'), ((11268, 11318), 'resource_backed_dask_array.ResourceBackedDaskArray.from_array', 'ResourceBackedDaskArray.from_array', (['dask_arr', 'self'], {}), '(dask_arr, self)\n', (11302, 11318), False, 'from resource_backed_dask_array import ResourceBackedDaskArray\n'), ((15683, 15709), 'numpy.prod', 'np.prod', (['self._coord_shape'], {}), '(self._coord_shape)\n', (15690, 15709), True, 'import numpy as np\n'), ((16381, 16416), 'numpy.arange', 'np.arange', (['self.attributes.heightPx'], {}), '(self.attributes.heightPx)\n', (16390, 16416), True, 'import numpy as np\n'), ((16443, 16482), 'numpy.arange', 'np.arange', (['(self.attributes.widthPx or 1)'], {}), '(self.attributes.widthPx or 1)\n', (16452, 16482), True, 'import numpy as np\n'), ((17547, 17576), 'numpy.arange', 'np.arange', (['self.sizes[AXIS.Z]'], {}), '(self.sizes[AXIS.Z])\n', (17556, 17576), True, 'import numpy as np\n'), ((12289, 12303), 'typing.cast', 'cast', (['int', 'idx'], {}), '(int, idx)\n', (12293, 12303), False, 'from typing import TYPE_CHECKING, Optional, Sequence, Set, Sized, SupportsInt, Union, cast, overload\n'), ((16786, 16804), 'numpy.arange', 'np.arange', (['c.count'], {}), '(c.count)\n', (16795, 16804), True, 'import numpy as np\n'), ((16902, 16920), 'numpy.arange', 'np.arange', (['c.count'], {}), '(c.count)\n', (16911, 16920), True, 'import numpy as np\n'), ((17109, 17124), 'numpy.hstack', 'np.hstack', (['pers'], {}), '(pers)\n', (17118, 17124), True, 'import numpy as np\n'), ((18329, 18344), 'pathlib.Path', 'Path', (['self.path'], {}), '(self.path)\n', (18333, 18344), False, 'from pathlib import Path\n'), ((8868, 8884), 'itertools.product', 'product', (['*ranges'], {}), '(*ranges)\n', (8875, 8884), False, 'from itertools import product\n'), ((17010, 17028), 'numpy.arange', 'np.arange', (['p.count'], {}), '(p.count)\n', 
(17019, 17028), True, 'import numpy as np\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet model for classifying images from CIFAR-10 dataset.
Support single-host training with one or multiple devices.
ResNet as proposed in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
CIFAR-10 as in:
http://www.cs.toronto.edu/~kriz/cifar.html
"""
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import functools
import itertools
import os
# Silence tf for prettier logging of Bayesian Optimization
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
from bayes_opt import BayesianOptimization
from bayes_opt import UtilityFunction
import cifar10
import cifar10_model
import cifar10_utils
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# import tensorflow_addons as tfa
# Setting verbosity to INFO will log training and evaluation details.
tf.logging.set_verbosity(tf.logging.ERROR)
import ray
from ray.tune import run, Trainable
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.bayesopt import BayesOptSearch
import logging
logging.getLogger("tensorflow").setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-dir",
type=str,
required=True,
help="The directory where the CIFAR-10 input data is stored.",
)
parser.add_argument(
"--job-dir",
type=str,
required=True,
help="The directory where the model will be stored.",
)
parser.add_argument(
"--variable-strategy",
choices=["CPU", "GPU"],
type=str,
default="CPU",
help="Where to locate variable operations",
)
parser.add_argument(
"--num-gpus",
type=int,
default=1,
help="The number of gpus used. Uses only CPU if set to 0.",
)
parser.add_argument(
"--num-layers",
type=int,
default=20,
help="The number of layers of the model.",
)
parser.add_argument(
"--train-steps",
type=int,
default=80000,
help="The number of steps to use for training.",
)
# parser.add_argument(
# "--train-batch-size",
# type=int,
# default=128,
# help="Batch size for training.",
# )
parser.add_argument(
"--eval-batch-size",
type=int,
default=500,
help="Batch size for validation.",
)
parser.add_argument(
"--num-batches-for-eval",
type=int,
default=10,
help="Number of batches for validation.",
)
# parser.add_argument(
# "--momentum",
# type=float,
# default=0.9,
# help="Momentum for MomentumOptimizer.",
# )
# parser.add_argument(
# "--weight-decay",
# type=float,
# default=2e-4,
# help="Weight decay for convolutions.",
# )
# parser.add_argument(
# "--learning-rate",
# type=float,
# default=0.1,
# help="""\
#     This is the initial learning rate value. The learning rate will decrease
# during training. For more details check the model_fn implementation in
# this file.\
# """,
# )
parser.add_argument(
"--use-distortion-for-training",
type=bool,
default=True,
help="If doing image distortion for training.",
)
parser.add_argument(
"--sync",
action="store_true",
default=False,
help="""\
If present when running in a distributed environment will run on sync mode.\
""",
)
parser.add_argument(
"--num-intra-threads",
type=int,
default=0,
help="""\
Number of threads to use for intra-op parallelism. When training on CPU
set to 0 to have the system pick the appropriate number or alternatively
set it to the number of physical CPU cores.\
""",
)
parser.add_argument(
"--num-inter-threads",
type=int,
default=0,
help="""\
Number of threads to use for inter-op parallelism. If set to 0, the
system will pick an appropriate number.\
""",
)
parser.add_argument(
"--data-format",
type=str,
default=None,
help="""\
If not set, the data format best for the training device is used.
Allowed values: channels_first (NCHW) channels_last (NHWC).\
""",
)
parser.add_argument(
"--log-device-placement",
action="store_true",
default=False,
help="Whether to log device placement.",
)
# parser.add_argument(
# "--batch-norm-decay",
# type=float,
# default=0.997,
# help="Decay for batch norm.",
# )
# parser.add_argument(
# "--batch-norm-epsilon",
# type=float,
# default=1e-5,
# help="Epsilon for batch norm.",
# )
# Add arguments related to BayesOpt
parser.add_argument(
"--smoke-test",
action="store_true",
default=False,
help="Finish quickly for testing",
)
# parser.add_argument(
# "--verbose", type=bool, default=False, help="Verbose output of training."
# )
parser.add_argument(
"--strategy",
type=str,
default="proposed",
help="Strategy for discretizing. Possible options are: basic, proposed.",
)
parser.add_argument(
"--metric",
type=str,
default="accuracy",
help="""\
Whether to use accuracy or loss for Bayesian optimization.\
""",
)
# TODO: better name?
parser.add_argument(
"--precision",
type=int,
default=1000,
help="""\
Size of grid\
""",
)
parser.add_argument(
"--log-path",
type=str,
default=os.getcwd() + "/train.log",
help="""
""",
)
parser.add_argument(
"--ray-address",
type=str,
default="",
help="""
""",
)
args = parser.parse_args()
# Filling in shared values here
hparams = {}
hparams["num_layers"] = args.num_layers
hparams["eval_batch_size"] = args.eval_batch_size
hparams["sync"] = args.sync
hparams["num_inter_threads"] = args.num_inter_threads
hparams["data_format"] = args.data_format
def get_model_fn(num_gpus, variable_strategy, num_workers):
"""Returns a function that will build the resnet model."""
def _resnet_model_fn(features, labels, mode, params):
"""Resnet model body.
        Supports single-host training with one or more GPUs. Parameter distribution
        can follow either one of the following schemes.
1. CPU is the parameter server and manages gradient updates.
2. Parameters are distributed evenly across all GPUs, and the first GPU
manages gradient updates.
Args:
features: a list of tensors, one for each tower
labels: a list of tensors, one for each tower
mode: ModeKeys.TRAIN or EVAL
params: Hyperparameters suitable for tuning
Returns:
            An EstimatorSpec object.
"""
is_training = mode == tf.estimator.ModeKeys.TRAIN
weight_decay = params.weight_decay
momentum = params.momentum
tower_features = features
tower_labels = labels
tower_losses = []
tower_gradvars = []
tower_preds = []
# channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
# on CPU. The exception is Intel MKL on CPU which is optimal with
# channels_last.
data_format = params.data_format
if not data_format:
if num_gpus == 0:
data_format = "channels_last"
else:
data_format = "channels_first"
if num_gpus == 0:
num_devices = 1
device_type = "cpu"
else:
num_devices = num_gpus
device_type = "gpu"
for i in range(num_devices):
worker_device = "/{}:{}".format(device_type, i)
if variable_strategy == "CPU":
device_setter = cifar10_utils.local_device_setter(
worker_device=worker_device
)
elif variable_strategy == "GPU":
device_setter = cifar10_utils.local_device_setter(
ps_device_type="gpu",
worker_device=worker_device,
ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(
num_gpus, tf.contrib.training.byte_size_load_fn
),
)
with tf.variable_scope("resnet", reuse=bool(i != 0)):
with tf.name_scope("tower_%d" % i) as name_scope:
with tf.device(device_setter):
loss, gradvars, preds = _tower_fn(
is_training,
weight_decay,
tower_features[i],
tower_labels[i],
data_format,
params.num_layers,
params.batch_norm_decay,
params.batch_norm_epsilon,
)
tower_losses.append(loss)
tower_gradvars.append(gradvars)
tower_preds.append(preds)
if i == 0:
# Only trigger batch_norm moving mean and variance update from
# the 1st tower. Ideally, we should grab the updates from all
# towers but these stats accumulate extremely fast so we can
# ignore the other stats from the other towers without
# significant detriment.
update_ops = tf.get_collection(
tf.GraphKeys.UPDATE_OPS, name_scope
)
# Now compute global loss and gradients.
gradvars = []
with tf.name_scope("gradient_averaging"):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in six.iteritems(all_grads):
# Average gradients on the same device as the variables
# to which they apply.
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(
tf.add_n(grads), 1.0 / len(grads)
)
gradvars.append((avg_grad, var))
# Device that runs the ops to apply global gradient updates.
consolidation_device = (
"/gpu:0" if variable_strategy == "GPU" else "/cpu:0"
)
with tf.device(consolidation_device):
# Suggested learning rate scheduling from
# https://github.com/ppwwyyxx/tensorpack/blob/master/examples/ResNet/cifar10-resnet.py#L155
num_batches_per_epoch = cifar10.Cifar10DataSet.num_examples_per_epoch(
"train"
) // (
params.train_batch_size * num_workers
)
boundaries = [
num_batches_per_epoch * x
for x in np.array([80, 120, 160], dtype=np.int64)
]
staged_lr = [
params.learning_rate * x for x in [1, 0.1, 0.01, 0.001]
]
learning_rate = tf.train.piecewise_constant(
tf.train.get_global_step(), boundaries, staged_lr
)
loss = tf.reduce_mean(tower_losses, name="loss")
# examples_sec_hook = cifar10_utils.ExamplesPerSecondHook(
# params.train_batch_size, every_n_steps=10
# )
# tensors_to_log = {"learning_rate": learning_rate, "loss": loss}
# logging_hook = tf.train.LoggingTensorHook(
# tensors=tensors_to_log, every_n_iter=100
# )
# train_hooks = [logging_hook, examples_sec_hook]
train_hooks = []
# Hyper-parameter "momentum" is only used for the Momentum Optimizer
# Other optimizers use their default parameters.
if params.optimizer == "momentum":
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum
)
elif params.optimizer == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif params.optimizer == "adagrad":
optimizer = tf.train.AdagradOptimizer(
learning_rate=learning_rate
)
elif params.optimizer == "adadelta":
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=learning_rate
)
elif params.optimizer == "sgd":
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=learning_rate
)
elif params.optimizer == "rmsprop":
optimizer = tf.train.RMSPropOptimizer(
learning_rate=learning_rate
)
else:
raise ValueError("unrecognized optimizer name")
# TODO: RAdam is implemented in tensorflow-addons v0.6, which requires tf 2.0
# Upgrade code by removing tf.contrib modules.
# optimizer = tfa.optimizers.RectifiedAdam(lr=learning_rate)
if params.sync:
optimizer = tf.train.SyncReplicasOptimizer(
optimizer, replicas_to_aggregate=num_workers
)
sync_replicas_hook = optimizer.make_session_run_hook(
params.is_chief
)
train_hooks.append(sync_replicas_hook)
# Create single grouped train op
train_op = [
optimizer.apply_gradients(
gradvars, global_step=tf.train.get_global_step()
)
]
train_op.extend(update_ops)
train_op = tf.group(*train_op)
predictions = {
"classes": tf.concat(
[p["classes"] for p in tower_preds], axis=0
),
"probabilities": tf.concat(
[p["probabilities"] for p in tower_preds], axis=0
),
}
stacked_labels = tf.concat(labels, axis=0)
metrics = {
"accuracy": tf.metrics.accuracy(
stacked_labels, predictions["classes"]
)
}
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
training_hooks=train_hooks,
eval_metric_ops=metrics,
)
return _resnet_model_fn
def _tower_fn(
is_training,
weight_decay,
feature,
label,
data_format,
num_layers,
batch_norm_decay,
batch_norm_epsilon,
):
"""Build computation tower (Resnet).
Args:
is_training: true if is training graph.
weight_decay: weight regularization strength, a float.
feature: a Tensor.
label: a Tensor.
data_format: channels_last (NHWC) or channels_first (NCHW).
num_layers: number of layers, an int.
batch_norm_decay: decay for batch normalization, a float.
batch_norm_epsilon: epsilon for batch normalization, a float.
Returns:
A tuple with the loss for the tower, the gradients and parameters, and
predictions.
"""
model = cifar10_model.ResNetCifar10(
num_layers,
batch_norm_decay=batch_norm_decay,
batch_norm_epsilon=batch_norm_epsilon,
is_training=is_training,
data_format=data_format,
)
logits = model.forward_pass(feature, input_data_format="channels_last")
tower_pred = {
"classes": tf.argmax(input=logits, axis=1),
"probabilities": tf.nn.softmax(logits),
}
tower_loss = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=label
)
tower_loss = tf.reduce_mean(tower_loss)
model_params = tf.trainable_variables()
tower_loss += weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in model_params]
)
tower_grad = tf.gradients(tower_loss, model_params)
return tower_loss, zip(tower_grad, model_params), tower_pred
def input_fn(
data_dir, subset, num_shards, batch_size, use_distortion_for_training=True
):
"""Create input graph for model.
Args:
data_dir: Directory where TFRecords representing the dataset are located.
subset: one of 'train', 'validation' and 'eval'.
num_shards: num of towers participating in data-parallel training.
batch_size: total batch size for training to be divided by the number of
shards.
use_distortion_for_training: True to use distortions.
Returns:
two lists of tensors for features and labels, each of num_shards length.
"""
with tf.device("/cpu:0"):
use_distortion = subset == "train" and use_distortion_for_training
dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
image_batch, label_batch = dataset.make_batch(batch_size)
if num_shards <= 1:
# No GPU available or only 1 GPU.
return [image_batch], [label_batch]
# Note that passing num=batch_size is safe here, even though
# dataset.batch(batch_size) can, in some cases, return fewer than batch_size
# examples. This is because it does so only when repeating for a limited
# number of epochs, but our dataset repeats forever.
image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
feature_shards = [[] for i in range(num_shards)]
label_shards = [[] for i in range(num_shards)]
for i in xrange(batch_size):
idx = i % num_shards
feature_shards[idx].append(image_batch[i])
label_shards[idx].append(label_batch[i])
feature_shards = [tf.parallel_stack(x) for x in feature_shards]
label_shards = [tf.parallel_stack(x) for x in label_shards]
return feature_shards, label_shards
def build_estimator(
data_dir,
num_gpus,
variable_strategy,
run_config,
hparams,
use_distortion_for_training=True,
ws=None,
):
"""Returns an Experiment function.
Experiments perform training on several workers in parallel,
in other words experiments know how to invoke train and eval in a sensible
fashion for distributed training. Arguments passed directly to this
function are not tunable, all other arguments should be passed within
tf.HParams, passed to the enclosed function.
Args:
data_dir: str. Location of the data for input_fns.
num_gpus: int. Number of GPUs on each worker.
variable_strategy: String. CPU to use CPU as the parameter server
and GPU to use the GPUs as the parameter server.
use_distortion_for_training: bool. See cifar10.Cifar10DataSet.
Returns:
A function (tf.estimator.RunConfig, tf.contrib.training.HParams) ->
tf.contrib.learn.Experiment.
Suitable for use by tf.contrib.learn.learn_runner, which will run various
methods on Experiment (train, evaluate) based on information
about the current runner in `run_config`.
"""
# Create estimator.
train_input_fn = functools.partial(
input_fn,
data_dir,
subset="train",
num_shards=num_gpus,
batch_size=hparams.train_batch_size,
use_distortion_for_training=use_distortion_for_training,
)
eval_input_fn = functools.partial(
input_fn,
data_dir,
subset="validation",
batch_size=hparams.eval_batch_size,
num_shards=num_gpus,
)
# validation: 5000, eval:10000
num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch(
"validation"
)
if num_eval_examples % hparams.eval_batch_size != 0:
raise ValueError(
"validation set size must be multiple of eval_batch_size"
)
classifier = tf.estimator.Estimator(
model_fn=get_model_fn(
num_gpus, variable_strategy, run_config.num_worker_replicas or 1
),
config=run_config,
params=hparams,
warm_start_from=ws,
)
return train_input_fn, eval_input_fn, classifier
def get_idx(pbounds, names):
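    """Return a 0/1 indicator list over the alphabetically sorted pbounds keys,
    marking which keys appear in names (used to flag discrete/categorical
    dimensions for BayesOptSearch)."""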
param_names = list(pbounds.keys())
param_names.sort()
param_list = [0] * len(param_names)
for i in range(len(param_names)):
if param_names[i] in names:
param_list[i] = 1
return param_list
class MyTrainableEstimator(Trainable):
def _setup(self, config):
# The env variable is on deprecation path, default is set to off.
os.environ["TF_SYNC_ON_FINISH"] = "0"
os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "1"
# Session configuration.
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=args.log_device_placement,
intra_op_parallelism_threads=args.num_intra_threads,
gpu_options=tf.GPUOptions(
force_gpu_compatible=True, allow_growth=True
),
)
# Convert to actual hyperparameter values here using the grid (discrete) input
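        # The batch_size index 0..6 maps to 2 ** (idx + 5), i.e. 32..2048; most of the
        # other integer grid values are rescaled linearly into their real-valued ranges
        # below, and the optimizer index selects one of the named optimizers.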
hparams["train_batch_size"] = 2 ** (int(config["batch_size"]) + 5)
hparams["momentum"] = 0.4 + (
0.55 * int(config["momentum"]) / args.precision
)
hparams["weight_decay"] = 1e-4 + (
1e-4 * int(config["weight_decay"]) / args.precision
)
hparams["batch_norm_decay"] = 0.8 + (
0.199 * int(config["batch_norm_decay"]) / args.precision
)
hparams["batch_norm_epsilon"] = 1e-5 + (
0.00099 * int(config["batch_norm_epsilon"]) / args.precision
)
hparams["learning_rate"] = 0.01 + (
0.1 * int(config["learning_rate"]) / args.precision
)
opt = int(config["optimizer"])
if opt == 0:
hparams["optimizer"] = "momentum"
elif opt == 1:
hparams["optimizer"] = "adam"
elif opt == 2:
hparams["optimizer"] = "adagrad"
elif opt == 3:
hparams["optimizer"] = "adadelta"
elif opt == 4:
hparams["optimizer"] = "sgd"
else:
hparams["optimizer"] = "rmsprop"
# Calculate number of steps per one epoch
self.train_steps = cifar10.Cifar10DataSet.num_examples_per_epoch(
"train"
) // (hparams["train_batch_size"])
# TODO: Fix checkpoint dir
run_config = cifar10_utils.RunConfig(
session_config=sess_config,
model_dir=None,
save_checkpoints_secs=None,
save_checkpoints_steps=self.train_steps,
keep_checkpoint_max=None,
keep_checkpoint_every_n_hours=None,
)
self.run_config = run_config
self.train_input_fn, self.eval_input_fn, self.estimator = build_estimator(
data_dir=args.data_dir,
num_gpus=args.num_gpus,
variable_strategy=args.variable_strategy,
use_distortion_for_training=args.use_distortion_for_training,
run_config=run_config,
hparams=tf.contrib.training.HParams(
is_chief=run_config.is_chief, **hparams
),
)
self.logger = logging.getLogger("metrics")
self.logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(args.log_path)
self.logger.addHandler(file_handler)
self.logger.info(f"[CONFIG] ID={self._experiment_id} config={hparams}")
# self.steps = self.train_steps
def _train(self):
self.estimator.train(
input_fn=self.train_input_fn, steps=self.train_steps
)
metrics = self.estimator.evaluate(
input_fn=self.eval_input_fn,
steps=args.eval_batch_size * args.num_batches_for_eval,
)
# self.steps = self.steps + self.train_steps
self.logger.info(
f"[RESULT] ID={self._experiment_id} iter={self._iteration} result={metrics}"
)
return metrics
def _stop(self):
self.estimator = None
def _save(self, checkpoint_dir):
        latest_checkpoint = self.estimator.latest_checkpoint()
        tf.logging.info(
            "Saving checkpoint {} for tune".format(latest_checkpoint)
        )
        f = open(checkpoint_dir + "/path.txt", "w")
        f.write(latest_checkpoint)
f.flush()
f.close()
return checkpoint_dir + "/path.txt"
def _restore(self, checkpoint_path):
f = open(checkpoint_path, "r")
path = f.readline().strip()
tf.logging.info("Opening checkpoint {} for tune".format(path))
f.flush()
f.close()
ws = tf.estimator.WarmStartSettings(ckpt_to_initialize_from=path)
self.train_input_fn, self.eval_input_fn, self.estimator = build_estimator(
data_dir=args.data_dir,
num_gpus=args.num_gpus,
variable_strategy=args.variable_strategy,
use_distortion_for_training=args.use_distortion_for_training,
run_config=self.run_config,
hparams=tf.contrib.training.HParams(
is_chief=self.run_config.is_chief, **hparams
),
warm_start_from=ws,
)
def main():
# print(args)
# Minor hack of generating a grid of 100 values each.
# By setting all parameters to be discrete values over range (0,100),
# we can map each integer value to corresponding hyperparameter value in training code.
pbounds = {
"batch_size": (0, 6),
"momentum": (0, args.precision),
"weight_decay": (0, args.precision),
"batch_norm_decay": (0, args.precision),
"batch_norm_epsilon": (0, args.precision),
"learning_rate": (0, args.precision),
"optimizer": (0, 6),
}
discrete = [
"batch_size",
"momentum",
"weight_decay",
"batch_norm_decay",
"batch_norm_epsilon",
"learning_rate",
"optimizer",
]
categorical = []
discrete_indices = get_idx(pbounds, discrete)
categorical_indices = get_idx(pbounds, categorical)
train_spec = {
"resources_per_trial": {"cpu": 12, "gpu": 1},
"stop": {
"accuracy": 93,
"training_iteration": 2 if args.smoke_test else 99999,
},
"config": {
"exp": "ckpt", # the name of directory where training results are saved
"log_level": "ERROR",
},
"num_samples": 100000,
"local_dir": "/home/ddoyoon/BayesianOptimization/examples/cnn/cifar10_estimator/ckpt",
"checkpoint_at_end": True,
}
algo = BayesOptSearch(
args.strategy,
pbounds,
discrete=discrete_indices,
categorical=categorical_indices,
max_concurrent=12,
metric="accuracy",
mode="max",
utility_kwargs={"kind": "ucb", "kappa": 2.5, "xi": 0.0},
)
# TODO: Initial values will not be discretized as of now.
# Manually probing with discrete values instead.
# algo.optimizer.probe(
# params={
# "batch_size": 0,
# "momentum": 0,
# "weight_decay": 0,
# "batch_norm_decay": 0,
# "batch_norm_epsilon": 0,
# "learning_rate": 0,
# },
# lazy=True,
# )
scheduler = AsyncHyperBandScheduler(
metric="accuracy",
mode="max",
max_t=200,
grace_period=20,
reduction_factor=2,
)
experiment_start = datetime.datetime.utcnow()
logger = logging.getLogger("metrics")
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(args.log_path)
logger.addHandler(file_handler)
logger.info(f"[ TIME ] start={experiment_start}")
run(
MyTrainableEstimator,
name="bo_resnet_cifar10",
search_alg=algo,
scheduler=scheduler,
**train_spec,
)
experiment_end = datetime.datetime.utcnow()
experiment_duration = experiment_end - experiment_start
logger.info(f"[ TIME ] end={experiment_end}")
logger.info(
f"[ TIME ] end-to-end (min)={experiment_duration.total_seconds() / 60}"
)
if __name__ == "__main__":
if args.ray_address != "":
ray.init(redis_address=args.ray_address, logging_level=logging.ERROR)
else:
ray.init()
if args.num_gpus > 0:
assert tf.test.is_gpu_available(), "Requested GPUs but none found."
if args.num_gpus < 0:
raise ValueError(
'Invalid GPU count: "--num-gpus" must be 0 or a positive integer.'
)
if args.num_gpus == 0 and args.variable_strategy == "GPU":
raise ValueError(
"num-gpus=0, CPU must be used as parameter server. Set"
"--variable-strategy=CPU."
)
if (args.num_layers - 2) % 6 != 0:
raise ValueError("Invalid --num-layers parameter.")
main()
|
[
"logging.getLogger",
"ray.tune.suggest.bayesopt.BayesOptSearch",
"tensorflow.unstack",
"itertools.chain",
"cifar10.Cifar10DataSet",
"tensorflow.logging.set_verbosity",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.gradients",
"tensorflow.group",
"numpy.array",
"six.moves.xrange",
"tensorflow.nn.softmax",
"tensorflow.reduce_mean",
"tensorflow.parallel_stack",
"ray.init",
"tensorflow.GPUOptions",
"tensorflow.contrib.training.GreedyLoadBalancingStrategy",
"ray.tune.schedulers.AsyncHyperBandScheduler",
"argparse.ArgumentParser",
"tensorflow.test.is_gpu_available",
"cifar10_utils.local_device_setter",
"tensorflow.metrics.accuracy",
"tensorflow.estimator.WarmStartSettings",
"tensorflow.concat",
"tensorflow.train.get_global_step",
"logging.FileHandler",
"tensorflow.train.SyncReplicasOptimizer",
"tensorflow.trainable_variables",
"tensorflow.train.AdamOptimizer",
"tensorflow.device",
"tensorflow.train.MomentumOptimizer",
"cifar10_model.ResNetCifar10",
"tensorflow.nn.l2_loss",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.train.AdadeltaOptimizer",
"cifar10.Cifar10DataSet.num_examples_per_epoch",
"six.iteritems",
"cifar10_utils.RunConfig",
"tensorflow.train.RMSPropOptimizer",
"datetime.datetime.utcnow",
"os.getcwd",
"tensorflow.train.AdagradOptimizer",
"tensorflow.argmax",
"tensorflow.add_n",
"functools.partial",
"tensorflow.name_scope",
"tensorflow.contrib.training.HParams",
"tensorflow.losses.sparse_softmax_cross_entropy",
"ray.tune.run",
"tensorflow.get_collection"
] |
[((1608, 1650), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (1632, 1650), True, 'import tensorflow as tf\n'), ((1891, 1916), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1914, 1916), False, 'import argparse\n'), ((16042, 16213), 'cifar10_model.ResNetCifar10', 'cifar10_model.ResNetCifar10', (['num_layers'], {'batch_norm_decay': 'batch_norm_decay', 'batch_norm_epsilon': 'batch_norm_epsilon', 'is_training': 'is_training', 'data_format': 'data_format'}), '(num_layers, batch_norm_decay=batch_norm_decay,\n batch_norm_epsilon=batch_norm_epsilon, is_training=is_training,\n data_format=data_format)\n', (16069, 16213), False, 'import cifar10_model\n'), ((16472, 16539), 'tensorflow.losses.sparse_softmax_cross_entropy', 'tf.losses.sparse_softmax_cross_entropy', ([], {'logits': 'logits', 'labels': 'label'}), '(logits=logits, labels=label)\n', (16510, 16539), True, 'import tensorflow as tf\n'), ((16571, 16597), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_loss'], {}), '(tower_loss)\n', (16585, 16597), True, 'import tensorflow as tf\n'), ((16618, 16642), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (16640, 16642), True, 'import tensorflow as tf\n'), ((16759, 16797), 'tensorflow.gradients', 'tf.gradients', (['tower_loss', 'model_params'], {}), '(tower_loss, model_params)\n', (16771, 16797), True, 'import tensorflow as tf\n'), ((20008, 20185), 'functools.partial', 'functools.partial', (['input_fn', 'data_dir'], {'subset': '"""train"""', 'num_shards': 'num_gpus', 'batch_size': 'hparams.train_batch_size', 'use_distortion_for_training': 'use_distortion_for_training'}), "(input_fn, data_dir, subset='train', num_shards=num_gpus,\n batch_size=hparams.train_batch_size, use_distortion_for_training=\n use_distortion_for_training)\n", (20025, 20185), False, 'import functools\n'), ((20253, 20373), 'functools.partial', 'functools.partial', (['input_fn', 'data_dir'], {'subset': '"""validation"""', 'batch_size': 'hparams.eval_batch_size', 'num_shards': 'num_gpus'}), "(input_fn, data_dir, subset='validation', batch_size=\n hparams.eval_batch_size, num_shards=num_gpus)\n", (20270, 20373), False, 'import functools\n'), ((20476, 20535), 'cifar10.Cifar10DataSet.num_examples_per_epoch', 'cifar10.Cifar10DataSet.num_examples_per_epoch', (['"""validation"""'], {}), "('validation')\n", (20521, 20535), False, 'import cifar10\n'), ((27538, 27751), 'ray.tune.suggest.bayesopt.BayesOptSearch', 'BayesOptSearch', (['args.strategy', 'pbounds'], {'discrete': 'discrete_indices', 'categorical': 'categorical_indices', 'max_concurrent': '(12)', 'metric': '"""accuracy"""', 'mode': '"""max"""', 'utility_kwargs': "{'kind': 'ucb', 'kappa': 2.5, 'xi': 0.0}"}), "(args.strategy, pbounds, discrete=discrete_indices,\n categorical=categorical_indices, max_concurrent=12, metric='accuracy',\n mode='max', utility_kwargs={'kind': 'ucb', 'kappa': 2.5, 'xi': 0.0})\n", (27552, 27751), False, 'from ray.tune.suggest.bayesopt import BayesOptSearch\n'), ((28246, 28352), 'ray.tune.schedulers.AsyncHyperBandScheduler', 'AsyncHyperBandScheduler', ([], {'metric': '"""accuracy"""', 'mode': '"""max"""', 'max_t': '(200)', 'grace_period': '(20)', 'reduction_factor': '(2)'}), "(metric='accuracy', mode='max', max_t=200,\n grace_period=20, reduction_factor=2)\n", (28269, 28352), False, 'from ray.tune.schedulers import AsyncHyperBandScheduler\n'), ((28420, 28446), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', 
(28444, 28446), False, 'import datetime\n'), ((28460, 28488), 'logging.getLogger', 'logging.getLogger', (['"""metrics"""'], {}), "('metrics')\n", (28477, 28488), False, 'import logging\n'), ((28542, 28576), 'logging.FileHandler', 'logging.FileHandler', (['args.log_path'], {}), '(args.log_path)\n', (28561, 28576), False, 'import logging\n'), ((28672, 28779), 'ray.tune.run', 'run', (['MyTrainableEstimator'], {'name': '"""bo_resnet_cifar10"""', 'search_alg': 'algo', 'scheduler': 'scheduler'}), "(MyTrainableEstimator, name='bo_resnet_cifar10', search_alg=algo,\n scheduler=scheduler, **train_spec)\n", (28675, 28779), False, 'from ray.tune import run, Trainable\n'), ((28845, 28871), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (28869, 28871), False, 'import datetime\n'), ((1825, 1856), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (1842, 1856), False, 'import logging\n'), ((15026, 15175), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'predictions': 'predictions', 'loss': 'loss', 'train_op': 'train_op', 'training_hooks': 'train_hooks', 'eval_metric_ops': 'metrics'}), '(mode=mode, predictions=predictions, loss=loss,\n train_op=train_op, training_hooks=train_hooks, eval_metric_ops=metrics)\n', (15052, 15175), True, 'import tensorflow as tf\n'), ((16367, 16398), 'tensorflow.argmax', 'tf.argmax', ([], {'input': 'logits', 'axis': '(1)'}), '(input=logits, axis=1)\n', (16376, 16398), True, 'import tensorflow as tf\n'), ((16425, 16446), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (16438, 16446), True, 'import tensorflow as tf\n'), ((17494, 17513), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (17503, 17513), True, 'import tensorflow as tf\n'), ((17608, 17664), 'cifar10.Cifar10DataSet', 'cifar10.Cifar10DataSet', (['data_dir', 'subset', 'use_distortion'], {}), '(data_dir, subset, use_distortion)\n', (17630, 17664), False, 'import cifar10\n'), ((18172, 18219), 'tensorflow.unstack', 'tf.unstack', (['image_batch'], {'num': 'batch_size', 'axis': '(0)'}), '(image_batch, num=batch_size, axis=0)\n', (18182, 18219), True, 'import tensorflow as tf\n'), ((18242, 18289), 'tensorflow.unstack', 'tf.unstack', (['label_batch'], {'num': 'batch_size', 'axis': '(0)'}), '(label_batch, num=batch_size, axis=0)\n', (18252, 18289), True, 'import tensorflow as tf\n'), ((18419, 18437), 'six.moves.xrange', 'xrange', (['batch_size'], {}), '(batch_size)\n', (18425, 18437), False, 'from six.moves import xrange\n'), ((23323, 23529), 'cifar10_utils.RunConfig', 'cifar10_utils.RunConfig', ([], {'session_config': 'sess_config', 'model_dir': 'None', 'save_checkpoints_secs': 'None', 'save_checkpoints_steps': 'self.train_steps', 'keep_checkpoint_max': 'None', 'keep_checkpoint_every_n_hours': 'None'}), '(session_config=sess_config, model_dir=None,\n save_checkpoints_secs=None, save_checkpoints_steps=self.train_steps,\n keep_checkpoint_max=None, keep_checkpoint_every_n_hours=None)\n', (23346, 23529), False, 'import cifar10_utils\n'), ((24114, 24142), 'logging.getLogger', 'logging.getLogger', (['"""metrics"""'], {}), "('metrics')\n", (24131, 24142), False, 'import logging\n'), ((24209, 24243), 'logging.FileHandler', 'logging.FileHandler', (['args.log_path'], {}), '(args.log_path)\n', (24228, 24243), False, 'import logging\n'), ((25567, 25627), 'tensorflow.estimator.WarmStartSettings', 'tf.estimator.WarmStartSettings', ([], {'ckpt_to_initialize_from': 'path'}), 
'(ckpt_to_initialize_from=path)\n', (25597, 25627), True, 'import tensorflow as tf\n'), ((29154, 29223), 'ray.init', 'ray.init', ([], {'redis_address': 'args.ray_address', 'logging_level': 'logging.ERROR'}), '(redis_address=args.ray_address, logging_level=logging.ERROR)\n', (29162, 29223), False, 'import ray\n'), ((29242, 29252), 'ray.init', 'ray.init', ([], {}), '()\n', (29250, 29252), False, 'import ray\n'), ((29295, 29321), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (29319, 29321), True, 'import tensorflow as tf\n'), ((5946, 5957), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5955, 5957), False, 'import os\n'), ((10175, 10210), 'tensorflow.name_scope', 'tf.name_scope', (['"""gradient_averaging"""'], {}), "('gradient_averaging')\n", (10188, 10210), True, 'import tensorflow as tf\n'), ((10268, 10300), 'itertools.chain', 'itertools.chain', (['*tower_gradvars'], {}), '(*tower_gradvars)\n', (10283, 10300), False, 'import itertools\n'), ((10432, 10456), 'six.iteritems', 'six.iteritems', (['all_grads'], {}), '(all_grads)\n', (10445, 10456), False, 'import six\n'), ((11099, 11130), 'tensorflow.device', 'tf.device', (['consolidation_device'], {}), '(consolidation_device)\n', (11108, 11130), True, 'import tensorflow as tf\n'), ((11903, 11944), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['tower_losses'], {'name': '"""loss"""'}), "(tower_losses, name='loss')\n", (11917, 11944), True, 'import tensorflow as tf\n'), ((14474, 14493), 'tensorflow.group', 'tf.group', (['*train_op'], {}), '(*train_op)\n', (14482, 14493), True, 'import tensorflow as tf\n'), ((14820, 14845), 'tensorflow.concat', 'tf.concat', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (14829, 14845), True, 'import tensorflow as tf\n'), ((18606, 18626), 'tensorflow.parallel_stack', 'tf.parallel_stack', (['x'], {}), '(x)\n', (18623, 18626), True, 'import tensorflow as tf\n'), ((18676, 18696), 'tensorflow.parallel_stack', 'tf.parallel_stack', (['x'], {}), '(x)\n', (18693, 18696), True, 'import tensorflow as tf\n'), ((23156, 23210), 'cifar10.Cifar10DataSet.num_examples_per_epoch', 'cifar10.Cifar10DataSet.num_examples_per_epoch', (['"""train"""'], {}), "('train')\n", (23201, 23210), False, 'import cifar10\n'), ((8198, 8260), 'cifar10_utils.local_device_setter', 'cifar10_utils.local_device_setter', ([], {'worker_device': 'worker_device'}), '(worker_device=worker_device)\n', (8231, 8260), False, 'import cifar10_utils\n'), ((11326, 11380), 'cifar10.Cifar10DataSet.num_examples_per_epoch', 'cifar10.Cifar10DataSet.num_examples_per_epoch', (['"""train"""'], {}), "('train')\n", (11371, 11380), False, 'import cifar10\n'), ((11819, 11845), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (11843, 11845), True, 'import tensorflow as tf\n'), ((12615, 12689), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'learning_rate', 'momentum': 'momentum'}), '(learning_rate=learning_rate, momentum=momentum)\n', (12641, 12689), True, 'import tensorflow as tf\n'), ((13902, 13978), 'tensorflow.train.SyncReplicasOptimizer', 'tf.train.SyncReplicasOptimizer', (['optimizer'], {'replicas_to_aggregate': 'num_workers'}), '(optimizer, replicas_to_aggregate=num_workers)\n', (13932, 13978), True, 'import tensorflow as tf\n'), ((14550, 14604), 'tensorflow.concat', 'tf.concat', (["[p['classes'] for p in tower_preds]"], {'axis': '(0)'}), "([p['classes'] for p in tower_preds], axis=0)\n", (14559, 14604), True, 'import tensorflow as tf\n'), ((14677, 14737), 
'tensorflow.concat', 'tf.concat', (["[p['probabilities'] for p in tower_preds]"], {'axis': '(0)'}), "([p['probabilities'] for p in tower_preds], axis=0)\n", (14686, 14737), True, 'import tensorflow as tf\n'), ((14898, 14957), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', (['stacked_labels', "predictions['classes']"], {}), "(stacked_labels, predictions['classes'])\n", (14917, 14957), True, 'import tensorflow as tf\n'), ((16695, 16711), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (16708, 16711), True, 'import tensorflow as tf\n'), ((21783, 21842), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'force_gpu_compatible': '(True)', 'allow_growth': '(True)'}), '(force_gpu_compatible=True, allow_growth=True)\n', (21796, 21842), True, 'import tensorflow as tf\n'), ((23981, 24049), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'is_chief': 'run_config.is_chief'}), '(is_chief=run_config.is_chief, **hparams)\n', (24008, 24049), True, 'import tensorflow as tf\n'), ((25971, 26044), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'is_chief': 'self.run_config.is_chief'}), '(is_chief=self.run_config.is_chief, **hparams)\n', (25998, 26044), True, 'import tensorflow as tf\n'), ((8783, 8812), 'tensorflow.name_scope', 'tf.name_scope', (["('tower_%d' % i)"], {}), "('tower_%d' % i)\n", (8796, 8812), True, 'import tensorflow as tf\n'), ((10590, 10611), 'tensorflow.device', 'tf.device', (['var.device'], {}), '(var.device)\n', (10599, 10611), True, 'import tensorflow as tf\n'), ((11578, 11618), 'numpy.array', 'np.array', (['[80, 120, 160]'], {'dtype': 'np.int64'}), '([80, 120, 160], dtype=np.int64)\n', (11586, 11618), True, 'import numpy as np\n'), ((12801, 12852), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (12823, 12852), True, 'import tensorflow as tf\n'), ((8853, 8877), 'tensorflow.device', 'tf.device', (['device_setter'], {}), '(device_setter)\n', (8862, 8877), True, 'import tensorflow as tf\n'), ((12929, 12983), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (12954, 12983), True, 'import tensorflow as tf\n'), ((14352, 14378), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (14376, 14378), True, 'import tensorflow as tf\n'), ((8534, 8635), 'tensorflow.contrib.training.GreedyLoadBalancingStrategy', 'tf.contrib.training.GreedyLoadBalancingStrategy', (['num_gpus', 'tf.contrib.training.byte_size_load_fn'], {}), '(num_gpus, tf.contrib.\n training.byte_size_load_fn)\n', (8581, 8635), True, 'import tensorflow as tf\n'), ((9973, 10027), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'name_scope'], {}), '(tf.GraphKeys.UPDATE_OPS, name_scope)\n', (9990, 10027), True, 'import tensorflow as tf\n'), ((10799, 10814), 'tensorflow.add_n', 'tf.add_n', (['grads'], {}), '(grads)\n', (10807, 10814), True, 'import tensorflow as tf\n'), ((13099, 13154), 'tensorflow.train.AdadeltaOptimizer', 'tf.train.AdadeltaOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (13125, 13154), True, 'import tensorflow as tf\n'), ((13265, 13327), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (13298, 13327), True, 'import tensorflow as tf\n'), ((13442, 13496), 
'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (13467, 13496), True, 'import tensorflow as tf\n')]
|
import torch
import cv2 as cv
import numpy as np
from sklearn.neighbors import NearestNeighbors
from .model_utils import spread_feature
def optimize_image_mask(image_mask, sp_image, nK=4, th=1e-2):
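    # Keep a masked pixel only if all of its nK nearest neighbours in 3D (excluding
    # itself) lie within distance th, pruning isolated back-projected points.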
mask_pts = image_mask.reshape(-1)
xyz_pts = sp_image.reshape(-1, 3)
xyz_pts = xyz_pts[mask_pts > 0.5, :]
Neighbors = NearestNeighbors(n_neighbors=nK + 1, algorithm='kd_tree').fit(xyz_pts)
nn_dist, nn_idx = Neighbors.kneighbors(xyz_pts) # N,nK
nn_dist = nn_dist[:, 1:]
valid = (np.sum((nn_dist < th).astype(np.float), axis=1) == nK).astype(np.float)
optimized_mask = image_mask.reshape(-1)
optimized_mask[mask_pts > 0.5] = valid
optimized_mask = optimized_mask.reshape(image_mask.shape[0], image_mask.shape[1])
return optimized_mask
def generate_final_mask(image_learned_uv, image_mask,
image_resize_factor, mask_container_low_res, final_gim):
"""
Post Process Algorithm to generate mask of the unwrapped chart
Parameters
----------
image_learned_uv: [H,W,2]
image_mask: [H,W]
image_resize_factor: float
mask_container_low_res: a predefined tensor with intermediate low resolution
final_gim: a predefined tensor with target high resolution
"""
# resize (larger) rgb and uv with Bi-linear up-sampling
resized_uv = cv.resize(image_learned_uv, dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_LINEAR)
resized_mask = cv.resize(image_mask, dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_LINEAR)
resized_mask = (resized_mask > 0.5).astype(np.float)
# use gradient to remove the edge
discontinuous_mask_u = cv.Laplacian(image_learned_uv[..., 0], ddepth=cv.CV_32F) # small gradient map
discontinuous_mask_v = cv.Laplacian(image_learned_uv[..., 1], ddepth=cv.CV_32F) # small gradient map
    # use the max and min in latent u and v to find the threshold
u_max = (image_learned_uv[..., 0] * image_mask).max()
v_max = (image_learned_uv[..., 1] * image_mask).max()
u_min = (image_learned_uv[..., 0] * image_mask + (1.0 - image_mask)).min()
v_min = (image_learned_uv[..., 1] * image_mask + (1.0 - image_mask)).min()
u_th = (u_max - u_min) / 30
v_th = (v_max - v_min) / 30
discontinuous_mask_u = (discontinuous_mask_u > u_th).astype(np.float) * image_mask
discontinuous_mask_v = (discontinuous_mask_v > v_th).astype(np.float) * image_mask
discontinuous_mask = ((discontinuous_mask_u + discontinuous_mask_v) > 0).astype(np.float)
# use the mask to remove the boundary
boundary_recovery_mask = (cv.Laplacian(image_mask, ddepth=cv.CV_32F) > 0.01).astype(np.float)
discontinuous_mask = discontinuous_mask * (1.0 - boundary_recovery_mask)
resized_discontinuous_mask = cv.resize(discontinuous_mask,
dsize=(image_resize_factor * image_learned_uv.shape[0],
image_resize_factor * image_learned_uv.shape[1]),
interpolation=cv.INTER_NEAREST)
# make the small mask & texture
high_res_mask = torch.from_numpy(resized_mask * (1.0 - resized_discontinuous_mask)) \
.unsqueeze(0).unsqueeze(0).cuda().float() # 1,1,R,R
high_res_uv = torch.from_numpy(resized_uv).permute(2, 0, 1).unsqueeze(0).cuda().float()
low_res_mask = mask_container_low_res.cuda()
low_res_mask = spread_feature(low_res_mask, high_res_uv, high_res_mask, high_res_mask)
# use close to remove the holes in small mask and then resize
low_res_mask_closed = low_res_mask.detach().cpu().squeeze(0).squeeze(0).numpy() # R,R
close_k_size = int(final_gim.shape[2] / 100)
close_kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (close_k_size, close_k_size))
final_mask_np = cv.resize(low_res_mask_closed, dsize=(final_gim.shape[2],
final_gim.shape[2]),
interpolation=cv.INTER_NEAREST) # R,R,3
final_mask_np = (final_mask_np > 0).astype(np.float)
final_mask_np = cv.morphologyEx(final_mask_np, cv.MORPH_OPEN, close_kernel)
return final_mask_np
def generate_texture(sp_image, full_gim, image_rgb, image_mask, final_mask_np, final_res, nK=4, th=1e-2):
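    # For every valid texel of the high-resolution chart, find its nK nearest image
    # pixels in 3D, drop texels whose closest pixel is farther than th, and blend the
    # pixel colours with weights that decrease with that distance.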
    # prepare root and query points from the image and from the high-res chart
root_xyz_np = sp_image.reshape(-1, 3) # H*W,3
root_rgb_np = image_rgb.reshape(-1, 3) # H*W,3
_image_mask = image_mask.reshape(-1) # H*W
root_xyz_np = root_xyz_np[_image_mask > 0.5, :] # M,2 [0,1]
root_rgb_np = root_rgb_np[_image_mask > 0.5, :] # M,3 [0,1]
query_xyz_np = full_gim.reshape(-1, 3) # R*R,3
_final_mask_np = final_mask_np.reshape(-1) # R*R
query_xyz_np = query_xyz_np[_final_mask_np > 0.5, :] # N,3 [0,1]
# finding nearest root pixel points
Neighbors = NearestNeighbors(n_neighbors=nK, algorithm='kd_tree').fit(root_xyz_np)
nn_dist, nn_idx = Neighbors.kneighbors(query_xyz_np) # N,nK
# optimize the gim mask
valid = (nn_dist[:, 0] < th).astype(np.float)
optimized_final_mask_np = final_mask_np.reshape(-1).copy()
optimized_final_mask_np[_final_mask_np > 0.5] = valid
optimized_final_mask_np = optimized_final_mask_np.reshape(final_mask_np.shape[0], final_mask_np.shape[1])
# do interpolation based on chart distance
interpolation_weight = nn_dist.copy()
interpolation_weight = 1 - interpolation_weight / np.sum(interpolation_weight, 1, keepdims=True)
interpolation_weight = interpolation_weight / np.sum(interpolation_weight, 1, keepdims=True)
query_rgb_np = np.zeros((query_xyz_np.shape[0], 3))
for kdx in range(nK):
nn_color = root_rgb_np[nn_idx[:, kdx], :]
query_rgb_np += nn_color * interpolation_weight[:, kdx][..., np.newaxis]
final_texture_np = np.ones((final_res ** 2, 3))
final_texture_np[_final_mask_np > 0.5, :] = query_rgb_np
final_texture_np = final_texture_np.reshape(final_res, final_res, 3)
return final_texture_np, optimized_final_mask_np
|
[
"cv2.Laplacian",
"numpy.ones",
"torch.from_numpy",
"cv2.morphologyEx",
"numpy.zeros",
"numpy.sum",
"sklearn.neighbors.NearestNeighbors",
"cv2.resize",
"cv2.getStructuringElement"
] |
[((1330, 1503), 'cv2.resize', 'cv.resize', (['image_learned_uv'], {'dsize': '(image_resize_factor * image_learned_uv.shape[0], image_resize_factor *\n image_learned_uv.shape[1])', 'interpolation': 'cv.INTER_LINEAR'}), '(image_learned_uv, dsize=(image_resize_factor * image_learned_uv.\n shape[0], image_resize_factor * image_learned_uv.shape[1]),\n interpolation=cv.INTER_LINEAR)\n', (1339, 1503), True, 'import cv2 as cv\n'), ((1593, 1761), 'cv2.resize', 'cv.resize', (['image_mask'], {'dsize': '(image_resize_factor * image_learned_uv.shape[0], image_resize_factor *\n image_learned_uv.shape[1])', 'interpolation': 'cv.INTER_LINEAR'}), '(image_mask, dsize=(image_resize_factor * image_learned_uv.shape[0\n ], image_resize_factor * image_learned_uv.shape[1]), interpolation=cv.\n INTER_LINEAR)\n', (1602, 1761), True, 'import cv2 as cv\n'), ((1951, 2007), 'cv2.Laplacian', 'cv.Laplacian', (['image_learned_uv[..., 0]'], {'ddepth': 'cv.CV_32F'}), '(image_learned_uv[..., 0], ddepth=cv.CV_32F)\n', (1963, 2007), True, 'import cv2 as cv\n'), ((2057, 2113), 'cv2.Laplacian', 'cv.Laplacian', (['image_learned_uv[..., 1]'], {'ddepth': 'cv.CV_32F'}), '(image_learned_uv[..., 1], ddepth=cv.CV_32F)\n', (2069, 2113), True, 'import cv2 as cv\n'), ((3059, 3235), 'cv2.resize', 'cv.resize', (['discontinuous_mask'], {'dsize': '(image_resize_factor * image_learned_uv.shape[0], image_resize_factor *\n image_learned_uv.shape[1])', 'interpolation': 'cv.INTER_NEAREST'}), '(discontinuous_mask, dsize=(image_resize_factor * image_learned_uv\n .shape[0], image_resize_factor * image_learned_uv.shape[1]),\n interpolation=cv.INTER_NEAREST)\n', (3068, 3235), True, 'import cv2 as cv\n'), ((4007, 4079), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(close_k_size, close_k_size)'], {}), '(cv.MORPH_ELLIPSE, (close_k_size, close_k_size))\n', (4031, 4079), True, 'import cv2 as cv\n'), ((4100, 4215), 'cv2.resize', 'cv.resize', (['low_res_mask_closed'], {'dsize': '(final_gim.shape[2], final_gim.shape[2])', 'interpolation': 'cv.INTER_NEAREST'}), '(low_res_mask_closed, dsize=(final_gim.shape[2], final_gim.shape[2\n ]), interpolation=cv.INTER_NEAREST)\n', (4109, 4215), True, 'import cv2 as cv\n'), ((4385, 4444), 'cv2.morphologyEx', 'cv.morphologyEx', (['final_mask_np', 'cv.MORPH_OPEN', 'close_kernel'], {}), '(final_mask_np, cv.MORPH_OPEN, close_kernel)\n', (4400, 4444), True, 'import cv2 as cv\n'), ((5921, 5957), 'numpy.zeros', 'np.zeros', (['(query_xyz_np.shape[0], 3)'], {}), '((query_xyz_np.shape[0], 3))\n', (5929, 5957), True, 'import numpy as np\n'), ((6138, 6166), 'numpy.ones', 'np.ones', (['(final_res ** 2, 3)'], {}), '((final_res ** 2, 3))\n', (6145, 6166), True, 'import numpy as np\n'), ((5855, 5901), 'numpy.sum', 'np.sum', (['interpolation_weight', '(1)'], {'keepdims': '(True)'}), '(interpolation_weight, 1, keepdims=True)\n', (5861, 5901), True, 'import numpy as np\n'), ((332, 389), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': '(nK + 1)', 'algorithm': '"""kd_tree"""'}), "(n_neighbors=nK + 1, algorithm='kd_tree')\n", (348, 389), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((5170, 5223), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'nK', 'algorithm': '"""kd_tree"""'}), "(n_neighbors=nK, algorithm='kd_tree')\n", (5186, 5223), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((5758, 5804), 'numpy.sum', 'np.sum', (['interpolation_weight', '(1)'], {'keepdims': '(True)'}), '(interpolation_weight, 1, keepdims=True)\n', 
(5764, 5804), True, 'import numpy as np\n'), ((2881, 2923), 'cv2.Laplacian', 'cv.Laplacian', (['image_mask'], {'ddepth': 'cv.CV_32F'}), '(image_mask, ddepth=cv.CV_32F)\n', (2893, 2923), True, 'import cv2 as cv\n'), ((3419, 3486), 'torch.from_numpy', 'torch.from_numpy', (['(resized_mask * (1.0 - resized_discontinuous_mask))'], {}), '(resized_mask * (1.0 - resized_discontinuous_mask))\n', (3435, 3486), False, 'import torch\n'), ((3568, 3596), 'torch.from_numpy', 'torch.from_numpy', (['resized_uv'], {}), '(resized_uv)\n', (3584, 3596), False, 'import torch\n')]
|
import logging
import os
import numpy as np
import torch
if torch.cuda.is_available():
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
device = torch.device('cuda')
else:
device = torch.device('cpu')
def _patch_noise_extend_to_img(noise, image_size=[3, 32, 32], patch_location='center'):
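    # Paste the (c, x_len, y_len) noise patch onto a zero canvas of size image_size,
    # either centred or at a random location, and return the resulting full-size mask.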
c, h, w = image_size[0], image_size[1], image_size[2]
mask = np.zeros((c, h, w), np.float32)
x_len, y_len = noise.shape[1], noise.shape[2]
if patch_location == 'center' or (h == w == x_len == y_len):
x = h // 2
y = w // 2
elif patch_location == 'random':
x = np.random.randint(x_len // 2, w - x_len // 2)
y = np.random.randint(y_len // 2, h - y_len // 2)
else:
        raise ValueError('Invalid patch location')
x1 = np.clip(x - x_len // 2, 0, h)
x2 = np.clip(x + x_len // 2, 0, h)
y1 = np.clip(y - y_len // 2, 0, w)
y2 = np.clip(y + y_len // 2, 0, w)
mask[:, x1: x2, y1: y2] = noise
return mask
def setup_logger(name, log_file, level=logging.INFO):
"""To setup as many loggers as you want"""
formatter = logging.Formatter('%(asctime)s %(message)s')
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(file_handler)
logger.addHandler(console_handler)
return logger
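# Illustrative usage of the two helpers below/above (hypothetical file name and values):
#   logger = setup_logger('train', 'train.log')
#   logger.info(log_display(epoch=1, global_step=100, time_elapse=0.05, loss=1.234))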
def log_display(epoch, global_step, time_elapse, **kwargs):
display = 'epoch=' + str(epoch) + \
'\tglobal_step=' + str(global_step)
for key, value in kwargs.items():
if type(value) == str:
            display += '\t' + key + '=' + value
else:
display += '\t' + str(key) + '=%.4f' % value
display += '\ttime=%.2fit/s' % (1. / time_elapse)
return display
def accuracy(output, target, topk=(1,)):
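    # Top-k accuracy: for each k in topk, the fraction of samples whose target class
    # is among the k highest-scoring predictions.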
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(1/batch_size))
return res
def save_model(filename, epoch, model, optimizer, scheduler, save_best=False, **kwargs):
# Torch Save State Dict
state = {
'epoch': epoch+1,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict() if scheduler is not None else None
}
for key, value in kwargs.items():
state[key] = value
torch.save(state, filename + '.pth')
filename += '_best.pth'
if save_best:
torch.save(state, filename)
return
def load_model(filename, model, optimizer, scheduler, **kwargs):
# Load Torch State Dict
filename = filename + '.pth'
checkpoints = torch.load(filename, map_location=device)
model.load_state_dict(checkpoints['model_state_dict'])
if optimizer is not None and checkpoints['optimizer_state_dict'] is not None:
optimizer.load_state_dict(checkpoints['optimizer_state_dict'])
if scheduler is not None and checkpoints['scheduler_state_dict'] is not None:
scheduler.load_state_dict(checkpoints['scheduler_state_dict'])
return checkpoints
def count_parameters_in_MB(model):
return sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary_head" not in name)/1e6
def build_dirs(path):
if not os.path.exists(path):
os.makedirs(path)
return
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.max = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
self.max = max(self.max, val)
def onehot(size, target):
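    # Return a float32 vector of length `size` with a 1 at index `target`.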
vec = torch.zeros(size, dtype=torch.float32)
vec[target] = 1.
return vec
def rand_bbox(size, lam):
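    # Sample a random box covering roughly a (1 - lam) fraction of the image area,
    # returned as (bbx1, bby1, bbx2, bby2) corner coordinates (CutMix-style crop).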
if len(size) == 4:
W = size[2]
H = size[3]
elif len(size) == 3:
W = size[1]
H = size[2]
else:
raise Exception
cut_rat = np.sqrt(1. - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
|
[
"numpy.clip",
"logging.getLogger",
"os.path.exists",
"logging.StreamHandler",
"numpy.sqrt",
"os.makedirs",
"logging.Formatter",
"torch.load",
"numpy.zeros",
"torch.cuda.is_available",
"logging.FileHandler",
"numpy.random.randint",
"torch.save",
"numpy.int",
"torch.zeros",
"torch.device"
] |
[((62, 87), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (85, 87), False, 'import torch\n'), ((230, 250), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (242, 250), False, 'import torch\n'), ((270, 289), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (282, 289), False, 'import torch\n'), ((449, 480), 'numpy.zeros', 'np.zeros', (['(c, h, w)', 'np.float32'], {}), '((c, h, w), np.float32)\n', (457, 480), True, 'import numpy as np\n'), ((848, 877), 'numpy.clip', 'np.clip', (['(x - x_len // 2)', '(0)', 'h'], {}), '(x - x_len // 2, 0, h)\n', (855, 877), True, 'import numpy as np\n'), ((887, 916), 'numpy.clip', 'np.clip', (['(x + x_len // 2)', '(0)', 'h'], {}), '(x + x_len // 2, 0, h)\n', (894, 916), True, 'import numpy as np\n'), ((926, 955), 'numpy.clip', 'np.clip', (['(y - y_len // 2)', '(0)', 'w'], {}), '(y - y_len // 2, 0, w)\n', (933, 955), True, 'import numpy as np\n'), ((965, 994), 'numpy.clip', 'np.clip', (['(y + y_len // 2)', '(0)', 'w'], {}), '(y + y_len // 2, 0, w)\n', (972, 994), True, 'import numpy as np\n'), ((1166, 1210), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(message)s"""'], {}), "('%(asctime)s %(message)s')\n", (1183, 1210), False, 'import logging\n'), ((1233, 1256), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1254, 1256), False, 'import logging\n'), ((1320, 1349), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (1339, 1349), False, 'import logging\n'), ((1404, 1427), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1421, 1427), False, 'import logging\n'), ((2763, 2799), 'torch.save', 'torch.save', (['state', "(filename + '.pth')"], {}), "(state, filename + '.pth')\n", (2773, 2799), False, 'import torch\n'), ((3039, 3080), 'torch.load', 'torch.load', (['filename'], {'map_location': 'device'}), '(filename, map_location=device)\n', (3049, 3080), False, 'import torch\n'), ((4200, 4238), 'torch.zeros', 'torch.zeros', (['size'], {'dtype': 'torch.float32'}), '(size, dtype=torch.float32)\n', (4211, 4238), False, 'import torch\n'), ((4480, 4498), 'numpy.sqrt', 'np.sqrt', (['(1.0 - lam)'], {}), '(1.0 - lam)\n', (4487, 4498), True, 'import numpy as np\n'), ((4510, 4529), 'numpy.int', 'np.int', (['(W * cut_rat)'], {}), '(W * cut_rat)\n', (4516, 4529), True, 'import numpy as np\n'), ((4542, 4561), 'numpy.int', 'np.int', (['(H * cut_rat)'], {}), '(H * cut_rat)\n', (4548, 4561), True, 'import numpy as np\n'), ((4586, 4606), 'numpy.random.randint', 'np.random.randint', (['W'], {}), '(W)\n', (4603, 4606), True, 'import numpy as np\n'), ((4616, 4636), 'numpy.random.randint', 'np.random.randint', (['H'], {}), '(H)\n', (4633, 4636), True, 'import numpy as np\n'), ((4649, 4679), 'numpy.clip', 'np.clip', (['(cx - cut_w // 2)', '(0)', 'W'], {}), '(cx - cut_w // 2, 0, W)\n', (4656, 4679), True, 'import numpy as np\n'), ((4691, 4721), 'numpy.clip', 'np.clip', (['(cy - cut_h // 2)', '(0)', 'H'], {}), '(cy - cut_h // 2, 0, H)\n', (4698, 4721), True, 'import numpy as np\n'), ((4733, 4763), 'numpy.clip', 'np.clip', (['(cx + cut_w // 2)', '(0)', 'W'], {}), '(cx + cut_w // 2, 0, W)\n', (4740, 4763), True, 'import numpy as np\n'), ((4775, 4805), 'numpy.clip', 'np.clip', (['(cy + cut_h // 2)', '(0)', 'H'], {}), '(cy + cut_h // 2, 0, H)\n', (4782, 4805), True, 'import numpy as np\n'), ((2854, 2881), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (2864, 2881), False, 'import torch\n'), ((3651, 3671), 
'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3665, 3671), False, 'import os\n'), ((3681, 3698), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3692, 3698), False, 'import os\n'), ((684, 729), 'numpy.random.randint', 'np.random.randint', (['(x_len // 2)', '(w - x_len // 2)'], {}), '(x_len // 2, w - x_len // 2)\n', (701, 729), True, 'import numpy as np\n'), ((742, 787), 'numpy.random.randint', 'np.random.randint', (['(y_len // 2)', '(h - y_len // 2)'], {}), '(y_len // 2, h - y_len // 2)\n', (759, 787), True, 'import numpy as np\n')]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.