code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M)
---|---|---|
# -*- coding: utf-8 -*-
import sys, os
import pandas as pd
import numpy as np
from collections import Counter
import joblib
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GroupKFold
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import make_scorer, average_precision_score
from imblearn.over_sampling import SMOTE
sys.path.append('../analysis/')
from analysis import cv_save_feat_importances_result, cv_save_classification_result
def main(argv):
infile = argv[0]
mode = argv[1] # binary or multiclass or nonwear
dataset = argv[2]
outdir = argv[3]
resultdir = os.path.join(outdir,'models')
if not os.path.exists(resultdir):
os.makedirs(resultdir)
# Read data file and retain data only corresponding to 5 sleep states or nonwear
df = pd.read_csv(infile, dtype={'label':object, 'user':object,
'position':object, 'dataset':object})
if mode == 'binary':
states = ['Wake', 'Sleep']
collate_states = ['NREM 1', 'NREM 2', 'NREM 3', 'REM']
df.loc[df['label'].isin(collate_states), 'label'] = 'Sleep'
elif mode == 'nonwear':
states = ['Wear', 'Nonwear']
collate_states = ['Wake', 'NREM 1', 'NREM 2', 'NREM 3', 'REM']
df.loc[df['label'].isin(collate_states), 'label'] = 'Wear'
else:
states = ['Wake', 'NREM 1', 'NREM 2', 'NREM 3', 'REM']
df = df[df['label'].isin(states)].reset_index(drop=True)
print('... Number of data samples: %d' % len(df))
ctr = Counter(df['label'])
for cls in ctr:
print('%s: %d (%0.2f%%)' % (cls,ctr[cls],ctr[cls]*100.0/len(df)))
feat_cols = ['ENMO_mean','ENMO_std','ENMO_range','ENMO_mad',
'ENMO_entropy1','ENMO_entropy2', 'ENMO_prev30diff', 'ENMO_next30diff',
'ENMO_prev60diff', 'ENMO_next60diff', 'ENMO_prev120diff', 'ENMO_next120diff',
'angz_mean','angz_std','angz_range','angz_mad',
'angz_entropy1','angz_entropy2', 'angz_prev30diff', 'angz_next30diff',
'angz_prev60diff', 'angz_next60diff', 'angz_prev120diff', 'angz_next120diff',
'LIDS_mean','LIDS_std','LIDS_range','LIDS_mad',
'LIDS_entropy1','LIDS_entropy2', 'LIDS_prev30diff', 'LIDS_next30diff',
'LIDS_prev60diff', 'LIDS_next60diff', 'LIDS_prev120diff', 'LIDS_next120diff']
######################## Partition the datasets #######################
# Nested cross-validation - outer CV for estimating model performance,
# inner CV for tuning model hyperparameters
# Both loops split by user (GroupKFold) rather than by sample, so that
# no user's data leaks between the train and test folds
ts = df['timestamp']
X = df[feat_cols].values
y = df['label']
y = np.array([states.index(i) for i in y])
groups = df['user']
fnames = df['filename']
feat_len = X.shape[1]
encoder = OneHotEncoder()
scorer = make_scorer(average_precision_score, average='macro')
# Outer CV
balanced_pred = []; balanced_imp = []
outer_cv_splits = 5; inner_cv_splits = 5
outer_group_kfold = GroupKFold(n_splits=outer_cv_splits)
out_fold = 0
for train_indices, test_indices in outer_group_kfold.split(X,y,groups):
out_fold += 1
out_fold_X_train = X[train_indices,:]; out_fold_X_test = X[test_indices,:]
out_fold_y_train = y[train_indices]; out_fold_y_test = y[test_indices]
out_fold_users_train = groups[train_indices]; out_fold_users_test = groups[test_indices]
out_fold_ts_test = ts[test_indices]
out_fold_fnames_test = fnames[test_indices]
if mode != 'multiclass':
# keyword arguments are required by newer scikit-learn releases
class_wt = compute_class_weight(class_weight='balanced', classes=np.unique(out_fold_y_train), y=out_fold_y_train)
class_wt = {i:val for i,val in enumerate(class_wt)}
else:
class_wt = []
for cls in range(len(states)):
class_train = (out_fold_y_train == cls).astype(np.int32)
cls_wt = compute_class_weight(class_weight='balanced', classes=np.unique(class_train), y=class_train)
cls_wt = {i:val for i,val in enumerate(cls_wt)}
class_wt.append(cls_wt)
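# For reference, 'balanced' weights follow n_samples / (n_classes * n_c),
# where n_c is the count of class c. Illustration with hypothetical counts
# (not from this dataset): y = [0, 0, 0, 1] gives
# {0: 4/(2*3) ~ 0.67, 1: 4/(2*1) = 2.0}.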
# Inner CV
################## Balancing with SMOTE ###################
scaler = StandardScaler()
scaler.fit(out_fold_X_train)
out_fold_X_train_sc = scaler.transform(out_fold_X_train)
out_fold_X_test_sc = scaler.transform(out_fold_X_test)
# Imblearn - Undersampling techniques ENN and Tomek are too slow and
# difficult to parallelize
# So stick only with oversampling techniques
print('Fold'+str(out_fold)+' - Balanced: SMOTE')
smote = SMOTE(random_state=0, n_jobs=-1, sampling_strategy='all')
# Resample training data for each user
train_users = list(set(out_fold_users_train))
out_fold_X_train_resamp, out_fold_y_train_resamp, out_fold_users_train_resamp = None, None, None
for i,user in enumerate(train_users):
#print('%d/%d - %s' % (i+1,len(train_users),user))
user_X = out_fold_X_train_sc[out_fold_users_train == user]
user_y = out_fold_y_train[out_fold_users_train == user]
if len(set(user_y)) == 1:
print('%d/%d: %s has only one class' % (i+1,len(train_users),user))
print(Counter(user_y))
continue
try:
user_X_resamp, user_y_resamp = smote.fit_resample(user_X, user_y)
except ValueError:  # SMOTE fails when a class has fewer samples than k_neighbors
print('%d/%d: %s failed to fit' % (i+1,len(train_users),user))
print(Counter(user_y))
continue
user_y_resamp = user_y_resamp.reshape(-1,1)
user_resamp = np.array([user] * len(user_X_resamp)).reshape(-1,1)
if out_fold_X_train_resamp is None:
out_fold_X_train_resamp = user_X_resamp
out_fold_y_train_resamp = user_y_resamp
out_fold_users_train_resamp = user_resamp
else:
out_fold_X_train_resamp = np.vstack((out_fold_X_train_resamp, user_X_resamp))
out_fold_y_train_resamp = np.vstack((out_fold_y_train_resamp, user_y_resamp))
out_fold_users_train_resamp = np.vstack((out_fold_users_train_resamp, user_resamp))
# Shuffle resampled data
resamp_indices = np.arange(len(out_fold_X_train_resamp))
np.random.shuffle(resamp_indices)  # note: unseeded, so shuffling is not reproducible across runs
out_fold_X_train_resamp = out_fold_X_train_resamp[resamp_indices]
out_fold_y_train_resamp = out_fold_y_train_resamp[resamp_indices].reshape(-1)
out_fold_users_train_resamp = out_fold_users_train_resamp[resamp_indices].reshape(-1)
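# Sanity check (illustrative, commented out): with sampling_strategy='all',
# each successfully resampled user contributes equal class counts, so the
# pooled label distribution should be roughly uniform across states:
#   print(Counter(out_fold_y_train_resamp))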
inner_group_kfold = GroupKFold(n_splits=inner_cv_splits)
custom_resamp_cv_indices = []
for grp_train_idx, grp_test_idx in \
inner_group_kfold.split(out_fold_X_train_resamp, out_fold_y_train_resamp, out_fold_users_train_resamp):
custom_resamp_cv_indices.append((grp_train_idx, grp_test_idx))
grp_train_users = out_fold_users_train_resamp[grp_train_idx]
grp_test_users = out_fold_users_train_resamp[grp_test_idx]
# Note: imblearn Pipeline is slow and sklearn pipeline yields poor results
clf = RandomForestClassifier(class_weight=class_wt,
max_depth=None, random_state=0)
print('Fold'+str(out_fold)+' - Balanced: Hyperparameter search')
search_params = {'n_estimators':[100,150,200,300,400,500],
'max_depth': [5,10,15,20,None]}
cv_clf = RandomizedSearchCV(estimator=clf, param_distributions=search_params,
cv=custom_resamp_cv_indices, scoring=scorer,
n_iter=10, n_jobs=-1, verbose=2)
if mode == 'multiclass':
# one-hot targets make the forest fit independent per-class (one-vs-rest) outputs
out_fold_y_train_resamp = encoder.fit_transform(out_fold_y_train_resamp.reshape(-1,1)).toarray()
cv_clf.fit(out_fold_X_train_resamp, out_fold_y_train_resamp)
print(cv_clf.best_estimator_)
joblib.dump([scaler,cv_clf], os.path.join(resultdir,\
'fold'+str(out_fold)+'_'+ mode + '_balanced_RF.sav'))
out_fold_y_test_pred = cv_clf.predict_proba(out_fold_X_test_sc)
if mode == 'multiclass': # collect probabilities from each binary classification
multiclass_y_pred = None
for cls in range(len(out_fold_y_test_pred)):
if cls == 0:
multiclass_y_pred = out_fold_y_test_pred[cls][:,1].reshape(-1,1)
else:
multiclass_y_pred = np.hstack((multiclass_y_pred, out_fold_y_test_pred[cls][:,1].reshape(-1,1)))
out_fold_y_test_pred = multiclass_y_pred
print('Fold'+str(out_fold)+' - Balanced', cv_clf.best_params_)
balanced_pred.append((out_fold_users_test, out_fold_ts_test, out_fold_fnames_test,
out_fold_y_test, out_fold_y_test_pred))
balanced_imp.append(cv_clf.best_estimator_.feature_importances_)
print('############## Balanced classification ##############')
# Save balanced classification reports
cv_save_feat_importances_result(balanced_imp, feat_cols,
os.path.join(outdir, mode + '_balanced_feat_imp.csv'))
cv_save_classification_result(balanced_pred, states,
os.path.join(outdir, mode + '_balanced_classification.csv'))
if __name__ == "__main__":
main(sys.argv[1:])
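# Example invocation (hypothetical file/script names; the CLI takes four
# positional arguments - infile, mode, dataset, outdir):
#   python train_sleep_rf.py features.csv binary mesa ./output
# Models are written to ./output/models and CSV reports to ./output.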
|
[
"sys.path.append",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.StandardScaler",
"os.makedirs",
"pandas.read_csv",
"numpy.unique",
"sklearn.preprocessing.OneHotEncoder",
"os.path.exists",
"sklearn.model_selection.RandomizedSearchCV",
"sklearn.metrics.make_scorer",
"numpy.vstack",
"imblearn.over_sampling.SMOTE",
"sklearn.model_selection.GroupKFold",
"collections.Counter",
"os.path.join",
"numpy.random.shuffle"
] |
[((584, 615), 'sys.path.append', 'sys.path.append', (['"""../analysis/"""'], {}), "('../analysis/')\n", (599, 615), False, 'import sys, os\n'), ((850, 880), 'os.path.join', 'os.path.join', (['outdir', '"""models"""'], {}), "(outdir, 'models')\n", (862, 880), False, 'import sys, os\n'), ((1039, 1142), 'pandas.read_csv', 'pd.read_csv', (['infile'], {'dtype': "{'label': object, 'user': object, 'position': object, 'dataset': object}"}), "(infile, dtype={'label': object, 'user': object, 'position':\n object, 'dataset': object})\n", (1050, 1142), True, 'import pandas as pd\n'), ((1721, 1741), 'collections.Counter', 'Counter', (["df['label']"], {}), "(df['label'])\n", (1728, 1741), False, 'from collections import Counter\n'), ((3118, 3133), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (3131, 3133), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((3146, 3199), 'sklearn.metrics.make_scorer', 'make_scorer', (['average_precision_score'], {'average': '"""macro"""'}), "(average_precision_score, average='macro')\n", (3157, 3199), False, 'from sklearn.metrics import make_scorer, average_precision_score\n'), ((3369, 3405), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': 'outer_cv_splits'}), '(n_splits=outer_cv_splits)\n', (3379, 3405), False, 'from sklearn.model_selection import GroupKFold, StratifiedKFold\n'), ((890, 915), 'os.path.exists', 'os.path.exists', (['resultdir'], {}), '(resultdir)\n', (904, 915), False, 'import sys, os\n'), ((922, 944), 'os.makedirs', 'os.makedirs', (['resultdir'], {}), '(resultdir)\n', (933, 944), False, 'import sys, os\n'), ((4456, 4472), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4470, 4472), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder\n'), ((4859, 4916), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': '(0)', 'n_jobs': '(-1)', 'sampling_strategy': '"""all"""'}), "(random_state=0, n_jobs=-1, sampling_strategy='all')\n", (4864, 4916), False, 'from imblearn.over_sampling import SMOTE\n'), ((6421, 6454), 'numpy.random.shuffle', 'np.random.shuffle', (['resamp_indices'], {}), '(resamp_indices)\n', (6438, 6454), True, 'import numpy as np\n'), ((6727, 6763), 'sklearn.model_selection.GroupKFold', 'GroupKFold', ([], {'n_splits': 'inner_cv_splits'}), '(n_splits=inner_cv_splits)\n', (6737, 6763), False, 'from sklearn.model_selection import GroupKFold, StratifiedKFold\n'), ((7254, 7331), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'class_weight': 'class_wt', 'max_depth': 'None', 'random_state': '(0)'}), '(class_weight=class_wt, max_depth=None, random_state=0)\n', (7276, 7331), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((7562, 7713), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'clf', 'param_distributions': 'search_params', 'cv': 'custom_resamp_cv_indices', 'scoring': 'scorer', 'n_iter': '(10)', 'n_jobs': '(-1)', 'verbose': '(2)'}), '(estimator=clf, param_distributions=search_params, cv=\n custom_resamp_cv_indices, scoring=scorer, n_iter=10, n_jobs=-1, verbose=2)\n', (7580, 7713), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((9133, 9186), 'os.path.join', 'os.path.join', (['outdir', "(mode + '_balanced_feat_imp.csv')"], {}), "(outdir, mode + '_balanced_feat_imp.csv')\n", (9145, 9186), False, 'import sys, os\n'), ((9264, 9323), 'os.path.join', 'os.path.join', (['outdir', "(mode + '_balanced_classification.csv')"], 
{}), "(outdir, mode + '_balanced_classification.csv')\n", (9276, 9323), False, 'import sys, os\n'), ((3939, 3966), 'numpy.unique', 'np.unique', (['out_fold_y_train'], {}), '(out_fold_y_train)\n', (3948, 3966), True, 'import numpy as np\n'), ((6092, 6143), 'numpy.vstack', 'np.vstack', (['(out_fold_X_train_resamp, user_X_resamp)'], {}), '((out_fold_X_train_resamp, user_X_resamp))\n', (6101, 6143), True, 'import numpy as np\n'), ((6179, 6230), 'numpy.vstack', 'np.vstack', (['(out_fold_y_train_resamp, user_y_resamp)'], {}), '((out_fold_y_train_resamp, user_y_resamp))\n', (6188, 6230), True, 'import numpy as np\n'), ((6270, 6323), 'numpy.vstack', 'np.vstack', (['(out_fold_users_train_resamp, user_resamp)'], {}), '((out_fold_users_train_resamp, user_resamp))\n', (6279, 6323), True, 'import numpy as np\n'), ((4232, 4254), 'numpy.unique', 'np.unique', (['class_train'], {}), '(class_train)\n', (4241, 4254), True, 'import numpy as np\n'), ((5469, 5484), 'collections.Counter', 'Counter', (['user_y'], {}), '(user_y)\n', (5476, 5484), False, 'from collections import Counter\n'), ((5693, 5708), 'collections.Counter', 'Counter', (['user_y'], {}), '(user_y)\n', (5700, 5708), False, 'from collections import Counter\n')]
|
import unittest
from yauber_algo.errors import *
class TWMATestCase(unittest.TestCase):
def test_twma(self):
import yauber_algo.sanitychecks as sc
from numpy import array, nan, inf
import os
import sys
import pandas as pd
import numpy as np
from yauber_algo.algo import twma
#
# Function settings
#
algo = 'twma'
func = twma
with sc.SanityChecker(algo) as s:
#
# Check regular algorithm logic
#
s.check_regular(
array([nan, nan, 2, 7/3]),
func,
(
array([3, 2, 1, 4]),
np.array([1, 1, 1])
),
suffix='twma_equal_weight'
)
s.check_regular(
array([nan, nan, (1*1+2*0.5+3*0.25)/1.75, (4*1+1*0.5+2*0.25)/1.75]),
func,
(
array([3, 2, 1, 4]),
np.array([1, 0.5, 0.25])
),
suffix='twma_linear_weight'
)
s.check_regular(
array([nan, nan, 2, 7 / 3]),
func,
(
array([3, 2, 1, 4]),
[1, 1, 1]
),
suffix='twma_list_weight'
)
s.check_regular(
array([nan, nan, 2, 7 / 3]),
func,
(
array([3, 2, 1, 4]),
pd.Series([1, 1, 1])
),
suffix='twma_series_weight'
)
s.check_regular(
array([nan, nan, 2, 7 / 3]),
func,
(
array([3, 2, 1, 4]),
[1, 1, 1, 2, 2]
),
suffix='twma_weight_gt_ser',
exception=YaUberAlgoArgumentError,
)
s.check_regular(
array([nan, nan, nan, nan]),
func,
(
array([3, 2, 1, 4]),
np.array([0, 0, 0])
),
suffix='twma_zeroweight'
)
s.check_regular(
array([nan, nan, 2, 7 / 3]),
func,
(
array([3, 2, 1, 4]),
np.array([1, 1, nan])
),
suffix='twma_nan_weight',
exception=YaUberAlgoArgumentError,
)
s.check_naninf(
array([nan, nan, nan, nan]),
func,
(
array([3, 2, nan, inf]),
np.array([1, 1, 1, 1])
),
suffix='',
)
s.check_series(
pd.Series(array([nan, nan, 2, 7 / 3])),
func,
(
pd.Series(array([3, 2, 1, 4])),
np.array([1, 1, 1])
),
)
s.check_dtype_float(
array([nan, nan, 2, 7 / 3], dtype=float),
func,
(
array([3, 2, 1, 4], dtype=float),
np.array([1, 1, 1], dtype=float)
),
)
s.check_dtype_bool(
array([nan, nan, 1/3, 2 / 3], dtype=float),
func,
(
array([0, 1, 0, 1], dtype=bool),
np.array([1, 1, 1], dtype=float)
),
)
s.check_dtype_int(
array([nan, nan, 2, 7 / 3], dtype=float),
func,
(
array([3, 2, 1, 4], dtype=np.int32),
np.array([1, 1, 1], dtype=np.int32)
),
)
s.check_dtype_object(
func,
(
array([3, 2, 1, 4], dtype=object),
np.array([1, 1, 1], dtype=float)
),
)
s.check_futref(5, 1,
func,
(
np.random.random(100),
np.array([1, 0.5, 0.25, 0.2, 0.1]),
),
fix_args=[1], # Use weights args as is
)
s.check_window_consistency(5, 1,
func,
(
np.random.random(100),
np.array([1, 0.5, 0.25, 0.2, 0.1]),
),
fix_args=[1], # Use weights args as is
)
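# A minimal sketch of the weighting these expectations imply (an assumption,
# not the actual yauber_algo implementation): weights[0] applies to the
# newest bar, so for series [3, 2, 1, 4] and weights [1, 0.5, 0.25] the value
# at index 3 is (4*1 + 1*0.5 + 2*0.25) / 1.75, matching 'twma_linear_weight'.
import numpy as np

def twma_sketch(ser, weights):
    ser = np.asarray(ser, dtype=float)
    w = np.asarray(weights, dtype=float)
    out = np.full(len(ser), np.nan)        # warm-up region stays NaN
    for i in range(len(w) - 1, len(ser)):
        window = ser[i - len(w) + 1:i + 1][::-1]  # newest observation first
        out[i] = np.sum(window * w) / np.sum(w)
    return out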
|
[
"pandas.Series",
"numpy.random.random",
"numpy.array",
"yauber_algo.sanitychecks.SanityChecker"
] |
[((443, 465), 'yauber_algo.sanitychecks.SanityChecker', 'sc.SanityChecker', (['algo'], {}), '(algo)\n', (459, 465), True, 'import yauber_algo.sanitychecks as sc\n'), ((589, 616), 'numpy.array', 'array', (['[nan, nan, 2, 7 / 3]'], {}), '([nan, nan, 2, 7 / 3])\n', (594, 616), False, 'from numpy import array, nan, inf\n'), ((859, 954), 'numpy.array', 'array', (['[nan, nan, (1 * 1 + 2 * 0.5 + 3 * 0.25) / 1.75, (4 * 1 + 1 * 0.5 + 2 * 0.25\n ) / 1.75]'], {}), '([nan, nan, (1 * 1 + 2 * 0.5 + 3 * 0.25) / 1.75, (4 * 1 + 1 * 0.5 + 2 *\n 0.25) / 1.75])\n', (864, 954), False, 'from numpy import array, nan, inf\n'), ((1177, 1204), 'numpy.array', 'array', (['[nan, nan, 2, 7 / 3]'], {}), '([nan, nan, 2, 7 / 3])\n', (1182, 1204), False, 'from numpy import array, nan, inf\n'), ((1437, 1464), 'numpy.array', 'array', (['[nan, nan, 2, 7 / 3]'], {}), '([nan, nan, 2, 7 / 3])\n', (1442, 1464), False, 'from numpy import array, nan, inf\n'), ((1711, 1738), 'numpy.array', 'array', (['[nan, nan, 2, 7 / 3]'], {}), '([nan, nan, 2, 7 / 3])\n', (1716, 1738), False, 'from numpy import array, nan, inf\n'), ((2032, 2059), 'numpy.array', 'array', (['[nan, nan, nan, nan]'], {}), '([nan, nan, nan, nan])\n', (2037, 2059), False, 'from numpy import array, nan, inf\n'), ((2302, 2329), 'numpy.array', 'array', (['[nan, nan, 2, 7 / 3]'], {}), '([nan, nan, 2, 7 / 3])\n', (2307, 2329), False, 'from numpy import array, nan, inf\n'), ((2625, 2652), 'numpy.array', 'array', (['[nan, nan, nan, nan]'], {}), '([nan, nan, nan, nan])\n', (2630, 2652), False, 'from numpy import array, nan, inf\n'), ((3142, 3182), 'numpy.array', 'array', (['[nan, nan, 2, 7 / 3]'], {'dtype': 'float'}), '([nan, nan, 2, 7 / 3], dtype=float)\n', (3147, 3182), False, 'from numpy import array, nan, inf\n'), ((3413, 3457), 'numpy.array', 'array', (['[nan, nan, 1 / 3, 2 / 3]'], {'dtype': 'float'}), '([nan, nan, 1 / 3, 2 / 3], dtype=float)\n', (3418, 3457), False, 'from numpy import array, nan, inf\n'), ((3683, 3723), 'numpy.array', 'array', (['[nan, nan, 2, 7 / 3]'], {'dtype': 'float'}), '([nan, nan, 2, 7 / 3], dtype=float)\n', (3688, 3723), False, 'from numpy import array, nan, inf\n'), ((676, 695), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {}), '([3, 2, 1, 4])\n', (681, 695), False, 'from numpy import array, nan, inf\n'), ((717, 736), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (725, 736), True, 'import numpy as np\n'), ((988, 1007), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {}), '([3, 2, 1, 4])\n', (993, 1007), False, 'from numpy import array, nan, inf\n'), ((1029, 1053), 'numpy.array', 'np.array', (['[1, 0.5, 0.25]'], {}), '([1, 0.5, 0.25])\n', (1037, 1053), True, 'import numpy as np\n'), ((1266, 1285), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {}), '([3, 2, 1, 4])\n', (1271, 1285), False, 'from numpy import array, nan, inf\n'), ((1526, 1545), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {}), '([3, 2, 1, 4])\n', (1531, 1545), False, 'from numpy import array, nan, inf\n'), ((1567, 1587), 'pandas.Series', 'pd.Series', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (1576, 1587), True, 'import pandas as pd\n'), ((1800, 1819), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {}), '([3, 2, 1, 4])\n', (1805, 1819), False, 'from numpy import array, nan, inf\n'), ((2121, 2140), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {}), '([3, 2, 1, 4])\n', (2126, 2140), False, 'from numpy import array, nan, inf\n'), ((2162, 2181), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2170, 2181), True, 'import numpy as np\n'), ((2391, 2410), 'numpy.array', 'array', 
(['[3, 2, 1, 4]'], {}), '([3, 2, 1, 4])\n', (2396, 2410), False, 'from numpy import array, nan, inf\n'), ((2432, 2453), 'numpy.array', 'np.array', (['[1, 1, nan]'], {}), '([1, 1, nan])\n', (2440, 2453), True, 'import numpy as np\n'), ((2714, 2737), 'numpy.array', 'array', (['[3, 2, nan, inf]'], {}), '([3, 2, nan, inf])\n', (2719, 2737), False, 'from numpy import array, nan, inf\n'), ((2759, 2781), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (2767, 2781), True, 'import numpy as np\n'), ((2897, 2924), 'numpy.array', 'array', (['[nan, nan, 2, 7 / 3]'], {}), '([nan, nan, 2, 7 / 3])\n', (2902, 2924), False, 'from numpy import array, nan, inf\n'), ((3039, 3058), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (3047, 3058), True, 'import numpy as np\n'), ((3244, 3276), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {'dtype': 'float'}), '([3, 2, 1, 4], dtype=float)\n', (3249, 3276), False, 'from numpy import array, nan, inf\n'), ((3298, 3330), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': 'float'}), '([1, 1, 1], dtype=float)\n', (3306, 3330), True, 'import numpy as np\n'), ((3517, 3548), 'numpy.array', 'array', (['[0, 1, 0, 1]'], {'dtype': 'bool'}), '([0, 1, 0, 1], dtype=bool)\n', (3522, 3548), False, 'from numpy import array, nan, inf\n'), ((3570, 3602), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': 'float'}), '([1, 1, 1], dtype=float)\n', (3578, 3602), True, 'import numpy as np\n'), ((3785, 3820), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {'dtype': 'np.int32'}), '([3, 2, 1, 4], dtype=np.int32)\n', (3790, 3820), False, 'from numpy import array, nan, inf\n'), ((3842, 3877), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': 'np.int32'}), '([1, 1, 1], dtype=np.int32)\n', (3850, 3877), True, 'import numpy as np\n'), ((4006, 4042), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {'dtype': 'np.object'}), '([3, 2, 1, 4], dtype=np.object)\n', (4011, 4042), False, 'from numpy import array, nan, inf\n'), ((4064, 4096), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': 'float'}), '([1, 1, 1], dtype=float)\n', (4072, 4096), True, 'import numpy as np\n'), ((4257, 4278), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (4273, 4278), True, 'import numpy as np\n'), ((4311, 4345), 'numpy.array', 'np.array', (['[1, 0.5, 0.25, 0.2, 0.1]'], {}), '([1, 0.5, 0.25, 0.2, 0.1])\n', (4319, 4345), True, 'import numpy as np\n'), ((4647, 4668), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (4663, 4668), True, 'import numpy as np\n'), ((4713, 4747), 'numpy.array', 'np.array', (['[1, 0.5, 0.25, 0.2, 0.1]'], {}), '([1, 0.5, 0.25, 0.2, 0.1])\n', (4721, 4747), True, 'import numpy as np\n'), ((2997, 3016), 'numpy.array', 'array', (['[3, 2, 1, 4]'], {}), '([3, 2, 1, 4])\n', (3002, 3016), False, 'from numpy import array, nan, inf\n')]
|
# coding=utf-8
from pypint.integrators.node_providers.gauss_legendre_nodes import GaussLegendreNodes
import unittest
from nose.tools import *
import numpy as np
test_num_nodes = range(2, 7)
def manual_initialization(n_nodes):
nodes = GaussLegendreNodes()
nodes.init(n_nodes)
assert_equal(nodes.num_nodes, n_nodes,
"Number of nodes should be set")
assert_is_instance(nodes.nodes, np.ndarray,
"Nodes should be a numpy.ndarray")
assert_equal(nodes.nodes.size, n_nodes,
"There should be correct number of nodes")
def test_manual_initialization():
for n_nodes in test_num_nodes:
yield manual_initialization, n_nodes
class GaussLegendreNodesTest(unittest.TestCase):
def setUp(self):
self._test_obj = GaussLegendreNodes()
def test_default_initialization(self):
self.assertIsNone(self._test_obj.num_nodes,
"Number of nodes should be initialized as 'None'")
self.assertIsNone(self._test_obj.nodes,
"Nodes list should be initializes as 'None'")
def test_correctness_of_selected_nodes(self):
self._test_obj.init(1)
self.assertAlmostEqual(self._test_obj.nodes[0], 0.0)
self.setUp()
self._test_obj.init(2)
self.assertAlmostEqual(self._test_obj.nodes[0], -np.sqrt(1.0 / 3.0))
self.assertAlmostEqual(self._test_obj.nodes[1], np.sqrt(1.0 / 3.0))
self.setUp()
self._test_obj.init(5)
self.assertAlmostEqual(self._test_obj.nodes[0], -1.0 / 3.0 * np.sqrt(5.0 + 2.0 * np.sqrt(10.0 / 7.0)))
self.assertAlmostEqual(self._test_obj.nodes[1], -1.0 / 3.0 * np.sqrt(5.0 - 2.0 * np.sqrt(10.0 / 7.0)))
self.assertAlmostEqual(self._test_obj.nodes[2], 0.0)
self.assertAlmostEqual(self._test_obj.nodes[3], 1.0 / 3.0 * np.sqrt(5.0 - 2.0 * np.sqrt(10.0 / 7.0)))
self.assertAlmostEqual(self._test_obj.nodes[4], 1.0 / 3.0 * np.sqrt(5 + 2 * np.sqrt(10.0 / 7.0)))
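# Cross-check (illustrative, commented out so it does not run at test
# collection time): numpy can generate Gauss-Legendre nodes directly, so the
# hard-coded reference values above can be verified against leggauss:
#   import numpy as np
#   x, w = np.polynomial.legendre.leggauss(5)
#   # x[0] == -1.0/3.0 * np.sqrt(5.0 + 2.0 * np.sqrt(10.0/7.0)), etc.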
|
[
"pypint.integrators.node_providers.gauss_legendre_nodes.GaussLegendreNodes",
"numpy.sqrt"
] |
[((242, 262), 'pypint.integrators.node_providers.gauss_legendre_nodes.GaussLegendreNodes', 'GaussLegendreNodes', ([], {}), '()\n', (260, 262), False, 'from pypint.integrators.node_providers.gauss_legendre_nodes import GaussLegendreNodes\n'), ((803, 823), 'pypint.integrators.node_providers.gauss_legendre_nodes.GaussLegendreNodes', 'GaussLegendreNodes', ([], {}), '()\n', (821, 823), False, 'from pypint.integrators.node_providers.gauss_legendre_nodes import GaussLegendreNodes\n'), ((1446, 1464), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (1453, 1464), True, 'import numpy as np\n'), ((1370, 1388), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (1377, 1388), True, 'import numpy as np\n'), ((1608, 1627), 'numpy.sqrt', 'np.sqrt', (['(10.0 / 7.0)'], {}), '(10.0 / 7.0)\n', (1615, 1627), True, 'import numpy as np\n'), ((1719, 1738), 'numpy.sqrt', 'np.sqrt', (['(10.0 / 7.0)'], {}), '(10.0 / 7.0)\n', (1726, 1738), True, 'import numpy as np\n'), ((1890, 1909), 'numpy.sqrt', 'np.sqrt', (['(10.0 / 7.0)'], {}), '(10.0 / 7.0)\n', (1897, 1909), True, 'import numpy as np\n'), ((1996, 2015), 'numpy.sqrt', 'np.sqrt', (['(10.0 / 7.0)'], {}), '(10.0 / 7.0)\n', (2003, 2015), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 30 13:23:53 2019
@author: casimp
"""
import numpy as np
import csv
import matplotlib.pyplot as plt
from cpex.transformation import strain_transformation
class Extract():
def __init__(self):
pass
def extract_grains(self, data='elastic', idx=1, grain_idx=None):
"""
Extracts data (stress, strain etc.) for either all grains or a
specified grain, at a given (orthogonal) orientation or component.
(For data='time', 'frame' etc. this indexing does not apply.)
Parameters
----------
data: str
The data label, either 'stress', 'strain', 'elastic' (strain),
'back stress', 'rot', 'time', 'frame'
idx: int
The orientation (referenced via an idx) of the defined data
e.g. data='stress', idx=1 => sigma_yy
grain_idx: int
The index of the grain (note GRAIN-1 => idx=0)
Returns
-------
ex: array
Extracted dataset (shape depends on the idx/grain_idx selection)
"""
if idx is None and grain_idx is not None:
idx = np.s_[:, grain_idx]
elif idx is None and grain_idx is None:
idx = np.s_[:, :]
elif idx is not None and grain_idx is None:
idx = np.s_[idx, :]
else:
idx = np.s_[idx, grain_idx]
d = {'strain':self.e,
'stress':self.s,
'elastic':self.elastic,
'back stress':self.b_stress,
'rot':self.rot - self.rot[:,:, 0][:, :, None],
'time':self.t,
'frame':np.arange(self.num_frames)}
if data not in ['time', 'frame', 'rot']:
ex = d[data][idx]
else:
ex = d[data]
return ex
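# Note on the indexing above (illustrative): np.s_ builds slice tuples, e.g.
#   np.s_[1, :]  == (1, slice(None))   -> one component, all grains
#   np.s_[:, 5]  == (slice(None), 5)   -> all components, grain 5
# so d[data][idx] selects the matching view of the underlying array.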
def extract_neighbours_idx(self, grain_idx, frame=0):
"""
Extracts the indices of all grains, ordered by distance
from a given grain (index).
Grains move a small amount during deformation; the frame can be defined
to explicitly interrogate neighbours at a given load level/time.
Parameters
----------
grain_idx: int
The index of the grain to search around
frame: int, None
The frame to reference (default = 0). If None, extracts ordered
indices for all frames.
Returns
-------
order: list
Grain indices ordered by euclidean distance from selected grain
"""
if frame is None:
frame = np.s_[:]
dims = self.dims[:, :, frame]
rel_dims = dims - dims[:, grain_idx, None] # Keeps correct dimensionality
euc_dist = np.sum(rel_dims ** 2, axis=0)**0.5
order = np.argsort(euc_dist, axis=0)
return order[1:]
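# Toy example of the ordering logic (hypothetical 2D positions, 4 grains):
#   dims = [[0, 1, 0, 3],   # x coordinates
#           [0, 0, 2, 0]]   # y coordinates
# Relative to grain 0, squared distances are [0, 1, 4, 9]; argsort gives
# [0, 1, 2, 3], and order[1:] = [1, 2, 3] lists neighbours nearest-first
# (the grain itself is dropped).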
def extract_neighbours(self, grain_idx, data='strain', idx=1, frame=-1,
cmean=False, dimframe='simple'):
"""
Extracts data (stress, strain etc.) for all grains, with data being
ordered with respect to position away from a given grain (index).
Calls the extract_grains and extract_neighbours_idx methods.
Parameters
----------
grain_idx: int
The index of the grain to search around
data: str
The data label, either 'stress', 'strain', 'elastic' (strain),
'back stress'
idx: int
The orientation (referenced via an idx) of the defined data
e.g. data='stress', idx=1 => sigma_yy
frame: int, None
The frame to reference (default = -1). If None, extracts ordered
data for all frames.
cmean: bool
Compute a rolling, cumulative mean
dimframe: str, int, None
If frame is not None, the neighbour ordering is done on that same
frame. If frame is None, the dimensions are taken from the final
frame, or from a specified frame (int), unless dimframe is None, in
which case neighbour ordering is done separately for each frame.
Warning: this is slow!
Returns
-------
ex_ordered: array
Ordered dataset
"""
if frame is None:
frame = np.s_[:]
ex = self.extract_grains(data=data, idx=idx)
if frame == np.s_[:] and dimframe != 'simple':
# Not ideal!!!
order = self.extract_neighbours_idx(grain_idx, None)
ex_ordered = np.column_stack([i[j] for i, j in zip(np.split(ex, ex.shape[1], axis=1),
np.split(order, order.shape[1], axis=1))]).squeeze()
elif frame == np.s_[:] and isinstance(dimframe, int):
order = self.extract_neighbours_idx(grain_idx, dimframe)
ex_ordered = ex[order]
else:
dimframe = frame if frame != np.s_[:] else -1
order = self.extract_neighbours_idx(grain_idx, dimframe)
# print(order)
ex_ordered = ex[order]
if cmean:
ex_csum = np.cumsum(ex_ordered, axis=0)
ex_cmean = ex_csum / np.arange(1, ex_csum.shape[0] + 1)[:, None]
return ex_cmean[..., frame]
return ex_ordered[..., frame]
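# The rolling cumulative mean above is the standard cumsum trick: for a
# column [2, 4, 6], cumsum = [2, 6, 12] divided elementwise by [1, 2, 3]
# gives the running means [2, 3, 4].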
def plot_neighbours(self, grain_idx, data='strain', idx=1, frame=-1,
cmean=True):
"""
Plots data (stress, strain etc.) for all n grains, with data being
ordered with respect to position away from a given grain (index).
Parameters
----------
grain_idx: int
The index of the grain to search around
data: str
The data to plot either 'stress', 'strain', 'elastic' (strain),
'back stress'
idx: int
The orientation (referenced via an idx) of the defined data
e.g. data='stress', idx=1 => sigma_yy
frame: int
The frame to reference (default = -1); must not be None, since
the response cannot be plotted across all frames at once.
cmean: bool
Compute a rolling, cumulative mean
"""
assert frame is not None, "Can't study response across all frames."
ex_ordered = self.extract_neighbours(grain_idx, data=data,
idx=idx, frame=frame,
cmean=cmean)
# Tinkering with axis labels
x = 'nth nearest neighbour'
y = 'cumulative mean {} (window=n)'.format(data) if cmean else data
# Plotting
plt.plot(np.arange(1, np.size(ex_ordered) +1), ex_ordered, label=grain_idx)
plt.legend()
plt.ylabel(y)
plt.xlabel(x)
def extract_lattice(self, data='lattice', family='311',
grain_idx=None, plane_idx=None):
"""
Routine to extract information about some or all (default) grains for a
specified lattice plane.
Parameters:
-----------
data: str
Either 'lattice' or 'phi'
family: str
The lattice plane family to assess
grain_idx: int, [int,...], None
If None then all grains of this family to be extracted else
the individual grain (or list of grains)
plane_idx: int, [int,...], None
If None then all planes of this family/grain combination to be
extracted else the individual planes (or list of planes)
Returns:
--------
data: array
Lattice strains (or phi) for given family (and potentially
grain/plane specification)
"""
if plane_idx is None and grain_idx is not None:
idx = np.s_[:, grain_idx]
elif plane_idx is None and grain_idx is None:
idx = np.s_[:, :]
elif plane_idx is not None and grain_idx is None:
idx = np.s_[plane_idx, :]
else:
idx = np.s_[plane_idx, grain_idx]
lattice = self.lattice_strain[family][idx]
phi = self.lattice_phi[family]
d = {'phi':phi,'lattice':lattice}
return d[data]
def extract_phi_idx(self, family='311', phi=0, window=10, frame=0):
"""
Allows for selection of the indices of lattice planes with a defined
orientation with respect to the y axis (nominally the loading axis).
A 2D array of indices will be returned if a frame is specified; the
elements in the array will be structured:
[[grain_idx, plane_idx],
[grain_idx, plane_idx],
...]
If None is passed as the frame variable then the rotation of
the grain during loading/dwell etc. is being considered - a 2D array
is returned with each element being structured as follows:
[[grain_idx, frame_idx, plane_idx],
[grain_idx, frame_idx, plane_idx],
...]
** In addition to the list of indices an equivalent boolean array is
returned in each case. **
Parameters
----------
family: str
The lattice plane family to assess
phi: float
The plane orientation (angle to the y/loading axis, in degrees)
about which to select planes
window: float
The angular tolerance (absolute window width, in degrees) for
selection around phi
frame: int, None
The frame to reference (default = 0). If None extracts ordered
data for all frames.
Returns
-------
va: array (int)
A list of the grain/plane indices for all planes that lie within
the specified orientation/window combination.
select: array (bool)
Boolean array of the same dimension as the lattice strain array -
elements are True if they are within the window, else False.
"""
if frame is None:
frame = np.s_[:]
phi_ = 180 * self.lattice_phi[family][:, frame] / np.pi
phi_ -= 90
phi -= 90
w = window / 2
p0, p1 = phi - w, phi + w
s0 = np.logical_and(phi_ > np.min(p0), phi_ < np.max(p1))
s1 = np.logical_and(-phi_ > np.min(p0), -phi_ < np.max(p1))
select = np.logical_or(s0, s1)
va = np.argwhere(select)
return va, select
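# Worked example of the selection window (hypothetical numbers): with phi=0
# and window=10, planes are kept when their orientation lies within +/-5
# degrees of the loading (y) axis; the -90 shift plus the sign-flipped test
# (s0 | s1) captures the equivalent reflected orientations as well.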
def plot_phi(self, y='lattice', family='200', frame=-1, idx=0,
alpha=0.1, restrict_z=False, restrict_range = [70, 110]):
"""
For a given lattice family (and frame) plots the variation in the
*resolved* lattice strain (or back stress) with respect to the angle
the planes make to the loading axis (phi). Can be restricted across
a smaller z_rot if required. N.b. rotations of grains defined as
(x_rot, phi, z_rot).
Parameters
----------
y: str
The data to plot on the y axis. This is typically lattice strain
but it is also possible to plot wrt. back stress.
family: str
The lattice plane family to assess
frame: int
The frame to extract data from (default = -1).
idx: int
The component (referenced via an idx) of the defined data. Only
valid for back stress (for fcc, idx = 0-11)
alpha: float
Plotting data transparency
restrict_z: bool
Restrict data extraction/plotting across one angular range. Can be
used to normalise the amount of data wrt. phi
restrict_range: [float, float]
Range across which to limit z rotations.
"""
lattice = self.lattice_strain
y_ = {'lattice': lattice[family],
'back stress': self.b_stress[idx]}[y]
try:
y_tensor = self.lattice_tensor[family]
tens = True
except KeyError:
print('Tensor not available')
tens=False
if y == 'back stress':
x = self.rot[1]
else:
x = self.lattice_phi[family]
rot = self.lattice_rot[family]
if restrict_z and y == 'lattice':
r0, r1 = restrict_range
t_z = rot[:, :, 2]* 180 / np.pi
va = np.logical_and(t_z > r0, t_z < r1)
vaf = np.zeros_like(rot[:, :, 2], dtype='bool')
vaf[:, frame, :] += True
va = np.logical_and(va, vaf)
else:
va = np.s_[:, frame]
plt.plot(x[va].flatten(), y_[va].flatten(), '.', alpha=alpha)
if y == 'lattice' and tens:
st = strain_transformation(np.linspace(0, np.pi, 1001), *y_tensor[:, frame])
plt.plot(np.linspace(0, np.pi, 1001), st, 'r')
x = 'lattice rot (phi)' if y == 'lattice' else 'grain rot (phi)'
plt.xlabel(x)
plt.ylabel(y)
def plot_grains(self, y='elastic', x='stress', x_mean=True,
y_mean=False, x_idx=1, y_idx=1, grain_idx=None, alpha=0.2,
color='k', mcolor='r'):
"""
The plot_grains method is a very general plotting routine; any grain
(not lattice) specific values can be plotted on either axis.
- Define data to plot on either axis i.e. y='stress', x='strain'
- Specify whether the data on given axis is the mean response of all grains
- Where relevant, the index of that data must be specified
i.e. for y='stress', y_idx = 1 for sigma_yy
While general, a limited number of x, y combinations will,
unsurprisingly, not work.
Parameters
----------
y, x: str, str
The data (label), either 'stress', 'strain', 'elastic' (strain),
'back stress', 'rot', 'time', 'frame' to plot on x/y axis
x_mean, y_mean: bool, bool
Whether to take the mean (across all grains) of the data on the
x/y axis
x_idx, y_idx: int, int
Component/orientation of the specified data to plot
e.g. x='stress', idx=1 => sigma_xx
grain_idx: [int, ...]
List of grains (indices) to plot (if None, all grains plotted)
alpha, color: float, str
Plotting options for the grain specific lines
mcolor:
The color of the grain average (across x and y) line
"""
# If necessary put grain_idx into list for fancy indexing
if isinstance(grain_idx, int):
grain_idx = [grain_idx,]
# Time and frame can't be averaged
if x in ['time', 'frame']:
x_mean = False
if y in ['time', 'frame']:
y_mean = False
# Data extraction
x_ = self.extract_grains(data=x, idx=x_idx, grain_idx=grain_idx)
y_ = self.extract_grains(data=y, idx=y_idx, grain_idx=grain_idx)
# Save the extracted x and y series to CSV files
csvfile = open('strain_grain.csv', 'w', newline='')
obj = csv.writer(csvfile)
for val in np.transpose(x_):
obj.writerow(val)
csvfile.close()
csvfile = open('stress_grain.csv', 'w', newline='')
obj = csv.writer(csvfile)
for val in np.transpose(y_):
obj.writerow(val)
csvfile.close()
# Calculate mean of arrays
xm = np.nanmean(x_, axis=0) if x not in ['time', 'frame'] else x_
ym = np.nanmean(y_, axis=0) if y not in ['time', 'frame'] else y_
x__ = xm if x_mean else x_.T
y__ = ym if y_mean else y_.T
# Tinkering with axis labels
x = '{} (idx={})'.format(x, x_idx) if x not in ['time', 'frame'] else x
y = '{} (idx={})'.format(y, y_idx) if y not in ['time', 'frame'] else y
x = 'mean {}'.format(x) if x_mean else x
y = 'mean {}'.format(y) if y_mean else y
# Plotting
plt.plot(np.squeeze(x__), np.squeeze(y__), color=color, alpha=alpha)
if (not y_mean or not x_mean) and (grain_idx is None or len(grain_idx) != 1):
plt.plot(xm, ym, color=mcolor, label='Mean response')
plt.legend()
plt.ylabel(y)
plt.xlabel(x)
def plot_lattice(self, family='200', phi=0, window=10, lat_ax='x',
ax2='stress', ax2_idx=1, ax2_mean=True,
alpha=0.2, color='k', mcolor='r',
plot_select=True, phi_frame=0):
"""
The lattice strains for a given family are plotted if they lie at (or
close to) an angle, phi (with the loading axis). The angular tolerance
/ azimuthal window is defined by the user (window). For XRD, a window
of 10deg is often used.
Parameters:
-----------
family: str
The lattice plane family to assess
phi: float
Angle at which to extract the lattice plane strains
window: float
Azimuthal tolerance (absolute window width) for lattice data
extraction
lat_ax: str
Axis to plot the lattice data on, either 'x' or 'y'
ax2: str
The data to plot against the lattice strain. Either 'stress',
'strain', 'elastic' (strain), 'back stress'
ax2_idx: int
Component/orientation of the specified second axis data to plot
e.g. ax2='stress', ax2_idx=1 => sigma_xx
ax2_mean: bool
Whether to take the mean (across all grains) of the data on the
second axis
alpha, color: float, str
Plotting options for the grain specific lines
mcolor:
The color of the grain average (across x and y) line
plot_select: bool
If plot_select is True the individual lattice planes will be
plotted in addition to the mean result, when False just the mean
response
phi_frame: int
The frame to define the grains that lie within the azimuthal
window (default = 0).
"""
ax2_mean = False if ax2 in ['time', 'frame'] else ax2_mean
d = self.extract_grains(data=ax2, idx=ax2_idx, grain_idx=None)
valid, select = self.extract_phi_idx(family=family, phi=phi,window=window, frame=phi_frame)
if ax2 in ['time', 'frame']:
d, dm = d, d  # time/frame series need no grain averaging
else:
d = np.nanmean(d, axis=0) if ax2_mean else d[valid[:,0]].T
dm = d if ax2_mean else np.nanmean(d, axis=1)
lattice = self.extract_lattice(family=family)
lattice = lattice[valid[:,0], :, valid[:,1]].T
x_ = lattice if lat_ax == 'x' else d
y_ = lattice if lat_ax != 'x' else d
assert np.sum(select) > 0, 'Phi window too small for {} - no grains/planes selected'.format(family)
if plot_select:
plt.plot(x_, y_, 'k', alpha=alpha)
x_ = np.nanmean(lattice, axis=1) if lat_ax == 'x' else dm
y_ = np.nanmean(lattice, axis=1) if lat_ax != 'x' else dm
plt.plot(x_, y_, label=family, color=mcolor)
ax2 = '{} (idx={})'.format(ax2, ax2_idx) if ax2 not in ['time', 'frame'] else ax2
ax2 = ax2 if not ax2_mean else 'mean {}'.format(ax2)
xlabel = ax2 if lat_ax != 'x' else 'lattice'
ylabel = ax2 if lat_ax == 'x' else 'lattice'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def extract_lattice_map(self, family='200', az_bins=19):
"""
Average the lattice strain data across a defined number of azimuthal
bins (i.e. azimuthally integrate) and return a 2D array of lattice
strain against frame for the specified family.
Parameters:
-----------
family: str
The lattice plane family to assess
az_bins: int
Number of bins to extract lattice strains across
Returns:
--------
bins: list
List of the phi bins that data has been extracted at
data: array
Lattice strains for given family averaged across a user
defined (az_bins) number of azimuthally arrayed bins
"""
phi_steps = az_bins + 1
arr1 = np.moveaxis(self.lattice_strain[family], 1, 2)
arr1 = arr1.reshape((-1, arr1.shape[-1]))
arr2 = np.moveaxis(self.lattice_phi[family], 1, 2)
arr2 = arr2.reshape((-1, arr2.shape[-1]))
arr2[arr2 > np.pi/2] -= np.pi # -90 to 90
bins = np.linspace(-90, 90, phi_steps)
e_phi = np.nan * np.ones((phi_steps - 1, self.num_frames))
for idx, i in enumerate(bins[:-1]):
va = np.logical_and(arr2 < bins[idx + 1] * np.pi / 180, arr2 > bins[idx] * np.pi / 180)
# NumPy returns nan/inf (with a RuntimeWarning) for empty bins rather
# than raising ZeroDivisionError, so no exception handling is needed
e_phi[idx] = np.sum(arr1 * va, axis=0) / np.nansum(va, axis=0)
return (bins[:-1]+bins[1:])/2, e_phi
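# Binning sketch: az_bins=19 gives 20 edges over [-90, 90] and therefore 19
# angular bins; each row of e_phi holds, per frame, the mean lattice strain
# of the planes whose phi falls inside that bin, and the returned abscissa
# (bins[:-1] + bins[1:]) / 2 is the bin-centre angle.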
def plot_lattice_map(self, family='200', az_bins=19, ax2='time',
ax2_idx=1):
"""
Plot a 2D map of the azimuthally arrayed lattice strains as a function
of a second variable such as time or frame. Also works with macro stress
or strain, although obvious issues may arise if there are creep dwells.
Parameters:
-----------
family: str
The lattice plane family to assess
az_bins: int
Number of bins to extract lattice strains across
ax2: str
The data to plot against the lattice strain. Either 'stress',
'strain', 'elastic' (strain), 'back stress'
ax2_idx: int
Component/orientation of the specified second axis data to plot
e.g. ax2='stress', ax2_idx=1 => sigma_xx
"""
bin_c, e_phi = self.extract_lattice_map(family=family, az_bins=az_bins)
d = self.extract_grains(data=ax2, idx=ax2_idx, grain_idx=None)
ax2_mean = False if ax2 in ['time', 'frame'] else True
if ax2_mean:
d = np.nanmean(d, axis=0)
time, phi = np.meshgrid(d, bin_c)
plt.contourf(time, phi, e_phi)
plt.colorbar()
ax2 = 'mean {} (idx={})'.format(ax2, ax2_idx) if ax2 not in ['time', 'frame'] else ax2
plt.xlabel(ax2)
plt.ylabel('phi (reflected at 0$^o$)')
def plot_lattice_all(self, phi=0, window=10, lat_ax='x', ax2='stress',
ax2_idx=1, ax2_mean=True, phi_frame=0):
"""
The lattice strains for ALL families are plotted if they lie at (or
close to) an angle, phi (with the loading axis). The angular tolerance
/ azimuthal window is defined by the user (window). For XRD, a window
of 10deg is often used.
Parameters:
-----------
phi: float
Angle at which to extract the lattice plane strains
window: float
Azimuthal tolerance (absolute window width) for lattice data
extraction
lat_ax: str
Axis to plot the lattice data on, either 'x' or 'y'
ax2: str
The data to plot against the lattice strain. Either 'stress',
'strain', 'elastic' (strain), 'back stress'
ax2_idx: int
Component/orientation of the specified second axis data to plot
e.g. ax2='stress', ax2_idx=1 => sigma_xx
ax2_mean: bool
Whether to take the mean (across all grains) of the data on the
second axis
phi_frame: int
The frame to define the grains that lie within the azimuthal
window (default = 0).
"""
for family in self.lattice_list:
try:
self.plot_lattice(family=family, lat_ax=lat_ax, ax2=ax2, ax2_idx=ax2_idx, phi=phi,
window=window, phi_frame=phi_frame, plot_select=False, mcolor=None, ax2_mean=ax2_mean)
except AssertionError:
print('Phi window too small for {} - no grains/planes selected'.format(family))
plt.legend(self.lattice_list)
def plot_back_lattice(self, family='200', phi=0, window=10,
back_ax='y', b_idx=1, ax2='stress', ax2_idx=1,
alpha=0.2, color='k', mcolor='r',
plot_select=True, phi_frame=0):
"""
Plot a component of back stress for a specified family of lattice
planes at a defined azimuthal angle. Plot against any other extracted
stress, strain, time etc. component.
Parameters:
-----------
family: str
The lattice plane family to assess
phi: float
Angle at which to extract the lattice plane strains
window: float
Azimuthal tolerance (absolute window width) for lattice data
extraction
back_ax: str
Axis to plot the back stress data on, either 'x' or 'y'
b_idx: int
Component of the back stress to plot (for fcc 0-11)
ax2: str
The data to plot against the lattice strain. Either 'stress',
'strain', 'elastic' (strain), 'back stress'
ax2_idx: int
Component/orientation of the specified second axis data to plot
e.g. ax2='stress', ax2_idx=1 => sigma_xx
alpha, color: float, str
Plotting options for the grain specific lines
mcolor:
The color of the grain average (across x and y) line
plot_select: bool
If plot_select is True the individual lattice planes will be
plotted in addition to the mean result, when False just the mean
response
phi_frame: int
The frame to define the grains that lie within the azimuthal
window (default = 0).
"""
back = self.extract_grains(data='back stress', idx=b_idx, grain_idx=None)
d = self.extract_grains(data=ax2, idx=ax2_idx, grain_idx=None)
d = d if ax2 in ['time', 'frame'] else np.nanmean(d, axis=0)
valid, select = self.extract_phi_idx(family=family, phi=phi,window=window, frame=phi_frame)
# back = back[valid[:,0], :, valid[:,1]].T
v = np.unique(valid[:,0])
back = back[v, :].T
x_ = back if back_ax == 'x' else d
y_ = back if back_ax != 'x' else d
assert np.sum(select) > 0, 'Phi window too small for {} - no grains/planes selected'.format(family)
if plot_select:
plt.plot(x_, y_, 'k', alpha=alpha)
ax2 = 'mean {} (idx={})'.format(ax2, ax2_idx) if ax2 not in ['time', 'frame'] else ax2
xlabel = ax2 if back_ax != 'x' else 'back stress'
ylabel = ax2 if back_ax == 'x' else 'back stress'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_active_slip(self, family='200', phi=0, window=10,
back_ax='y', b_active=2, ax2='stress', ax2_idx=1,
alpha=0.2, color='k', mcolor='r',
plot_select=True, phi_frame=0):
"""
Plot the number of active slip systems for every plane for a specified
family of lattice planes at a defined azimuthal angle (angle wrt y axis).
Plotting is a function of time, frame, stress, strain etc. The
activation of a slip system is taken to occur when the absolute back
stress associated with that system (i.e. back stress component) rises
above a user-defined value.
Parameters:
-----------
family: str
The lattice plane family to assess
phi: float
Angle at which to extract the lattice plane strains
window: float
Azimuthal tolerance (absolute window width) for lattice data
extraction
back_ax: str
Axis to plot the slip-activity data on, either 'x' or 'y'
b_active: float
Threshold back stress above which a slip system counts as active
ax2: str
The data to plot against the lattice strain. Either 'stress',
'strain', 'elastic' (strain), 'back stress'
ax2_idx: int
Component/orientation of the specified second axis data to plot
e.g. ax2='stress', ax2_idx=1 => sigma_xx
alpha, color: float, str
Plotting options for the grain specific lines
mcolor:
The color of the grain average (across x and y) line
plot_select: bool
If plot_select is True the individual lattice planes will be
plotted in addition to the mean result, when False just the mean
response
phi_frame: int
The frame to define the grains that lie within the azimuthal
window (default = 0).
"""
back = self.extract_grains(data='back stress', idx=None, grain_idx=None)
back_bool = np.abs(back) > b_active
d = self.extract_grains(data=ax2, idx=ax2_idx, grain_idx=None)
d = d if ax2 in ['time', 'frame'] else np.nanmean(d, axis=0)
valid, select = self.extract_phi_idx(family=family, phi=phi,window=window, frame=phi_frame)
# back = back[valid[:,0], :, valid[:,1]].T
v = np.unique(valid[:,0])
back_active = np.sum(back_bool, axis=0)[v, :].T
x_ = back_active if back_ax == 'x' else d
y_ = back_active if back_ax != 'x' else d
assert np.sum(select) > 0, 'Phi window too small for {} - no grains/planes selected'.format(family)
if plot_select:
plt.plot(x_, y_, 'k', alpha=alpha)
x_ = np.nanmean(back_active, axis=1) if back_ax == 'x' else d
y_ = np.nanmean(back_active, axis=1) if back_ax != 'x' else d
plt.plot(x_, y_, label=family, color=mcolor)
ax2 = 'mean {} (idx={})'.format(ax2, ax2_idx) if ax2 not in ['time', 'frame'] else ax2
xlabel = ax2 if back_ax != 'x' else 'Active slip systems'
ylabel = ax2 if back_ax == 'x' else 'Active slip systems'
plt.xlabel(xlabel)
plt.ylabel(ylabel)
def plot_active_slip_all(self, phi=0, window=10, back_ax='y', b_active=2,
ax2='stress', ax2_idx=1, phi_frame=0):
"""
Plot the plane averaged number of active slip systems for all families
of lattice planes at a defined azimuthal angle (angle wrt y axis).
Plotting is a function of time, frame, stress, strain etc. The
activation of a slip system is taken to occur when the absolute back
stress associated with that system (i.e. back stress component) rises
above a user-defined value.
Parameters:
-----------
phi: float
Angle at which to extract the lattice plane strains
window: float
Azimuthal tolerance (absolute window width) for lattice data
extraction
back_ax: str
Axis to plot the lattice data on, either 'x' or 'y'
b_active: float
Threshold back stress above which a slip system counts as active
ax2: str
The data to plot against the lattice strain. Either 'stress',
'strain', 'elastic' (strain), 'back stress'
ax2_idx: int
Component/orientation of the specified second axis data to plot
e.g. ax2='stress', ax2_idx=1 => sigma_xx
phi_frame: int
The frame to define the grains that lie within the aimuthal
window (default = 0).
"""
for family in self.lattice_list:
try:
self.plot_active_slip(family=family, back_ax=back_ax, ax2=ax2, ax2_idx=ax2_idx, phi=phi,
window=window, phi_frame=phi_frame, plot_select=False, mcolor=None)
except AssertionError:
print('Phi window too small for {} - no grains/planes selected'.format(family))
plt.legend(self.lattice_list)
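# Hypothetical usage sketch: Extract() itself stores no data, so attributes
# such as self.e, self.s, self.dims and self.lattice_strain are assumed to be
# populated elsewhere (e.g. by a loader subclass) before calling:
#   ex = Extract()                                    # then attach/load data
#   sig_yy = ex.extract_grains(data='stress', idx=1)  # sigma_yy, all grains
#   ex.plot_grains(y='elastic', x='stress', x_idx=1, y_idx=1)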
|
[
"numpy.moveaxis",
"numpy.sum",
"numpy.abs",
"numpy.ones",
"numpy.argsort",
"matplotlib.pyplot.contourf",
"numpy.arange",
"numpy.unique",
"numpy.nanmean",
"numpy.meshgrid",
"numpy.zeros_like",
"numpy.transpose",
"matplotlib.pyplot.colorbar",
"numpy.cumsum",
"numpy.max",
"numpy.linspace",
"numpy.size",
"numpy.nansum",
"csv.writer",
"matplotlib.pyplot.legend",
"numpy.min",
"numpy.argwhere",
"matplotlib.pyplot.ylabel",
"numpy.squeeze",
"matplotlib.pyplot.plot",
"numpy.logical_and",
"numpy.split",
"numpy.logical_or",
"matplotlib.pyplot.xlabel"
] |
[((2844, 2872), 'numpy.argsort', 'np.argsort', (['euc_dist'], {'axis': '(0)'}), '(euc_dist, axis=0)\n', (2854, 2872), True, 'import numpy as np\n'), ((6900, 6912), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6910, 6912), True, 'import matplotlib.pyplot as plt\n'), ((6921, 6934), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (6931, 6934), True, 'import matplotlib.pyplot as plt\n'), ((6943, 6956), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (6953, 6956), True, 'import matplotlib.pyplot as plt\n'), ((10705, 10726), 'numpy.logical_or', 'np.logical_or', (['s0', 's1'], {}), '(s0, s1)\n', (10718, 10726), True, 'import numpy as np\n'), ((10749, 10768), 'numpy.argwhere', 'np.argwhere', (['select'], {}), '(select)\n', (10760, 10768), True, 'import numpy as np\n'), ((13337, 13350), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (13347, 13350), True, 'import matplotlib.pyplot as plt\n'), ((13359, 13372), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (13369, 13372), True, 'import matplotlib.pyplot as plt\n'), ((15506, 15525), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (15516, 15525), False, 'import csv\n'), ((15545, 15561), 'numpy.transpose', 'np.transpose', (['x_'], {}), '(x_)\n', (15557, 15561), True, 'import numpy as np\n'), ((15700, 15719), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (15710, 15719), False, 'import csv\n'), ((15739, 15755), 'numpy.transpose', 'np.transpose', (['y_'], {}), '(y_)\n', (15751, 15755), True, 'import numpy as np\n'), ((16666, 16679), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (16676, 16679), True, 'import matplotlib.pyplot as plt\n'), ((16688, 16701), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (16698, 16701), True, 'import matplotlib.pyplot as plt\n'), ((19650, 19694), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'y_'], {'label': 'family', 'color': 'mcolor'}), '(x_, y_, label=family, color=mcolor)\n', (19658, 19694), True, 'import matplotlib.pyplot as plt\n'), ((19981, 19999), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (19991, 19999), True, 'import matplotlib.pyplot as plt\n'), ((20008, 20026), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (20018, 20026), True, 'import matplotlib.pyplot as plt\n'), ((20849, 20895), 'numpy.moveaxis', 'np.moveaxis', (['self.lattice_strain[family]', '(1)', '(2)'], {}), '(self.lattice_strain[family], 1, 2)\n', (20860, 20895), True, 'import numpy as np\n'), ((20970, 21013), 'numpy.moveaxis', 'np.moveaxis', (['self.lattice_phi[family]', '(1)', '(2)'], {}), '(self.lattice_phi[family], 1, 2)\n', (20981, 21013), True, 'import numpy as np\n'), ((21138, 21169), 'numpy.linspace', 'np.linspace', (['(-90)', '(90)', 'phi_steps'], {}), '(-90, 90, phi_steps)\n', (21149, 21169), True, 'import numpy as np\n'), ((23080, 23101), 'numpy.meshgrid', 'np.meshgrid', (['d', 'bin_c'], {}), '(d, bin_c)\n', (23091, 23101), True, 'import numpy as np\n'), ((23110, 23140), 'matplotlib.pyplot.contourf', 'plt.contourf', (['time', 'phi', 'e_phi'], {}), '(time, phi, e_phi)\n', (23122, 23140), True, 'import matplotlib.pyplot as plt\n'), ((23149, 23163), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (23161, 23163), True, 'import matplotlib.pyplot as plt\n'), ((23269, 23284), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['ax2'], {}), '(ax2)\n', (23279, 23284), True, 'import matplotlib.pyplot as plt\n'), ((23293, 23331), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""phi (reflected at 0$^o$)"""'], {}), "('phi (reflected at 0$^o$)')\n", (23303, 23331), True, 'import matplotlib.pyplot as plt\n'), ((25071, 25100), 'matplotlib.pyplot.legend', 'plt.legend', (['self.lattice_list'], {}), '(self.lattice_list)\n', (25081, 25100), True, 'import matplotlib.pyplot as plt\n'), ((27315, 27337), 'numpy.unique', 'np.unique', (['valid[:, 0]'], {}), '(valid[:, 0])\n', (27324, 27337), True, 'import numpy as np\n'), ((27892, 27910), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (27902, 27910), True, 'import matplotlib.pyplot as plt\n'), ((27919, 27937), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (27929, 27937), True, 'import matplotlib.pyplot as plt\n'), ((30419, 30441), 'numpy.unique', 'np.unique', (['valid[:, 0]'], {}), '(valid[:, 0])\n', (30428, 30441), True, 'import numpy as np\n'), ((30956, 31000), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'y_'], {'label': 'family', 'color': 'mcolor'}), '(x_, y_, label=family, color=mcolor)\n', (30964, 31000), True, 'import matplotlib.pyplot as plt\n'), ((31248, 31266), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (31258, 31266), True, 'import matplotlib.pyplot as plt\n'), ((31275, 31293), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (31285, 31293), True, 'import matplotlib.pyplot as plt\n'), ((33134, 33163), 'matplotlib.pyplot.legend', 'plt.legend', (['self.lattice_list'], {}), '(self.lattice_list)\n', (33144, 33163), True, 'import matplotlib.pyplot as plt\n'), ((1625, 1651), 'numpy.arange', 'np.arange', (['self.num_frames'], {}), '(self.num_frames)\n', (1634, 1651), True, 'import numpy as np\n'), ((2784, 2813), 'numpy.sum', 'np.sum', (['(rel_dims ** 2)'], {'axis': '(0)'}), '(rel_dims ** 2, axis=0)\n', (2790, 2813), True, 'import numpy as np\n'), ((5251, 5280), 'numpy.cumsum', 'np.cumsum', (['ex_ordered'], {'axis': '(0)'}), '(ex_ordered, axis=0)\n', (5260, 5280), True, 'import numpy as np\n'), ((12781, 12815), 'numpy.logical_and', 'np.logical_and', (['(t_z > r0)', '(t_z < r1)'], {}), '(t_z > r0, t_z < r1)\n', (12795, 12815), True, 'import numpy as np\n'), ((12834, 12875), 'numpy.zeros_like', 'np.zeros_like', (['rot[:, :, 2]'], {'dtype': '"""bool"""'}), "(rot[:, :, 2], dtype='bool')\n", (12847, 12875), True, 'import numpy as np\n'), ((12930, 12953), 'numpy.logical_and', 'np.logical_and', (['va', 'vaf'], {}), '(va, vaf)\n', (12944, 12953), True, 'import numpy as np\n'), ((15862, 15884), 'numpy.nanmean', 'np.nanmean', (['x_'], {'axis': '(0)'}), '(x_, axis=0)\n', (15872, 15884), True, 'import numpy as np\n'), ((15936, 15958), 'numpy.nanmean', 'np.nanmean', (['y_'], {'axis': '(0)'}), '(y_, axis=0)\n', (15946, 15958), True, 'import numpy as np\n'), ((16421, 16436), 'numpy.squeeze', 'np.squeeze', (['x__'], {}), '(x__)\n', (16431, 16436), True, 'import numpy as np\n'), ((16438, 16453), 'numpy.squeeze', 'np.squeeze', (['y__'], {}), '(y__)\n', (16448, 16453), True, 'import numpy as np\n'), ((16579, 16632), 'matplotlib.pyplot.plot', 'plt.plot', (['xm', 'ym'], {'color': 'mcolor', 'label': '"""Mean response"""'}), "(xm, ym, color=mcolor, label='Mean response')\n", (16587, 16632), True, 'import matplotlib.pyplot as plt\n'), ((16645, 16657), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16655, 16657), True, 'import matplotlib.pyplot as plt\n'), ((19332, 19346), 'numpy.sum', 'np.sum', (['select'], {}), '(select)\n', (19338, 19346), True, 'import numpy as np\n'), 
((19461, 19495), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'y_', '"""k"""'], {'alpha': 'alpha'}), "(x_, y_, 'k', alpha=alpha)\n", (19469, 19495), True, 'import matplotlib.pyplot as plt\n'), ((19522, 19549), 'numpy.nanmean', 'np.nanmean', (['lattice'], {'axis': '(1)'}), '(lattice, axis=1)\n', (19532, 19549), True, 'import numpy as np\n'), ((19588, 19615), 'numpy.nanmean', 'np.nanmean', (['lattice'], {'axis': '(1)'}), '(lattice, axis=1)\n', (19598, 19615), True, 'import numpy as np\n'), ((21195, 21236), 'numpy.ones', 'np.ones', (['(phi_steps - 1, self.num_frames)'], {}), '((phi_steps - 1, self.num_frames))\n', (21202, 21236), True, 'import numpy as np\n'), ((21307, 21393), 'numpy.logical_and', 'np.logical_and', (['(arr2 < bins[idx + 1] * np.pi / 180)', '(arr2 > bins[idx] * np.pi / 180)'], {}), '(arr2 < bins[idx + 1] * np.pi / 180, arr2 > bins[idx] * np.pi /\n 180)\n', (21321, 21393), True, 'import numpy as np\n'), ((23029, 23050), 'numpy.nanmean', 'np.nanmean', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (23039, 23050), True, 'import numpy as np\n'), ((27111, 27132), 'numpy.nanmean', 'np.nanmean', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (27121, 27132), True, 'import numpy as np\n'), ((27484, 27498), 'numpy.sum', 'np.sum', (['select'], {}), '(select)\n', (27490, 27498), True, 'import numpy as np\n'), ((27613, 27647), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'y_', '"""k"""'], {'alpha': 'alpha'}), "(x_, y_, 'k', alpha=alpha)\n", (27621, 27647), True, 'import matplotlib.pyplot as plt\n'), ((30064, 30076), 'numpy.abs', 'np.abs', (['back'], {}), '(back)\n', (30070, 30076), True, 'import numpy as np\n'), ((30215, 30236), 'numpy.nanmean', 'np.nanmean', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (30225, 30236), True, 'import numpy as np\n'), ((30630, 30644), 'numpy.sum', 'np.sum', (['select'], {}), '(select)\n', (30636, 30644), True, 'import numpy as np\n'), ((30759, 30793), 'matplotlib.pyplot.plot', 'plt.plot', (['x_', 'y_', '"""k"""'], {'alpha': 'alpha'}), "(x_, y_, 'k', alpha=alpha)\n", (30767, 30793), True, 'import matplotlib.pyplot as plt\n'), ((30820, 30851), 'numpy.nanmean', 'np.nanmean', (['back_active'], {'axis': '(1)'}), '(back_active, axis=1)\n', (30830, 30851), True, 'import numpy as np\n'), ((30890, 30921), 'numpy.nanmean', 'np.nanmean', (['back_active'], {'axis': '(1)'}), '(back_active, axis=1)\n', (30900, 30921), True, 'import numpy as np\n'), ((10589, 10599), 'numpy.min', 'np.min', (['p0'], {}), '(p0)\n', (10595, 10599), True, 'import numpy as np\n'), ((10608, 10618), 'numpy.max', 'np.max', (['p1'], {}), '(p1)\n', (10614, 10618), True, 'import numpy as np\n'), ((10656, 10666), 'numpy.min', 'np.min', (['p0'], {}), '(p0)\n', (10662, 10666), True, 'import numpy as np\n'), ((10676, 10686), 'numpy.max', 'np.max', (['p1'], {}), '(p1)\n', (10682, 10686), True, 'import numpy as np\n'), ((13147, 13174), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(1001)'], {}), '(0, np.pi, 1001)\n', (13158, 13174), True, 'import numpy as np\n'), ((13218, 13245), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', '(1001)'], {}), '(0, np.pi, 1001)\n', (13229, 13245), True, 'import numpy as np\n'), ((18964, 18985), 'numpy.nanmean', 'np.nanmean', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (18974, 18985), True, 'import numpy as np\n'), ((19055, 19076), 'numpy.nanmean', 'np.nanmean', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (19065, 19076), True, 'import numpy as np\n'), ((30463, 30488), 'numpy.sum', 'np.sum', (['back_bool'], {'axis': '(0)'}), '(back_bool, axis=0)\n', (30469, 30488), True, 
'import numpy as np\n'), ((5315, 5349), 'numpy.arange', 'np.arange', (['(1)', '(ex_csum.shape[0] + 1)'], {}), '(1, ex_csum.shape[0] + 1)\n', (5324, 5349), True, 'import numpy as np\n'), ((6838, 6857), 'numpy.size', 'np.size', (['ex_ordered'], {}), '(ex_ordered)\n', (6845, 6857), True, 'import numpy as np\n'), ((21436, 21461), 'numpy.sum', 'np.sum', (['(arr1 * va)'], {'axis': '(0)'}), '(arr1 * va, axis=0)\n', (21442, 21461), True, 'import numpy as np\n'), ((21464, 21485), 'numpy.nansum', 'np.nansum', (['va'], {'axis': '(0)'}), '(va, axis=0)\n', (21473, 21485), True, 'import numpy as np\n'), ((4680, 4713), 'numpy.split', 'np.split', (['ex', 'ex.shape[1]'], {'axis': '(1)'}), '(ex, ex.shape[1], axis=1)\n', (4688, 4713), True, 'import numpy as np\n'), ((4758, 4797), 'numpy.split', 'np.split', (['order', 'order.shape[1]'], {'axis': '(1)'}), '(order, order.shape[1], axis=1)\n', (4766, 4797), True, 'import numpy as np\n')]
|
from __future__ import print_function, absolute_import, division
import numpy as np
from poseutils.logger import log
from poseutils.datasets.unprocessed.Dataset import Dataset
class TDPWDataset(Dataset):
"""Dataset class for handling 3DPW dataset
:param path: path to npz file
:type path: str
"""
def __init__(self, path):
super(TDPWDataset, self).__init__('3dpw')
self.load_data(path)
def load_data(self, path):
data = np.load(path, allow_pickle=True, encoding='latin1')['data'].item()
data_train = data['train']
data_valid = data['test']
self._data_train['2d'] = data_train["combined_2d"]
self._data_train['3d'] = data_train["combined_3d_cam"]*1000
self._data_valid['2d'] = data_valid["combined_2d"]
self._data_valid['3d'] = data_valid["combined_3d_cam"]*1000
log("Loaded raw data")
|
[
"numpy.load",
"poseutils.logger.log"
] |
[((885, 907), 'poseutils.logger.log', 'log', (['"""Loaded raw data"""'], {}), "('Loaded raw data')\n", (888, 907), False, 'from poseutils.logger import log\n'), ((483, 534), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "(path, allow_pickle=True, encoding='latin1')\n", (490, 534), True, 'import numpy as np\n')]
|
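A minimal sketch of the npz layout that load_data above expects, built as a toy file; the joint count and array shapes are illustrative assumptions, not the real 3DPW dimensions.

import numpy as np

# Toy npz matching what load_data() reads: a pickled dict under the 'data'
# key with 'train'/'test' splits holding 'combined_2d' and 'combined_3d_cam'.
toy = {
    'train': {'combined_2d': np.zeros((10, 14, 2)), 'combined_3d_cam': np.zeros((10, 14, 3))},
    'test': {'combined_2d': np.zeros((4, 14, 2)), 'combined_3d_cam': np.zeros((4, 14, 3))},
}
np.savez('/tmp/3dpw_toy.npz', data=toy)

dataset = TDPWDataset('/tmp/3dpw_toy.npz')  # class defined above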
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GS GPS parameters."""
from makani.config import mconfig
from makani.control import system_types
import numpy as np
@mconfig.Config(deps={
'gs_model': 'base_station.gs_model',
'test_site': 'common.test_site',
})
def MakeParams(params):
"""Make ground station gps parameters."""
if params['gs_model'] == system_types.kGroundStationModelTopHat:
gps_primary_antenna_dir = [0.0, 0.0, -1.0]
gps_primary_pos = [1.418, -1.657, -2.417]
# TopHat doesn't actually have a secondary gps.
gps_secondary_antenna_dir = gps_primary_antenna_dir
gps_secondary_pos = gps_primary_pos
# Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the perch frame. Note: The TopHat does not have a
# GPS compass, but this value is set for historical consistency.
gps_compass_to_perch_azi = -2.440
elif params['gs_model'] == system_types.kGroundStationModelGSv1:
gps_primary_antenna_dir = [0.0, 0.0, -1.0]
# Position measured on 2015-06-15.
gps_primary_pos = [0.0, 0.0, -2.94]
# GSv1 doesn't actually have a secondary gps.
gps_secondary_antenna_dir = gps_primary_antenna_dir
gps_secondary_pos = gps_primary_pos
# Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the perch frame
gps_compass_to_perch_azi = -2.440
elif params['gs_model'] == system_types.kGroundStationModelGSv2:
gps_primary_antenna_dir = [0.0, 0.0, -1.0]
gps_secondary_antenna_dir = [0.0, 0.0, -1.0]
if params['test_site'] == system_types.kTestSiteParkerRanch:
# See b/137283974 for details.
gps_primary_pos = [-0.002, 0.011, -6.7]
gps_secondary_pos = [-2.450, -0.428, -6.827]
elif params['test_site'] == system_types.kTestSiteNorway:
# See b/137660975 for details.
gps_primary_pos = [-0.002, 0.011, -6.7]
gps_secondary_pos = [-2.450, -0.428, -6.757]
else:
assert False, 'Unsupported test site.'
# Angle [rad] from the GPS compass baseline to the zero-azimuth
# reference of the platform frame. See b/118710931.
gps_compass_to_perch_azi = np.deg2rad(169.84)
else:
assert False, 'Unsupported ground station model.'
return {
# Position [m] of the GS GPS antenna in the platform frame.
# NOTE: The direction of the antennae is currently not used.
'primary_antenna_p': {
'antenna_dir': gps_primary_antenna_dir,
'pos': gps_primary_pos,
},
'secondary_antenna_p': {
'antenna_dir': gps_secondary_antenna_dir,
'pos': gps_secondary_pos,
},
# Calibration for the ground station compass ([#], [rad], [#]).
# The bias is used to account for the angle between the perch
# frame and the NovAtel differential GPS receiver.
# TODO: Remove this parameter once the computation of
# compass heading from the primary and secondary antennae is implemented.
'heading_cal': {
'scale': 1.0, 'bias': gps_compass_to_perch_azi, 'bias_count': 0}
}
|
[
"makani.config.mconfig.Config",
"numpy.deg2rad"
] |
[((711, 806), 'makani.config.mconfig.Config', 'mconfig.Config', ([], {'deps': "{'gs_model': 'base_station.gs_model', 'test_site': 'common.test_site'}"}), "(deps={'gs_model': 'base_station.gs_model', 'test_site':\n 'common.test_site'})\n", (725, 806), False, 'from makani.config import mconfig\n'), ((2688, 2706), 'numpy.deg2rad', 'np.deg2rad', (['(169.84)'], {}), '(169.84)\n', (2698, 2706), True, 'import numpy as np\n')]
|
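For illustration, a sketch of applying the heading calibration returned above; this is a generic scale-and-bias helper with angle wrapping, not part of the Makani codebase.

import numpy as np

def apply_heading_cal(measured_rad, cal):
    # calibrated = scale * measured + bias, wrapped to [-pi, pi)
    raw = cal['scale'] * measured_rad + cal['bias']
    return (raw + np.pi) % (2.0 * np.pi) - np.pi

cal = {'scale': 1.0, 'bias': np.deg2rad(169.84), 'bias_count': 0}
print(apply_heading_cal(0.25, cal))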
import os
import csv
import sys
import time
import json
import h5py
import pickle as pkl
import logging
import argparse
import random
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm, trange
from nglib.common import utils
def get_arguments(argv):
parser = argparse.ArgumentParser(description='more intrinsic evaluations')
parser.add_argument('config_file', metavar='CONFIG_FILE',
help='ng config file')
parser.add_argument('input_dir', metavar='INPUT_DIR',
help='input dir')
parser.add_argument('prefix', metavar='PREFIX',
help='output file prefix')
parser.add_argument('output_dir', metavar='OUTPUT_DIR',
help='output dir')
parser.add_argument("--n_choices", type=int, default=8,
help="number of choices")
parser.add_argument("--seed", type=int, default=135,
help="random seed for initialization")
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='show info messages')
parser.add_argument('-d', '--debug', action='store_true', default=False,
help='show debug messages')
args = parser.parse_args(argv)
return args
def set_seed(gpu, seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# if gpu != -1:
# torch.cuda.manual_seed_all(seed)
def _get_indices_by_rtypes(ng_edges, rtype_idxs):
n_edges = ng_edges.shape[1]
ret_idxs = []
for i in range(n_edges):
e = tuple(ng_edges[:3, i].astype('int64'))
if e[1] in rtype_idxs:
ret_idxs.append(i)
return ret_idxs
def _get_tail_node_repr_by_eidx(
gid, cand_idx, ng_edges, bert_nid2rows, bert_inputs, bert_target_idxs):
_edge = ng_edges[:, cand_idx] # outputs
_nid = int(_edge[2])
_row = bert_nid2rows[_nid]
_row = _row[_row != -1]
assert _row.shape == (1, ) # for pp links
binputs = bert_inputs[:, _row, :].squeeze()
target_col = bert_target_idxs[_row]
# choice format
ret = {
'gid': gid,
'eidx': cand_idx, # we need this because we might need to remove the edge
'edge': _edge,
'nid': _nid,
'bert_inputs': binputs,
'target_col': target_col
}
return ret
def sample_node_multiple_choice(ng_edges,
bert_inputs, bert_target_idxs, bert_nid2rows,
interested_rel_idxs, fr, gid):
interested_eidxs = _get_indices_by_rtypes(ng_edges, interested_rel_idxs)
if len(interested_eidxs) == 0:
return None
# sample a target node which is the tail node of the selected edge
eidx = interested_eidxs[random.randint(0, len(interested_eidxs)-1)]
answer = _get_tail_node_repr_by_eidx(
gid, eidx, ng_edges, bert_nid2rows, bert_inputs, bert_target_idxs
)
target_e = ng_edges[:3, eidx].astype('int64')
n_nodes = bert_nid2rows.shape[0]
choices = [answer]
gid_pool = [k for k in fr.keys()]
while len(choices) < args.n_choices:
        # randomly pick a graph
rgid = gid_pool[random.randint(0, len(gid_pool)-1)]
rgid = int(rgid.split('_')[1])
if rgid == gid:
continue
key = 'graph_{}'.format(rgid)
r_binputs = fr[key]['bert_inputs'][:]
r_target_idxs = fr[key]['bert_target_idxs'][:]
r_nid2rows = fr[key]['bert_nid2rows'][:]
r_ng_edges = fr[key]['ng_edges'][:]
# sample a predicate node using the same manner as the answer
interested_eidxs = _get_indices_by_rtypes(r_ng_edges, interested_rel_idxs)
if len(interested_eidxs) == 0:
continue
eidx = interested_eidxs[random.randint(0, len(interested_eidxs)-1)]
c = _get_tail_node_repr_by_eidx(
rgid, eidx, r_ng_edges, r_nid2rows, r_binputs, r_target_idxs
)
choices.append(c)
# shuffle choices
choice_idxs = list(range(args.n_choices))
random.shuffle(choice_idxs)
correct = choice_idxs.index(0)
choices = [choices[cidx] for cidx in choice_idxs]
return (correct, choices)
def sample_node_multiple_choice_v2(
ng_edges,
bert_inputs, bert_target_idxs, bert_nid2rows,
interested_rel_idxs, fr, gid):
interested_eidxs = _get_indices_by_rtypes(ng_edges, interested_rel_idxs)
if len(interested_eidxs) == 0:
return None
pos_edges = set()
related_edges = ng_edges[:3, interested_eidxs].astype('int64')
for i in range(related_edges.shape[1]):
e = tuple(related_edges[:, i].flatten())
pos_edges.add(e)
eidx = interested_eidxs[random.randint(0, len(interested_eidxs)-1)]
new_edges = np.concatenate((ng_edges[:, :eidx], ng_edges[:, eidx+1:]), axis=1)
target_e = tuple(ng_edges[:3, eidx].astype('int64'))
n_nodes = bert_nid2rows.shape[0]
choices = [target_e]
while len(choices) < args.n_choices:
# try in-doc sampling
        # randomly pick a node
r_nid = random.randint(0, n_nodes-1) # we don't separate entity or predicate
r_e = (target_e[0], target_e[1], r_nid)
if r_e in pos_edges:
continue
choices.append(r_e)
# shuffle choices
choice_idxs = list(range(args.n_choices))
random.shuffle(choice_idxs)
correct = choice_idxs.index(0)
choices = [choices[cidx] for cidx in choice_idxs]
return (new_edges, correct, choices)
def sample_ep_questions(ng_edges, rtype2idx):
# join ep_edges by src node
src_nodes = {}
ep_ridxs = {rtype2idx['s'], rtype2idx['o'], rtype2idx['prep']}
n_edges = ng_edges.shape[1]
all_pos_edges = set()
entity_nids = set()
for i in range(n_edges):
e = tuple(ng_edges[:3, i].astype('int64'))
all_pos_edges.add(e)
if e[1] in ep_ridxs:
if e[0] not in src_nodes:
src_nodes[e[0]] = []
src_nodes[e[0]].append((i, e))
entity_nids.add(int(e[2]))
if len(entity_nids) < args.n_choices:
return None, None
    # Task 1: randomly pick an event node with 3 ep edges; predict edge types
candidate_sources = {src: es for src, es in src_nodes.items() if len(es) >= 3}
if len(candidate_sources) == 0:
return None, None
keys = list(candidate_sources.keys())
src_nid = keys[random.randint(0, len(candidate_sources)-1)]
edges = candidate_sources[src_nid]
q_link = (src_nid, edges) # question
    # Task 2: randomly pick one edge; predict one entity
entity_nid_list = list(entity_nids)
r_eidx, r_edge = edges[random.randint(0, len(edges)-1)]
answer = r_edge[2]
choices = [answer]
while len(choices) < args.n_choices:
r_nid = entity_nid_list[random.randint(0, len(entity_nid_list)-1)]
r_tri = (r_edge[0], r_edge[1], r_nid)
if r_tri in all_pos_edges:
continue
choices.append(r_nid)
idxs = list(range(len(choices)))
random.shuffle(idxs)
correct = idxs.index(0)
choices = [choices[i] for i in idxs]
q_entity = (r_eidx, r_edge, correct, choices) # question
return q_link, q_entity
def main():
config = json.load(open(args.config_file))
assert config["config_target"] == "narrative_graph"
rtype2idx = config['rtype2idx']
if config['no_entity']:
ep_rtype_rev = {}
ent_pred_ridxs = set()
else:
ep_rtype_rev = {rtype2idx[v]: rtype2idx[k] for k, v in
config['entity_predicate_rtypes'].items()}
ent_pred_ridxs = set(ep_rtype_rev.keys())
n_rtypes = len(rtype2idx)
pred_pred_ridxs = set(range(n_rtypes)) - ent_pred_ridxs
disc_pred_pred_ridxs = pred_pred_ridxs - {rtype2idx['next'], rtype2idx['cnext']}
t2 = time.time()
q_counts = {
'pp_coref_next': 0,
'pp_next': 0,
'pp_discourse_next': {},
'ep_link': 0,
'ep_entity': {}
}
count_gids = 0
fs = sorted([f for f in os.listdir(args.input_dir) if f.endswith('.h5')])
for f in fs:
fpath = os.path.join(args.input_dir, f)
logger.info('processing {}...'.format(fpath))
fr = h5py.File(fpath, 'r')
questions = OrderedDict()
for gn in tqdm(fr.keys()):
questions[gn] = {}
gid = int(gn.split('_')[-1])
bert_inputs = fr[gn]['bert_inputs'][:]
bert_target_idxs = fr[gn]['bert_target_idxs'][:]
bert_nid2rows = fr[gn]['bert_nid2rows'][:]
ng_edges = fr[gn]['ng_edges'][:]
n_nodes = bert_nid2rows.shape[0]
# # sample PP_COREF_NEXT task
q = sample_node_multiple_choice_v2(
ng_edges, bert_inputs, bert_target_idxs, bert_nid2rows,
{rtype2idx['cnext']}, fr, gid)
questions[gn]['pp_coref_next'] = q
if q is not None:
q_counts['pp_coref_next'] += 1
# sample PP_NEXT task
q = sample_node_multiple_choice_v2(
ng_edges, bert_inputs, bert_target_idxs, bert_nid2rows,
{rtype2idx['next']}, fr, gid)
questions[gn]['pp_next'] = q
if q is not None:
q_counts['pp_next'] += 1
# sample PP_DISCOURSE_NEXT task
q = sample_node_multiple_choice_v2(
ng_edges, bert_inputs, bert_target_idxs, bert_nid2rows,
disc_pred_pred_ridxs, fr, gid)
questions[gn]['pp_discourse_next'] = q
if q is not None: # count by rtypes
ans = q[2][q[1]]
rtype = ans[1]
# ans = q[1][q[0]]
# rtype = int(ans['edge'][1])
if rtype not in q_counts['pp_discourse_next']:
q_counts['pp_discourse_next'][rtype] = 0
q_counts['pp_discourse_next'][rtype] += 1
# sample PP_DISCOURSE_LINK_TYPE task
# reuse the above links
# sample PP_DISCOURSE_TRIPLET task
# evaluate on the sampled test set
if not config['no_entity']:
# sample EP_LINK_TYPE
q_link, q_entity = sample_ep_questions(ng_edges, rtype2idx)
questions[gn]['ep_link'] = q_link
if q_link is not None:
q_counts['ep_link'] += 1
# # sample EP_NODE task
questions[gn]['ep_entity'] = q_entity
if q_entity is not None:
rtype = int(q_entity[1][1])
if rtype not in q_counts['ep_entity']:
q_counts['ep_entity'][rtype] = 0
q_counts['ep_entity'][rtype] += 1
count_gids += 1
fr.close()
# dump questions for a file
fn = '.'.join(f.split('.')[:-1])
fpath = os.path.join(args.output_dir, 'q_{}.pkl'.format(fn))
logger.info('dumping {}...'.format(fpath))
pkl.dump(questions, open(fpath, 'wb'))
logger.info('#graphs = {}'.format(count_gids))
logger.info('q_counts = {}'.format(q_counts))
if __name__ == "__main__":
args = utils.bin_config(get_arguments)
logger = utils.get_root_logger(args)
main()
|
[
"nglib.common.utils.get_root_logger",
"nglib.common.utils.bin_config",
"h5py.File",
"numpy.random.seed",
"argparse.ArgumentParser",
"random.randint",
"torch.manual_seed",
"random.shuffle",
"time.time",
"random.seed",
"collections.OrderedDict",
"os.path.join",
"os.listdir",
"numpy.concatenate"
] |
[((305, 370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""more intrinsic evaluations"""'}), "(description='more intrinsic evaluations')\n", (328, 370), False, 'import argparse\n'), ((1360, 1377), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1371, 1377), False, 'import random\n'), ((1382, 1402), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1396, 1402), True, 'import numpy as np\n'), ((1407, 1430), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1424, 1430), False, 'import torch\n'), ((4091, 4118), 'random.shuffle', 'random.shuffle', (['choice_idxs'], {}), '(choice_idxs)\n', (4105, 4118), False, 'import random\n'), ((4818, 4886), 'numpy.concatenate', 'np.concatenate', (['(ng_edges[:, :eidx], ng_edges[:, eidx + 1:])'], {'axis': '(1)'}), '((ng_edges[:, :eidx], ng_edges[:, eidx + 1:]), axis=1)\n', (4832, 4886), True, 'import numpy as np\n'), ((5385, 5412), 'random.shuffle', 'random.shuffle', (['choice_idxs'], {}), '(choice_idxs)\n', (5399, 5412), False, 'import random\n'), ((7045, 7065), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (7059, 7065), False, 'import random\n'), ((7844, 7855), 'time.time', 'time.time', ([], {}), '()\n', (7853, 7855), False, 'import time\n'), ((11230, 11261), 'nglib.common.utils.bin_config', 'utils.bin_config', (['get_arguments'], {}), '(get_arguments)\n', (11246, 11261), False, 'from nglib.common import utils\n'), ((11275, 11302), 'nglib.common.utils.get_root_logger', 'utils.get_root_logger', (['args'], {}), '(args)\n', (11296, 11302), False, 'from nglib.common import utils\n'), ((5117, 5147), 'random.randint', 'random.randint', (['(0)', '(n_nodes - 1)'], {}), '(0, n_nodes - 1)\n', (5131, 5147), False, 'import random\n'), ((8138, 8169), 'os.path.join', 'os.path.join', (['args.input_dir', 'f'], {}), '(args.input_dir, f)\n', (8150, 8169), False, 'import os\n'), ((8238, 8259), 'h5py.File', 'h5py.File', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (8247, 8259), False, 'import h5py\n'), ((8280, 8293), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8291, 8293), False, 'from collections import OrderedDict\n'), ((8055, 8081), 'os.listdir', 'os.listdir', (['args.input_dir'], {}), '(args.input_dir)\n', (8065, 8081), False, 'import os\n')]
|
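The answer-shuffling pattern shared by the samplers above, isolated: the true answer is placed at index 0, the index list is shuffled, and its new position is recovered with .index(0).

import random

choices = ['answer', 'distractor_a', 'distractor_b', 'distractor_c']
choice_idxs = list(range(len(choices)))
random.shuffle(choice_idxs)
correct = choice_idxs.index(0)                    # where the true answer landed
choices = [choices[cidx] for cidx in choice_idxs]
assert choices[correct] == 'answer'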
# RUN: %PYTHON %s
import numpy as np
from shark.shark_importer import SharkImporter
import pytest
model_path = "https://tfhub.dev/tensorflow/lite-model/albert_lite_base/squadv1/1?lite-format=tflite"
# Inputs modified to be useful ALBERT inputs.
def generate_inputs(input_details):
    for detail in input_details:  # avoid shadowing the builtin 'input'
        print(str(detail["shape"]), detail["dtype"].__name__)
args = []
args.append(
np.random.randint(
low=0,
high=256,
size=input_details[0]["shape"],
dtype=input_details[0]["dtype"],
)
)
args.append(
np.ones(
shape=input_details[1]["shape"], dtype=input_details[1]["dtype"]
)
)
args.append(
np.zeros(
shape=input_details[2]["shape"], dtype=input_details[2]["dtype"]
)
)
return args
if __name__ == "__main__":
my_shark_importer = SharkImporter(
model_path=model_path,
model_type="tflite",
model_source_hub="tfhub",
device="cpu",
dynamic=False,
jit_trace=True,
)
# Case1: Use default inputs
my_shark_importer.compile()
shark_results = my_shark_importer.forward()
# Case2: Use manually set inputs
input_details, output_details = my_shark_importer.get_model_details()
inputs = generate_inputs(input_details) # device_inputs
my_shark_importer.compile(inputs)
shark_results = my_shark_importer.forward(inputs)
# print(shark_results)
|
[
"numpy.random.randint",
"numpy.zeros",
"numpy.ones",
"shark.shark_importer.SharkImporter"
] |
[((905, 1038), 'shark.shark_importer.SharkImporter', 'SharkImporter', ([], {'model_path': 'model_path', 'model_type': '"""tflite"""', 'model_source_hub': '"""tfhub"""', 'device': '"""cpu"""', 'dynamic': '(False)', 'jit_trace': '(True)'}), "(model_path=model_path, model_type='tflite', model_source_hub=\n 'tfhub', device='cpu', dynamic=False, jit_trace=True)\n", (918, 1038), False, 'from shark.shark_importer import SharkImporter\n'), ((416, 520), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(256)', 'size': "input_details[0]['shape']", 'dtype': "input_details[0]['dtype']"}), "(low=0, high=256, size=input_details[0]['shape'], dtype=\n input_details[0]['dtype'])\n", (433, 520), True, 'import numpy as np\n'), ((606, 679), 'numpy.ones', 'np.ones', ([], {'shape': "input_details[1]['shape']", 'dtype': "input_details[1]['dtype']"}), "(shape=input_details[1]['shape'], dtype=input_details[1]['dtype'])\n", (613, 679), True, 'import numpy as np\n'), ((733, 807), 'numpy.zeros', 'np.zeros', ([], {'shape': "input_details[2]['shape']", 'dtype': "input_details[2]['dtype']"}), "(shape=input_details[2]['shape'], dtype=input_details[2]['dtype'])\n", (741, 807), True, 'import numpy as np\n')]
|
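generate_inputs can be exercised without a real TFLite interpreter by faking input_details in the shape interpreters report; the three (1, 384) int32 tensors below mirror typical ALBERT inputs but are assumptions.

import numpy as np

details = [
    {'shape': np.array([1, 384]), 'dtype': np.int32},
    {'shape': np.array([1, 384]), 'dtype': np.int32},
    {'shape': np.array([1, 384]), 'dtype': np.int32},
]
args = generate_inputs(details)   # function defined above
print([a.shape for a in args])     # three (1, 384) arrays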
from typing import List, Tuple, Optional
import numpy as np
import os
import torch
from torch import nn
from environments.environment_abstract import Environment, State
from collections import OrderedDict
import re
from random import shuffle
from torch import Tensor
import torch.optim as optim
from torch.optim.optimizer import Optimizer
from torch.multiprocessing import Queue, get_context
import time
# training
def states_nnet_to_pytorch_input(states_nnet: List[np.ndarray], device) -> List[Tensor]:
states_nnet_tensors = []
for tensor_np in states_nnet:
tensor = torch.tensor(tensor_np, device=device)
states_nnet_tensors.append(tensor)
return states_nnet_tensors
def make_batches(states_nnet: List[np.ndarray], outputs: np.ndarray,
batch_size: int) -> List[Tuple[List[np.ndarray], np.ndarray]]:
num_examples = outputs.shape[0]
rand_idxs = np.random.choice(num_examples, num_examples, replace=False)
outputs = outputs.astype(np.float32)
start_idx = 0
batches = []
while (start_idx + batch_size) <= num_examples:
end_idx = start_idx + batch_size
idxs = rand_idxs[start_idx:end_idx]
inputs_batch = [x[idxs] for x in states_nnet]
outputs_batch = outputs[idxs]
batches.append((inputs_batch, outputs_batch))
start_idx = end_idx
return batches
def train_nnet(nnet: nn.Module, states_nnet: List[np.ndarray], outputs: np.ndarray, device: torch.device,
batch_size: int, num_itrs: int, train_itr: int, lr: float, lr_d: float, display: bool = True) -> float:
# optimization
display_itrs = 100
criterion = nn.MSELoss()
optimizer: Optimizer = optim.Adam(nnet.parameters(), lr=lr)
# initialize status tracking
start_time = time.time()
# train network
batches: List[Tuple[List, np.ndarray]] = make_batches(states_nnet, outputs, batch_size)
nnet.train()
max_itrs: int = train_itr + num_itrs
last_loss: float = np.inf
batch_idx: int = 0
while train_itr < max_itrs:
# zero the parameter gradients
optimizer.zero_grad()
lr_itr: float = lr * (lr_d ** train_itr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_itr
# get data
inputs_batch, targets_batch_np = batches[batch_idx]
targets_batch_np = targets_batch_np.astype(np.float32)
# send data to device
states_batch: List[Tensor] = states_nnet_to_pytorch_input(inputs_batch, device)
targets_batch: Tensor = torch.tensor(targets_batch_np, device=device)
# forward
nnet_outputs_batch: Tensor = nnet(*states_batch)
# cost
nnet_cost_to_go = nnet_outputs_batch[:, 0]
target_cost_to_go = targets_batch[:, 0]
loss = criterion(nnet_cost_to_go, target_cost_to_go)
# backwards
loss.backward()
# step
optimizer.step()
last_loss = loss.item()
# display progress
if (train_itr % display_itrs == 0) and display:
print("Itr: %i, lr: %.2E, loss: %.2f, targ_ctg: %.2f, nnet_ctg: %.2f, "
"Time: %.2f" % (
train_itr, lr_itr, loss.item(), target_cost_to_go.mean().item(), nnet_cost_to_go.mean().item(),
time.time() - start_time))
start_time = time.time()
train_itr = train_itr + 1
batch_idx += 1
if batch_idx >= len(batches):
shuffle(batches)
batch_idx = 0
return last_loss
# pytorch device
def get_device() -> Tuple[torch.device, List[int], bool]:
device: torch.device = torch.device("cpu")
devices: List[int] = get_available_gpu_nums()
on_gpu: bool = False
if devices and torch.cuda.is_available():
device = torch.device("cuda:%i" % 0)
on_gpu = True
return device, devices, on_gpu
# loading nnet
def load_nnet(model_file: str, nnet: nn.Module, device: torch.device = None) -> nn.Module:
# get state dict
if device is None:
state_dict = torch.load(model_file)
else:
state_dict = torch.load(model_file, map_location=device)
# remove module prefix
new_state_dict = OrderedDict()
for k, v in state_dict.items():
        k = re.sub(r'^module\.', '', k)  # raw string avoids an invalid-escape warning
new_state_dict[k] = v
# set state dict
nnet.load_state_dict(new_state_dict)
nnet.eval()
return nnet
# heuristic
def get_heuristic_fn(nnet: nn.Module, device: torch.device, env: Environment, clip_zero: bool = False,
batch_size: Optional[int] = None):
nnet.eval()
def heuristic_fn(states: List, is_nnet_format: bool = False) -> np.ndarray:
cost_to_go: np.ndarray = np.zeros(0)
if not is_nnet_format:
num_states: int = len(states)
else:
num_states: int = states[0].shape[0]
batch_size_inst: int = num_states
if batch_size is not None:
batch_size_inst = batch_size
start_idx: int = 0
while start_idx < num_states:
# get batch
end_idx: int = min(start_idx + batch_size_inst, num_states)
# convert to nnet input
if not is_nnet_format:
states_batch: List = states[start_idx:end_idx]
states_nnet_batch: List[np.ndarray] = env.state_to_nnet_input(states_batch)
else:
states_nnet_batch = [x[start_idx:end_idx] for x in states]
# get nnet output
states_nnet_batch_tensors = states_nnet_to_pytorch_input(states_nnet_batch, device)
cost_to_go_batch: np.ndarray = nnet(*states_nnet_batch_tensors).cpu().data.numpy()
cost_to_go: np.ndarray = np.concatenate((cost_to_go, cost_to_go_batch[:, 0]), axis=0)
start_idx: int = end_idx
assert (cost_to_go.shape[0] == num_states)
if clip_zero:
cost_to_go = np.maximum(cost_to_go, 0.0)
return cost_to_go
return heuristic_fn
def get_available_gpu_nums() -> List[int]:
devices: Optional[str] = os.environ.get('CUDA_VISIBLE_DEVICES')
return [int(x) for x in devices.split(',')] if devices else []
def load_heuristic_fn(nnet_dir: str, device: torch.device, on_gpu: bool, nnet: nn.Module, env: Environment,
clip_zero: bool = False, gpu_num: int = -1, batch_size: Optional[int] = None):
if (gpu_num >= 0) and on_gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_num)
model_file = "%s/model_state_dict.pt" % nnet_dir
nnet = load_nnet(model_file, nnet, device=device)
nnet.eval()
nnet.to(device)
if on_gpu:
nnet = nn.DataParallel(nnet)
heuristic_fn = get_heuristic_fn(nnet, device, env, clip_zero=clip_zero, batch_size=batch_size)
return heuristic_fn
def heuristic_fn_par(states: List[State], env: Environment, heur_fn_i_q, heur_fn_o_qs):
num_parallel: int = len(heur_fn_o_qs)
# Write data
states_nnet: List[np.ndarray] = env.state_to_nnet_input(states)
parallel_nums = range(min(num_parallel, len(states)))
split_idxs = np.array_split(np.arange(len(states)), len(parallel_nums))
for idx in parallel_nums:
states_nnet_idx = [x[split_idxs[idx]] for x in states_nnet]
heur_fn_i_q.put((idx, states_nnet_idx))
    # Collect results until all data is obtained
results = [None]*len(parallel_nums)
for idx in parallel_nums:
results[idx] = heur_fn_o_qs[idx].get()
results = np.concatenate(results, axis=0)
return results
# parallel training
def heuristic_fn_queue(heuristic_fn_input_queue, heuristic_fn_output_queue, proc_id, env: Environment):
def heuristic_fn(states):
states_nnet = env.state_to_nnet_input(states)
heuristic_fn_input_queue.put((proc_id, states_nnet))
heuristics = heuristic_fn_output_queue.get()
return heuristics
return heuristic_fn
def heuristic_fn_runner(heuristic_fn_input_queue: Queue, heuristic_fn_output_queues, nnet_dir: str,
device, on_gpu: bool, gpu_num: int, env: Environment, all_zeros: bool,
clip_zero: bool, batch_size: Optional[int]):
heuristic_fn = None
if not all_zeros:
heuristic_fn = load_heuristic_fn(nnet_dir, device, on_gpu, env.get_nnet_model(), env, gpu_num=gpu_num,
clip_zero=clip_zero, batch_size=batch_size)
while True:
proc_id, states_nnet = heuristic_fn_input_queue.get()
if proc_id is None:
break
if all_zeros:
            heuristics = np.zeros(states_nnet[0].shape[0], dtype=np.float64)  # np.float was removed in NumPy 1.24
else:
heuristics = heuristic_fn(states_nnet, is_nnet_format=True)
heuristic_fn_output_queues[proc_id].put(heuristics)
return heuristic_fn
def start_heur_fn_runners(num_procs: int, nnet_dir: str, device, on_gpu: bool, env: Environment,
all_zeros: bool = False, clip_zero: bool = False, batch_size: Optional[int] = None):
ctx = get_context("spawn")
heuristic_fn_input_queue: ctx.Queue = ctx.Queue()
heuristic_fn_output_queues: List[ctx.Queue] = []
for _ in range(num_procs):
heuristic_fn_output_queue: ctx.Queue = ctx.Queue(1)
heuristic_fn_output_queues.append(heuristic_fn_output_queue)
# initialize heuristic procs
gpu_nums = get_available_gpu_nums() or [-1]
heur_procs: List[ctx.Process] = []
for gpu_num in gpu_nums:
heur_proc = ctx.Process(target=heuristic_fn_runner,
args=(heuristic_fn_input_queue, heuristic_fn_output_queues,
nnet_dir, device, on_gpu, gpu_num, env, all_zeros, clip_zero, batch_size))
heur_proc.daemon = True
heur_proc.start()
heur_procs.append(heur_proc)
return heuristic_fn_input_queue, heuristic_fn_output_queues, heur_procs
def stop_heuristic_fn_runners(heur_procs, heuristic_fn_input_queue):
for _ in heur_procs:
heuristic_fn_input_queue.put((None, None))
for heur_proc in heur_procs:
heur_proc.join()
|
[
"numpy.random.choice",
"torch.nn.MSELoss",
"numpy.maximum",
"random.shuffle",
"torch.load",
"torch.multiprocessing.get_context",
"numpy.zeros",
"torch.nn.DataParallel",
"time.time",
"os.environ.get",
"torch.cuda.is_available",
"torch.device",
"collections.OrderedDict",
"torch.tensor",
"re.sub",
"numpy.concatenate"
] |
[((904, 963), 'numpy.random.choice', 'np.random.choice', (['num_examples', 'num_examples'], {'replace': '(False)'}), '(num_examples, num_examples, replace=False)\n', (920, 963), True, 'import numpy as np\n'), ((1661, 1673), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1671, 1673), False, 'from torch import nn\n'), ((1789, 1800), 'time.time', 'time.time', ([], {}), '()\n', (1798, 1800), False, 'import time\n'), ((3664, 3683), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3676, 3683), False, 'import torch\n'), ((4228, 4241), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4239, 4241), False, 'from collections import OrderedDict\n'), ((6109, 6147), 'os.environ.get', 'os.environ.get', (['"""CUDA_VISIBLE_DEVICES"""'], {}), "('CUDA_VISIBLE_DEVICES')\n", (6123, 6147), False, 'import os\n'), ((7510, 7541), 'numpy.concatenate', 'np.concatenate', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (7524, 7541), True, 'import numpy as np\n'), ((9061, 9081), 'torch.multiprocessing.get_context', 'get_context', (['"""spawn"""'], {}), "('spawn')\n", (9072, 9081), False, 'from torch.multiprocessing import Queue, get_context\n'), ((586, 624), 'torch.tensor', 'torch.tensor', (['tensor_np'], {'device': 'device'}), '(tensor_np, device=device)\n', (598, 624), False, 'import torch\n'), ((2561, 2606), 'torch.tensor', 'torch.tensor', (['targets_batch_np'], {'device': 'device'}), '(targets_batch_np, device=device)\n', (2573, 2606), False, 'import torch\n'), ((3778, 3803), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3801, 3803), False, 'import torch\n'), ((3822, 3849), 'torch.device', 'torch.device', (["('cuda:%i' % 0)"], {}), "('cuda:%i' % 0)\n", (3834, 3849), False, 'import torch\n'), ((4081, 4103), 'torch.load', 'torch.load', (['model_file'], {}), '(model_file)\n', (4091, 4103), False, 'import torch\n'), ((4135, 4178), 'torch.load', 'torch.load', (['model_file'], {'map_location': 'device'}), '(model_file, map_location=device)\n', (4145, 4178), False, 'import torch\n'), ((4290, 4317), 're.sub', 're.sub', (['"""^module\\\\."""', '""""""', 'k'], {}), "('^module\\\\.', '', k)\n", (4296, 4317), False, 'import re\n'), ((4747, 4758), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (4755, 4758), True, 'import numpy as np\n'), ((6693, 6714), 'torch.nn.DataParallel', 'nn.DataParallel', (['nnet'], {}), '(nnet)\n', (6708, 6714), False, 'from torch import nn\n'), ((3374, 3385), 'time.time', 'time.time', ([], {}), '()\n', (3383, 3385), False, 'import time\n'), ((3495, 3511), 'random.shuffle', 'shuffle', (['batches'], {}), '(batches)\n', (3502, 3511), False, 'from random import shuffle\n'), ((5756, 5816), 'numpy.concatenate', 'np.concatenate', (['(cost_to_go, cost_to_go_batch[:, 0])'], {'axis': '(0)'}), '((cost_to_go, cost_to_go_batch[:, 0]), axis=0)\n', (5770, 5816), True, 'import numpy as np\n'), ((5955, 5982), 'numpy.maximum', 'np.maximum', (['cost_to_go', '(0.0)'], {}), '(cost_to_go, 0.0)\n', (5965, 5982), True, 'import numpy as np\n'), ((8619, 8668), 'numpy.zeros', 'np.zeros', (['states_nnet[0].shape[0]'], {'dtype': 'np.float'}), '(states_nnet[0].shape[0], dtype=np.float)\n', (8627, 8668), True, 'import numpy as np\n'), ((3321, 3332), 'time.time', 'time.time', ([], {}), '()\n', (3330, 3332), False, 'import time\n')]
|
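A small check of make_batches above; note the while-loop drops the final partial batch, so 20 examples with batch size 8 yield exactly two batches.

import numpy as np

states = [np.random.rand(20, 4).astype(np.float32),
          np.random.rand(20, 2).astype(np.float32)]
outputs = np.random.rand(20, 1)
batches = make_batches(states, outputs, batch_size=8)   # defined above
print(len(batches))             # 2 -- the 4-example remainder is dropped
print(batches[0][0][0].shape)   # (8, 4)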
import cv2
import numpy as np
from imutils import perspective, rotate_bound
from pymatting import estimate_alpha_knn, estimate_foreground_ml, stack_images
from typing import Tuple
PAPER_SIZE = (1485, 1050)
def find_paper(image_bgr: np.ndarray) -> np.ndarray:
image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
paper_mask = cv2.inRange(image_hsv, (0, 0, 90), (180, 60, 255))
contours, _ = cv2.findContours(paper_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
paper_contour = max(contours, key=cv2.contourArea)
eps = 1
while paper_contour.shape[0] > 4:
paper_contour = cv2.approxPolyDP(paper_contour, eps, True)
eps += 1
paper_contour = np.squeeze(paper_contour)
paper_image_bgr = perspective.four_point_transform(image_bgr, paper_contour)
return cv2.resize(paper_image_bgr, PAPER_SIZE if image_bgr.shape[1] > image_bgr.shape[0] else PAPER_SIZE[::-1])
def get_object_trimap(paper_image_bgr: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
paper_image_gray = cv2.cvtColor(paper_image_bgr, cv2.COLOR_BGR2GRAY)
    # Reshape the grayscale image into a 2D array with one intensity value per pixel
pixel_vals = paper_image_gray.reshape((-1, 1))
# Convert to float type
pixel_vals = np.float32(pixel_vals)
k = 3
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)  # kmeans needs explicit criteria and attempts
    retval, labels, centers = cv2.kmeans(pixel_vals, k, None, criteria, 10, cv2.KMEANS_PP_CENTERS)
# convert data into 8-bit values
centers = np.uint8(centers)
darkest_component_mask = np.uint8(np.ones(paper_image_gray.shape) * 255)
darkest_component_mask[labels.reshape(paper_image_gray.shape) == np.argmin(centers)] = 0
contours, _ = cv2.findContours(darkest_component_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours_new = []
border_size = 5
for contour in contours:
if np.min(contour[:, :, 0]) > border_size \
and np.min(contour[:, :, 1]) > border_size \
and np.max(contour[:, :, 0]) < darkest_component_mask.shape[1] - border_size \
and np.max(contour[:, :, 1]) < darkest_component_mask.shape[0] - border_size \
and cv2.contourArea(contour) > 150:
contours_new.append(contour)
convex_hulls = []
for contour_new in contours_new:
convex_hulls.append(cv2.convexHull(contour_new))
convex_hull = cv2.convexHull(np.concatenate(convex_hulls))
mask_by_countour = np.uint8(np.ones(paper_image_gray.shape) * 255)
cv2.drawContours(mask_by_countour, [convex_hull], -1, 0, -1)
    eroded_mask_by_countour = cv2.erode(mask_by_countour, np.ones((30, 30), np.uint8), iterations=9)  # kernel must be an array, not a tuple
trimap = 255 - eroded_mask_by_countour
trimap[trimap == 255] = 128
trimap[np.logical_and(trimap == 128, labels.reshape(paper_image_gray.shape) == np.argmin(centers))] = 255
return trimap, convex_hull
def find_object(image: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
image_bgr = image
image_bgr = cv2.resize(image_bgr, (1920, 1080) if image_bgr.shape[1] > image_bgr.shape[0] else (1080, 1920))
paper_image_bgr = find_paper(image_bgr)
trimap, convex_hull = get_object_trimap(paper_image_bgr)
paper_image_bgr_scaled = cv2.cvtColor(paper_image_bgr, cv2.COLOR_BGR2RGB) / 255.0
trimap_scaled = trimap / 255.0
# alpha = estimate_alpha_knn(paper_image_bgr_scaled, trimap_scaled)
alpha = np.zeros_like(trimap_scaled)
alpha[trimap_scaled > 0] = 1
return paper_image_bgr, np.squeeze(convex_hull, 1), np.uint8(alpha * 255)
|
[
"cv2.approxPolyDP",
"numpy.ones",
"numpy.argmin",
"cv2.erode",
"cv2.inRange",
"cv2.contourArea",
"numpy.zeros_like",
"cv2.cvtColor",
"numpy.max",
"imutils.perspective.four_point_transform",
"cv2.drawContours",
"cv2.resize",
"numpy.uint8",
"numpy.min",
"cv2.convexHull",
"numpy.squeeze",
"numpy.concatenate",
"numpy.float32",
"cv2.kmeans",
"cv2.findContours"
] |
[((280, 322), 'cv2.cvtColor', 'cv2.cvtColor', (['image_bgr', 'cv2.COLOR_BGR2HSV'], {}), '(image_bgr, cv2.COLOR_BGR2HSV)\n', (292, 322), False, 'import cv2\n'), ((340, 390), 'cv2.inRange', 'cv2.inRange', (['image_hsv', '(0, 0, 90)', '(180, 60, 255)'], {}), '(image_hsv, (0, 0, 90), (180, 60, 255))\n', (351, 390), False, 'import cv2\n'), ((409, 481), 'cv2.findContours', 'cv2.findContours', (['paper_mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(paper_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (425, 481), False, 'import cv2\n'), ((691, 716), 'numpy.squeeze', 'np.squeeze', (['paper_contour'], {}), '(paper_contour)\n', (701, 716), True, 'import numpy as np\n'), ((739, 797), 'imutils.perspective.four_point_transform', 'perspective.four_point_transform', (['image_bgr', 'paper_contour'], {}), '(image_bgr, paper_contour)\n', (771, 797), False, 'from imutils import perspective, rotate_bound\n'), ((809, 918), 'cv2.resize', 'cv2.resize', (['paper_image_bgr', '(PAPER_SIZE if image_bgr.shape[1] > image_bgr.shape[0] else PAPER_SIZE[::-1])'], {}), '(paper_image_bgr, PAPER_SIZE if image_bgr.shape[1] > image_bgr.\n shape[0] else PAPER_SIZE[::-1])\n', (819, 918), False, 'import cv2\n'), ((1024, 1073), 'cv2.cvtColor', 'cv2.cvtColor', (['paper_image_bgr', 'cv2.COLOR_BGR2GRAY'], {}), '(paper_image_bgr, cv2.COLOR_BGR2GRAY)\n', (1036, 1073), False, 'import cv2\n'), ((1247, 1269), 'numpy.float32', 'np.float32', (['pixel_vals'], {}), '(pixel_vals)\n', (1257, 1269), True, 'import numpy as np\n'), ((1310, 1376), 'cv2.kmeans', 'cv2.kmeans', (['pixel_vals', 'k', 'None', 'None', 'None', 'cv2.KMEANS_PP_CENTERS'], {}), '(pixel_vals, k, None, None, None, cv2.KMEANS_PP_CENTERS)\n', (1320, 1376), False, 'import cv2\n'), ((1428, 1445), 'numpy.uint8', 'np.uint8', (['centers'], {}), '(centers)\n', (1436, 1445), True, 'import numpy as np\n'), ((1636, 1721), 'cv2.findContours', 'cv2.findContours', (['darkest_component_mask', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(darkest_component_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE\n )\n', (1652, 1721), False, 'import cv2\n'), ((2440, 2500), 'cv2.drawContours', 'cv2.drawContours', (['mask_by_countour', '[convex_hull]', '(-1)', '(0)', '(-1)'], {}), '(mask_by_countour, [convex_hull], -1, 0, -1)\n', (2456, 2500), False, 'import cv2\n'), ((2531, 2582), 'cv2.erode', 'cv2.erode', (['mask_by_countour', '(30, 30)'], {'iterations': '(9)'}), '(mask_by_countour, (30, 30), iterations=9)\n', (2540, 2582), False, 'import cv2\n'), ((2920, 3021), 'cv2.resize', 'cv2.resize', (['image_bgr', '((1920, 1080) if image_bgr.shape[1] > image_bgr.shape[0] else (1080, 1920))'], {}), '(image_bgr, (1920, 1080) if image_bgr.shape[1] > image_bgr.shape[\n 0] else (1080, 1920))\n', (2930, 3021), False, 'import cv2\n'), ((3330, 3358), 'numpy.zeros_like', 'np.zeros_like', (['trimap_scaled'], {}), '(trimap_scaled)\n', (3343, 3358), True, 'import numpy as np\n'), ((611, 653), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['paper_contour', 'eps', '(True)'], {}), '(paper_contour, eps, True)\n', (627, 653), False, 'import cv2\n'), ((2334, 2362), 'numpy.concatenate', 'np.concatenate', (['convex_hulls'], {}), '(convex_hulls)\n', (2348, 2362), True, 'import numpy as np\n'), ((3153, 3201), 'cv2.cvtColor', 'cv2.cvtColor', (['paper_image_bgr', 'cv2.COLOR_BGR2RGB'], {}), '(paper_image_bgr, cv2.COLOR_BGR2RGB)\n', (3165, 3201), False, 'import cv2\n'), ((3421, 3447), 'numpy.squeeze', 'np.squeeze', (['convex_hull', '(1)'], {}), '(convex_hull, 1)\n', (3431, 3447), True, 'import numpy as np\n'), ((3449, 
3470), 'numpy.uint8', 'np.uint8', (['(alpha * 255)'], {}), '(alpha * 255)\n', (3457, 3470), True, 'import numpy as np\n'), ((1485, 1516), 'numpy.ones', 'np.ones', (['paper_image_gray.shape'], {}), '(paper_image_gray.shape)\n', (1492, 1516), True, 'import numpy as np\n'), ((1593, 1611), 'numpy.argmin', 'np.argmin', (['centers'], {}), '(centers)\n', (1602, 1611), True, 'import numpy as np\n'), ((2272, 2299), 'cv2.convexHull', 'cv2.convexHull', (['contour_new'], {}), '(contour_new)\n', (2286, 2299), False, 'import cv2\n'), ((2397, 2428), 'numpy.ones', 'np.ones', (['paper_image_gray.shape'], {}), '(paper_image_gray.shape)\n', (2404, 2428), True, 'import numpy as np\n'), ((1799, 1823), 'numpy.min', 'np.min', (['contour[:, :, 0]'], {}), '(contour[:, :, 0])\n', (1805, 1823), True, 'import numpy as np\n'), ((1860, 1884), 'numpy.min', 'np.min', (['contour[:, :, 1]'], {}), '(contour[:, :, 1])\n', (1866, 1884), True, 'import numpy as np\n'), ((1921, 1945), 'numpy.max', 'np.max', (['contour[:, :, 0]'], {}), '(contour[:, :, 0])\n', (1927, 1945), True, 'import numpy as np\n'), ((2016, 2040), 'numpy.max', 'np.max', (['contour[:, :, 1]'], {}), '(contour[:, :, 1])\n', (2022, 2040), True, 'import numpy as np\n'), ((2111, 2135), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (2126, 2135), False, 'import cv2\n'), ((2741, 2759), 'numpy.argmin', 'np.argmin', (['centers'], {}), '(centers)\n', (2750, 2759), True, 'import numpy as np\n')]
|
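The trimap convention produced by get_object_trimap above is 0 = background, 128 = unknown band, 255 = foreground. A minimal version of the same erosion trick on a synthetic mask (kernel size and iteration count are illustrative):

import cv2
import numpy as np

mask = np.zeros((100, 100), np.uint8)
cv2.circle(mask, (50, 50), 30, 255, -1)                 # synthetic object
sure_fg = cv2.erode(mask, np.ones((7, 7), np.uint8), iterations=3)
trimap = np.zeros_like(mask)
trimap[mask == 255] = 128      # object region starts as "unknown"
trimap[sure_fg == 255] = 255   # eroded core is certain foreground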
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk ,Image
import numpy as np
from keras.models import load_model
#load the model
model = load_model('traffic_classifier.h5')
# define the class labels in the dictionary
classes = { 1:'Speed limit (20km/h)',
2:'Speed limit (30km/h)',
3:'Speed limit (50km/h)',
4:'Speed limit (60km/h)',
5:'Speed limit (70km/h)',
6:'Speed limit (80km/h)',
7:'End of speed limit (80km/h)',
8:'Speed limit (100km/h)',
9:'Speed limit (120km/h)',
10:'No passing',
11:'No passing veh over 3.5 tons',
12:'Right-of-way at intersection',
13:'Priority road',
14:'Yield',
15:'Stop',
16:'No vehicles',
17:'Veh > 3.5 tons prohibited',
18:'No entry',
19:'General caution',
20:'Dangerous curve left',
21:'Dangerous curve right',
22:'Double curve',
23:'Bumpy road',
24:'Slippery road',
25:'Road narrows on the right',
26:'Road work',
27:'Traffic signals',
28:'Pedestrians',
29:'Children crossing',
30:'Bicycles crossing',
31:'Beware of ice/snow',
32:'Wild animals crossing',
33:'End speed + passing limits',
34:'Turn right ahead',
35:'Turn left ahead',
36:'Ahead only',
37:'Go straight or right',
38:'Go straight or left',
39:'Keep right',
40:'Keep left',
41:'Roundabout mandatory',
42:'End of no passing',
43:'End no passing veh > 3.5 tons' }
#initialize GUI
top = tk.Tk()
top.geometry('800x600')
top.title('Traffic Sign Classification')
top.configure(background = "#CDCDCD")
label = Label(top , background = "#CDCDCD" , font = ('arial' , 15, 'bold'))
sign_image = Label(top)
def classify(file_path):
image = Image.open(file_path)
image = image.resize((30 , 30) , Image.NEAREST)
image = np.expand_dims( image , axis = 0)
image = np.array(image)
    pred = int(np.argmax(model.predict(image), axis=-1)[0])  # predict_classes was removed from recent Keras
sign = classes[pred+1]
print(sign)
label.configure( foreground = "#011638" , text = sign)
def show_classify_button(file_path):
classify_b = Button( top , text = "Classsify Image" , command = lambda: classify(file_path), padx=10 , pady=5)
classify_b.configure(background= '#364156' , foreground = 'white', font= ('arial',10,'bold'))
classify_b.place(relx = 0.79 , rely = 0.46)
def upload_image():
try:
file_path = filedialog.askopenfilename()
uploaded = Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()) , (top.winfo_height())))
im = ImageTk.PhotoImage(uploaded)
sign_image.configure(image = im)
sign_image.image = im
label.configure(text = '')
show_classify_button(file_path)
    except Exception:
pass
upload = Button(top , text = "Upload an Image" , command = upload_image , padx = 10 , pady = 5)
upload.configure( background = "#364156" , foreground = 'white' , font = ('arial' , 10, 'bold'))
upload.pack(side = BOTTOM , pady = 50)
sign_image.pack(side = BOTTOM , expand =True)
label.pack( side =BOTTOM ,expand =True)
heading = Label(top , text = 'Know Your Traffic Sign' , pady = 20 , font = ('arial', 20 ,'bold'))
heading.configure(background = "#CDCDCD", foreground = "#364156")
heading.pack()
top.mainloop()
|
[
"keras.models.load_model",
"PIL.ImageTk.PhotoImage",
"numpy.expand_dims",
"tkinter.filedialog.askopenfilename",
"PIL.Image.open",
"numpy.array",
"tkinter.Tk"
] |
[((185, 220), 'keras.models.load_model', 'load_model', (['"""traffic_classifier.h5"""'], {}), "('traffic_classifier.h5')\n", (195, 220), False, 'from keras.models import load_model\n'), ((1873, 1880), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1878, 1880), True, 'import tkinter as tk\n'), ((2147, 2168), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (2157, 2168), False, 'from PIL import ImageTk, Image\n'), ((2233, 2262), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2247, 2262), True, 'import numpy as np\n'), ((2279, 2294), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2287, 2294), True, 'import numpy as np\n'), ((2791, 2819), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (2817, 2819), False, 'from tkinter import filedialog\n'), ((2839, 2860), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (2849, 2860), False, 'from PIL import ImageTk, Image\n'), ((2948, 2976), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['uploaded'], {}), '(uploaded)\n', (2966, 2976), False, 'from PIL import ImageTk, Image\n')]
|
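The preprocessing that classify above performs, as a standalone sketch; the file path is hypothetical.

import numpy as np
from PIL import Image

img = Image.open('sign.png')                    # hypothetical path
img = img.resize((30, 30), Image.NEAREST)
batch = np.expand_dims(np.array(img), axis=0)   # (1, 30, 30, 3) for an RGB image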
import matplotlib.pyplot as plt
import numpy as np
from typing import Callable
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets, layers, models
import skimage
from skimage.metrics import structural_similarity as ssim
from sklearn.model_selection import train_test_split
from deep_raman import utils
from deep_raman import metrics
import streamlit as st
def main(num_epochs: int, loss_function: Callable):
x = np.linspace(-200, 200, 1024)
X, y = utils.generate_training_set(x, num_base_examples=64)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
x_train = np.array(X_train).reshape(-1, 1024, 1)
x_test = np.array(X_test).reshape(-1, 1024, 1)
y_train = np.array(y_train).reshape(-1, 1024, 1)
y_test = np.array(y_test).reshape(-1, 1024, 1)
inputs = keras.Input(shape=(32 * 32, 1))
x = layers.BatchNormalization(axis=-1)(inputs)
    x = layers.Conv1D(16, 16, input_shape=(32 * 32, 1))(x)  # feed the normalized tensor, not the raw inputs
x = layers.MaxPooling1D(2)(x)
x = layers.Conv1D(16, 16, 16)(x)
x = layers.MaxPooling1D(3)(x)
x = layers.Conv1D(64, 10)(x)
outputs = layers.Conv1DTranspose(1, 1024)(x)
model = models.Model(inputs=inputs, outputs=outputs, name="cnn_model")
model.compile(
loss=loss_function,
optimizer=keras.optimizers.Nadam(learning_rate=3e-3),
metrics=["mae", "mape"],
)
history = model.fit(
x_train,
y_train,
batch_size=64,
epochs=num_epochs,
validation_split=0.2,
)
test_scores = model.evaluate(x_test, y_test, verbose=2)
st.write("Test loss:", test_scores[0])
st.write("Test accuracy:", test_scores[1])
sample_input, sample_prediction_, sample_target_ = (
x_train[0:1],
model.predict(x_train[0:1]),
y_train[0:1],
)
return sample_input, sample_prediction_, sample_target_
if __name__ == "__main__":
loss_options = {
"peak signal to noise ratio": metrics.psnr_loss,
"mean absolute error": keras.losses.mean_absolute_error,
"mean squared error": keras.losses.mean_squared_error,
}
NUM_EPOCHS = st.selectbox("Number of epochs", [10**i for i in range(0, 3)])
loss_choice = st.selectbox("Loss function", loss_options.keys())
LOSS_FUNCTION = loss_options[loss_choice]
sample_input, sample_prediction_, sample_target_ = main(NUM_EPOCHS, LOSS_FUNCTION)
fig = plt.figure(figsize=(12, 8))
plt.subplot(311)
plt.title("Sample Input")
plt.plot(sample_input.ravel())
plt.subplot(312)
plt.title("Sample Prediction")
plt.plot(sample_prediction_.ravel())
plt.subplot(313)
plt.title("Sample Target")
plt.plot(sample_target_.ravel())
fig.tight_layout()
fig # We call the fig so it will get picked up by streamlit magic.
# TODO: Visualize difference between train loss and test loss - something like tensorboard?
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"tensorflow.keras.layers.BatchNormalization",
"deep_raman.utils.generate_training_set",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.MaxPooling1D",
"tensorflow.keras.layers.Conv1DTranspose",
"streamlit.write",
"tensorflow.keras.optimizers.Nadam",
"tensorflow.keras.models.Model",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace"
] |
[((461, 489), 'numpy.linspace', 'np.linspace', (['(-200)', '(200)', '(1024)'], {}), '(-200, 200, 1024)\n', (472, 489), True, 'import numpy as np\n'), ((502, 554), 'deep_raman.utils.generate_training_set', 'utils.generate_training_set', (['x'], {'num_base_examples': '(64)'}), '(x, num_base_examples=64)\n', (529, 554), False, 'from deep_raman import utils\n'), ((595, 650), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(X, y, test_size=0.33, random_state=42)\n', (611, 650), False, 'from sklearn.model_selection import train_test_split\n'), ((889, 920), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32 * 32, 1)'}), '(shape=(32 * 32, 1))\n', (900, 920), False, 'from tensorflow import keras\n'), ((1238, 1300), 'tensorflow.keras.models.Model', 'models.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""cnn_model"""'}), "(inputs=inputs, outputs=outputs, name='cnn_model')\n", (1250, 1300), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1661, 1699), 'streamlit.write', 'st.write', (['"""Test loss:"""', 'test_scores[0]'], {}), "('Test loss:', test_scores[0])\n", (1669, 1699), True, 'import streamlit as st\n'), ((1704, 1746), 'streamlit.write', 'st.write', (['"""Test accuracy:"""', 'test_scores[1]'], {}), "('Test accuracy:', test_scores[1])\n", (1712, 1746), True, 'import streamlit as st\n'), ((2491, 2518), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (2501, 2518), True, 'import matplotlib.pyplot as plt\n'), ((2524, 2540), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (2535, 2540), True, 'import matplotlib.pyplot as plt\n'), ((2545, 2570), 'matplotlib.pyplot.title', 'plt.title', (['"""Sample Input"""'], {}), "('Sample Input')\n", (2554, 2570), True, 'import matplotlib.pyplot as plt\n'), ((2611, 2627), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (2622, 2627), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2662), 'matplotlib.pyplot.title', 'plt.title', (['"""Sample Prediction"""'], {}), "('Sample Prediction')\n", (2641, 2662), True, 'import matplotlib.pyplot as plt\n'), ((2709, 2725), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (2720, 2725), True, 'import matplotlib.pyplot as plt\n'), ((2730, 2756), 'matplotlib.pyplot.title', 'plt.title', (['"""Sample Target"""'], {}), "('Sample Target')\n", (2739, 2756), True, 'import matplotlib.pyplot as plt\n'), ((929, 963), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (954, 963), False, 'from tensorflow.keras import datasets, layers, models\n'), ((981, 1028), 'tensorflow.keras.layers.Conv1D', 'layers.Conv1D', (['(16)', '(16)'], {'input_shape': '(32 * 32, 1)'}), '(16, 16, input_shape=(32 * 32, 1))\n', (994, 1028), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1045, 1067), 'tensorflow.keras.layers.MaxPooling1D', 'layers.MaxPooling1D', (['(2)'], {}), '(2)\n', (1064, 1067), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1079, 1104), 'tensorflow.keras.layers.Conv1D', 'layers.Conv1D', (['(16)', '(16)', '(16)'], {}), '(16, 16, 16)\n', (1092, 1104), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1116, 1138), 'tensorflow.keras.layers.MaxPooling1D', 'layers.MaxPooling1D', (['(3)'], {}), '(3)\n', (1135, 1138), False, 'from tensorflow.keras import datasets, layers, models\n'), 
((1151, 1172), 'tensorflow.keras.layers.Conv1D', 'layers.Conv1D', (['(64)', '(10)'], {}), '(64, 10)\n', (1164, 1172), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1190, 1221), 'tensorflow.keras.layers.Conv1DTranspose', 'layers.Conv1DTranspose', (['(1)', '(1024)'], {}), '(1, 1024)\n', (1212, 1221), False, 'from tensorflow.keras import datasets, layers, models\n'), ((680, 697), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (688, 697), True, 'import numpy as np\n'), ((732, 748), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (740, 748), True, 'import numpy as np\n'), ((785, 802), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (793, 802), True, 'import numpy as np\n'), ((837, 853), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (845, 853), True, 'import numpy as np\n'), ((1367, 1410), 'tensorflow.keras.optimizers.Nadam', 'keras.optimizers.Nadam', ([], {'learning_rate': '(0.003)'}), '(learning_rate=0.003)\n', (1389, 1410), False, 'from tensorflow import keras\n')]
|
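metrics.psnr_loss is project code not shown here; a generic PSNR-based loss of the kind it presumably implements is sketched below (an assumption, not the project's actual definition).

import tensorflow as tf

def psnr_loss(y_true, y_pred, max_val=1.0):
    # Higher PSNR means a better reconstruction, so negate it to form a loss.
    return -tf.image.psnr(y_true, y_pred, max_val=max_val)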
from typing import Dict, Tuple, TYPE_CHECKING
import numpy as np
from ..continuous_sensor import ContinuousSensor
if TYPE_CHECKING:
from task import StackingTask
# TODO: This should be a DiscreteSensor
class CurrentPartReleasedSensor(ContinuousSensor["StackingEnv"]):
def __init__(self, part_release_distance: float = 0.05, **kwargs):
super().__init__(normalize=False, clip=False, **kwargs)
self.__part_release_distance = part_release_distance
self.__observation_name = "current_part_released"
def _get_limits(self) -> Dict[str, Tuple[np.ndarray, np.ndarray]]:
return {self.__observation_name: (np.zeros(1), np.ones(1))}
def _reset_unnormalized(self) -> Dict[str, np.ndarray]:
return self._observe_unnormalized()
def _observe_unnormalized(self) -> Dict[str, np.ndarray]:
part = self.task.current_part
robot = self.task.robot
# TODO: Fix typing
        part_released = (
            max(robot.finger_distances_to_object(part.scene_object))
            > self.__part_release_distance
        )
return {self.__observation_name: np.array([float(part_released)])}
|
[
"numpy.zeros",
"numpy.ones"
] |
[((645, 656), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (653, 656), True, 'import numpy as np\n'), ((658, 668), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (665, 668), True, 'import numpy as np\n')]
|
import numpy as np
import tensorflow as tf
from collections import Counter
from utils.process_utils import calculate_iou, non_maximum_suppression
def evaluate(y_pred, y_true, num_classes, score_thresh=0.5, iou_thresh=0.5):
num_images = y_true[0].shape[0]
true_labels_dict = {i:0 for i in range(num_classes)} # {class: count}
pred_labels_dict = {i:0 for i in range(num_classes)}
true_positive_dict = {i:0 for i in range(num_classes)}
for i in range(num_images):
true_labels_list, true_boxes_list = [], []
for j in range(3): # three feature maps
true_probs_temp = y_true[j][i][...,5: ]
true_boxes_temp = y_true[j][i][...,0:4]
object_mask = true_probs_temp.sum(axis=-1) > 0
true_probs_temp = true_probs_temp[object_mask]
true_boxes_temp = true_boxes_temp[object_mask]
true_labels_list += np.argmax(true_probs_temp, axis=-1).tolist()
true_boxes_list += true_boxes_temp.tolist()
        if len(true_labels_list) != 0:
            for cls, count in Counter(true_labels_list).items():
                true_labels_dict[cls] += count
pred_boxes = y_pred[0][i:i+1]
pred_confs = y_pred[1][i:i+1]
pred_probs = y_pred[2][i:i+1]
pred_boxes, pred_confs, pred_labels = non_maximum_suppression(pred_boxes, pred_confs, pred_probs)
true_boxes = np.array(true_boxes_list)
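        # Ground-truth boxes are stored as (center_x, center_y, width, height);
        # the lines below convert them to corner format (xmin, ymin, xmax, ymax)
        # so they can be compared with predictions via IoU.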
box_centers, box_sizes = true_boxes[:,0:2], true_boxes[:,2:4]
true_boxes[:,0:2] = box_centers - box_sizes / 2.
true_boxes[:,2:4] = true_boxes[:,0:2] + box_sizes
pred_labels_list = [] if pred_labels is None else pred_labels.tolist()
if pred_labels_list == []: continue
detected = []
for k in range(len(true_labels_list)):
# compute iou between predicted box and ground_truth boxes
iou = calculate_iou(true_boxes[k:k+1], pred_boxes)
m = np.argmax(iou) # Extract index of largest overlap
if iou[m] >= iou_thresh and true_labels_list[k] == pred_labels_list[m] and m not in detected:
pred_labels_dict[true_labels_list[k]] += 1
detected.append(m)
pred_labels_list = [pred_labels_list[m] for m in detected]
for c in range(num_classes):
t = true_labels_list.count(c)
p = pred_labels_list.count(c)
true_positive_dict[c] += p if t >= p else t
recall = sum(true_positive_dict.values()) / (sum(true_labels_dict.values()) + 1e-6)
precision = sum(true_positive_dict.values()) / (sum(pred_labels_dict.values()) + 1e-6)
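    # Note: "average precision" below is approximated per class as
    # TP / (#ground-truth boxes), i.e. class recall at the chosen score/IoU
    # thresholds, and the mean is taken only over classes with a non-zero value.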
avg_prec = [true_positive_dict[i] / (true_labels_dict[i] + 1e-6) for i in range(num_classes)]
mAP = sum(avg_prec) / (sum([avg_prec[i] != 0 for i in range(num_classes)]) + 1e-6)
return recall, precision, mAP
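# Hedged usage sketch (not part of the original file; exact shapes are assumptions):
# y_pred is the (boxes, confidences, class_probs) triple produced by the detector head
# and y_true the list of three per-scale label tensors, e.g.
#   recall, precision, mAP = evaluate(y_pred, y_true, num_classes=80)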
|
[
"utils.process_utils.non_maximum_suppression",
"numpy.argmax",
"numpy.array",
"utils.process_utils.calculate_iou",
"collections.Counter"
] |
[((1303, 1362), 'utils.process_utils.non_maximum_suppression', 'non_maximum_suppression', (['pred_boxes', 'pred_confs', 'pred_probs'], {}), '(pred_boxes, pred_confs, pred_probs)\n', (1326, 1362), False, 'from utils.process_utils import calculate_iou, non_maximum_suppression\n'), ((1385, 1410), 'numpy.array', 'np.array', (['true_boxes_list'], {}), '(true_boxes_list)\n', (1393, 1410), True, 'import numpy as np\n'), ((1880, 1926), 'utils.process_utils.calculate_iou', 'calculate_iou', (['true_boxes[k:k + 1]', 'pred_boxes'], {}), '(true_boxes[k:k + 1], pred_boxes)\n', (1893, 1926), False, 'from utils.process_utils import calculate_iou, non_maximum_suppression\n'), ((1941, 1955), 'numpy.argmax', 'np.argmax', (['iou'], {}), '(iou)\n', (1950, 1955), True, 'import numpy as np\n'), ((903, 938), 'numpy.argmax', 'np.argmax', (['true_probs_temp'], {'axis': '(-1)'}), '(true_probs_temp, axis=-1)\n', (912, 938), True, 'import numpy as np\n'), ((1075, 1100), 'collections.Counter', 'Counter', (['true_labels_list'], {}), '(true_labels_list)\n', (1082, 1100), False, 'from collections import Counter\n')]
|
import numpy as np
from mltk.core.preprocess.audio.audio_feature_generator import AudioFeatureGenerator
from mltk.core.preprocess.audio.audio_feature_generator.tests.data import (
DEFAULT_SETTINGS,
YES_INPUT_AUDIO,
YES_OUTPUT_FEATURES_INT8,
NO_INPUT_AUDIO,
NO_OUTPUT_FEATURES_INT8
)
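# Each test pushes a known int16 waveform through the AudioFeatureGenerator and
# compares the resulting int8 spectrogram against pre-computed golden features.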
def test_yes_samples():
settings = DEFAULT_SETTINGS
mfe = AudioFeatureGenerator(settings)
sample = np.asarray(YES_INPUT_AUDIO, dtype=np.int16)
calculated = mfe.process_sample(sample, dtype=np.int8)
expected = np.reshape(np.array(YES_OUTPUT_FEATURES_INT8, dtype=np.int8), settings.spectrogram_shape)
assert np.allclose(calculated, expected)
def test_no_samples():
settings = DEFAULT_SETTINGS
mfe = AudioFeatureGenerator(settings)
sample = np.asarray(NO_INPUT_AUDIO, dtype=np.int16)
calculated = mfe.process_sample(sample, dtype=np.int8)
expected = np.reshape(np.array(NO_OUTPUT_FEATURES_INT8, dtype=np.int8), settings.spectrogram_shape)
assert np.allclose(calculated, expected)
|
[
"numpy.asarray",
"mltk.core.preprocess.audio.audio_feature_generator.AudioFeatureGenerator",
"numpy.array",
"numpy.allclose"
] |
[((374, 405), 'mltk.core.preprocess.audio.audio_feature_generator.AudioFeatureGenerator', 'AudioFeatureGenerator', (['settings'], {}), '(settings)\n', (395, 405), False, 'from mltk.core.preprocess.audio.audio_feature_generator import AudioFeatureGenerator\n'), ((419, 462), 'numpy.asarray', 'np.asarray', (['YES_INPUT_AUDIO'], {'dtype': 'np.int16'}), '(YES_INPUT_AUDIO, dtype=np.int16)\n', (429, 462), True, 'import numpy as np\n'), ((644, 677), 'numpy.allclose', 'np.allclose', (['calculated', 'expected'], {}), '(calculated, expected)\n', (655, 677), True, 'import numpy as np\n'), ((745, 776), 'mltk.core.preprocess.audio.audio_feature_generator.AudioFeatureGenerator', 'AudioFeatureGenerator', (['settings'], {}), '(settings)\n', (766, 776), False, 'from mltk.core.preprocess.audio.audio_feature_generator import AudioFeatureGenerator\n'), ((790, 832), 'numpy.asarray', 'np.asarray', (['NO_INPUT_AUDIO'], {'dtype': 'np.int16'}), '(NO_INPUT_AUDIO, dtype=np.int16)\n', (800, 832), True, 'import numpy as np\n'), ((1013, 1046), 'numpy.allclose', 'np.allclose', (['calculated', 'expected'], {}), '(calculated, expected)\n', (1024, 1046), True, 'import numpy as np\n'), ((553, 602), 'numpy.array', 'np.array', (['YES_OUTPUT_FEATURES_INT8'], {'dtype': 'np.int8'}), '(YES_OUTPUT_FEATURES_INT8, dtype=np.int8)\n', (561, 602), True, 'import numpy as np\n'), ((923, 971), 'numpy.array', 'np.array', (['NO_OUTPUT_FEATURES_INT8'], {'dtype': 'np.int8'}), '(NO_OUTPUT_FEATURES_INT8, dtype=np.int8)\n', (931, 971), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
"""
Implementation of building LSTM model
"""
# 3rd party imports
import pandas as pd
import numpy as np
import random as rn
import datetime
# model
import tensorflow as tf
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense
from keras.losses import MeanSquaredLogarithmicError
from sklearn.model_selection import train_test_split
# local imports
from db.model import db, Stat
class Lstm:
def extract(self) -> pd.DataFrame:
"""
extract data from database and output into dataframe
"""
# grab all record from stat table
df = pd.read_sql_table("stat", "sqlite:///db/site.db")
# return
return df
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""
        transformations applied to the dataframe:
        - calculate the day-over-day difference of each metric within each country
        - one-hot encode countries
"""
# get diff
df["confirmed_diff"] = np.where(
df.country == df.country.shift(), df.confirmed - df.confirmed.shift(), 0
)
df["recovered_diff"] = np.where(
df.country == df.country.shift(), df.recovered - df.recovered.shift(), 0
)
df["deaths_diff"] = np.where(
df.country == df.country.shift(), df.deaths - df.deaths.shift(), 0
)
# encode country with pd.dummies
dummies = pd.get_dummies(df.country)
dummies["id"] = df.id
df = pd.merge(df, dummies, on=["id"])
# return
return df
def load(
self,
df: pd.DataFrame,
metric="confirmed",
win_size=7,
epochs=5,
batch_size=32,
save=False,
) -> Sequential:
"""
        build and fit an LSTM Sequential model on sliding-window samples from the dataframe
"""
# variables
x, y = [], []
countries = db.session.query(Stat.country).distinct().all()
# countries come in the form of [('Afghanistan',), ('Albania',), ... ]
for (country,) in countries:
country_df = df[df.country == country]
series = list(country_df[metric])
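            # Sliding-window framing: each run of `win_size` consecutive values
            # forms one input sample x, and the value immediately after the window
            # is its target y; windows whose target is zero are skipped below.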
for i in range(0, len(series) - win_size):
end = i + win_size
series_x, series_y = series[i:end], series[end]
if series_y:
x.append(series_x)
y.append(series_y)
X, y = np.array(x), np.array(y)
# TTS
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=42
)
# preprocess
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
X_val = X_val.reshape(X_val.shape[0], 1, X_val.shape[1])
# build model
model = Sequential()
model.add(
LSTM(
100,
activation="relu",
input_shape=(1, win_size),
return_sequences=True,
)
)
model.add(LSTM(150, activation="relu"))
model.add(Dense(1, activation="relu"))
# Compile Model
model.compile(optimizer="adam", loss=MeanSquaredLogarithmicError())
# Fit Model
model.fit(
X_train,
y_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(X_val, y_val),
verbose=2,
shuffle=True,
)
# Export Model
        if save:
            model.save("lstm_model.h5")
        return model
def main():
"""
run code
"""
# Set random state for Keras
np.random.seed(42)
rn.seed(12345)
# build model and save it
model = Lstm()
df = model.extract()
df = model.transform(df)
lstm = model.load(df, save=True)
if __name__ == "__main__":
main()
|
[
"numpy.random.seed",
"pandas.get_dummies",
"pandas.merge",
"sklearn.model_selection.train_test_split",
"keras.layers.LSTM",
"keras.layers.Dense",
"random.seed",
"numpy.array",
"keras.losses.MeanSquaredLogarithmicError",
"db.model.db.session.query",
"pandas.read_sql_table",
"keras.models.Sequential"
] |
[((3581, 3599), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3595, 3599), True, 'import numpy as np\n'), ((3604, 3618), 'random.seed', 'rn.seed', (['(12345)'], {}), '(12345)\n', (3611, 3618), True, 'import random as rn\n'), ((652, 701), 'pandas.read_sql_table', 'pd.read_sql_table', (['"""stat"""', '"""sqlite:///db/site.db"""'], {}), "('stat', 'sqlite:///db/site.db')\n", (669, 701), True, 'import pandas as pd\n'), ((1421, 1447), 'pandas.get_dummies', 'pd.get_dummies', (['df.country'], {}), '(df.country)\n', (1435, 1447), True, 'import pandas as pd\n'), ((1491, 1523), 'pandas.merge', 'pd.merge', (['df', 'dummies'], {'on': "['id']"}), "(df, dummies, on=['id'])\n", (1499, 1523), True, 'import pandas as pd\n'), ((2489, 2543), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (2505, 2543), False, 'from sklearn.model_selection import train_test_split\n'), ((2765, 2777), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2775, 2777), False, 'from keras.models import Sequential\n'), ((2408, 2419), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2416, 2419), True, 'import numpy as np\n'), ((2421, 2432), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2429, 2432), True, 'import numpy as np\n'), ((2809, 2887), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'activation': '"""relu"""', 'input_shape': '(1, win_size)', 'return_sequences': '(True)'}), "(100, activation='relu', input_shape=(1, win_size), return_sequences=True)\n", (2813, 2887), False, 'from keras.layers import LSTM\n'), ((2995, 3023), 'keras.layers.LSTM', 'LSTM', (['(150)'], {'activation': '"""relu"""'}), "(150, activation='relu')\n", (2999, 3023), False, 'from keras.layers import LSTM\n'), ((3043, 3070), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""relu"""'}), "(1, activation='relu')\n", (3048, 3070), False, 'from keras.layers import Dense\n'), ((3142, 3171), 'keras.losses.MeanSquaredLogarithmicError', 'MeanSquaredLogarithmicError', ([], {}), '()\n', (3169, 3171), False, 'from keras.losses import MeanSquaredLogarithmicError\n'), ((1870, 1900), 'db.model.db.session.query', 'db.session.query', (['Stat.country'], {}), '(Stat.country)\n', (1886, 1900), False, 'from db.model import db, Stat\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 13:46:08 2019
@author: <NAME>
"""
from binomialTreePricer import asianOptionBinomialTree
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
uly_names = ['Crude Oil WTI', 'Ethanol', 'Gold', 'Silver', 'Natural Gas']
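# NOTE: df_uly (underlying price history) and df_opt (option quotes with 'Start Date',
# 'Maturity Date', 'Underlying', 'Strike' and 'Call' columns) are assumed to be loaded
# before this point; they are not defined in this file.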
uly_init = df_uly[uly_names].tail(1)
df_opt['bdays'] = 1 + np.busday_count(df_opt['Start Date'].values.astype('datetime64[D]'), df_opt['Maturity Date'].values.astype('datetime64[D]'))
df_uly_vol = df_uly[uly_names].std(skipna=True)
oneOverRho = 3
df_vols = pd.DataFrame([[0.3, 0.01, 0.4, 0.1, 0.001]], columns = uly_names)
df_units = pd.DataFrame([[0.01, 0.0001, 1, 0.001, 0.01]], columns = uly_names)
bdays_year = 252
# =============================================================================
# Define risk free rate, reference to US treasury yield curve as of 20190322
# https://www.treasury.gov/resource-center/data-chart-center/interest-rates/pages/TextView.aspx?data=yieldYear&year=2019
# 1m, 2m, 3m, 6m, 1y, 2y, 3y, 5y, 7y, 10y, 20y, 30y
# =============================================================================
# Define risk free rate according to US
yieldCurveDict = {
'2019-04-22': 2.49,
'2019-05-22': 2.48,
'2019-06-22': 2.46,
'2019-09-22': 2.48,
'2020-03-22': 2.45,
'2021-03-22': 2.31,
'2022-03-22': 2.24,
'2024-03-22': 2.24,
'2026-03-22': 2.34,
'2029-03-22': 2.44,
'2039-03-22': 2.69,
'2049-03-22': 2.88
}
# Derive forward rates from US treasury yield curve
curvePoints = ['2019-03-22'] + list(yieldCurveDict.keys())
forwardCurveDict = {}
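# Forward rate between consecutive curve points t1 < t2, with d1, d2 business days
# from the curve date: f(t1, t2) = (y2 * d2 - y1 * d1) / (d2 - d1).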
for i in range(len(yieldCurveDict)):
datePoint1 = curvePoints[i]
datePoint2 = curvePoints[i + 1]
if (datePoint1 == curvePoints[0]):
forwardCurveDict[datePoint2] = yieldCurveDict[datePoint2]
else:
yieldAtDate1 = yieldCurveDict[datePoint1]
yieldAtDate2 = yieldCurveDict[datePoint2]
busDateDiff1 = np.busday_count(curvePoints[0], datePoint1)
busDateDiff2 = np.busday_count(curvePoints[0], datePoint2)
forwardCurveDict[datePoint2] = float((yieldAtDate2 * busDateDiff2 - yieldAtDate1 * busDateDiff1) / (busDateDiff2 - busDateDiff1))
# Function to get risk free rate given a date (datetime.date object)
def getRiskFreeRate(inputDate):
input_date = inputDate.date()
for i in range(len(forwardCurveDict)):
datePoint1 = datetime.strptime(curvePoints[i],'%Y-%m-%d').date()
datePoint2 = datetime.strptime(curvePoints[i + 1],'%Y-%m-%d').date()
if (input_date >= datePoint1 and input_date < datePoint2):
return forwardCurveDict[curvePoints[i + 1]]
return 0
for row in df_opt.index:
# Retrieve the name of the underlying
tmp_uly = df_opt['Underlying'][row][:-8]
tmp_strike = df_opt['Strike'][row]
tmp_maturity = df_opt['Maturity Date'][row]
tmp_steps = df_opt['bdays'][row]
if tmp_steps > bdays_year:
tmp_steps = bdays_year
tmp_init = uly_init[tmp_uly][0]
tmp_time_period = 1 / bdays_year
tmp_vol = df_uly_vol[tmp_uly]
tmp_rates = [getRiskFreeRate(tmp_maturity - timedelta(d)) for d in range(tmp_steps)]
tmp_call = df_opt['Call'][row]
tmp_unit = df_units[tmp_uly][0]
pricer = asianOptionBinomialTree(tmp_steps, tmp_vol, tmp_time_period, oneOverRho, tmp_rates)
sim = pricer.getOptionPrice(tmp_init, tmp_strike * tmp_unit)
    print('underlying: %s; bdays: %d, strike: %6.3f, init: %6.3f --> simulated: %6.3f; actual call: %6.3f' \
          % (tmp_uly, tmp_steps, tmp_strike * tmp_unit, tmp_init, sim, tmp_call))
|
[
"pandas.DataFrame",
"binomialTreePricer.asianOptionBinomialTree",
"datetime.datetime.strptime",
"datetime.timedelta",
"numpy.busday_count"
] |
[((555, 618), 'pandas.DataFrame', 'pd.DataFrame', (['[[0.3, 0.01, 0.4, 0.1, 0.001]]'], {'columns': 'uly_names'}), '([[0.3, 0.01, 0.4, 0.1, 0.001]], columns=uly_names)\n', (567, 618), True, 'import pandas as pd\n'), ((632, 697), 'pandas.DataFrame', 'pd.DataFrame', (['[[0.01, 0.0001, 1, 0.001, 0.01]]'], {'columns': 'uly_names'}), '([[0.01, 0.0001, 1, 0.001, 0.01]], columns=uly_names)\n', (644, 697), True, 'import pandas as pd\n'), ((3360, 3447), 'binomialTreePricer.asianOptionBinomialTree', 'asianOptionBinomialTree', (['tmp_steps', 'tmp_vol', 'tmp_time_period', 'oneOverRho', 'tmp_rates'], {}), '(tmp_steps, tmp_vol, tmp_time_period, oneOverRho,\n tmp_rates)\n', (3383, 3447), False, 'from binomialTreePricer import asianOptionBinomialTree\n'), ((2014, 2057), 'numpy.busday_count', 'np.busday_count', (['curvePoints[0]', 'datePoint1'], {}), '(curvePoints[0], datePoint1)\n', (2029, 2057), True, 'import numpy as np\n'), ((2081, 2124), 'numpy.busday_count', 'np.busday_count', (['curvePoints[0]', 'datePoint2'], {}), '(curvePoints[0], datePoint2)\n', (2096, 2124), True, 'import numpy as np\n'), ((2463, 2508), 'datetime.datetime.strptime', 'datetime.strptime', (['curvePoints[i]', '"""%Y-%m-%d"""'], {}), "(curvePoints[i], '%Y-%m-%d')\n", (2480, 2508), False, 'from datetime import datetime, timedelta\n'), ((2536, 2585), 'datetime.datetime.strptime', 'datetime.strptime', (['curvePoints[i + 1]', '"""%Y-%m-%d"""'], {}), "(curvePoints[i + 1], '%Y-%m-%d')\n", (2553, 2585), False, 'from datetime import datetime, timedelta\n'), ((3225, 3237), 'datetime.timedelta', 'timedelta', (['d'], {}), '(d)\n', (3234, 3237), False, 'from datetime import datetime, timedelta\n')]
|
import numpy as np
import pytest
import xarray as xr
from pyomeca import Analogs, Markers, Angles, Rototrans
from ._constants import ANALOGS_DATA, MARKERS_DATA, EXPECTED_VALUES
from .utils import is_expected_array
def test_analogs_creation():
dims = ("channel", "time")
array = Analogs()
np.testing.assert_array_equal(x=array, y=xr.DataArray())
assert array.dims == dims
array = Analogs(ANALOGS_DATA.values)
is_expected_array(array, **EXPECTED_VALUES[56])
size = 10, 100
array = Analogs.from_random_data(size=size)
assert array.shape == size
assert array.dims == dims
with pytest.raises(ValueError):
Analogs(MARKERS_DATA)
def test_markers_creation():
dims = ("axis", "channel", "time")
array = Markers()
np.testing.assert_array_equal(x=array, y=xr.DataArray())
assert array.dims == dims
array = Markers(MARKERS_DATA.values)
is_expected_array(array, **EXPECTED_VALUES[57])
size = 3, 10, 100
array = Markers.from_random_data(size=size)
assert array.shape == (4, size[1], size[2])
assert array.dims == dims
with pytest.raises(ValueError):
Markers(ANALOGS_DATA)
def test_angles_creation():
dims = ("axis", "channel", "time")
array = Angles()
np.testing.assert_array_equal(x=array, y=xr.DataArray())
assert array.dims == dims
array = Angles(MARKERS_DATA.values, time=MARKERS_DATA.time)
is_expected_array(array, **EXPECTED_VALUES[57])
size = 10, 10, 100
array = Angles.from_random_data(size=size)
assert array.shape == size
assert array.dims == dims
with pytest.raises(ValueError):
Angles(ANALOGS_DATA)
def test_rototrans_creation():
dims = ("row", "col", "time")
array = Rototrans()
np.testing.assert_array_equal(x=array, y=xr.DataArray(np.eye(4)[..., np.newaxis]))
assert array.dims == dims
array = Rototrans(MARKERS_DATA.values, time=MARKERS_DATA.time)
is_expected_array(array, **EXPECTED_VALUES[67])
size = 4, 4, 100
array = Rototrans.from_random_data(size=size)
assert array.shape == size
assert array.dims == dims
with pytest.raises(ValueError):
Angles(ANALOGS_DATA)
|
[
"pyomeca.Markers.from_random_data",
"pyomeca.Markers",
"pyomeca.Rototrans",
"pyomeca.Angles",
"pyomeca.Rototrans.from_random_data",
"pytest.raises",
"pyomeca.Angles.from_random_data",
"xarray.DataArray",
"numpy.eye",
"pyomeca.Analogs.from_random_data",
"pyomeca.Analogs"
] |
[((289, 298), 'pyomeca.Analogs', 'Analogs', ([], {}), '()\n', (296, 298), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((403, 431), 'pyomeca.Analogs', 'Analogs', (['ANALOGS_DATA.values'], {}), '(ANALOGS_DATA.values)\n', (410, 431), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((516, 551), 'pyomeca.Analogs.from_random_data', 'Analogs.from_random_data', ([], {'size': 'size'}), '(size=size)\n', (540, 551), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((762, 771), 'pyomeca.Markers', 'Markers', ([], {}), '()\n', (769, 771), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((876, 904), 'pyomeca.Markers', 'Markers', (['MARKERS_DATA.values'], {}), '(MARKERS_DATA.values)\n', (883, 904), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((992, 1027), 'pyomeca.Markers.from_random_data', 'Markers.from_random_data', ([], {'size': 'size'}), '(size=size)\n', (1016, 1027), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((1254, 1262), 'pyomeca.Angles', 'Angles', ([], {}), '()\n', (1260, 1262), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((1367, 1418), 'pyomeca.Angles', 'Angles', (['MARKERS_DATA.values'], {'time': 'MARKERS_DATA.time'}), '(MARKERS_DATA.values, time=MARKERS_DATA.time)\n', (1373, 1418), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((1507, 1541), 'pyomeca.Angles.from_random_data', 'Angles.from_random_data', ([], {'size': 'size'}), '(size=size)\n', (1530, 1541), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((1748, 1759), 'pyomeca.Rototrans', 'Rototrans', ([], {}), '()\n', (1757, 1759), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((1890, 1944), 'pyomeca.Rototrans', 'Rototrans', (['MARKERS_DATA.values'], {'time': 'MARKERS_DATA.time'}), '(MARKERS_DATA.values, time=MARKERS_DATA.time)\n', (1899, 1944), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((2031, 2068), 'pyomeca.Rototrans.from_random_data', 'Rototrans.from_random_data', ([], {'size': 'size'}), '(size=size)\n', (2057, 2068), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((623, 648), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (636, 648), False, 'import pytest\n'), ((658, 679), 'pyomeca.Analogs', 'Analogs', (['MARKERS_DATA'], {}), '(MARKERS_DATA)\n', (665, 679), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((1116, 1141), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1129, 1141), False, 'import pytest\n'), ((1151, 1172), 'pyomeca.Markers', 'Markers', (['ANALOGS_DATA'], {}), '(ANALOGS_DATA)\n', (1158, 1172), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((1613, 1638), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1626, 1638), False, 'import pytest\n'), ((1648, 1668), 'pyomeca.Angles', 'Angles', (['ANALOGS_DATA'], {}), '(ANALOGS_DATA)\n', (1654, 1668), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((2140, 2165), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2153, 2165), False, 'import pytest\n'), ((2175, 2195), 'pyomeca.Angles', 'Angles', (['ANALOGS_DATA'], {}), '(ANALOGS_DATA)\n', (2181, 2195), False, 'from pyomeca import Analogs, Markers, Angles, Rototrans\n'), ((344, 358), 'xarray.DataArray', 'xr.DataArray', ([], {}), '()\n', (356, 358), True, 'import xarray as xr\n'), ((817, 831), 'xarray.DataArray', 'xr.DataArray', ([], {}), '()\n', (829, 831), True, 'import xarray as xr\n'), ((1308, 1322), 'xarray.DataArray', 'xr.DataArray', ([], {}), '()\n', (1320, 1322), True, 'import xarray as xr\n'), ((1818, 1827), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1824, 1827), True, 'import numpy as np\n')]
|
import cv2 as cv
import numpy as np
class FaceMaskAppEngine:
"""
Perform detector which detects faces from input video,
and classifier to classify croped faces to face or mask class
:param config: Is a Config instance which provides necessary parameters.
"""
def __init__(self, config):
self.config = config
self.detector = None
self.classifier_model = None
self.running_video = False
self.device = self.config.DEVICE
if self.device == "x86":
from libs.detectors.x86.detector import Detector
from libs.classifiers.x86.classifier import Classifier
self.detector = Detector(self.config)
self.classifier_model = Classifier(self.config)
elif self.device == "EdgeTPU":
from libs.detectors.edgetpu.detector import Detector
from libs.classifiers.edgetpu.classifier import Classifier
self.detector = Detector(self.config)
self.classifier_model = Classifier(self.config)
else:
raise ValueError('Not supported device named: ', self.device)
self.image_size = (self.config.DETECTOR_INPUT_SIZE[0], self.config.DETECTOR_INPUT_SIZE[1], 3)
self.classifier_img_size = (self.config.CLASSIFIER_INPUT_SIZE, self.config.CLASSIFIER_INPUT_SIZE, 3)
def set_ui(self, ui):
self.ui = ui
def __process(self, cv_image):
# Resize input image to resolution
self.resolution = self.config.APP_VIDEO_RESOLUTION
cv_image = cv.resize(cv_image, tuple(self.resolution))
resized_image = cv.resize(cv_image, tuple(self.image_size[:2]))
rgb_resized_image = cv.cvtColor(resized_image, cv.COLOR_BGR2RGB)
objects_list = self.detector.inference(rgb_resized_image)
[w, h] = self.resolution
#objects_list = [{'id': '1-0', 'bbox': [.1, .2, .5, .5]}, {'id': '1-1', 'bbox': [.3, .1, .5, .5]}]
faces = []
for obj in objects_list:
if 'bbox' in obj.keys():
face_bbox = obj['bbox'] # [ymin, xmin, ymax, xmax]
xmin, xmax = np.multiply([face_bbox[1], face_bbox[3]], self.resolution[0])
ymin, ymax = np.multiply([face_bbox[0], face_bbox[2]], self.resolution[1])
croped_face = cv_image[int(ymin):int(ymin) + (int(ymax) - int(ymin)),
int(xmin):int(xmin) + (int(xmax) - int(xmin))]
# Resizing input image
croped_face = cv.resize(croped_face, tuple(self.classifier_img_size[:2]))
croped_face = cv.cvtColor(croped_face, cv.COLOR_BGR2RGB)
# Normalizing input image to [0.0-1.0]
croped_face = croped_face / 255.0
faces.append(croped_face)
faces = np.array(faces)
face_mask_results, scores = self.classifier_model.inference(faces)
        # TODO: this could be optimized via the dictionary returned from openpifpaf (returning a List instead of a dict)
[w, h] = self.resolution
idx = 0
for obj in objects_list:
if 'bbox' in obj.keys():
obj['face_label'] = face_mask_results[idx]
obj['score'] = scores[idx]
idx = idx + 1
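                # Reorder the bbox from the detector's [ymin, xmin, ymax, xmax]
                # layout to [xmin, ymin, xmax, ymax] for the UI.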
box = obj["bbox"]
x0 = box[1]
y0 = box[0]
x1 = box[3]
y1 = box[2]
obj["bbox"] = [x0, y0, x1, y1]
return cv_image, objects_list
def process_video(self, video_uri):
input_cap = cv.VideoCapture(video_uri)
if (input_cap.isOpened()):
print('opened video ', video_uri)
else:
print('failed to load video ', video_uri)
return
self.running_video = True
while input_cap.isOpened() and self.running_video:
_, cv_image = input_cap.read()
if np.shape(cv_image) != ():
cv_image, objects = self.__process(cv_image)
else:
continue
self.ui.update(cv_image, objects)
input_cap.release()
self.running_video = False
# def process_image(self, image_path):
# # Process and pass the image to ui modules
# cv_image = cv.imread(image_path)
# cv_image, objects, distancings = self.__process(cv_image)
# self.ui.update(cv_image, objects, distancings)
|
[
"numpy.multiply",
"libs.classifiers.edgetpu.classifier.Classifier",
"cv2.cvtColor",
"cv2.VideoCapture",
"numpy.shape",
"numpy.array",
"libs.detectors.edgetpu.detector.Detector"
] |
[((1691, 1735), 'cv2.cvtColor', 'cv.cvtColor', (['resized_image', 'cv.COLOR_BGR2RGB'], {}), '(resized_image, cv.COLOR_BGR2RGB)\n', (1702, 1735), True, 'import cv2 as cv\n'), ((2826, 2841), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (2834, 2841), True, 'import numpy as np\n'), ((3585, 3611), 'cv2.VideoCapture', 'cv.VideoCapture', (['video_uri'], {}), '(video_uri)\n', (3600, 3611), True, 'import cv2 as cv\n'), ((674, 695), 'libs.detectors.edgetpu.detector.Detector', 'Detector', (['self.config'], {}), '(self.config)\n', (682, 695), False, 'from libs.detectors.edgetpu.detector import Detector\n'), ((732, 755), 'libs.classifiers.edgetpu.classifier.Classifier', 'Classifier', (['self.config'], {}), '(self.config)\n', (742, 755), False, 'from libs.classifiers.edgetpu.classifier import Classifier\n'), ((959, 980), 'libs.detectors.edgetpu.detector.Detector', 'Detector', (['self.config'], {}), '(self.config)\n', (967, 980), False, 'from libs.detectors.edgetpu.detector import Detector\n'), ((1017, 1040), 'libs.classifiers.edgetpu.classifier.Classifier', 'Classifier', (['self.config'], {}), '(self.config)\n', (1027, 1040), False, 'from libs.classifiers.edgetpu.classifier import Classifier\n'), ((2128, 2189), 'numpy.multiply', 'np.multiply', (['[face_bbox[1], face_bbox[3]]', 'self.resolution[0]'], {}), '([face_bbox[1], face_bbox[3]], self.resolution[0])\n', (2139, 2189), True, 'import numpy as np\n'), ((2219, 2280), 'numpy.multiply', 'np.multiply', (['[face_bbox[0], face_bbox[2]]', 'self.resolution[1]'], {}), '([face_bbox[0], face_bbox[2]], self.resolution[1])\n', (2230, 2280), True, 'import numpy as np\n'), ((2611, 2653), 'cv2.cvtColor', 'cv.cvtColor', (['croped_face', 'cv.COLOR_BGR2RGB'], {}), '(croped_face, cv.COLOR_BGR2RGB)\n', (2622, 2653), True, 'import cv2 as cv\n'), ((3933, 3951), 'numpy.shape', 'np.shape', (['cv_image'], {}), '(cv_image)\n', (3941, 3951), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME>
#
from .util import meta, MetaArray, ltri_ix, p
from pyscf.lib.diis import DIIS
from pyscf.lib.linalg_helper import davidson_nosym1 as davidson
from pyscf.cc.uccsd_slow import _PhysicistsERIs as ERIS_uccsd_slow
from pyscf.cc.gccsd import _PhysicistsERIs as ERIS_gccsd
import numpy
import inspect
from numbers import Number
from collections import OrderedDict
from warnings import warn
import string
def res2amps(residuals, e_occ, e_vir, constant=None):
"""
Converts residuals into amplitudes update.
Args:
residuals (iterable): a list of residuals;
e_occ (array): occupied energies;
e_vir (array): virtual energies;
virtual spaces;
constant (float): a constant in the denominator;
Returns:
A list of updates to amplitudes.
"""
result = []
for res in residuals:
if isinstance(res, Number) and res == 0:
result.append(0)
elif isinstance(res, MetaArray):
diagonal = numpy.zeros_like(res)
ix = [numpy.newaxis] * len(diagonal.shape)
if "labels" not in res.metadata:
raise ValueError("Missing metadata: axes labels")
for j, s in enumerate(res.metadata["labels"]):
ix[j] = slice(None)
if s == 'o':
diagonal += e_occ[tuple(ix)]
elif s == 'v':
diagonal -= e_vir[tuple(ix)]
else:
raise ValueError("Unknown spec '{}' in {}".format(s, residuals.metadata["labels"]))
ix[j] = numpy.newaxis
if constant is not None:
result.append(res / (constant + diagonal))
else:
result.append(res / diagonal)
else:
raise ValueError("Unsupported type: {}".format(type(res)))
return result
def a2v(amplitudes):
"""List of amplitudes into a single array."""
result = []
for v in amplitudes:
result.append(numpy.reshape(v, -1))
return numpy.concatenate(result)
def v2a(vec, like):
"""Array into a list amplitudes."""
result = []
offset = 0
for v in like:
s = v.size
result.append(numpy.reshape(vec[offset:offset+s], v.shape))
if isinstance(v, MetaArray):
result[-1] = MetaArray(result[-1], **v.metadata)
offset += s
return result
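# Round-trip note: for any list of amplitudes ts, v2a(a2v(ts), like=ts) reproduces ts,
# with shapes and MetaArray metadata restored from `like`.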
def eris_hamiltonian(eris):
"""
Retrieves Hamiltonian matrix elements from pyscf ERIS.
Args:
eris (pyscf.cc.ccsd.ERIS): pyscf ERIS;
Returns:
A dict with Hamiltonian matrix elements.
"""
# TODO: decide on adding '**ov', 'vo**'
nocc = eris.oooo.shape[0]
if isinstance(eris, ERIS_uccsd_slow):
def chess(a):
ix = []
for d in a.shape:
ix.append(numpy.dstack((
numpy.arange(d // 2),
numpy.arange(d // 2, d),
)).reshape(-1))
return a[numpy.ix_(*ix)]
return {k: chess(v) for k, v in dict(
ov=eris.fock[:nocc, nocc:],
vo=eris.fock[nocc:, :nocc],
oo=eris.fock[:nocc, :nocc],
vv=eris.fock[nocc:, nocc:],
oooo=eris.oooo,
oovo=-numpy.transpose(eris.ooov, (0, 1, 3, 2)),
oovv=eris.oovv,
ovoo=eris.ovoo,
ovvo=-numpy.transpose(eris.ovov, (0, 1, 3, 2)),
ovvv=eris.ovvv,
vvoo=numpy.transpose(eris.oovv, (2, 3, 0, 1)),
vvvo=-numpy.transpose(eris.ovvv, (2, 3, 1, 0)),
vvvv=eris.vvvv,
).items()}
elif isinstance(eris, ERIS_gccsd):
return dict(
ov=eris.fock[:nocc, nocc:], #OK
vo=eris.fock[nocc:, :nocc], #OK
oo=eris.fock[:nocc, :nocc], #OK
vv=eris.fock[nocc:, nocc:], #OK
oooo=eris.oooo, #OK
oovo=-numpy.transpose(eris.ooov, (0, 1, 3, 2)), #OK
oovv=eris.oovv,
# ovoo=eris.ovoo,
ovoo=numpy.transpose(eris.ooov, (2, 3, 0, 1)), #OK
# ovvo=-numpy.transpose(eris.ovov, (0, 1, 3, 2)),
ovvo=eris.ovvo,
ovvv=eris.ovvv,
vvoo=numpy.transpose(eris.oovv, (2, 3, 0, 1)),
vvvo=-numpy.transpose(eris.ovvv, (2, 3, 1, 0)),
vvvv=eris.vvvv,
)
else:
raise ValueError("Unknown object: {}".format(eris))
def oneshot(equations, *args):
"""
A one-shot calculation.
Args:
equations (callable): coupled-cluster equations;
args (iterable): amplitudes and hamiltonian matrix elements as dicts;
Returns:
Results of the calculation.
"""
input_args = inspect.getargspec(equations).args
fw_args = {}
for i in args:
fw_args.update(i)
# Remove excess arguments from the Hamiltonian
fw_args = {k: v for k, v in fw_args.items() if k in input_args}
# Check missing arguments
missing = set(input_args) - set(fw_args.keys())
if len(missing) > 0:
raise ValueError("Following arguments are missing: {}".format(', '.join(missing)))
return equations(**fw_args)
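# Hedged usage sketch (names below are hypothetical, not from this module):
# `equations` may be any callable whose argument names match Hamiltonian blocks
# and amplitude names, e.g.
#   residuals = oneshot(cc_equations, eris_hamiltonian(eris), {"t1": t1, "t2": t2})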
def kernel_solve(hamiltonian, equations, initial_guess, tolerance=1e-9, debug=False, diis=True, equation_energy=None,
dim_spec=None, maxiter=50):
"""
Coupled-cluster solver (linear systems).
Args:
hamiltonian (dict): hamiltonian matrix elements or pyscf ERIS;
equations (callable): coupled-cluster equations;
initial_guess (OrderedDict): starting amplitudes;
tolerance (float): convergence criterion;
debug (bool): prints iterations if True;
diis (bool, DIIS): converger for iterations;
equation_energy (callable): energy equation;
dim_spec (iterable): if `initial_guess` is a dict, this parameter defines shapes of arrays in 'ov' notation
(list of strings);
maxiter (int): maximal number of iterations;
Returns:
Resulting coupled-cluster amplitudes and energy if specified.
"""
# Convert ERIS to hamiltonian dict if needed
if not isinstance(hamiltonian, dict):
hamiltonian = eris_hamiltonian(hamiltonian)
if isinstance(initial_guess, (tuple, list)):
initial_guess = OrderedDict((k, 0) for k in initial_guess)
if dim_spec is None:
raise ValueError("dim_spec is not specified")
elif isinstance(initial_guess, OrderedDict):
if dim_spec is None and any(not isinstance(i, MetaArray) for i in initial_guess.values()):
raise ValueError("One or more of initial_guess values is not a MetaArray. Either specify dim_spec or use "
"MetaArrays to provide dimensions' labels in the 'ov' notation")
dim_spec = tuple(i.metadata["labels"] for i in initial_guess.values())
else:
raise ValueError("OrderedDict expected for 'initial_guess'")
tol = None
e_occ = numpy.diag(hamiltonian["oo"])
e_vir = numpy.diag(hamiltonian["vv"])
if diis is True:
diis = DIIS()
    while (tol is None or tol > tolerance) and maxiter > 0:
output = oneshot(equations, hamiltonian, initial_guess)
if not isinstance(output, tuple):
output = (output,)
output = tuple(MetaArray(i, labels=j) if isinstance(i, numpy.ndarray) else i for i, j in zip(output, dim_spec))
dt = res2amps(output, e_occ, e_vir)
tol = max(numpy.linalg.norm(i) for i in dt)
for k, delta in zip(initial_guess, dt):
initial_guess[k] = initial_guess[k] + delta
if diis and not any(isinstance(i, Number) for i in initial_guess.values()):
v = a2v(initial_guess.values())
initial_guess = OrderedDict(zip(
initial_guess.keys(),
v2a(diis.update(v), initial_guess.values())
))
maxiter -= 1
if debug:
if equation_energy is not None:
e = oneshot(equation_energy, hamiltonian, initial_guess)
print("E = {:.10f} delta={:.3e}".format(e, tol))
else:
print("delta={:.3e}".format(tol))
if equation_energy is not None:
return initial_guess, oneshot(equation_energy, hamiltonian, initial_guess)
else:
return initial_guess
def koopmans_guess_ip(nocc, nvir, amplitudes, n, **kwargs):
"""
    Koopmans' guess for IP-EOM-CC amplitudes.
Args:
nocc (int): occupied space size;
nvir (int): virtual space size;
amplitudes (OrderedDict): an ordered dict with variable name-variable order pairs;
n (int): the root number;
kwargs: keyword arguments to `numpy.zeros`.
Returns:
An ordered dict with variable name-initial guess pairs.
"""
result = OrderedDict()
valid = False
for k, v in amplitudes.items():
result[k] = meta(numpy.zeros((nocc,) * v + (nvir,) * (v-1), **kwargs), labels='o' * v + 'v' * (v-1))
if v == 1:
if valid:
raise ValueError("Several first-order amplitudes encountered: {}".format(amplitudes))
else:
result[k][-n-1] = 1
valid = True
if not valid:
raise ValueError("No first-order amplitudes found: {}".format(amplitudes))
return result
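# Hedged example: for amplitudes OrderedDict(r1=1, r2=2) and n=0, the guess is a zero
# 'o'-labelled vector with its last occupied entry set to 1 for r1, and an all-zero
# 'oov'-shaped tensor for r2 ("r1"/"r2" are hypothetical names).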
def koopmans_guess_ea(nocc, nvir, amplitudes, n, **kwargs):
"""
    Koopmans' guess for EA-EOM-CC amplitudes.
Args:
nocc (int): occupied space size;
nvir (int): virtual space size;
amplitudes (OrderedDict): an ordered dict with variable name-variable order pairs;
n (int): the root number;
kwargs: keyword arguments to `numpy.zeros`.
Returns:
An ordered dict with variable name-initial guess pairs.
"""
result = OrderedDict()
valid = False
for k, v in amplitudes.items():
result[k] = meta(numpy.zeros((nocc,) * (v-1) + (nvir,) * v, **kwargs), labels='o' * (v-1) + 'v' * v)
if v == 1:
if valid:
raise ValueError("Several first-order amplitudes encountered: {}".format(amplitudes))
else:
result[k][n] = 1
valid = True
if not valid:
raise ValueError("No first-order amplitudes found: {}".format(amplitudes))
return result
def ltri_ix_amplitudes(a):
"""
    Collects lower-triangular indexes of antisymmetric amplitudes.
Args:
a (MetaArray): amplitudes to process;
Returns:
Lower-triangular indexes.
"""
if not isinstance(a, MetaArray) or "labels" not in a.metadata:
raise ValueError("Labels metadata is missing")
labels = a.metadata["labels"]
if len(labels) != len(a.shape):
raise ValueError("The length of 'labels' spec does not match the tensor rank")
dim_sizes = OrderedDict()
for label_i, label in enumerate(labels):
dim_size = a.shape[label_i]
if label in dim_sizes:
if dim_sizes[label] != dim_size:
raise ValueError("Dimensions of the same type '{}' do not match: {:d} vs {:d} in {}".format(
label,
dim_sizes[label],
dim_size,
repr(a.shape),
))
else:
dim_sizes[label] = dim_size
ix = OrderedDict()
ix_size = []
for label, dim_size in dim_sizes.items():
indexes = ltri_ix(dim_size, labels.count(label))
ix[label] = iter(indexes)
ix_size.append(len(indexes[0]))
# Label order
label_order = ''.join(ix.keys())
result = []
for label in labels:
x = next(ix[label])
pos = label_order.index(label)
bf = numpy.prod([1] + ix_size[:pos])
ft = numpy.prod([1] + ix_size[pos+1:])
x = numpy.tile(numpy.repeat(x, ft), bf)
result.append(x)
return tuple(result)
def a2v_sym(amplitudes, ixs):
"""
Symmetric amplitudes into vector.
Args:
amplitudes (iterable): amplitudes to join;
ixs (iterable): indexes of lower-triangle parts;
Returns:
A numpy array with amplitudes joined.
"""
return a2v(a[i] for a, i in zip(amplitudes, ixs))
def v2a_sym(a, labels, shapes, ixs):
"""
Decompresses the antisymmetric array.
Args:
a (numpy.ndarray): array to decompress;
labels (iterable): array's axes' labels;
shapes (iterable): arrays' shapes;
ixs (iterable): indexes of lower-triangle parts;
Returns:
Decompressed amplitude tensors.
"""
result = []
pos = 0
for lbls, shape, ix in zip(labels, shapes, ixs):
ampl = numpy.zeros(shape, dtype=a.dtype)
end = pos + len(ix[0])
ampl[ix] = a[pos:end]
pos = end
for l in set(lbls):
letters = iter(string.ascii_lowercase)
str_spec = ''.join(next(letters) if i == l else '.' for i in lbls)
ampl = p(str_spec, ampl)
result.append(ampl)
return result
def kernel_eig(hamiltonian, equations, amplitudes, tolerance=1e-9):
"""
Coupled-cluster solver (eigenvalue problem).
Args:
hamiltonian (dict): hamiltonian matrix elements or pyscf ERIS;
equations (callable): coupled-cluster equations;
amplitudes (iterable): starting amplitudes (a list of OrderedDicts);
tolerance (float): convergence criterion;
Returns:
Resulting coupled-cluster amplitudes and energy if specified.
"""
# Convert ERIS to hamiltonian dict if needed
if not isinstance(hamiltonian, dict):
hamiltonian = eris_hamiltonian(hamiltonian)
# Preconditioning
e_occ = numpy.diag(hamiltonian["oo"])
e_vir = numpy.diag(hamiltonian["vv"])
# Antisymmetry data
sample = amplitudes[0].values()
labels = list(i.metadata["labels"] for i in sample)
ixs = list(ltri_ix_amplitudes(i) for i in sample)
shapes = list(i.shape for i in sample)
def matvec(vec):
result = []
for i in vec:
a = v2a_sym(i, labels, shapes, ixs)
a = OrderedDict(zip(amplitudes[0].keys(), a))
r = oneshot(equations, hamiltonian, a)
result.append(a2v_sym(r, ixs))
return result
def precond(res, e0, x0):
a = v2a_sym(res, labels, shapes, ixs)
a = list(MetaArray(i, **j.metadata) for i, j in zip(a, amplitudes[0].values()))
a = res2amps(a, e_occ, e_vir, constant=e0)
return a2v_sym(a, ixs)
amplitudes_plain = tuple(a2v_sym(i.values(), ixs) for i in amplitudes)
conv, values, vectors = davidson(matvec, amplitudes_plain, precond, tol=tolerance, nroots=len(amplitudes))
if any(not i for i in conv):
warn("Following eigenvalues did not converge: {}".format(list(
i for i, x in enumerate(conv) if not x
)))
return values, list(v2a_sym(i, labels, shapes, ixs) for i in vectors)
|
[
"numpy.zeros_like",
"numpy.ix_",
"pyscf.lib.diis.DIIS",
"numpy.zeros",
"numpy.transpose",
"numpy.prod",
"inspect.getargspec",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.arange",
"collections.OrderedDict",
"numpy.diag",
"numpy.concatenate",
"numpy.repeat"
] |
[((2682, 2707), 'numpy.concatenate', 'numpy.concatenate', (['result'], {}), '(result)\n', (2699, 2707), False, 'import numpy\n'), ((7597, 7626), 'numpy.diag', 'numpy.diag', (["hamiltonian['oo']"], {}), "(hamiltonian['oo'])\n", (7607, 7626), False, 'import numpy\n'), ((7639, 7668), 'numpy.diag', 'numpy.diag', (["hamiltonian['vv']"], {}), "(hamiltonian['vv'])\n", (7649, 7668), False, 'import numpy\n'), ((9450, 9463), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9461, 9463), False, 'from collections import OrderedDict\n'), ((10455, 10468), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10466, 10468), False, 'from collections import OrderedDict\n'), ((11486, 11499), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11497, 11499), False, 'from collections import OrderedDict\n'), ((11979, 11992), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11990, 11992), False, 'from collections import OrderedDict\n'), ((14330, 14359), 'numpy.diag', 'numpy.diag', (["hamiltonian['oo']"], {}), "(hamiltonian['oo'])\n", (14340, 14359), False, 'import numpy\n'), ((14372, 14401), 'numpy.diag', 'numpy.diag', (["hamiltonian['vv']"], {}), "(hamiltonian['vv'])\n", (14382, 14401), False, 'import numpy\n'), ((5346, 5375), 'inspect.getargspec', 'inspect.getargspec', (['equations'], {}), '(equations)\n', (5364, 5375), False, 'import inspect\n'), ((6916, 6958), 'collections.OrderedDict', 'OrderedDict', (['((k, 0) for k in initial_guess)'], {}), '((k, 0) for k in initial_guess)\n', (6927, 6958), False, 'from collections import OrderedDict\n'), ((7706, 7712), 'pyscf.lib.diis.DIIS', 'DIIS', ([], {}), '()\n', (7710, 7712), False, 'from pyscf.lib.diis import DIIS\n'), ((12365, 12396), 'numpy.prod', 'numpy.prod', (['([1] + ix_size[:pos])'], {}), '([1] + ix_size[:pos])\n', (12375, 12396), False, 'import numpy\n'), ((12410, 12445), 'numpy.prod', 'numpy.prod', (['([1] + ix_size[pos + 1:])'], {}), '([1] + ix_size[pos + 1:])\n', (12420, 12445), False, 'import numpy\n'), ((13314, 13347), 'numpy.zeros', 'numpy.zeros', (['shape'], {'dtype': 'a.dtype'}), '(shape, dtype=a.dtype)\n', (13325, 13347), False, 'import numpy\n'), ((2649, 2669), 'numpy.reshape', 'numpy.reshape', (['v', '(-1)'], {}), '(v, -1)\n', (2662, 2669), False, 'import numpy\n'), ((2861, 2907), 'numpy.reshape', 'numpy.reshape', (['vec[offset:offset + s]', 'v.shape'], {}), '(vec[offset:offset + s], v.shape)\n', (2874, 2907), False, 'import numpy\n'), ((9543, 9597), 'numpy.zeros', 'numpy.zeros', (['((nocc,) * v + (nvir,) * (v - 1))'], {}), '((nocc,) * v + (nvir,) * (v - 1), **kwargs)\n', (9554, 9597), False, 'import numpy\n'), ((10548, 10602), 'numpy.zeros', 'numpy.zeros', (['((nocc,) * (v - 1) + (nvir,) * v)'], {}), '((nocc,) * (v - 1) + (nvir,) * v, **kwargs)\n', (10559, 10602), False, 'import numpy\n'), ((12467, 12486), 'numpy.repeat', 'numpy.repeat', (['x', 'ft'], {}), '(x, ft)\n', (12479, 12486), False, 'import numpy\n'), ((1639, 1660), 'numpy.zeros_like', 'numpy.zeros_like', (['res'], {}), '(res)\n', (1655, 1660), False, 'import numpy\n'), ((3637, 3651), 'numpy.ix_', 'numpy.ix_', (['*ix'], {}), '(*ix)\n', (3646, 3651), False, 'import numpy\n'), ((8091, 8111), 'numpy.linalg.norm', 'numpy.linalg.norm', (['i'], {}), '(i)\n', (8108, 8111), False, 'import numpy\n'), ((4665, 4705), 'numpy.transpose', 'numpy.transpose', (['eris.ooov', '(2, 3, 0, 1)'], {}), '(eris.ooov, (2, 3, 0, 1))\n', (4680, 4705), False, 'import numpy\n'), ((4846, 4886), 'numpy.transpose', 'numpy.transpose', (['eris.oovv', '(2, 3, 0, 1)'], {}), '(eris.oovv, (2, 3, 0, 1))\n', (4861, 4886), False, 'import numpy\n'), ((4544, 4584), 'numpy.transpose', 'numpy.transpose', (['eris.ooov', '(0, 1, 3, 2)'], {}), '(eris.ooov, (0, 1, 3, 2))\n', (4559, 4584), False, 'import numpy\n'), ((4906, 4946), 'numpy.transpose', 'numpy.transpose', (['eris.ovvv', '(2, 3, 1, 0)'], {}), '(eris.ovvv, (2, 3, 1, 0))\n', (4921, 4946), False, 'import numpy\n'), ((4108, 4148), 'numpy.transpose', 'numpy.transpose', (['eris.oovv', '(2, 3, 0, 1)'], {}), '(eris.oovv, (2, 3, 0, 1))\n', (4123, 4148), False, 'import numpy\n'), ((3517, 3537), 'numpy.arange', 'numpy.arange', (['(d // 2)'], {}), '(d // 2)\n', (3529, 3537), False, 'import numpy\n'), ((3559, 3582), 'numpy.arange', 'numpy.arange', (['(d // 2)', 'd'], {}), '(d // 2, d)\n', (3571, 3582), False, 'import numpy\n'), ((3905, 3945), 'numpy.transpose', 'numpy.transpose', (['eris.ooov', '(0, 1, 3, 2)'], {}), '(eris.ooov, (0, 1, 3, 2))\n', (3920, 3945), False, 'import numpy\n'), ((4021, 4061), 'numpy.transpose', 'numpy.transpose', (['eris.ovov', '(0, 1, 3, 2)'], {}), '(eris.ovov, (0, 1, 3, 2))\n', (4036, 4061), False, 'import numpy\n'), ((4168, 4208), 'numpy.transpose', 'numpy.transpose', (['eris.ovvv', '(2, 3, 1, 0)'], {}), '(eris.ovvv, (2, 3, 1, 0))\n', (4183, 4208), False, 'import numpy\n')]
|
from collections import OrderedDict
import os.path as osp
import matplotlib.pyplot as plt
import numpy as np
from rlkit.torch.networks import ConcatMlp
from rlkit.torch.sets import set_vae_trainer as svt
from rlkit.torch.sets import models
from rlkit.torch.sets.discriminator import (
DiscriminatorDataset,
DiscriminatorTrainer,
)
from rlkit.torch.sets.set_vae_trainer import PriorModel, CustomDictLoader
from rlkit.torch.sets.batch_algorithm import (
BatchTorchAlgorithm,
)
from rlkit.torch.sets.parallel_algorithms import ParallelAlgorithms
from torch.utils import data
from rlkit.torch import pytorch_util as ptu
from rlkit.torch.vae.vae_torch_trainer import VAE
def create_circle_dataset(num_examples, radius=3, scale=0.5, origin=(0, 0)):
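    # Sample points in polar coordinates: angles uniform on [0, 2*pi), radii
    # normally distributed around `radius` with spread `scale`, shifted by `origin`.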
angle = np.random.uniform(size=(num_examples, 1)) * 2 * np.pi
r = scale * np.random.randn(num_examples, 1) + radius
y = r * np.sin(angle) + origin[1]
x = r * np.cos(angle) + origin[0]
return np.concatenate([x, y], axis=1)
def create_box_dataset(num_examples, xlim, ylim):
x = np.random.uniform(xlim[0], xlim[1], size=(num_examples, 1))
y = np.random.uniform(ylim[0], ylim[1], size=(num_examples, 1))
return np.concatenate([x, y], axis=1)
def create_datasets(create_set_kwargs_list=None):
if create_set_kwargs_list is None:
create_set_kwargs_list = [
dict(num_examples=128, version='circle'),
dict(num_examples=128, version='box', xlim=(0, 2), ylim=(0, 2)),
dict(num_examples=128, version='box', xlim=(-2, 0), ylim=(-2, 0)),
dict(num_examples=128, version='box', xlim=(0, 2), ylim=(-2, 0)),
dict(num_examples=128, version='box', xlim=(-2, 2), ylim=(0, 2)),
]
return np.array([
create_set(**kwargs) for kwargs in create_set_kwargs_list
])
def create_set(version, **kwargs):
if version == 'circle':
return create_circle_dataset(**kwargs)
elif version == 'box':
return create_box_dataset(**kwargs)
else:
raise NotImplementedError()
def setup_discriminator(
vae: VAE,
examples,
prior,
discriminator_kwargs=None,
dataset_kwargs=None,
trainer_kwargs=None,
algo_kwargs=None,
name='',
):
if discriminator_kwargs is None:
discriminator_kwargs = {}
if dataset_kwargs is None:
dataset_kwargs = {}
if trainer_kwargs is None:
trainer_kwargs = {}
if algo_kwargs is None:
algo_kwargs = {}
discriminator = ConcatMlp(
input_size=vae.representation_size,
output_size=1,
**discriminator_kwargs
)
discriminator_data_loader = DiscriminatorDataset(
vae, examples, prior, **dataset_kwargs)
discriminator_trainer = DiscriminatorTrainer(
discriminator,
prior,
name=name,
**trainer_kwargs,
)
discriminator_algo = BatchTorchAlgorithm(
discriminator_trainer,
discriminator_data_loader,
**algo_kwargs
)
return discriminator_algo, discriminator, prior
def train_2d_set_vae(
create_set_vae_kwargs,
vae_trainer_kwargs,
vae_algo_kwargs,
debug_kwargs,
num_iters,
x_depends_on_c=False,
vae_data_loader_kwargs=None,
create_train_dataset_kwargs=None,
create_eval_dataset_kwargs=None,
setup_discriminator_kwargs=None,
set_dict_loader_kwargs=None,
):
if set_dict_loader_kwargs is None:
set_dict_loader_kwargs = {}
if vae_data_loader_kwargs is None:
vae_data_loader_kwargs = {}
if setup_discriminator_kwargs is None:
setup_discriminator_kwargs = {}
if create_eval_dataset_kwargs is None:
create_eval_dataset_kwargs = create_train_dataset_kwargs
data_dim = 2
eval_sets = create_datasets(**create_eval_dataset_kwargs)
train_sets = create_datasets(**create_train_dataset_kwargs)
for set_ in train_sets:
plt.scatter(*set_.T)
all_obs = np.vstack(train_sets)
# vae = models.create_vector_vae(
# data_dim=data_dim,
# **create_vae_kwargs,
# )
vae = models.create_vector_set_vae(
data_dim=data_dim,
x_depends_on_c=x_depends_on_c,
**create_set_vae_kwargs,
)
data_key = 'data'
set_key = 'set'
set_index_key = 'set_index'
train_sets_pt = [ptu.from_numpy(s) for s in train_sets]
eval_sets_pt = [ptu.from_numpy(s) for s in eval_sets]
all_obs_pt = ptu.from_numpy(all_obs)
all_obs_iterator_pt = data.DataLoader(all_obs_pt, **vae_data_loader_kwargs)
dict_loader = CustomDictLoader(
data=all_obs_iterator_pt,
sets=train_sets_pt,
data_key=data_key,
set_key=set_key,
set_index_key=set_index_key,
**set_dict_loader_kwargs
)
algos = OrderedDict()
discriminator_algos = []
discriminators = []
if setup_discriminator_kwargs:
prior_models = [PriorModel(vae.representation_size) for _ in train_sets_pt]
for i, examples in enumerate(train_sets_pt):
discriminator_algo, discriminator, prior_m = setup_discriminator(
vae,
examples,
prior_models[i],
name='discriminator{}'.format(i),
**setup_discriminator_kwargs
)
discriminator_algos.append(discriminator_algo)
discriminators.append(discriminator)
else:
prior_models = None
vae_trainer = svt.SetVAETrainer(
vae=vae,
set_key=set_key,
data_key=data_key,
train_sets=train_sets_pt,
eval_sets=eval_sets_pt,
prior_models=prior_models,
discriminators=discriminators,
**vae_trainer_kwargs)
vae_algorithm = BatchTorchAlgorithm(
vae_trainer,
dict_loader,
**vae_algo_kwargs,
)
algos['vae'] = vae_algorithm
for i, algo in enumerate(discriminator_algos):
algos['discriminator_{}'.format(i)] = algo
algorithm = ParallelAlgorithms(algos, num_iters)
algorithm.to(ptu.device)
set_up_debugging(vae_algorithm, prior_models, discriminator_algos, **debug_kwargs)
algorithm.run()
def set_up_debugging(
vae_algorithm,
prior_models,
discriminator_algos,
debug_period=10,
num_samples=25,
dump_posterior_and_prior_samples=False,
):
from rlkit.core import logger
logdir = logger.get_snapshot_dir()
set_loss_version = vae_algorithm.trainer.set_loss_version
# visualize the train/eval set once
plt_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
xmin = xmax = ymin = ymax = 0
for name, list_of_sets in [
('train', vae_algorithm.trainer.train_sets),
('eval', vae_algorithm.trainer.eval_sets),
]:
plt.figure()
for i, set in enumerate(list_of_sets):
set_examples = ptu.get_numpy(set)
plt.scatter(*set_examples.T, color=plt_colors[i])
xmin, xmax, ymin, ymax = plt.axis()
plt.savefig(osp.join(logdir, '{}_set_visualization.png'.format(name)))
plt.close()
def dump_debug_images(
algo,
epoch,
tag='',
):
trainer = algo.trainer
trainer.vae.train()
if debug_period <= 0 or epoch % debug_period != 0:
return
def draw_reconstruction(batch, color=None):
x_np = ptu.get_numpy(batch)
x_hat_np = ptu.get_numpy(trainer.vae.reconstruct(batch))
delta = x_hat_np - x_np
plt.quiver(
x_np[:, 0],
x_np[:, 1],
delta[:, 0],
delta[:, 1],
scale=1.,
scale_units='xy',
linewidth=0.5,
alpha=0.5,
color=color,
)
# batch = trainer.example_batch[trainer.data_key]
# plt.figure()
# draw_reconstruction(batch)
# plt.savefig(osp.join(logdir, '{}_recon.png'.format(epoch)))
#
raw_samples = ptu.get_numpy(trainer.vae.sample(num_samples))
plt.figure()
plt.scatter(*raw_samples.T)
plt.title('samples, epoch {}'.format(epoch))
plt.savefig(osp.join(logdir, 'vae_samples_{epoch}.png'.format(
epoch=epoch)))
plt.close()
for prefix, list_of_sets in [
('eval', trainer.eval_sets),
]:
name = prefix + tag
plt.figure()
for i, set in enumerate(list_of_sets):
draw_reconstruction(set, color=plt_colors[i])
plt.xlim((xmin, xmax))
plt.ylim((ymin, ymax))
plt.title('{}, epoch {}'.format(name, epoch))
plt.savefig(
osp.join(logdir, 'set_recons_{name}_{epoch}.png'.format(
epoch=epoch, name=name)))
plt.close()
for prefix, list_of_sets in [
('train', trainer.train_sets),
('eval', trainer.eval_sets),
]:
name = prefix + tag
for fix_xy_lims in [True, False]:
plt.figure()
for set_i, set in enumerate(list_of_sets):
set_samples = ptu.get_numpy(
trainer.vae.set_sample(num_samples, set))
plt.scatter(*set_samples.T, color=plt_colors[set_i])
if fix_xy_lims:
plt.xlim((xmin, xmax))
plt.ylim((ymin, ymax))
file_name = 'set_vae_samples_fixed_axes_{name}_{epoch}.png'.format(
epoch=epoch, name=name,
)
else:
file_name = 'set_vae_samples_{name}_{epoch}.png'.format(
epoch=epoch, name=name,
)
plt.title('{}, epoch {}'.format(name, epoch))
plt.savefig(osp.join(logdir, file_name))
plt.close()
plt.figure()
for i, set in enumerate(list_of_sets):
draw_reconstruction(set, color=plt_colors[i])
plt.xlim((xmin, xmax))
plt.ylim((ymin, ymax))
plt.title('{}, epoch {}'.format(name, epoch))
plt.savefig(
osp.join(logdir, 'set_recons_{name}_{epoch}.png'.format(
epoch=epoch, name=name)))
plt.close()
def dump_samples(
algo,
epoch,
):
if debug_period <= 0 or epoch % debug_period != 0:
return
# visualize the train/eval set once
data_loaders = [algo.data_loader for algo in discriminator_algos]
def get_last_batch(dl):
batch = None
for batch in dl:
pass
return batch
batches = [get_last_batch(dl) for dl in data_loaders]
nrows = len(batches)
ncols = algo.trainer.vae.representation_size // 2
fig, list_of_axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(2 * ncols, 2 * nrows))
for batch, axes in zip(batches, list_of_axes):
# y = batch['y']
x = batch['x']
xnp = x.cpu().detach().numpy()
posterior_samples = xnp[:128]
prior_samples = xnp[128:]
for i, ax in enumerate(axes):
post_x = posterior_samples[:, 2*i]
post_y = posterior_samples[:, 2*i + 1]
prior_x = prior_samples[:, 2*i]
prior_y = prior_samples[:, 2*i + 1]
ax.scatter(post_x, post_y, color='r')
ax.scatter(prior_x, prior_y, color='b')
        plt.title('discriminator samples, epoch {}'.format(epoch))
plt.savefig(logdir + '/discriminator_samples_{epoch}.png'.format(
epoch=epoch,
))
plt.close()
vae_algorithm.post_epoch_funcs.append(dump_debug_images)
if dump_posterior_and_prior_samples:
vae_algorithm.post_epoch_funcs.append(dump_samples)
# if discriminator_algos:
# vae_algorithm.pre_train_funcs.append(
# functools.partial(dump_debug_images, tag='-pre-vae')
# )
|
[
"matplotlib.pyplot.quiver",
"rlkit.torch.sets.batch_algorithm.BatchTorchAlgorithm",
"rlkit.torch.pytorch_util.from_numpy",
"rlkit.torch.networks.ConcatMlp",
"matplotlib.pyplot.figure",
"numpy.sin",
"rlkit.torch.sets.parallel_algorithms.ParallelAlgorithms",
"rlkit.core.logger.get_snapshot_dir",
"os.path.join",
"rlkit.torch.sets.models.create_vector_set_vae",
"torch.utils.data.DataLoader",
"numpy.random.randn",
"rlkit.torch.sets.set_vae_trainer.SetVAETrainer",
"matplotlib.pyplot.close",
"rlkit.torch.sets.discriminator.DiscriminatorTrainer",
"matplotlib.pyplot.subplots",
"rlkit.torch.pytorch_util.get_numpy",
"matplotlib.pyplot.ylim",
"numpy.cos",
"rlkit.torch.sets.set_vae_trainer.CustomDictLoader",
"numpy.vstack",
"numpy.concatenate",
"numpy.random.uniform",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.scatter",
"rlkit.torch.sets.set_vae_trainer.PriorModel",
"matplotlib.pyplot.axis",
"rlkit.torch.sets.discriminator.DiscriminatorDataset",
"collections.OrderedDict"
] |
[((970, 1000), 'numpy.concatenate', 'np.concatenate', (['[x, y]'], {'axis': '(1)'}), '([x, y], axis=1)\n', (984, 1000), True, 'import numpy as np\n'), ((1061, 1120), 'numpy.random.uniform', 'np.random.uniform', (['xlim[0]', 'xlim[1]'], {'size': '(num_examples, 1)'}), '(xlim[0], xlim[1], size=(num_examples, 1))\n', (1078, 1120), True, 'import numpy as np\n'), ((1129, 1188), 'numpy.random.uniform', 'np.random.uniform', (['ylim[0]', 'ylim[1]'], {'size': '(num_examples, 1)'}), '(ylim[0], ylim[1], size=(num_examples, 1))\n', (1146, 1188), True, 'import numpy as np\n'), ((1200, 1230), 'numpy.concatenate', 'np.concatenate', (['[x, y]'], {'axis': '(1)'}), '([x, y], axis=1)\n', (1214, 1230), True, 'import numpy as np\n'), ((2536, 2625), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': 'vae.representation_size', 'output_size': '(1)'}), '(input_size=vae.representation_size, output_size=1, **\n discriminator_kwargs)\n', (2545, 2625), False, 'from rlkit.torch.networks import ConcatMlp\n'), ((2683, 2743), 'rlkit.torch.sets.discriminator.DiscriminatorDataset', 'DiscriminatorDataset', (['vae', 'examples', 'prior'], {}), '(vae, examples, prior, **dataset_kwargs)\n', (2703, 2743), False, 'from rlkit.torch.sets.discriminator import DiscriminatorDataset, DiscriminatorTrainer\n'), ((2781, 2852), 'rlkit.torch.sets.discriminator.DiscriminatorTrainer', 'DiscriminatorTrainer', (['discriminator', 'prior'], {'name': 'name'}), '(discriminator, prior, name=name, **trainer_kwargs)\n', (2801, 2852), False, 'from rlkit.torch.sets.discriminator import DiscriminatorDataset, DiscriminatorTrainer\n'), ((2917, 3006), 'rlkit.torch.sets.batch_algorithm.BatchTorchAlgorithm', 'BatchTorchAlgorithm', (['discriminator_trainer', 'discriminator_data_loader'], {}), '(discriminator_trainer, discriminator_data_loader, **\n algo_kwargs)\n', (2936, 3006), False, 'from rlkit.torch.sets.batch_algorithm import BatchTorchAlgorithm\n'), ((4019, 4040), 'numpy.vstack', 'np.vstack', (['train_sets'], {}), '(train_sets)\n', (4028, 4040), True, 'import numpy as np\n'), ((4158, 4266), 'rlkit.torch.sets.models.create_vector_set_vae', 'models.create_vector_set_vae', ([], {'data_dim': 'data_dim', 'x_depends_on_c': 'x_depends_on_c'}), '(data_dim=data_dim, x_depends_on_c=\n x_depends_on_c, **create_set_vae_kwargs)\n', (4186, 4266), False, 'from rlkit.torch.sets import models\n'), ((4503, 4526), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['all_obs'], {}), '(all_obs)\n', (4517, 4526), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((4553, 4606), 'torch.utils.data.DataLoader', 'data.DataLoader', (['all_obs_pt'], {}), '(all_obs_pt, **vae_data_loader_kwargs)\n', (4568, 4606), False, 'from torch.utils import data\n'), ((4625, 4788), 'rlkit.torch.sets.set_vae_trainer.CustomDictLoader', 'CustomDictLoader', ([], {'data': 'all_obs_iterator_pt', 'sets': 'train_sets_pt', 'data_key': 'data_key', 'set_key': 'set_key', 'set_index_key': 'set_index_key'}), '(data=all_obs_iterator_pt, sets=train_sets_pt, data_key=\n data_key, set_key=set_key, set_index_key=set_index_key, **\n set_dict_loader_kwargs)\n', (4641, 4788), False, 'from rlkit.torch.sets.set_vae_trainer import PriorModel, CustomDictLoader\n'), ((4846, 4859), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4857, 4859), False, 'from collections import OrderedDict\n'), ((5516, 5717), 'rlkit.torch.sets.set_vae_trainer.SetVAETrainer', 'svt.SetVAETrainer', ([], {'vae': 'vae', 'set_key': 'set_key', 'data_key': 'data_key', 'train_sets': 'train_sets_pt', 'eval_sets': 'eval_sets_pt', 'prior_models': 'prior_models', 'discriminators': 'discriminators'}), '(vae=vae, set_key=set_key, data_key=data_key, train_sets=\n train_sets_pt, eval_sets=eval_sets_pt, prior_models=prior_models,\n discriminators=discriminators, **vae_trainer_kwargs)\n', (5533, 5717), True, 'from rlkit.torch.sets import set_vae_trainer as svt\n'), ((5794, 5858), 'rlkit.torch.sets.batch_algorithm.BatchTorchAlgorithm', 'BatchTorchAlgorithm', (['vae_trainer', 'dict_loader'], {}), '(vae_trainer, dict_loader, **vae_algo_kwargs)\n', (5813, 5858), False, 'from rlkit.torch.sets.batch_algorithm import BatchTorchAlgorithm\n'), ((6041, 6077), 'rlkit.torch.sets.parallel_algorithms.ParallelAlgorithms', 'ParallelAlgorithms', (['algos', 'num_iters'], {}), '(algos, num_iters)\n', (6059, 6077), False, 'from rlkit.torch.sets.parallel_algorithms import ParallelAlgorithms\n'), ((6461, 6486), 'rlkit.core.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (6484, 6486), False, 'from rlkit.core import logger\n'), ((3984, 4004), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*set_.T'], {}), '(*set_.T)\n', (3995, 4004), True, 'import matplotlib.pyplot as plt\n'), ((4389, 4406), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['s'], {}), '(s)\n', (4403, 4406), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((4448, 4465), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['s'], {}), '(s)\n', (4462, 4465), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((6842, 6854), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6852, 6854), True, 'import matplotlib.pyplot as plt\n'), ((7044, 7054), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (7052, 7054), True, 'import matplotlib.pyplot as plt\n'), ((7142, 7153), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7151, 7153), True, 'import matplotlib.pyplot as plt\n'), ((8156, 8168), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8166, 8168), True, 'import matplotlib.pyplot as plt\n'), ((8177, 8204), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*raw_samples.T'], {}), '(*raw_samples.T)\n', (8188, 8204), True, 'import matplotlib.pyplot as plt\n'), ((8364, 8375), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8373, 8375), True, 'import matplotlib.pyplot as plt\n'), ((11021, 11091), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'figsize': '(2 * ncols, 2 * nrows)'}), '(nrows=nrows, ncols=ncols, figsize=(2 * ncols, 2 * nrows))\n', (11033, 11091), True, 'import matplotlib.pyplot as plt\n'), ((11857, 11868), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11866, 11868), True, 'import matplotlib.pyplot as plt\n'), ((771, 812), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(num_examples, 1)'}), '(size=(num_examples, 1))\n', (788, 812), True, 'import numpy as np\n'), ((841, 873), 'numpy.random.randn', 'np.random.randn', (['num_examples', '(1)'], {}), '(num_examples, 1)\n', (856, 873), True, 'import numpy as np\n'), ((895, 908), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (901, 908), True, 'import numpy as np\n'), ((933, 946), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (939, 946), True, 'import numpy as np\n'), ((4972, 5007), 'rlkit.torch.sets.set_vae_trainer.PriorModel', 'PriorModel', (['vae.representation_size'], {}), '(vae.representation_size)\n', (4982, 5007), False, 'from rlkit.torch.sets.set_vae_trainer import PriorModel, CustomDictLoader\n'), ((6929, 6947), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['set'], {}), '(set)\n', (6942, 6947), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((6960, 7009), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*set_examples.T'], {'color': 'plt_colors[i]'}), '(*set_examples.T, color=plt_colors[i])\n', (6971, 7009), True, 'import matplotlib.pyplot as plt\n'), ((7455, 7475), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['batch'], {}), '(batch)\n', (7468, 7475), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((7593, 7725), 'matplotlib.pyplot.quiver', 'plt.quiver', (['x_np[:, 0]', 'x_np[:, 1]', 'delta[:, 0]', 'delta[:, 1]'], {'scale': '(1.0)', 'scale_units': '"""xy"""', 'linewidth': '(0.5)', 'alpha': '(0.5)', 'color': 'color'}), "(x_np[:, 0], x_np[:, 1], delta[:, 0], delta[:, 1], scale=1.0,\n scale_units='xy', linewidth=0.5, alpha=0.5, color=color)\n", (7603, 7725), True, 'import matplotlib.pyplot as plt\n'), ((8511, 8523), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8521, 8523), True, 'import matplotlib.pyplot as plt\n'), ((8649, 8671), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xmin, xmax)'], {}), '((xmin, xmax))\n', (8657, 8671), True, 'import matplotlib.pyplot as plt\n'), ((8684, 8706), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin, ymax)'], {}), '((ymin, ymax))\n', (8692, 8706), True, 'import matplotlib.pyplot as plt\n'), ((8921, 8932), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8930, 8932), True, 'import matplotlib.pyplot as plt\n'), ((10025, 10037), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10035, 10037), True, 'import matplotlib.pyplot as plt\n'), ((10163, 10185), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xmin, xmax)'], {}), '((xmin, xmax))\n', (10171, 10185), True, 'import matplotlib.pyplot as plt\n'), ((10198, 10220), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin, ymax)'], {}), '((ymin, ymax))\n', (10206, 10220), True, 'import matplotlib.pyplot as plt\n'), ((10435, 10446), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10444, 10446), True, 'import matplotlib.pyplot as plt\n'), ((9160, 9172), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9170, 9172), True, 'import matplotlib.pyplot as plt\n'), ((10000, 10011), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10009, 10011), True, 'import matplotlib.pyplot as plt\n'), ((9367, 9419), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*set_samples.T'], {'color': 'plt_colors[set_i]'}), '(*set_samples.T, color=plt_colors[set_i])\n', (9378, 9419), True, 'import matplotlib.pyplot as plt\n'), ((9472, 9494), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(xmin, xmax)'], {}), '((xmin, xmax))\n', (9480, 9494), True, 'import matplotlib.pyplot as plt\n'), ((9515, 9537), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(ymin, ymax)'], {}), '((ymin, ymax))\n', (9523, 9537), True, 'import matplotlib.pyplot as plt\n'), ((9955, 9982), 'os.path.join', 'osp.join', (['logdir', 'file_name'], {}), '(logdir, file_name)\n', (9963, 9982), True, 'import os.path as osp\n')]
|
import numpy as np
def avoid_backward_action(action):
## if backward movement is initiated, stop the car
if np.all(action > 0.0):
action[0] = 0.0
action[1] = 0.0
return action
def reward_path_divergence(position_history, pos_ptr, reward_multiplier):
v2 = position_history[pos_ptr] - position_history[pos_ptr - 1]
v1 = position_history[pos_ptr - 1] - position_history[pos_ptr - 2]
l2_v1 = np.linalg.norm(v1)
l2_v2 = np.linalg.norm(v2)
if l2_v1 == 0 and l2_v2 == 0:
return -1.0 * reward_multiplier
## L2 normalize
if l2_v1 > 0:
v1 = v1 / l2_v1
if l2_v2 > 0:
v2 = v2 / l2_v2
cosine_similarity = np.sum(v1 * v2)
if cosine_similarity > 0.0:
return reward_multiplier * (1.0 - cosine_similarity)
else:
return reward_multiplier * cosine_similarity
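
# Minimal usage sketch (hypothetical inputs): v1 and v2 are the last two
# displacement vectors, so a straight path scores 0.0 and a reversal scores
# -reward_multiplier.
if __name__ == "__main__":
    straight = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
    reverse = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 0.0]])
    print(reward_path_divergence(straight, 2, 1.0))  # 0.0
    print(reward_path_divergence(reverse, 2, 1.0))   # -1.0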
|
[
"numpy.linalg.norm",
"numpy.sum",
"numpy.all"
] |
[((114, 134), 'numpy.all', 'np.all', (['(action > 0.0)'], {}), '(action > 0.0)\n', (120, 134), True, 'import numpy as np\n'), ((421, 439), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (435, 439), True, 'import numpy as np\n'), ((449, 467), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (463, 467), True, 'import numpy as np\n'), ((640, 655), 'numpy.sum', 'np.sum', (['(v1 * v2)'], {}), '(v1 * v2)\n', (646, 655), True, 'import numpy as np\n')]
|
import numpy as np
from gym import spaces
from brs_envs.base_envs import BaseURDFBulletEnv
from brs_envs.base_envs import parse_collision
from brs_envs.rocket_landing_scene import RocketLandingScene
from brs_envs.martlet9.martlet9_robot import Martlet9Robot
class RocketLanderEnv(BaseURDFBulletEnv):
LANDING_SPEED_PENALTY = 5
LANDING_SPEED_SURVIVE_THRESH = 10
DEATH_PENALTY = 500
LANDED_SPEED_THRESH = 1e-1
LANDED_BONUS = DEATH_PENALTY
ACCURACY_BONUS = DEATH_PENALTY / 10
HIT_WATER_PENALTY = DEATH_PENALTY
POSITION_THRESH = 20
def __init__(self,
render=False,
gravity=9.8,
timestep=1/60,
sticky=1,
max_lateral_offset=10,
max_vertical_offset=10,
max_roll_offset=0.5,
max_pitch_offset=0.5,
max_yaw_offset=0.1,
mean_robot_start_height=100):
BaseURDFBulletEnv.__init__(self, render)
self._feet_landed = set()
self._gravity = gravity
self._timestep = timestep
self._sticky = sticky
self._max_lateral_offset = max_lateral_offset
self._max_vertical_offset = max_vertical_offset
self._max_roll_offset = max_roll_offset
self._max_pitch_offset = max_pitch_offset
self._max_yaw_offset = max_yaw_offset
self._mean_robot_start_height = mean_robot_start_height
self.observation_space = Martlet9Robot.observation_space
self.action_space = Martlet9Robot.action_space
def step(self, a):
control_cost = self.robot.applyControls(self.p, a)
self.scene.step()
state = self.robot.getState(self.p)
if self.renderable:
self.moveCamera(state)
self.drawArtifacts(a)
collisions_in_water = self.p.getContactPoints(bodyA=self.scene.plane)
collisions_on_pad = self.p.getContactPoints(bodyA=self.scene.pad)
collision_cost, done = self.processCollisions(state,
self._prev_state,
collisions_in_water,
collisions_on_pad)
self._prev_state = state
return state, -(control_cost + collision_cost), done, {}
def processCollisions(self, state, prev_state, water_col, pad_col):
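        # Returns (cost, done). Any water contact is fatal; so is drifting
        # above the spawn ceiling or more than POSITION_THRESH from the pad
        # center. Pad contacts are only allowed on the three feet and are
        # scored by touchdown speed and how many feet are down.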
if len(water_col) > 0:
return RocketLanderEnv.HIT_WATER_PENALTY + RocketLanderEnv.DEATH_PENALTY, True
num_landed_feet = 0
new_landed_feet = 0
allowable_contacts = [self.robot.foot1_link, self.robot.foot2_link, self.robot.foot3_link]
feet_landed = set()
prev_state_desc = Martlet9Robot.describeState(prev_state)
state_desc = Martlet9Robot.describeState(state)
if state_desc['position'][-1] >= self._mean_robot_start_height + self._max_vertical_offset:
return RocketLanderEnv.DEATH_PENALTY, True
lateral_dist_to_center = np.linalg.norm(state_desc['position'][:2])
if lateral_dist_to_center > RocketLanderEnv.POSITION_THRESH:
return RocketLanderEnv.DEATH_PENALTY, True
for collision in map(parse_collision, pad_col):
other_link = collision['linkIndexB']
if other_link not in allowable_contacts:
return RocketLanderEnv.DEATH_PENALTY, True
num_landed_feet += 1
feet_landed.add(other_link)
if other_link not in self._feet_landed:
new_landed_feet += 1
self._feet_landed = feet_landed
if num_landed_feet == 0: # No collisions, no penalties
return 0.00 * np.linalg.norm(state_desc['position']), False
speed = np.linalg.norm(state_desc['velocity'])
prev_speed = np.linalg.norm(prev_state_desc['velocity'])
if max(speed, prev_speed) > RocketLanderEnv.LANDING_SPEED_SURVIVE_THRESH:
speed_overshoot = max(speed, prev_speed) - RocketLanderEnv.LANDING_SPEED_SURVIVE_THRESH
speed_penalty = RocketLanderEnv.LANDING_SPEED_PENALTY * (speed_overshoot**2)
center_bonus = max(1, 10 - lateral_dist_to_center) * RocketLanderEnv.ACCURACY_BONUS
# print("Landed too fast! Speed was {}".format(max(speed, prev_speed)))
return RocketLanderEnv.DEATH_PENALTY + speed_penalty - center_bonus, True
landing_cost = max(speed, prev_speed) * RocketLanderEnv.LANDING_SPEED_PENALTY * min(1, new_landed_feet)
if (num_landed_feet < 3) or (speed > RocketLanderEnv.LANDED_SPEED_THRESH):
# return landing_cost, False
return 0, False
# print("Smooth landing!")
return landing_cost - RocketLanderEnv.LANDED_BONUS, True
def reset(self):
state = BaseURDFBulletEnv.reset(self)
self.robot.addToScene(self.scene, self.robotStartPos(), self.robotStartOri())
self._prev_state = self.robot.getState(self.p)
return self._prev_state
def drawArtifacts(self, control):
if self.robot.thruster_fire_id is not None:
r = 1.0
g = 0.8 * control[0]
b = 0.3
a = min(1.0, 0.9 * control[0])
self.p.changeVisualShape(self.robot.uid,
self.robot.thruster_fire_id,
rgbaColor=[r, g, b, a])
if self.robot.steer_smoke_id is not None:
r = 0.4
g = 0.4
b = 0.4
a = min(1.0, 0.2 * control[2])
self.p.changeVisualShape(self.robot.uid,
self.robot.steer_smoke_id,
rgbaColor=[r, g, b, a])
def moveCamera(self, state):
target = state[:3]
ori = self.p.getEulerFromQuaternion(state[3:7])
yaw = 20
pitch = state[2] / 100
distance = 0.3 * state[2] + 50
self.p.resetDebugVisualizerCamera(distance, yaw, pitch, target)
def initializeScene(self):
return RocketLandingScene(self.p, gravity=self._gravity, timestep=self._timestep, sticky=self._sticky)
def initializeRobot(self):
return Martlet9Robot()
def robotStartPos(self):
max_lateral_offset = float(self._max_lateral_offset)
max_vertical_offset = float(self._max_vertical_offset)
mean_robot_start_height = float(self._mean_robot_start_height)
x = np.random.uniform(-max_lateral_offset, max_lateral_offset)
y = np.random.uniform(-max_lateral_offset, max_lateral_offset)
z = mean_robot_start_height + np.random.uniform(-max_vertical_offset, max_vertical_offset)
return [x, y, z]
def robotStartOri(self):
max_roll_offset = float(self._max_roll_offset)
max_pitch_offset = float(self._max_pitch_offset)
max_yaw_offset = float(self._max_yaw_offset)
roll = np.random.uniform(-max_roll_offset, max_roll_offset)
pitch = np.random.uniform(-max_pitch_offset, max_pitch_offset)
yaw = np.random.uniform(-max_yaw_offset, max_yaw_offset)
return self.p.getQuaternionFromEuler([roll, pitch, yaw])
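
# Minimal smoke-test sketch (hypothetical; assumes BaseURDFBulletEnv wires up
# the pybullet client `self.p`, `self.scene`, and `self.robot` during reset()):
if __name__ == "__main__":
    env = RocketLanderEnv(render=False)
    state = env.reset()
    for _ in range(10):
        state, reward, done, _ = env.step(env.action_space.sample())
        if done:
            break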
|
[
"brs_envs.rocket_landing_scene.RocketLandingScene",
"numpy.random.uniform",
"brs_envs.base_envs.BaseURDFBulletEnv.__init__",
"brs_envs.martlet9.martlet9_robot.Martlet9Robot",
"numpy.linalg.norm",
"brs_envs.martlet9.martlet9_robot.Martlet9Robot.describeState",
"brs_envs.base_envs.BaseURDFBulletEnv.reset"
] |
[((955, 995), 'brs_envs.base_envs.BaseURDFBulletEnv.__init__', 'BaseURDFBulletEnv.__init__', (['self', 'render'], {}), '(self, render)\n', (981, 995), False, 'from brs_envs.base_envs import BaseURDFBulletEnv\n'), ((2750, 2789), 'brs_envs.martlet9.martlet9_robot.Martlet9Robot.describeState', 'Martlet9Robot.describeState', (['prev_state'], {}), '(prev_state)\n', (2777, 2789), False, 'from brs_envs.martlet9.martlet9_robot import Martlet9Robot\n'), ((2811, 2845), 'brs_envs.martlet9.martlet9_robot.Martlet9Robot.describeState', 'Martlet9Robot.describeState', (['state'], {}), '(state)\n', (2838, 2845), False, 'from brs_envs.martlet9.martlet9_robot import Martlet9Robot\n'), ((3034, 3076), 'numpy.linalg.norm', 'np.linalg.norm', (["state_desc['position'][:2]"], {}), "(state_desc['position'][:2])\n", (3048, 3076), True, 'import numpy as np\n'), ((3775, 3813), 'numpy.linalg.norm', 'np.linalg.norm', (["state_desc['velocity']"], {}), "(state_desc['velocity'])\n", (3789, 3813), True, 'import numpy as np\n'), ((3835, 3878), 'numpy.linalg.norm', 'np.linalg.norm', (["prev_state_desc['velocity']"], {}), "(prev_state_desc['velocity'])\n", (3849, 3878), True, 'import numpy as np\n'), ((4815, 4844), 'brs_envs.base_envs.BaseURDFBulletEnv.reset', 'BaseURDFBulletEnv.reset', (['self'], {}), '(self)\n', (4838, 4844), False, 'from brs_envs.base_envs import BaseURDFBulletEnv\n'), ((6059, 6158), 'brs_envs.rocket_landing_scene.RocketLandingScene', 'RocketLandingScene', (['self.p'], {'gravity': 'self._gravity', 'timestep': 'self._timestep', 'sticky': 'self._sticky'}), '(self.p, gravity=self._gravity, timestep=self._timestep,\n sticky=self._sticky)\n', (6077, 6158), False, 'from brs_envs.rocket_landing_scene import RocketLandingScene\n'), ((6202, 6217), 'brs_envs.martlet9.martlet9_robot.Martlet9Robot', 'Martlet9Robot', ([], {}), '()\n', (6215, 6217), False, 'from brs_envs.martlet9.martlet9_robot import Martlet9Robot\n'), ((6455, 6513), 'numpy.random.uniform', 'np.random.uniform', (['(-max_lateral_offset)', 'max_lateral_offset'], {}), '(-max_lateral_offset, max_lateral_offset)\n', (6472, 6513), True, 'import numpy as np\n'), ((6526, 6584), 'numpy.random.uniform', 'np.random.uniform', (['(-max_lateral_offset)', 'max_lateral_offset'], {}), '(-max_lateral_offset, max_lateral_offset)\n', (6543, 6584), True, 'import numpy as np\n'), ((6919, 6971), 'numpy.random.uniform', 'np.random.uniform', (['(-max_roll_offset)', 'max_roll_offset'], {}), '(-max_roll_offset, max_roll_offset)\n', (6936, 6971), True, 'import numpy as np\n'), ((6988, 7042), 'numpy.random.uniform', 'np.random.uniform', (['(-max_pitch_offset)', 'max_pitch_offset'], {}), '(-max_pitch_offset, max_pitch_offset)\n', (7005, 7042), True, 'import numpy as np\n'), ((7057, 7107), 'numpy.random.uniform', 'np.random.uniform', (['(-max_yaw_offset)', 'max_yaw_offset'], {}), '(-max_yaw_offset, max_yaw_offset)\n', (7074, 7107), True, 'import numpy as np\n'), ((6623, 6683), 'numpy.random.uniform', 'np.random.uniform', (['(-max_vertical_offset)', 'max_vertical_offset'], {}), '(-max_vertical_offset, max_vertical_offset)\n', (6640, 6683), True, 'import numpy as np\n'), ((3713, 3751), 'numpy.linalg.norm', 'np.linalg.norm', (["state_desc['position']"], {}), "(state_desc['position'])\n", (3727, 3751), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Defines a convolutional neural network with residual connections.
Based on the architecture described in:
<NAME>, <NAME>, <NAME>, <NAME>. "Deep residual learning
for image recognition". https://arxiv.org/abs/1512.03385
With batch normalization as described in:
<NAME>, <NAME>. "Batch normalization: Accelerating
deep network training by reducing internal covariate shift".
https://arxiv.org/abs/1502.03167
And parametric ReLU activations as described in:
<NAME>, <NAME>, <NAME>, <NAME>. "Delving Deep into
Rectifiers: Surpassing Human-Level Performance on ImageNet Classification".
https://arxiv.org/abs/1502.01852
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nnetmaker.model import *
from nnetmaker.util import *
class ConvNetClassifierModel0(BaseModel):
def _process_args(self, model_args_validator, **kwargs):
self._learn_alpha = model_args_validator.get("learn_alpha", ATYPE_BOOL, True)
self._alpha = model_args_validator.get("alpha", ATYPE_FLOAT, True)
self._dropout_rate = model_args_validator.get("dropout_rate", ATYPE_FLOAT, True)
self._conv_layer_sizes = model_args_validator.get("conv_layer_sizes", ATYPE_INTS_LIST, True)
self._conv_layer_dims = model_args_validator.get("conv_layer_dims", ATYPE_INTS_LIST, True)
self._fc_layer_dims = model_args_validator.get("fc_layer_dims", ATYPE_INTS_LIST, True)
self._num_input_channels = model_args_validator.get("num_input_channels", ATYPE_INT, True)
self._input_size = model_args_validator.get("input_size", ATYPE_INT, True)
self._output_size = model_args_validator.get("output_size", ATYPE_INT, True)
self._add_biases = model_args_validator.get("add_biases", ATYPE_BOOL, True)
def _get_input_var_names(self, **kwargs):
return ["img"]
def _get_target_var_names(self, **kwargs):
return ["predictions"]
def _build_cost_targets(self, in_vars, target_vars, out_vars, **kwargs):
cost_targets = []
cost_targets.append((target_vars["predictions"], out_vars["predictions"], None))
return cost_targets
def _build_metrics(self, in_vars, target_vars, out_vars, **kwargs):
metrics = {}
targets = tf.argmax(target_vars["predictions"], axis=1)
predicted = tf.argmax(out_vars["predictions"], axis=1)
metrics["accuracy"] = tf.metrics.accuracy(targets, predicted)
return metrics
def _build_prediction_network(self, input_vars, is_training, **kwargs):
weight_vars = []
weight_init_tups = []
# Build convolutional layers.
prev_var = input_vars["img"]
prev_dims = self._num_input_channels
for i in range(len(self._conv_layer_sizes)):
size = self._conv_layer_sizes[i]
cur_dims = self._conv_layer_dims[i]
h_var = self._add_op_square_conv2d(prev_var, "conv%d" % i, weight_vars,
weight_init_tups, self._add_biases,
prev_dims, cur_dims, size)
h_var = self._add_op_batch_norm(h_var, "norm%d" % i, 3, is_training)
h_var = self._add_op_relu(h_var, "relu%d" % i, alpha=self._alpha,
is_variable=self._learn_alpha)
# Add zero padded residual connection.
if cur_dims > prev_dims:
num_zeros = cur_dims - prev_dims
paddings = np.zeros((4, 2), dtype=int)
paddings[3, 1] = num_zeros
h_var = h_var + tf.pad(prev_var, paddings)
else:
        # Slice the channel axis (the last axis in NHWC), not the batch axis.
        h_var = h_var + prev_var[..., :cur_dims]
prev_dims = cur_dims
prev_var = h_var
# Build fully connected and output layers.
h_var = prev_var
h_size = self._input_size
for i, cur_dims in enumerate(self._fc_layer_dims + [self._output_size]):
if self._dropout_rate > 0:
h_var = self._add_op_dropout(h_var, "dropout%d" % i, self._dropout_rate,
is_training)
h_var = self._add_op_square_conv2d(h_var, "fc%d" % i, weight_vars,
weight_init_tups, self._add_biases,
prev_dims, cur_dims, h_size, pad=False)
h_size = 1
prev_dims = cur_dims
if i < len(self._fc_layer_dims): # Hidden fully connected layer.
h_var = self._add_op_batch_norm(h_var, "fc_norm%d" % i, 3, is_training)
h_var = self._add_op_relu(h_var, "fc_relu%d" % i, alpha=self._alpha,
is_variable=self._learn_alpha)
else: # Final output layer.
h_var = tf.reduce_mean(h_var, axis=1)
h_var = tf.reduce_mean(h_var, axis=1)
h_var = tf.nn.softmax(h_var)
out_vars = {}
out_vars["predictions"] = h_var
return out_vars, weight_vars, weight_init_tups
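
# A minimal args sketch (hypothetical values) covering the keys read through
# model_args_validator in _process_args above:
#
#   {"learn_alpha": true, "alpha": 0.25, "dropout_rate": 0.5,
#    "conv_layer_sizes": [3, 3, 3], "conv_layer_dims": [16, 32, 64],
#    "fc_layer_dims": [128], "num_input_channels": 3,
#    "input_size": 32, "output_size": 10, "add_biases": true}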
|
[
"tensorflow.nn.softmax",
"tensorflow.metrics.accuracy",
"tensorflow.argmax",
"tensorflow.pad",
"numpy.zeros",
"tensorflow.reduce_mean"
] |
[((2294, 2339), 'tensorflow.argmax', 'tf.argmax', (["target_vars['predictions']"], {'axis': '(1)'}), "(target_vars['predictions'], axis=1)\n", (2303, 2339), True, 'import tensorflow as tf\n'), ((2356, 2398), 'tensorflow.argmax', 'tf.argmax', (["out_vars['predictions']"], {'axis': '(1)'}), "(out_vars['predictions'], axis=1)\n", (2365, 2398), True, 'import tensorflow as tf\n'), ((2426, 2465), 'tensorflow.metrics.accuracy', 'tf.metrics.accuracy', (['targets', 'predicted'], {}), '(targets, predicted)\n', (2445, 2465), True, 'import tensorflow as tf\n'), ((3443, 3470), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': 'int'}), '((4, 2), dtype=int)\n', (3451, 3470), True, 'import numpy as np\n'), ((4634, 4663), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['h_var'], {'axis': '(1)'}), '(h_var, axis=1)\n', (4648, 4663), True, 'import tensorflow as tf\n'), ((4680, 4709), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['h_var'], {'axis': '(1)'}), '(h_var, axis=1)\n', (4694, 4709), True, 'import tensorflow as tf\n'), ((4726, 4746), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['h_var'], {}), '(h_var)\n', (4739, 4746), True, 'import tensorflow as tf\n'), ((3530, 3556), 'tensorflow.pad', 'tf.pad', (['prev_var', 'paddings'], {}), '(prev_var, paddings)\n', (3536, 3556), True, 'import tensorflow as tf\n')]
|
import sys
sys.path.append("./")
import shiftnet_cuda
import numpy as np
import torch
import torch.cuda
def main():
pattern = np.arange(18 * 18).reshape(18, 18)
src_buf = np.zeros((32, 64, 18, 18)).astype(np.float32)
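    # Fill every (batch, channel) slice with the same 18x18 ramp pattern.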
for bnr in range(32):
for ch in range(64):
src_buf[bnr,ch,:,:] = pattern
x_hin = torch.zeros(32, 64, 18, 18).type(torch.FloatTensor)
#x_hin[:,:,1:4,1:4] = 1.0
x_hin.copy_(torch.from_numpy(src_buf))
y_hin = torch.zeros(32, 64, 18, 18).type(torch.FloatTensor)
x = x_hin.cuda()
y = y_hin.cuda()
#ret = shiftnet_cuda.moduloshift3x3_nchw(x, y)
ret = shiftnet_cuda.moduloshiftgeneric_nchw(x, y, 7, 2, -1)
assert ret == 1
x_hout = x.cpu()
y_hout = y.cpu()
print(x_hout[0,0,:18,:18])
for ch in range(9):
print(y_hout[0,ch,:18,:18])
if __name__ == "__main__":
main()
|
[
"sys.path.append",
"numpy.zeros",
"numpy.arange",
"torch.zeros",
"shiftnet_cuda.moduloshiftgeneric_nchw",
"torch.from_numpy"
] |
[((11, 32), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (26, 32), False, 'import sys\n'), ((601, 654), 'shiftnet_cuda.moduloshiftgeneric_nchw', 'shiftnet_cuda.moduloshiftgeneric_nchw', (['x', 'y', '(7)', '(2)', '(-1)'], {}), '(x, y, 7, 2, -1)\n', (638, 654), False, 'import shiftnet_cuda\n'), ((414, 439), 'torch.from_numpy', 'torch.from_numpy', (['src_buf'], {}), '(src_buf)\n', (430, 439), False, 'import torch\n'), ((131, 149), 'numpy.arange', 'np.arange', (['(18 * 18)'], {}), '(18 * 18)\n', (140, 149), True, 'import numpy as np\n'), ((178, 204), 'numpy.zeros', 'np.zeros', (['(32, 64, 18, 18)'], {}), '((32, 64, 18, 18))\n', (186, 204), True, 'import numpy as np\n'), ((320, 347), 'torch.zeros', 'torch.zeros', (['(32)', '(64)', '(18)', '(18)'], {}), '(32, 64, 18, 18)\n', (331, 347), False, 'import torch\n'), ((452, 479), 'torch.zeros', 'torch.zeros', (['(32)', '(64)', '(18)', '(18)'], {}), '(32, 64, 18, 18)\n', (463, 479), False, 'import torch\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .anchor_head_template import AnchorHeadTemplate
class GradReverse(torch.autograd.Function):
    # Identity in the forward pass; scales the gradient by lambd in the
    # backward pass (gradient reversal when lambd < 0).
    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        return x.view_as(x)
    @staticmethod
    def backward(ctx, grad_output):
        return grad_output * ctx.lambd, None
def grad_reverse(x, lambd):
    return GradReverse.apply(x, lambd)
class RangeIntervalAttentionLayer(nn.Module):
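    # Learns one attention logit per horizontal band of rows (range bins in
    # the BEV map): the (1, division, 1) parameter is tiled to one scalar per
    # row, multiplied into the features, and squashed with a sigmoid.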
def __init__(self, num_channels, kernel_size, division=6, prior=False):
super(RangeIntervalAttentionLayer, self).__init__()
if prior:
param_list = []
for i in range(division):
param_list.append(torch.tensor([[[i * (division-1)]]], dtype=torch.float32))
param = torch.cat(param_list, dim=-2)
else:
param = torch.randn(1, division, 1)
self.patch_param = nn.Parameter(param, requires_grad=True)
# self.patch_param = nn.Parameter(torch.randn(1, division, 1), requires_grad=True)
self.sigmoid = nn.Sigmoid()
self.kernel_size = kernel_size
self.division = division
self.elem = int(self.kernel_size / division)
def forward(self, input_tensor):
bt, c, h, w = input_tensor.size()
input_tensor = input_tensor.view(-1, h, w)
self.patch_matrix = self.patch_param.repeat(1, 1, self.elem).view(1, -1, 1)
self.patch_matrix = self.patch_matrix.repeat(bt*c, 1, w)
input_tensor = input_tensor * self.patch_matrix
input_tensor = self.sigmoid(input_tensor).view(bt, c, h, w)
return input_tensor
class RoadIntervalAttentionLayer(nn.Module):
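    # Same banding idea as RangeIntervalAttentionLayer, but the bands run
    # along the width (columns) of the BEV map, i.e. across the road rather
    # than along the range axis.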
def __init__(self, num_channels, kernel_size, division=6, prior=False):
super(RoadIntervalAttentionLayer, self).__init__()
if prior:
param_list = []
for i in range(division):
param_list.append(torch.tensor([[[i * (division-1)]]], dtype=torch.float32))
param = torch.cat(param_list, dim=-1)
else:
param = torch.randn(1, 1, division)
self.patch_param = nn.Parameter(param, requires_grad=True)
self.sigmoid = nn.Sigmoid()
self.kernel_size = kernel_size
self.division = division
self.elem = int(self.kernel_size / division)
def forward(self, input_tensor):
bt, c, h, w = input_tensor.size()
input_tensor = input_tensor.view(-1, h, w)
self.patch_matrix = self.patch_param.repeat(1, self.elem, 1).permute(0,2,1).contiguous().view(1,-1,1).permute(0,2,1)
self.patch_matrix = self.patch_matrix.repeat(bt*c, h, 1)
input_tensor = input_tensor * self.patch_matrix
input_tensor = self.sigmoid(input_tensor).view(bt, c, h, w)
return input_tensor
class LocationAttentionLayer(nn.Module):
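    # Learns a full kernel_size x kernel_size attention map shared across
    # batch and channels; features are scaled by it element-wise and then
    # squashed with a sigmoid.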
def __init__(self, num_channels, kernel_size, prior=False):
super(LocationAttentionLayer, self).__init__()
self.patch_matrix = nn.Parameter(torch.randn(1, kernel_size, kernel_size), requires_grad=True)
# self.patch_conv = nn.Conv2d(num_channels, 1, kernel_size, kernel_size)
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor):
#2, 512, 126, 126
bt, c, h, w = input_tensor.size()
# print("input_tensor", input_tensor.shape)
# patch_tensor = self.patch_conv(input_tensor)
# print("patch_tensor", patch_tensor.shape)
input_tensor = input_tensor.view(-1, h, w)
# self.patch_matrix = self.patch_matrix.repeat(bt*c, 1, 1)
# print("self.patch_matrix.repeat(bt*c, 1, 1)x", self.patch_matrix.repeat(bt*c, 1, 1).shape)
input_tensor = input_tensor * self.patch_matrix.repeat(bt*c, 1, 1)
input_tensor = self.sigmoid(input_tensor).view(bt, c, h, w)
# print("input_tensor")
# print("self.input_tensor", input_tensor.shape)
return input_tensor
class SpatialSELayer(nn.Module):
"""
Re-implementation of SE block -- squeezing spatially and exciting channel-wise described in:
    *Roy et al., Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks, MICCAI 2018*
"""
def __init__(self, num_channels):
"""
:param num_channels: No of input channels
"""
super(SpatialSELayer, self).__init__()
self.conv = nn.Conv2d(num_channels, 1, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor, weights=None):
"""
:param weights: weights for few shot learning
:param input_tensor: X, shape = (batch_size, num_channels, H, W)
:return: output_tensor
"""
# spatial squeeze
batch_size, channel, a, b = input_tensor.size()
# print("input_tensor.size()", input_tensor.size()) #2, 512, 126, 126
if weights is not None:
weights = torch.mean(weights, dim=0)
weights = weights.view(1, channel, 1, 1)
out = F.conv2d(input_tensor, weights)
else:
out = self.conv(input_tensor)
# print("out.size()", out.size()) #2, 1, 126, 126
squeeze_tensor = self.sigmoid(out)
# print("squeeze_tensor.size()", squeeze_tensor.size()) # 2, 1, 126, 126
# spatial excitation
squeeze_tensor = squeeze_tensor.view(batch_size, 1, a, b)
# print("squeeze_tensor 2.size()", squeeze_tensor.size()) # 2, 1, 126, 126
output_tensor = torch.mul(input_tensor, squeeze_tensor)
# print("output_tensor 2.size()", output_tensor.size()) #2, 512, 126, 126
#output_tensor = torch.mul(input_tensor, squeeze_tensor)
return output_tensor
class ChannelSELayer(nn.Module):
"""
Re-implementation of Squeeze-and-Excitation (SE) block described in:
    *Hu et al., Squeeze-and-Excitation Networks, arXiv:1709.01507*
"""
def __init__(self, num_channels, reduction_ratio=2):
"""
:param num_channels: No of input channels
:param reduction_ratio: By how much should the num_channels should be reduced
"""
super(ChannelSELayer, self).__init__()
num_channels_reduced = num_channels // reduction_ratio
self.reduction_ratio = reduction_ratio
self.fc1 = nn.Linear(num_channels, num_channels_reduced, bias=True)
self.fc2 = nn.Linear(num_channels_reduced, num_channels, bias=True)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, input_tensor):
"""
:param input_tensor: X, shape = (batch_size, num_channels, H, W)
:return: output tensor
"""
batch_size, num_channels, H, W = input_tensor.size() #2, 512, 126, 126
# Average along each channel
squeeze_tensor = input_tensor.view(batch_size, num_channels, -1).mean(dim=2) #2, 512, 126*126(1)
# channel excitation
fc_out_1 = self.relu(self.fc1(squeeze_tensor))
fc_out_2 = self.sigmoid(self.fc2(fc_out_1))
a, b = squeeze_tensor.size()
output_tensor = torch.mul(input_tensor, fc_out_2.view(a, b, 1, 1))
return output_tensor
class LocalDomainClassifier(nn.Module):
def __init__(self, input_channels=256, context=False):
super(LocalDomainClassifier, self).__init__()
self.conv1 = nn.Conv2d(input_channels, 256, kernel_size=1, stride=1,
padding=0, bias=False)
self.conv2 = nn.Conv2d(256, 128, kernel_size=1, stride=1,
padding=0, bias=False)
self.conv3 = nn.Conv2d(128, 1, kernel_size=1, stride=1,
padding=0, bias=False)
self.context = context
self._init_weights()
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
            weight initializer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
#m.bias.data.zero_()
normal_init(self.conv1, 0, 0.01)
normal_init(self.conv2, 0, 0.01)
normal_init(self.conv3, 0, 0.01)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
if self.context:
feat = F.avg_pool2d(x, (x.size(2), x.size(3)))
x = self.conv3(x)
return F.sigmoid(x),feat
else:
x = self.conv3(x)
return F.sigmoid(x)
class AnchorHeadSingleRangeNewConvDom(AnchorHeadTemplate):
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range,
predict_boxes_when_training=True, nusc=False, fpn_layers=[], **kwargs):
super().__init__(
model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range,
predict_boxes_when_training=predict_boxes_when_training, nusc=nusc, fpn_layers=fpn_layers
)
self.num_anchors_per_location = sum(self.num_anchors_per_location)
self.voxel_det_seconv_attention = self.model_cfg.get('VOXEL_DET_SECONV_ATTENTION', False)
self.voxel_det_se_attention = self.model_cfg.get('VOXEL_DET_SE_ATTENTION', False)
self.voxel_det_patch_attention = self.model_cfg.get('VOXEL_DET_PATCH_ATTENTION', False)
self.voxel_dom_seconv_attention = self.model_cfg.get('VOXEL_DOM_SECONV_ATTENTION', False)
self.voxel_dom_se_attention = self.model_cfg.get('VOXEL_DOM_SE_ATTENTION', False)
self.voxel_dom_patch_attention = self.model_cfg.get('VOXEL_DOM_PATCH_ATTENTION', False)
self.voxel_dom_rangeinterval_attention = self.model_cfg.get('VOXEL_DOM_RANGEINTERVAL_ATTENTION', False)
self.voxel_dom_roadinterval_attention = self.model_cfg.get('VOXEL_DOM_ROADINTERVAL_ATTENTION', False)
self.joint_attention = self.model_cfg.get('VOXEL_DETDOM_JOINT_ATTENTION', False)
self.dom_patch_first = self.model_cfg.get('DOM_PATCH_FIRST', False)
if self.range_guidance:
if self.range_guidance_dom_only:
input_channels_dom = input_channels + 2
else:
input_channels = input_channels + 2
input_channels_dom = input_channels
else:
input_channels_dom = input_channels
self.conv_cls = nn.Conv2d(
input_channels, self.num_anchors_per_location * self.num_class,
kernel_size=1
)
self.conv_box = nn.Conv2d(
input_channels, self.num_anchors_per_location * self.box_coder.code_size,
kernel_size=1
)
self.rangeinv = self.model_cfg.get('RANGE_INV', False)
self.keep_x = self.model_cfg.get('KEEP_X', False)
self.keep_y = self.model_cfg.get('KEEP_Y', False)
self.keep_xy = self.model_cfg.get('KEEP_XY', False)
self.center_xy = self.model_cfg.get('CENTER_XY', False)
self.zeroone_prior = self.model_cfg.get('ZEROONE_PRIOR', False)
self.rm_thresh = self.model_cfg.get('RM_THRESH', 0)
if self.rangeinv:
self.conv_range = nn.Conv2d(
input_channels, 1,
kernel_size=1
)
#nn.Sequential(
if self.voxel_det_seconv_attention and not self.joint_attention:
self.att_spatial_se_layer_det = SpatialSELayer(512)
if self.voxel_det_se_attention and not self.joint_attention:
self.att_se_layer_det = ChannelSELayer(512)
if self.voxel_det_patch_attention and not self.joint_attention:
self.att_patch_layer_det = LocationAttentionLayer(512, self.model_cfg.PATCH_SIZE, prior=self.zeroone_prior)
###################
if self.voxel_dom_seconv_attention:
self.att_spatial_se_layer = SpatialSELayer(512)
if self.voxel_dom_se_attention:
self.att_se_layer = ChannelSELayer(512)
if self.voxel_dom_patch_attention:
self.att_patch_layer = LocationAttentionLayer(512, self.model_cfg.PATCH_SIZE, prior=self.zeroone_prior)
if self.voxel_dom_rangeinterval_attention:
self.att_rangeinterval_layer = RangeIntervalAttentionLayer(512, self.model_cfg.PATCH_SIZE, division=self.model_cfg.get('RANGE_INTERVAL_DIVISION', 6), prior=self.zeroone_prior)
if self.voxel_dom_roadinterval_attention:
self.att_roadinterval_layer = RoadIntervalAttentionLayer(512, self.model_cfg.PATCH_SIZE, division=self.model_cfg.get('ROAD_INTERVAL_DIVISION', 6), prior=self.zeroone_prior)
if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None:
self.conv_dir_cls = nn.Conv2d(
input_channels,
self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS,
kernel_size=1
)
else:
self.conv_dir_cls = None
dom_fc1, dom_fc2 = self.model_cfg.get('DOM_FC', [1024, 1024])
# print("dom_fc ", dom_fc1, dom_fc2)
# if self.model_cfg.get('USE_DOMAIN_CLASSIFIER', None) is not None:
if self.range_da > 0:
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.domain_classifier_range = nn.ModuleDict()
for n in range(0+self.remove_near_range, self.range_da-self.remove_far_range):
self.domain_classifier_range[str(n)] = nn.Sequential(nn.Linear(input_channels, dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
if self.keep_xy:
self.domain_classifier_range2 = nn.ModuleDict()
for n in range(0+self.remove_near_range2, self.range_da-self.remove_far_range2):
self.domain_classifier_range2[str(n)] = nn.Sequential(nn.Linear(input_channels, dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
elif self.interval_da > 0:
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.domain_classifier_interval = nn.ModuleDict()
for n in range(self.interval_da):
self.domain_classifier_interval[str(n)] = nn.Sequential(nn.Linear(input_channels, dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
elif self.range_guidance_conv_dom:
self.conv_dom_layers = self.make_conv_layers(
conv_cfg=self.model_cfg.LOCAL_DOM_FC,
input_channels=input_channels_dom,
output_channels=1
)
if self.range_guidance_double_dom:
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.domain_classifier = nn.Sequential(nn.Linear(input_channels_dom, dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
elif self.range_guidance_new_conv_dom:
print("input_channels_dom", input_channels_dom)
self.conv_dom_layers = LocalDomainClassifier(input_channels=input_channels_dom, context=self.range_guidance_new_conv_dom_context) #
# elif self.range_guidance_pixelfc_dom:
# # for i in range()
# self.pixelfc_layers = nn.ModuleList()
# # for i in range(self.model_cfg.PATCH_SIZE):
# self.make_fc_layers(
# conv_cfg=self.model_cfg.LOCAL_DOM_FC,
# input_channels=input_channels_dom,
# output_channels=1
# )
else:
self.domain_pool = nn.AdaptiveAvgPool2d(1)
self.domain_classifier = nn.Sequential(nn.Linear(input_channels_dom, dom_fc1),
nn.ReLU(True), nn.Dropout(),
nn.Linear(dom_fc1, dom_fc2), nn.ReLU(True),
nn.Dropout(), nn.Linear(dom_fc2, 1))
self.init_weights()
def init_weights(self):
pi = 0.01
nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi))
nn.init.normal_(self.conv_box.weight, mean=0, std=0.001)
def local_attention(self, features, d):
# features.size() = [1, 256, h, w]
# d.size() = [1, 1, h, w] after sigmoid
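        # H is the binary entropy of the per-pixel domain prediction: low where
        # the domain classifier is confident, so w = 1 - H up-weights the
        # domain-discriminative locations.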
d = d.clamp(1e-6, 1)
H = - ( d * d.log() + (1-d) * (1-d).log() )
w = 1 - H
features_new = (1 + w) * features
return features_new
def forward(self, data_dict):
t_mode = data_dict['t_mode']
l = data_dict['l']
if 'pseudo' in t_mode:
pseudo = True
else:
pseudo = False
spatial_features_2d = data_dict['spatial_features_2d']
if t_mode == 'tsne':
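            # t-SNE probe mode: pool features from symmetric range bands
            # around the BEV center (x- and y-aligned) and stash them in
            # data_dict for offline embedding; the detection heads are skipped.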
self.range_da = 2
mid_dim = int(spatial_features_2d.shape[-1]/2.)
range_interval = int(spatial_features_2d.shape[-1]/(2*self.range_da))
start_dim = {}
mid1_dim = {}
mid2_dim = {}
end_dim = {}
interval_idx = {}
interval_feat = {}
if self.keep_xy:
interval_feat2 = {}
# for each range 0,1,2,3 (4)
for n in range(0+self.remove_near_range, self.range_da-self.remove_far_range): # no0,1
start_dim[n] = mid_dim - range_interval*(n+1) # 2-1=1, 2-2=0
mid1_dim[n] = mid_dim - range_interval*n # 2-0=2 2-1=1 #int(spatial_features_2d.shape[-1]/2.)
mid2_dim[n] = mid_dim + range_interval*n # 2+0=2 2+1=3
end_dim[n] = mid_dim + range_interval*(n+1) # 2+1=3 2+2=4
interval_idx[n] = torch.LongTensor([i for i in range(start_dim[n], mid1_dim[n])]+[i for i in range(mid2_dim[n], end_dim[n])])
feat1 = spatial_features_2d[:,:,:,interval_idx[n]]
feat1 = self.domain_pool(feat1).view(feat1.size(0), -1)
data_dict[f'spatial_features_2d_x_{n}'] = feat1
feat2 = spatial_features_2d[:,:,interval_idx[n],:]
feat2 = self.domain_pool(feat2).view(feat2.size(0), -1)
data_dict[f'spatial_features_2d_y_{n}'] = feat2
return data_dict
###########################
if self.range_guidance and not self.range_guidance_dom_only:
total_range = spatial_features_2d.shape[-1]
half_range = int(spatial_features_2d.shape[-1] * 0.5)
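            # Build |x| and |y| distance-from-center maps (one value per BEV
            # cell) and append them to the features as two extra channels.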
x_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range, 1).cuda()
# print("x_range", x_range)
y_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range).cuda()
spatial_features_2d = torch.cat((spatial_features_2d, x_range, y_range), dim=1)
# print("spatial_features_2d", spatial_features_2d.shape)
if 'dom_img' in t_mode:
if t_mode == 'dom_img_src':
dom_src = True
elif t_mode == 'dom_img_tgt':
dom_src = False
else:
dom_src = None
#################### PATCH EARLY
if self.voxel_dom_patch_attention and self.dom_patch_first:
spatial_features_2d = self.att_patch_layer(spatial_features_2d)
if self.voxel_dom_rangeinterval_attention and self.dom_patch_first:
spatial_features_2d = self.att_rangeinterval_layer(spatial_features_2d)
if self.voxel_dom_roadinterval_attention and self.dom_patch_first:
spatial_features_2d = self.att_roadinterval_layer(spatial_features_2d)
#################### PATCH LATE
if self.voxel_dom_patch_attention and not self.dom_patch_first:
spatial_features_2d = self.att_patch_layer(spatial_features_2d)
if self.voxel_dom_rangeinterval_attention and not self.dom_patch_first:
spatial_features_2d = self.att_rangeinterval_layer(spatial_features_2d)
if self.voxel_dom_roadinterval_attention and not self.dom_patch_first:
spatial_features_2d = self.att_roadinterval_layer(spatial_features_2d)
####################
if self.range_guidance and self.range_guidance_dom_only:
total_range = spatial_features_2d.shape[-1]
half_range = int(spatial_features_2d.shape[-1] * 0.5)
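                # Same |x|/|y| distance-from-center channels as above, but
                # appended only for the domain branch.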
x_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1, total_range, 1).cuda()
y_range = torch.abs(torch.arange(-half_range, half_range, 1).float() + 0.5).unsqueeze(-1).unsqueeze(0).unsqueeze(0).repeat(spatial_features_2d.shape[0],1,1,total_range).cuda()
spatial_features_2d = torch.cat((spatial_features_2d, x_range, y_range), dim=1)
if self.range_guidance_conv_dom or self.range_guidance_new_conv_dom:
# x_pool = self.domain_pool().view(spatial_features_2d.size(0), -1)
# print('t_mode', t_mode)
# print("l", l)
if self.range_guidance_new_conv_dom_attention:
x_reverse = grad_reverse(spatial_features_2d, l*-1)
if self.range_guidance_new_conv_dom_context:
dom_img_preds, _ = self.conv_dom_layers(x_reverse)
#print(d_pixel)
# if not target:
_, feat_pixel = self.conv_dom_layers(spatial_features_2d.detach())
else:
dom_img_preds = self.conv_dom_layers(x_reverse)
spatial_features_2d = self.local_attention(spatial_features_2d, dom_img_preds.detach())
else:
x_reverse = grad_reverse(spatial_features_2d, l*-1)
dom_img_preds = self.conv_dom_layers(x_reverse)
if self.range_guidance_double_dom:
x_pool2 = self.domain_pool(spatial_features_2d).view(spatial_features_2d.size(0), -1)
x_reverse2 = grad_reverse(x_pool2, l*-1)
# print("x_reverse2", x_reverse2.shape)
dom_img_preds2 = self.domain_classifier(x_reverse2)#.squeeze(-1)
else:
x_pool = self.domain_pool(spatial_features_2d).view(spatial_features_2d.size(0), -1)
x_reverse = grad_reverse(x_pool, l*-1)
dom_img_preds = self.domain_classifier(x_reverse)#.squeeze(-1)
# print("dom_img_preds", dom_img_preds.shape)
if self.dom_squeeze:
dom_img_preds = dom_img_preds.squeeze(-1)
if self.range_guidance_double_dom:
dom_img_preds2 = dom_img_preds2.squeeze(-1)
self.forward_ret_dict['dom_img_preds'] = dom_img_preds
if self.range_guidance_double_dom:
self.forward_ret_dict['dom_img_preds2'] = dom_img_preds2
if self.training:
targets_dict_dom = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
dom_src=dom_src,
pseudo=pseudo
)
# if self.range_guidance_conv_dom:
# targets_dict_dom['dom_labels']
self.forward_ret_dict.update(targets_dict_dom)
if 'det' not in t_mode:
return data_dict
if self.joint_attention:
if self.voxel_det_seconv_attention and self.voxel_det_se_attention:
spatial_features_2d = torch.max(self.att_spatial_se_layer(spatial_features_2d), self.att_se_layer(spatial_features_2d))
# spatial_features_2d_det = spatial_features_2d
elif self.voxel_det_seconv_attention:
# print("spatial_features_2d before", spatial_features_2d.shape)
spatial_features_2d = self.att_spatial_se_layer(spatial_features_2d)
# spatial_features_2d_det = spatial_features_2d
elif self.voxel_det_se_attention:
spatial_features_2d = self.att_se_layer(spatial_features_2d)
# spatial_features_2d_det = spatial_features_2d
# else:
spatial_features_2d_det = spatial_features_2d
else:
if self.voxel_det_seconv_attention and self.voxel_det_se_attention:
spatial_features_2d_out = torch.max(self.att_spatial_se_layer_det(spatial_features_2d), self.att_se_layer_det(spatial_features_2d))
spatial_features_2d_det = spatial_features_2d_out
elif self.voxel_det_seconv_attention:
# print("spatial_features_2d before", spatial_features_2d.shape)
spatial_features_2d_det = self.att_spatial_se_layer_det(spatial_features_2d)
elif self.voxel_det_se_attention:
spatial_features_2d_det = self.att_se_layer_det(spatial_features_2d)
else:
spatial_features_2d_det = spatial_features_2d
# print("spatial_features_2d", spatial_features_2d.shape)
cls_preds = self.conv_cls(spatial_features_2d_det)
box_preds = self.conv_box(spatial_features_2d_det)
cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C]
self.forward_ret_dict['cls_preds'] = cls_preds
self.forward_ret_dict['box_preds'] = box_preds
if self.conv_dir_cls is not None:
dir_cls_preds = self.conv_dir_cls(spatial_features_2d_det)
dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous()
self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds
else:
dir_cls_preds = None
if self.training:
if pseudo:
pseudo_weights = data_dict['pseudo_weights']
else:
pseudo_weights = None
# print("gt_classes", data_dict['gt_classes'].shape)
# print("gt_classes", data_dict['gt_classes'])
# print("pseudo_weights", pseudo_weights)
targets_dict = self.assign_targets(
gt_boxes=data_dict['gt_boxes'],
pseudo=pseudo,
pseudo_weights=pseudo_weights
)
self.forward_ret_dict.update(targets_dict)
if not self.training or self.predict_boxes_when_training:
batch_cls_preds, batch_box_preds = self.generate_predicted_boxes(
batch_size=data_dict['batch_size'],
cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds
)
data_dict['batch_cls_preds'] = batch_cls_preds
data_dict['batch_box_preds'] = batch_box_preds
data_dict['cls_preds_normalized'] = False
if self.rangeinv:
# print("spatial_features_2d", spatial_features_2d.shape) #512,128,128
thresh = self.rm_thresh
start_dim = int(spatial_features_2d.shape[-1]/4.)
mid_dim = int(spatial_features_2d.shape[-1]/2.)
end_dim = start_dim+int(spatial_features_2d.shape[-1]/2.)
near_idx = torch.LongTensor([i for i in range(start_dim, mid_dim-thresh)]+[i for i in range(mid_dim+thresh, end_dim)])
far_idx = torch.LongTensor([i for i in range(start_dim)]+[i for i in range(end_dim, spatial_features_2d.shape[-1])])
if self.keep_x:
near_feat_2d = spatial_features_2d[:,:,:,near_idx]
far_feat_2d = spatial_features_2d[:,:,:, far_idx]
elif self.keep_y:
near_feat_2d = spatial_features_2d[:,:,near_idx,:]
far_feat_2d = spatial_features_2d[:,:,far_idx,:]
near_feat_2d_reverse = grad_reverse(near_feat_2d, l*-1)
range_pred_near = self.conv_range(near_feat_2d_reverse)
# print("near_range_pred", near_range_pred.shape)
far_feat_2d_reverse = grad_reverse(far_feat_2d, l*-1)
range_pred_far = self.conv_range(far_feat_2d_reverse)
# print("far_range_pred", far_range_pred.shape)
range_labels_near = torch.ones((range_pred_near.shape), dtype=torch.float32, device=spatial_features_2d.device)
range_labels_far = torch.zeros((range_pred_far.shape), dtype=torch.float32, device=spatial_features_2d.device)
targets_dict_range = {
'range_pred_near': range_pred_near,
'range_pred_far': range_pred_far,
'range_labels_near': range_labels_near,
'range_labels_far': range_labels_far,
}
self.forward_ret_dict.update(targets_dict_range)
return data_dict
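
# Minimal config sketch (hypothetical values, matching the model_cfg.get(...)
# keys read above) for enabling patch attention on the domain branch plus the
# direction classifier:
#
#   VOXEL_DOM_PATCH_ATTENTION: True
#   PATCH_SIZE: 128
#   DOM_FC: [1024, 1024]
#   USE_DIRECTION_CLASSIFIER: True
#   NUM_DIR_BINS: 2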
|
[
"torch.nn.Dropout",
"torch.cat",
"torch.randn",
"torch.nn.ModuleDict",
"torch.arange",
"torch.nn.functional.sigmoid",
"torch.ones",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Parameter",
"torch.mean",
"torch.nn.Conv2d",
"torch.nn.functional.conv2d",
"torch.mul",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU",
"numpy.log",
"torch.nn.init.normal_",
"torch.tensor"
] |
[((941, 980), 'torch.nn.Parameter', 'nn.Parameter', (['param'], {'requires_grad': '(True)'}), '(param, requires_grad=True)\n', (953, 980), True, 'import torch.nn as nn\n'), ((1096, 1108), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1106, 1108), True, 'import torch.nn as nn\n'), ((2171, 2210), 'torch.nn.Parameter', 'nn.Parameter', (['param'], {'requires_grad': '(True)'}), '(param, requires_grad=True)\n', (2183, 2210), True, 'import torch.nn as nn\n'), ((2234, 2246), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2244, 2246), True, 'import torch.nn as nn\n'), ((3220, 3232), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3230, 3232), True, 'import torch.nn as nn\n'), ((4426, 4455), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_channels', '(1)', '(1)'], {}), '(num_channels, 1, 1)\n', (4435, 4455), True, 'import torch.nn as nn\n'), ((4479, 4491), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4489, 4491), True, 'import torch.nn as nn\n'), ((5514, 5553), 'torch.mul', 'torch.mul', (['input_tensor', 'squeeze_tensor'], {}), '(input_tensor, squeeze_tensor)\n', (5523, 5553), False, 'import torch\n'), ((6316, 6372), 'torch.nn.Linear', 'nn.Linear', (['num_channels', 'num_channels_reduced'], {'bias': '(True)'}), '(num_channels, num_channels_reduced, bias=True)\n', (6325, 6372), True, 'import torch.nn as nn\n'), ((6392, 6448), 'torch.nn.Linear', 'nn.Linear', (['num_channels_reduced', 'num_channels'], {'bias': '(True)'}), '(num_channels_reduced, num_channels, bias=True)\n', (6401, 6448), True, 'import torch.nn as nn\n'), ((6469, 6478), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6476, 6478), True, 'import torch.nn as nn\n'), ((6502, 6514), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6512, 6514), True, 'import torch.nn as nn\n'), ((7356, 7434), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(input_channels, 256, kernel_size=1, stride=1, padding=0, bias=False)\n', (7365, 7434), True, 'import torch.nn as nn\n'), ((7474, 7541), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(256, 128, kernel_size=1, stride=1, padding=0, bias=False)\n', (7483, 7541), True, 'import torch.nn as nn\n'), ((7594, 7659), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(1)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(128, 1, kernel_size=1, stride=1, padding=0, bias=False)\n', (7603, 7659), True, 'import torch.nn as nn\n'), ((10551, 10643), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(self.num_anchors_per_location * self.num_class)'], {'kernel_size': '(1)'}), '(input_channels, self.num_anchors_per_location * self.num_class,\n kernel_size=1)\n', (10560, 10643), True, 'import torch.nn as nn\n'), ((10698, 10801), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(self.num_anchors_per_location * self.box_coder.code_size)'], {'kernel_size': '(1)'}), '(input_channels, self.num_anchors_per_location * self.box_coder.\n code_size, kernel_size=1)\n', (10707, 10801), True, 'import torch.nn as nn\n'), ((16972, 17028), 'torch.nn.init.normal_', 'nn.init.normal_', (['self.conv_box.weight'], {'mean': '(0)', 'std': '(0.001)'}), '(self.conv_box.weight, mean=0, std=0.001)\n', (16987, 17028), True, 'import torch.nn as nn\n'), ((821, 850), 'torch.cat', 'torch.cat', (['param_list'], {'dim': '(-2)'}), '(param_list, dim=-2)\n', (830, 850), False, 'import torch\n'), ((885, 912), 'torch.randn', 'torch.randn', (['(1)', 'division', '(1)'], {}), '(1, division, 1)\n', (896, 912), False, 'import torch\n'), ((2051, 2080), 'torch.cat', 'torch.cat', (['param_list'], {'dim': '(-1)'}), '(param_list, dim=-1)\n', (2060, 2080), False, 'import torch\n'), ((2115, 2142), 'torch.randn', 'torch.randn', (['(1)', '(1)', 'division'], {}), '(1, 1, division)\n', (2126, 2142), False, 'import torch\n'), ((3054, 3094), 'torch.randn', 'torch.randn', (['(1)', 'kernel_size', 'kernel_size'], {}), '(1, kernel_size, kernel_size)\n', (3065, 3094), False, 'import torch\n'), ((4942, 4968), 'torch.mean', 'torch.mean', (['weights'], {'dim': '(0)'}), '(weights, dim=0)\n', (4952, 4968), False, 'import torch\n'), ((5040, 5071), 'torch.nn.functional.conv2d', 'F.conv2d', (['input_tensor', 'weights'], {}), '(input_tensor, weights)\n', (5048, 5071), True, 'import torch.nn.functional as F\n'), ((8633, 8645), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['x'], {}), '(x)\n', (8642, 8645), True, 'import torch.nn.functional as F\n'), ((11325, 11368), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(1)'], {'kernel_size': '(1)'}), '(input_channels, 1, kernel_size=1)\n', (11334, 11368), True, 'import torch.nn as nn\n'), ((12873, 12979), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channels', '(self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS)'], {'kernel_size': '(1)'}), '(input_channels, self.num_anchors_per_location * self.model_cfg.\n NUM_DIR_BINS, kernel_size=1)\n', (12882, 12979), True, 'import torch.nn as nn\n'), ((13342, 13365), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (13362, 13365), True, 'import torch.nn as nn\n'), ((13409, 13424), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (13422, 13424), True, 'import torch.nn as nn\n'), ((19772, 19829), 'torch.cat', 'torch.cat', (['(spatial_features_2d, x_range, y_range)'], {'dim': '(1)'}), '((spatial_features_2d, x_range, y_range), dim=1)\n', (19781, 19829), False, 'import torch\n'), ((29305, 29399), 'torch.ones', 'torch.ones', (['range_pred_near.shape'], {'dtype': 'torch.float32', 'device': 'spatial_features_2d.device'}), '(range_pred_near.shape, dtype=torch.float32, device=\n spatial_features_2d.device)\n', (29315, 29399), False, 'import torch\n'), ((29429, 29523), 'torch.zeros', 'torch.zeros', (['range_pred_far.shape'], {'dtype': 'torch.float32', 'device': 'spatial_features_2d.device'}), '(range_pred_far.shape, dtype=torch.float32, device=\n spatial_features_2d.device)\n', (29440, 29523), False, 'import torch\n'), ((8552, 8564), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['x'], {}), '(x)\n', (8561, 8564), True, 'import torch.nn.functional as F\n'), ((13964, 13979), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (13977, 13979), True, 'import torch.nn as nn\n'), ((14532, 14555), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (14552, 14555), True, 'import torch.nn as nn\n'), ((14602, 14617), 'torch.nn.ModuleDict', 'nn.ModuleDict', ([], {}), '()\n', (14615, 14617), True, 'import torch.nn as nn\n'), ((16941, 16962), 'numpy.log', 'np.log', (['((1 - pi) / pi)'], {}), '((1 - pi) / pi)\n', (16947, 16962), True, 'import numpy as np\n'), ((21865, 21922), 'torch.cat', 'torch.cat', (['(spatial_features_2d, x_range, y_range)'], {'dim': '(1)'}), '((spatial_features_2d, x_range, y_range), dim=1)\n', (21874, 21922), False, 'import torch\n'), ((742, 801), 'torch.tensor', 'torch.tensor', (['[[[i * (division - 1)]]]'], {'dtype': 'torch.float32'}), '([[[i * (division - 1)]]], dtype=torch.float32)\n', (754, 801), False, 'import torch\n'), ((1972, 2031), 'torch.tensor', 'torch.tensor', (['[[[i * (division - 1)]]]'], {'dtype': 'torch.float32'}), '([[[i * (division - 1)]]], dtype=torch.float32)\n', (1984, 2031), False, 'import torch\n'), ((13585, 13619), 'torch.nn.Linear', 'nn.Linear', (['input_channels', 'dom_fc1'], {}), '(input_channels, dom_fc1)\n', (13594, 13619), True, 'import torch.nn as nn\n'), ((13673, 13686), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13680, 13686), True, 'import torch.nn as nn\n'), ((13688, 13700), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (13698, 13700), True, 'import torch.nn as nn\n'), ((13754, 13781), 'torch.nn.Linear', 'nn.Linear', (['dom_fc1', 'dom_fc2'], {}), '(dom_fc1, dom_fc2)\n', (13763, 13781), True, 'import torch.nn as nn\n'), ((13783, 13796), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13790, 13796), True, 'import torch.nn as nn\n'), ((13850, 13862), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (13860, 13862), True, 'import torch.nn as nn\n'), ((13864, 13885), 'torch.nn.Linear', 'nn.Linear', (['dom_fc2', '(1)'], {}), '(dom_fc2, 1)\n', (13873, 13885), True, 'import torch.nn as nn\n'), ((14151, 14185), 'torch.nn.Linear', 'nn.Linear', (['input_channels', 'dom_fc1'], {}), '(input_channels, dom_fc1)\n', (14160, 14185), True, 'import torch.nn as nn\n'), ((14243, 14256), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (14250, 14256), True, 'import torch.nn as nn\n'), ((14258, 14270), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (14268, 14270), True, 'import torch.nn as nn\n'), ((14328, 14355), 'torch.nn.Linear', 'nn.Linear', (['dom_fc1', 'dom_fc2'], {}), '(dom_fc1, dom_fc2)\n', (14337, 14355), True, 'import torch.nn as nn\n'), ((14357, 14370), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (14364, 14370), True, 'import torch.nn as nn\n'), ((14428, 14440), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (14438, 14440), True, 'import torch.nn as nn\n'), ((14442, 14463), 'torch.nn.Linear', 'nn.Linear', (['dom_fc2', '(1)'], {}), '(dom_fc2, 1)\n', (14451, 14463), True, 'import torch.nn as nn\n'), ((14736, 14770), 'torch.nn.Linear', 'nn.Linear', (['input_channels', 'dom_fc1'], {}), '(input_channels, dom_fc1)\n', (14745, 14770), True, 'import torch.nn as nn\n'), ((14824, 14837), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (14831, 14837), True, 'import torch.nn as nn\n'), ((14839, 14851), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (14849, 14851), True, 'import torch.nn as nn\n'), ((14905, 14932), 'torch.nn.Linear', 'nn.Linear', (['dom_fc1', 'dom_fc2'], {}), '(dom_fc1, dom_fc2)\n', (14914, 14932), True, 'import torch.nn as nn\n'), ((14934, 14947), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (14941, 14947), True, 'import torch.nn as nn\n'), ((15001, 15013), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (15011, 15013), True, 'import torch.nn as nn\n'), ((15015, 15036), 'torch.nn.Linear', 'nn.Linear', (['dom_fc2', '(1)'], {}), '(dom_fc2, 1)\n', (15024, 15036), True, 'import torch.nn as nn\n'), ((15375, 15398), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (15395, 15398), True, 'import torch.nn as nn\n'), ((16449, 16472), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (16469, 16472), True, 'import torch.nn as nn\n'), ((15454, 15492), 'torch.nn.Linear', 'nn.Linear', (['input_channels_dom', 'dom_fc1'], {}), '(input_channels_dom, dom_fc1)\n', (15463, 15492), True, 'import torch.nn as nn\n'), ((15546, 15559), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (15553, 15559), True, 'import torch.nn as nn\n'), ((15561, 15573), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (15571, 15573), True, 'import torch.nn as nn\n'), ((15627, 15654), 'torch.nn.Linear', 'nn.Linear', (['dom_fc1', 'dom_fc2'], {}), '(dom_fc1, dom_fc2)\n', (15636, 15654), True, 'import torch.nn as nn\n'), ((15656, 15669), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (15663, 15669), True, 'import torch.nn as nn\n'), ((15723, 15735), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (15733, 15735), True, 'import torch.nn as nn\n'), ((15737, 15758), 'torch.nn.Linear', 'nn.Linear', (['dom_fc2', '(1)'], {}), '(dom_fc2, 1)\n', (15746, 15758), True, 'import torch.nn as nn\n'), ((16524, 16562), 'torch.nn.Linear', 'nn.Linear', (['input_channels_dom', 'dom_fc1'], {}), '(input_channels_dom, dom_fc1)\n', (16533, 16562), True, 'import torch.nn as nn\n'), ((16612, 16625), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (16619, 16625), True, 'import torch.nn as nn\n'), ((16627, 16639), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (16637, 16639), True, 'import torch.nn as nn\n'), ((16689, 16716), 'torch.nn.Linear', 'nn.Linear', (['dom_fc1', 'dom_fc2'], {}), '(dom_fc1, dom_fc2)\n', (16698, 16716), True, 'import torch.nn as nn\n'), ((16718, 16731), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (16725, 16731), True, 'import torch.nn as nn\n'), ((16781, 16793), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (16791, 16793), True, 'import torch.nn as nn\n'), ((16795, 16816), 'torch.nn.Linear', 'nn.Linear', (['dom_fc2', '(1)'], {}), '(dom_fc2, 1)\n', (16804, 16816), True, 'import torch.nn as nn\n'), ((19352, 19392), 'torch.arange', 'torch.arange', (['(-half_range)', 'half_range', '(1)'], {}), '(-half_range, half_range, 1)\n', (19364, 19392), False, 'import torch\n'), ((19581, 19621), 'torch.arange', 'torch.arange', (['(-half_range)', 'half_range', '(1)'], {}), '(-half_range, half_range, 1)\n', (19593, 19621), False, 'import torch\n'), ((21478, 21518), 'torch.arange', 'torch.arange', (['(-half_range)', 'half_range', '(1)'], {}), '(-half_range, half_range, 1)\n', (21490, 21518), False, 'import torch\n'), ((21671, 21711), 'torch.arange', 'torch.arange', (['(-half_range)', 'half_range', '(1)'], {}), '(-half_range, half_range, 1)\n', (21683, 21711), False, 'import torch\n')]
|
"""
Script for extracting the ground plane from the KITTI dataset.
We need to determine the ground plane position and orientation in order to be able to reconstruct
points on it, which we are trying to detect.
We will collect all the points on the ground plane from the dataset and then fit a plane to them
with RANSAC.
----------------------------------------------------------------------------------------------------
python kitti_extract_ground_plane.py path_labels
----------------------------------------------------------------------------------------------------
"""
__date__ = '04/13/2017'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
import argparse
import os
import numpy as np
import random
# import matplotlib
# matplotlib.use('Agg') # Prevents from using X interface for plotting
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from shared.geometry import R3x3_y, t3x1, Rt4x4
####################################################################################################
# DEFINITIONS #
####################################################################################################
# Parameter for RANSAC
# Distance from the plane (in meters), which is considered as an inlier region
INLIER_THRESHOLD = 1.0
# Number of estimation iterations carried out by RANSAC
RANSAC_ITERS = 10000
####################################################################################################
# FUNCTIONS #
####################################################################################################
def plane_3p(p1, p2, p3):
"""
Computes the equation of a plane passing through the 3 given points.
Input:
p1, p2, p3: 3x1 np.matrix coordinates of points in the plane
Returns:
[a, b, c, d] coefficients as a 1x4 np.matrix
"""
	l1 = p2 - p1
	l2 = p3 - p1
	normal = np.cross(l1, l2, axis=0)
	norm_len = np.linalg.norm(normal)
	if norm_len < 1e-12:
		# Degenerate (collinear) points - return a plane with a nonzero residual
		# everywhere so that the RANSAC precision check rejects this sample
		return np.asmatrix([0.0, 0.0, 0.0, 1.0])
	# Normalize the normal so that [a, b, c, d] . [x, y, z, 1]^T is a true
	# point-to-plane distance (in meters), comparable across RANSAC hypotheses
	normal = normal / norm_len
	d = - (normal[0,0]*p1[0,0] + normal[1,0]*p1[1,0] + normal[2,0]*p1[2,0])
	return np.asmatrix([normal[0,0], normal[1,0], normal[2,0], d])
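# A quick sanity check of plane_3p (a hypothetical example, cheap enough to run at
# import time): the plane through (0,0,0), (1,0,0) and (0,0,1) is y = 0, so the
# returned coefficients should be [0, -1, 0, 0].
assert np.allclose(
	plane_3p(np.asmatrix([[0.], [0.], [0.]]),
	         np.asmatrix([[1.], [0.], [0.]]),
	         np.asmatrix([[0.], [0.], [1.]])),
	[0., -1., 0., 0.])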
def show_X_and_gp(gp_X_4xn, gp_1x4):
"""
Show a 3D plot of the estimated ground plane.
"""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal')
ax.scatter(np.array(gp_X_4xn[2,0:1000]), np.array(gp_X_4xn[0,0:1000]), np.array(-gp_X_4xn[1,0:1000]), color='red')
X = np.arange(-20, 20, 1)
Y = np.arange(-1, 10, 1)
X, Y = np.meshgrid(X, Y)
Z = - (gp_1x4[0,0]*X + gp_1x4[0,1]*Y + gp_1x4[0,3]) / gp_1x4[0,2]
ax.plot_surface(Z, X, -Y, linewidth=0, alpha=0.5, antialiased=True)
# Bounding box of the car
ax.plot([3,3,3,3,3], [1.5, 1.5, -1.5, -1.5, 1.5], [0,-1.9,-1.9,0,0], color='green')
ax.plot([-3,-3,-3,-3,-3], [1.5, 1.5, -1.5, -1.5, 1.5], [0,-1.9,-1.9,0,0], color='red')
ax.plot([3, -3], [1.5, 1.5], [0,0], color='blue')
ax.plot([3, -3], [1.5, 1.5], [-1.9,-1.9], color='blue')
ax.plot([3, -3], [-1.5, -1.5], [0,0], color='blue')
ax.plot([3, -3], [-1.5, -1.5], [-1.9,-1.9], color='blue')
ax.set_xlim(-100, 100)
ax.set_ylim(-100, 100)
ax.set_zlim(-100, 100)
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
plt.show()
####################################################################################################
# CLASSES #
####################################################################################################
class GroundPlaneEstimator(object):
"""
Takes care of the estimation of the ground plane position in the KITTI dataset.
"""
def __init__(self, path_labels):
"""
Input:
path_labels: Path to the "label_2" folder of the KITTI dataset
"""
super(GroundPlaneEstimator, self).__init__()
self.path_labels = path_labels
self.gp_points = []
def run_estimation(self):
"""
Runs the whole process of estimating the ground plane.
"""
print('-- ESTIMATING GROUND PLANE POSITION')
# Read label files and get all ground plane points
print('-- Reading label files')
self._read_label_files()
print('-- Label files contain ' + str(len(self.gp_points)) + ' points')
# Create a matrix from all the points for easier computation
self.gp_X_4xn = np.asmatrix(np.ones((4, len(self.gp_points))))
		for i in range(len(self.gp_points)):
self.gp_X_4xn[0:3,i] = self.gp_points[i]
# plt.scatter(self.gp_X_4xn[2,:], self.gp_X_4xn[1,:])
# plt.show()
# Run RANSAC on those points
print('-- Running RANSAC plane estimation')
self._ransac_plane()
def _read_label_files(self):
"""
Reads all label files and extract the points on the ground plane.
"""
filenames = [f for f in os.listdir(self.path_labels)
if os.path.isfile(os.path.join(self.path_labels, f))]
if len(filenames) != 7481:
print('Wrong number (%d) of files in the KITTI dataset! Should be 7481.'%(len(filenames)))
exit(1)
# Read each label file
# i = 0
for f in filenames:
path_label_file = os.path.join(self.path_labels, f)
self._process_label_file(path_label_file)
# i += 1
# if i == 1000: break
def _process_label_file(self, path_label_file):
"""
Processes one label file.
Input:
path_label_file: Path to the TXT label file in KITTI format to be processed.
"""
with open(path_label_file, 'r') as infile_label:
# Read the objects
for line in infile_label:
line = line.rstrip('\n')
data = line.split(' ')
# First element of the data is the label. We don't want to process 'Misc' and
# 'DontCare' labels
if data[0] == 'Misc' or data[0] == 'DontCare': continue
# Extract the points of this object on the ground plane
self._extract_ground_plane_pts(data)
def _extract_ground_plane_pts(self, data):
"""
Extract 3D points from the object bounding box, which lie on the ground plane.
Input:
data: One split line of the label file (line.split(' '))
"""
# Object dimensions
h = float(data[8])
w = float(data[9])
l = float(data[10])
# Position of the center point on the ground plane (xz plane)
cx = float(data[11])
cy = float(data[12])
cz = float(data[13])
# Rotation of the object around y
ry = float(data[14])
# 3D box corners on the ground plane. Careful, the coordinate system of the car is that
# x points forward, not z! (It is rotated by 90deg with respect to the camera one)
# fbr, rbr, fbl, rbl
X = np.asmatrix([[l/2, -l/2, l/2, -l/2],
[0, 0, 0, 0 ],
[-w/2, -w/2, w/2, w/2 ],
[1, 1, 1, 1 ]])
# Rotate the 3D box around y axis and translate it to the correct position in the cam. frame
X = Rt4x4(R3x3_y(ry), t3x1(cx, cy, cz)) * X
self.gp_points.append(X[0:3,0])
self.gp_points.append(X[0:3,1])
self.gp_points.append(X[0:3,2])
self.gp_points.append(X[0:3,3])
def _ransac_plane(self):
"""
Finds "optimal" ground plane position given the points.
Returns:
[a, b, c, d] plane equation ax+by+cz+d=0 coefficients as a 1x4 np.matrix
"""
num_points = len(self.gp_points)
# Variables for storing minimum distance sum from the estimated plane
		dist2_sum_min = np.inf
gp_1x4_max = np.asmatrix(np.zeros((1,4)))
for i in range(RANSAC_ITERS):
rp = random.sample(range(0, num_points), 3)
# Compute the equation of the ground plane
gp_1x4 = plane_3p(self.gp_points[rp[0]], self.gp_points[rp[1]], self.gp_points[rp[2]])
# Check that the plane gives small errors on the original points - when we have some
# close to singular situation we have to be careful
			# Use the absolute residual - points can lie on either side of the plane
			if abs(gp_1x4 * self.gp_X_4xn[:,rp[0]]) > 1e-9 or \
					abs(gp_1x4 * self.gp_X_4xn[:,rp[1]]) > 1e-9 or \
					abs(gp_1x4 * self.gp_X_4xn[:,rp[2]]) > 1e-9:
print('WARNING: Solution not precise, skipping...')
continue
# Compute the sum of distances from this plane
distances2 = np.power(gp_1x4 * self.gp_X_4xn, 2)
dist2_sum = np.sum(distances2, axis=1)
if dist2_sum[0,0] < dist2_sum_min:
print('New min distance sum: ' + str(dist2_sum[0,0]))
dist2_sum_min = dist2_sum[0,0]
gp_1x4_max = gp_1x4
print('-- RANSAC FINISHED')
print('Estimated ground plane: ' + str(gp_1x4_max))
print('Sum of distances: ' + str(dist2_sum_min) + ', ' + str(dist2_sum_min/num_points) + ' per point')
# Show a plot of the plane
show_X_and_gp(self.gp_X_4xn, gp_1x4_max)
return gp_1x4_max
####################################################################################################
# MAIN #
####################################################################################################
def parse_arguments():
"""
Parse input options of the script.
"""
parser = argparse.ArgumentParser(description='Convert KITTI label files into BBTXT.')
parser.add_argument('path_labels', metavar='path_labels', type=str,
help='Path to the "label_2" folder of the KITTI dataset')
args = parser.parse_args()
if not os.path.exists(args.path_labels):
print('Input path "%s" does not exist!'%(args.path_labels))
parser.print_help()
exit(1)
return args
def main():
args = parse_arguments()
gpe = GroundPlaneEstimator(args.path_labels)
gpe.run_estimation()
if __name__ == '__main__':
main()
|
[
"numpy.meshgrid",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.sum",
"shared.geometry.t3x1",
"numpy.power",
"numpy.cross",
"os.path.exists",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.asmatrix",
"numpy.arange",
"numpy.array",
"shared.geometry.R3x3_y",
"os.path.join",
"os.listdir"
] |
[((2022, 2046), 'numpy.cross', 'np.cross', (['l1', 'l2'], {'axis': '(0)'}), '(l1, l2, axis=0)\n', (2030, 2046), True, 'import numpy as np\n'), ((2129, 2187), 'numpy.asmatrix', 'np.asmatrix', (['[normal[0, 0], normal[1, 0], normal[2, 0], d]'], {}), '([normal[0, 0], normal[1, 0], normal[2, 0], d])\n', (2140, 2187), True, 'import numpy as np\n'), ((2288, 2300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2298, 2300), True, 'from matplotlib import pyplot as plt\n'), ((2492, 2513), 'numpy.arange', 'np.arange', (['(-20)', '(20)', '(1)'], {}), '(-20, 20, 1)\n', (2501, 2513), True, 'import numpy as np\n'), ((2519, 2539), 'numpy.arange', 'np.arange', (['(-1)', '(10)', '(1)'], {}), '(-1, 10, 1)\n', (2528, 2539), True, 'import numpy as np\n'), ((2548, 2565), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (2559, 2565), True, 'import numpy as np\n'), ((3260, 3270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3268, 3270), True, 'from matplotlib import pyplot as plt\n'), ((8929, 9005), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert KITTI label files into BBTXT."""'}), "(description='Convert KITTI label files into BBTXT.')\n", (8952, 9005), False, 'import argparse\n'), ((2382, 2411), 'numpy.array', 'np.array', (['gp_X_4xn[2, 0:1000]'], {}), '(gp_X_4xn[2, 0:1000])\n', (2390, 2411), True, 'import numpy as np\n'), ((2412, 2441), 'numpy.array', 'np.array', (['gp_X_4xn[0, 0:1000]'], {}), '(gp_X_4xn[0, 0:1000])\n', (2420, 2441), True, 'import numpy as np\n'), ((2442, 2472), 'numpy.array', 'np.array', (['(-gp_X_4xn[1, 0:1000])'], {}), '(-gp_X_4xn[1, 0:1000])\n', (2450, 2472), True, 'import numpy as np\n'), ((6539, 6648), 'numpy.asmatrix', 'np.asmatrix', (['[[l / 2, -l / 2, l / 2, -l / 2], [0, 0, 0, 0], [-w / 2, -w / 2, w / 2, w / \n 2], [1, 1, 1, 1]]'], {}), '([[l / 2, -l / 2, l / 2, -l / 2], [0, 0, 0, 0], [-w / 2, -w / 2,\n w / 2, w / 2], [1, 1, 1, 1]])\n', (6550, 6648), True, 'import numpy as np\n'), ((9178, 9210), 'os.path.exists', 'os.path.exists', (['args.path_labels'], {}), '(args.path_labels)\n', (9192, 9210), False, 'import os\n'), ((5096, 5129), 'os.path.join', 'os.path.join', (['self.path_labels', 'f'], {}), '(self.path_labels, f)\n', (5108, 5129), False, 'import os\n'), ((7341, 7357), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {}), '((1, 4))\n', (7349, 7357), True, 'import numpy as np\n'), ((8020, 8055), 'numpy.power', 'np.power', (['(gp_1x4 * self.gp_X_4xn)', '(2)'], {}), '(gp_1x4 * self.gp_X_4xn, 2)\n', (8028, 8055), True, 'import numpy as np\n'), ((8071, 8097), 'numpy.sum', 'np.sum', (['distances2'], {'axis': '(1)'}), '(distances2, axis=1)\n', (8077, 8097), True, 'import numpy as np\n'), ((4792, 4820), 'os.listdir', 'os.listdir', (['self.path_labels'], {}), '(self.path_labels)\n', (4802, 4820), False, 'import os\n'), ((6808, 6818), 'shared.geometry.R3x3_y', 'R3x3_y', (['ry'], {}), '(ry)\n', (6814, 6818), False, 'from shared.geometry import R3x3_y, t3x1, Rt4x4\n'), ((6820, 6836), 'shared.geometry.t3x1', 't3x1', (['cx', 'cy', 'cz'], {}), '(cx, cy, cz)\n', (6824, 6836), False, 'from shared.geometry import R3x3_y, t3x1, Rt4x4\n'), ((4846, 4879), 'os.path.join', 'os.path.join', (['self.path_labels', 'f'], {}), '(self.path_labels, f)\n', (4858, 4879), False, 'import os\n')]
|
import pytest
import numpy as np
from FastDSP.structures import GPUArray
class TestGPUArray:
@classmethod
def setup_class(cls):
cls.rows = 4
cls.cols = 6
cls.array_uint8 = np.ones((cls.rows, cls.cols), dtype=np.uint8)
cls.array_int = np.ones((cls.rows, cls.cols), dtype=np.int32)
cls.array_float = np.ones((cls.rows, cls.cols), dtype=np.float32)
cls.array_double = np.ones((cls.rows, cls.cols), dtype=np.float64)
cls.array_complex_float = np.ones((cls.rows, cls.cols), dtype=np.complex64)
cls.array_complex_double = np.ones((cls.rows, cls.cols), dtype=np.complex128)
cls.array_uint8_gpu = GPUArray(cls.array_uint8)
cls.array_int_gpu = GPUArray(cls.array_int)
cls.array_float_gpu = GPUArray(cls.array_float)
cls.array_double_gpu = GPUArray(cls.array_double)
cls.array_float_complex_gpu = GPUArray(cls.array_complex_float)
cls.array_complex_double_gpu = GPUArray(cls.array_complex_double)
def test_uint8_array_transfer_to_device_and_back(self):
assert(np.all(self.array_uint8 == self.array_uint8_gpu.get()))
def test_int_array_transfer_to_device_and_back(self):
assert(np.all(self.array_int == self.array_int_gpu.get()))
def test_float_array_transfer_to_device_and_back(self):
assert(np.all(self.array_float == self.array_float_gpu.get()))
def test_double_array_transfer_to_device_and_back(self):
assert(np.all(self.array_double == self.array_double_gpu.get()))
def test_complex_float_array_transfer_to_device(self):
assert(np.all(self.array_complex_float == self.array_float_complex_gpu.get()))
def test_complex_double_array_transfer_to_device(self):
assert(np.all(self.array_complex_double == self.array_complex_double_gpu.get()))
def test_right_item_is_returned(self):
for i in range(self.rows):
for j in range(self.cols):
                assert(self.array_uint8[i, j] == self.array_uint8_gpu[i*self.cols + j])
                assert(self.array_int[i, j] == self.array_int_gpu[i*self.cols + j])
                assert(self.array_float[i, j] == self.array_float_gpu[i*self.cols + j])
                assert(self.array_double[i, j] == self.array_double_gpu[i*self.cols + j])
                assert(self.array_complex_float[i, j] == self.array_float_complex_gpu[i*self.cols + j])
                assert(self.array_complex_double[i, j] == self.array_complex_double_gpu[i*self.cols + j])
def test_uint8_addition_returns_right_value(self):
array3_uint8 = self.array_uint8_gpu + self.array_uint8_gpu
array3 = self.array_uint8 + self.array_uint8
assert(np.all(array3_uint8.get() == array3))
def test_int_addition_returns_right_value(self):
array3_int = self.array_int_gpu + self.array_int_gpu
array3 = self.array_int + self.array_int
assert(np.all(array3_int.get() == array3))
def test_float_addition_returns_right_value(self):
array3_float = self.array_float_gpu + self.array_float_gpu
array3 = self.array_float + self.array_float
assert(np.all(array3_float.get() == array3))
def test_double_addition_returns_right_value(self):
array3_double = self.array_double_gpu + self.array_double_gpu
array3 = self.array_double + self.array_double
assert(np.all(array3_double.get() == array3))
def test_complex_float_addition_returns_the_right_value(self):
array3_complex = self.array_float_complex_gpu + self.array_float_complex_gpu
array3 = self.array_complex_float + self.array_complex_float
assert(np.all(array3_complex.get() == array3))
def test_complex_double_addition_return_the_right_value(self):
array3_complex_double = self.array_complex_double_gpu + self.array_complex_double_gpu
array3 = self.array_complex_double + self.array_complex_double
assert(np.all(array3_complex_double.get() == array3))
def test_uint8_subtraction_returns_right_value(self):
array3_uint8 = self.array_uint8_gpu - self.array_uint8_gpu
array3 = self.array_uint8 - self.array_uint8
assert(np.all(array3_uint8.get() == array3))
def test_int_subtraction_returns_right_value(self):
array3_int = self.array_int_gpu - self.array_int_gpu
array3 = self.array_int - self.array_int
assert(np.all(array3_int.get() == array3))
def test_float_subtraction_returns_right_value(self):
array3_float = self.array_float_gpu - self.array_float_gpu
array3 = self.array_float - self.array_float
assert(np.all(array3_float.get() == array3))
def test_double_subtraction_returns_right_value(self):
array3_double = self.array_double_gpu - self.array_double_gpu
array3 = self.array_double - self.array_double
assert(np.all(array3_double.get() == array3))
def test_complex_float_subtraction_returns_the_right_value(self):
array3_complex = self.array_float_complex_gpu - self.array_float_complex_gpu
array3 = self.array_complex_float - self.array_complex_float
assert(np.all(array3_complex.get() == array3))
def test_complex_double_subtraction_return_the_right_value(self):
array3_complex_double = self.array_complex_double_gpu - self.array_complex_double_gpu
array3 = self.array_complex_double - self.array_complex_double
assert(np.all(array3_complex_double.get() == array3))
def test_uint8_multiplication_returns_right_value(self):
array3_uint8 = self.array_uint8_gpu * self.array_uint8_gpu
array3 = self.array_uint8 * self.array_uint8
assert(np.all(array3_uint8.get() == array3))
def test_int_multiplication_returns_right_value(self):
array3_int = self.array_int_gpu * self.array_int_gpu
array3 = self.array_int * self.array_int
assert(np.all(array3_int.get() == array3))
def test_float_multiplication_returns_right_value(self):
array3_float = self.array_float_gpu * self.array_float_gpu
array3 = self.array_float * self.array_float
assert(np.all(array3_float.get() == array3))
def test_double_multiplication_returns_right_value(self):
array3_double = self.array_double_gpu * self.array_double_gpu
array3 = self.array_double * self.array_double
assert(np.all(array3_double.get() == array3))
def test_complex_float_multiplication_returns_the_right_value(self):
array3_complex = self.array_float_complex_gpu * self.array_float_complex_gpu
array3 = self.array_complex_float * self.array_complex_float
assert(np.all(array3_complex.get() == array3))
def test_complex_double_multiplication_return_the_right_value(self):
array3_complex_double = self.array_complex_double_gpu * self.array_complex_double_gpu
array3 = self.array_complex_double * self.array_complex_double
assert(np.all(array3_complex_double.get() == array3))
def test_uint8_division_returns_right_value(self):
array3_uint8 = self.array_uint8_gpu / self.array_uint8_gpu
array3 = self.array_uint8 / self.array_uint8
assert(np.all(array3_uint8.get() == array3))
def test_int_division_returns_right_value(self):
array3_int = self.array_int_gpu / self.array_int_gpu
array3 = self.array_int / self.array_int
assert(np.all(array3_int.get() == array3))
def test_float_division_returns_right_value(self):
array3_float = self.array_float_gpu / self.array_float_gpu
array3 = self.array_float / self.array_float
assert(np.all(array3_float.get() == array3))
def test_double_division_returns_right_value(self):
array3_double = self.array_double_gpu / self.array_double_gpu
array3 = self.array_double / self.array_double
assert(np.all(array3_double.get() == array3))
def test_complex_float_division_returns_the_right_value(self):
array3_complex = self.array_float_complex_gpu / self.array_float_complex_gpu
array3 = self.array_complex_float / self.array_complex_float
assert(np.all(array3_complex.get() == array3))
def test_complex_double_division_return_the_right_value(self):
array3_complex_double = self.array_complex_double_gpu / self.array_complex_double_gpu
array3 = self.array_complex_double / self.array_complex_double
assert(np.all(array3_complex_double.get() == array3))
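    # A hedged extra check, assuming only behaviour already exercised above
    # (element-wise operators returning GPUArray objects with a .get() method):
    # chaining two device-side operations should match the same chain on the host.
    def test_chained_add_sub_returns_right_value(self):
        chained = (self.array_float_gpu + self.array_float_gpu) - self.array_float_gpu
        expected = (self.array_float + self.array_float) - self.array_float
        assert(np.all(chained.get() == expected))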
|
[
"FastDSP.structures.GPUArray",
"numpy.ones"
] |
[((209, 254), 'numpy.ones', 'np.ones', (['(cls.rows, cls.cols)'], {'dtype': 'np.uint8'}), '((cls.rows, cls.cols), dtype=np.uint8)\n', (216, 254), True, 'import numpy as np\n'), ((279, 324), 'numpy.ones', 'np.ones', (['(cls.rows, cls.cols)'], {'dtype': 'np.int32'}), '((cls.rows, cls.cols), dtype=np.int32)\n', (286, 324), True, 'import numpy as np\n'), ((351, 398), 'numpy.ones', 'np.ones', (['(cls.rows, cls.cols)'], {'dtype': 'np.float32'}), '((cls.rows, cls.cols), dtype=np.float32)\n', (358, 398), True, 'import numpy as np\n'), ((426, 473), 'numpy.ones', 'np.ones', (['(cls.rows, cls.cols)'], {'dtype': 'np.float64'}), '((cls.rows, cls.cols), dtype=np.float64)\n', (433, 473), True, 'import numpy as np\n'), ((508, 557), 'numpy.ones', 'np.ones', (['(cls.rows, cls.cols)'], {'dtype': 'np.complex64'}), '((cls.rows, cls.cols), dtype=np.complex64)\n', (515, 557), True, 'import numpy as np\n'), ((593, 643), 'numpy.ones', 'np.ones', (['(cls.rows, cls.cols)'], {'dtype': 'np.complex128'}), '((cls.rows, cls.cols), dtype=np.complex128)\n', (600, 643), True, 'import numpy as np\n'), ((675, 700), 'FastDSP.structures.GPUArray', 'GPUArray', (['cls.array_uint8'], {}), '(cls.array_uint8)\n', (683, 700), False, 'from FastDSP.structures import GPUArray\n'), ((729, 752), 'FastDSP.structures.GPUArray', 'GPUArray', (['cls.array_int'], {}), '(cls.array_int)\n', (737, 752), False, 'from FastDSP.structures import GPUArray\n'), ((783, 808), 'FastDSP.structures.GPUArray', 'GPUArray', (['cls.array_float'], {}), '(cls.array_float)\n', (791, 808), False, 'from FastDSP.structures import GPUArray\n'), ((840, 866), 'FastDSP.structures.GPUArray', 'GPUArray', (['cls.array_double'], {}), '(cls.array_double)\n', (848, 866), False, 'from FastDSP.structures import GPUArray\n'), ((905, 938), 'FastDSP.structures.GPUArray', 'GPUArray', (['cls.array_complex_float'], {}), '(cls.array_complex_float)\n', (913, 938), False, 'from FastDSP.structures import GPUArray\n'), ((978, 1012), 'FastDSP.structures.GPUArray', 'GPUArray', (['cls.array_complex_double'], {}), '(cls.array_complex_double)\n', (986, 1012), False, 'from FastDSP.structures import GPUArray\n')]
|
import numpy as np
class MotionExplorer:
"""
    Aims at exploring motions, represented as sampled observations of an n-dimensional input
    vector. This stream of vectors describes a vector space in which the Mahalanobis distance
    is used to assess the distance of new samples to previously seen samples. Every time a new
    sample is observed whose K nearest neighbours are on average further away than N standard
    deviations, the sample is deemed original and saved to the attribute observations.
"""
def __init__(self, inputdim = 2, stepsize = 10, order = 4, window = 30,
start_buffer = 10, periodic_recompute = 5, number_of_neighbour = 5,
number_of_stdev = 4.5
):
"""
Parameters
----------
inputdim : int
the number of dimension of the input vector.
stepsize : int
The size of the interpolation step in milliseconds.
order : int
The dimension of the output vector, 1 is position only, 2 includes velocity, 3 provides acceleration, and so on.
window : int
The size of the averaging window in samples.
        start_buffer : int
            The number of samples it takes before any observation can be saved; this leaves time
            for the Savitzky-Golay interpolation to start outputting some data.
        periodic_recompute : int
            The number of samples after which the mean and covariance of saved observations will be recomputed.
        number_of_neighbour : int
            The number of closest neighbours that are considered when assessing if a new sample is original or not.
        number_of_stdev : float
            The number of standard deviations a new vector has to be from its K nearest neighbours,
            as measured by the Mahalanobis distance. When the mean distance to the K nearest
            neighbours exceeds this value, the new sample is considered original and saved to observations.
"""
self.inputdim = inputdim
self.order = order
## filtering
self.axis = [AxisFilter(stepsize, order, window) for _ in range(inputdim)]
## observations space
self.observations = np.zeros((1,self.inputdim*self.order))
self.mean = np.zeros(self.inputdim*self.order)
self.icov = np.eye(self.inputdim*self.order)
## variable logic
self.counter = 0
self.start_buffer = start_buffer
self.periodic_recompute = periodic_recompute
        self.number_of_neighbour = number_of_neighbour
        self.number_of_stdev = number_of_stdev
self.last_sample = np.zeros(self.inputdim*self.order)
def new_sample(self, ms, ndata):
"""Passes a new observed sample to the motionexplorer. It will filter it based on the last
observed sample and compute the distance of this current sample to all previously saved
original samples. If the average distance of the N nearest neightbour is greater than X
stdev, then the current sample is saved to the class attribute observations.
Parameters
----------
ms : int
Timestamp in milliseconds. This can be easily produced with the time module and the
call to: int(round(time.time() * 1000)).
ndata : iterable
An iterable object (tuple, ndarray, ..) representing the N dimensional vector of the
current sample.
Returns
-------
int, bool
            average Mahalanobis distance to the K nearest neighbours and a flag saying if the
current sample is added to the set of original observations.
"""
## ndata.shape == inputdim
self.counter += 1
for i, data in enumerate(ndata):
self.axis[i].new_sample(ms, data)
## recompute mean and icov every periodic_recompute
if self.counter % self.periodic_recompute == 0:
self.compute_observations_mean_icov()
## get last sample from each axis and squash to 1D
sample = np.array([self.axis[i].samples[-1] for i in range(self.inputdim)]).reshape(-1)
## compute the distance of sample to all stored observations
distances = self.distance_to_observations(sample)
distance_meank = np.mean(distances[:self.number_of_neighbour])
if (self.counter > self.start_buffer) and self.axis[0].full:
## keep the sample if further than number of stdev to previous observations
if distance_meank > self.number_of_stdev:
self.observations = np.vstack((self.observations, sample))
added = True
else: added = False
else:
added = False
self.last_sample = sample
return distance_meank, added
def distance_to_observations(self, vector):
"""Return the Mahalanobis distance of vector to the space of all observations.
        The output distances are sorted.
https://en.wikipedia.org/wiki/Mahalanobis_distance
"""
diff = self.observations - vector
distances = np.sqrt(np.diag(np.dot(np.dot(diff, self.icov), diff.T)))
return np.sort(distances)
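    # For reference (an added remark restating what the two lines above compute):
    # for each stored observation o and query vector v,
    #   D(o, v) = sqrt((o - v) . icov . (o - v)^T)
    # which, with icov = pinv(cov(observations)), is the Mahalanobis distance.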
def compute_observations_mean_icov(self):
self.mean = np.mean(self.observations, axis=0)
# print self.observations.shape[0]
if self.observations.shape[0] > 1:
self.icov = np.linalg.pinv(np.cov((self.observations-self.mean).transpose()))
class AxisFilter:
"""Filters an unevenly sampled measurement dimension. It interpolates at constant time steps `stepsize` in ms, performs Butter worth filetering and Savitsky Golay interpolation of order `order` over a moving window `window`.
"""
def __init__(self, stepsize, order, window):
"""
Parameters
----------
stepsize : int
The size of the interpolation step in milliseconds.
order : int
The dimension of the output vector, 1 is position only, 2 includes velocity, 3 provides acceleration, and so on.
window : int
The size of the averaging window in samples.
"""
self.stepsize = stepsize
self.order = order
self.interpolator = TimeInterpolator(stepsize)
self.sgfitter = SavitskyGolayFitter(order, window)
self.full = False
def new_sample(self, time, value):
self.samples = np.empty((0,self.order))
self.interpolator.new_sample(time, value)
for point in self.interpolator.value_steps:
point = self.sgfitter.new_sample(point)
self.samples = np.vstack((self.samples, point))
self.full = self.sgfitter.full
class TimeInterpolator:
"""Interpolate between 2 measurements at constant step size X in ms.
"""
def __init__(self, stepsize):
self.stepsize = stepsize
self.firstpoint = True
def new_sample(self, time, value):
if self.firstpoint == True:
self.firstpoint = False
self.time_steps = np.array([time])
self.value_steps = np.array([value])
else:
self.time_steps = np.arange(self.last_time, time, self.stepsize)
self.value_steps = np.interp(self.time_steps, [self.last_time, time], [self.last_value, value])
self.last_time = time
self.last_value = value
class SavitskyGolayFitter:
def __init__(self, order = 4, window = 30):
self.order = order
if window%2==0:
window = window + 1
self.window = window
#compute the savitzky-golay differentiators
sgolay = self.savitzky_golay(order, window)
self.sgolay_diff = []
self.buffers = []
self.samples = 0
self.full = False
#create the filters
for i in range(order):
self.sgolay_diff.append(np.ravel(sgolay[i, :]))
self.buffers.append(IIRFilter(self.sgolay_diff[i], [1]))
def new_sample(self, x):
self.samples = self.samples + 1
if self.samples>self.window:
self.full = True
        fits = np.zeros((self.order,))
        for c, buffer in enumerate(self.buffers):
            fits[c] = buffer.filter(x)
        return fits
#sg coefficient computation
def savitzky_golay(self, order = 2, window = 30):
if window is None:
window = order + 2
if window % 2 != 1 or window < 1:
raise TypeError("window size must be a positive odd number")
if window < order + 2:
raise TypeError("window size is too small for the polynomial")
# A second order polynomial has 3 coefficients
order_range = range(order+1)
half_window = (window-1)//2
B = np.mat(
[ [k**i for i in order_range] for k in range(-half_window, half_window+1)] )
M = np.linalg.pinv(B)
return M
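    # Note on M (an added remark, not original documentation): row i of pinv(B)
    # holds the least-squares FIR coefficients estimating the i-th polynomial
    # coefficient of the local fit, i.e. (up to a factorial scale) the i-th
    # derivative of the signal - these rows become the filters built in __init__.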
class IIRFilter:
def __init__(self, B, A):
"""Create an IIR filter, given the B and A coefficient vectors.
"""
self.B = B
self.A = A
if len(A)>2:
self.prev_outputs = Ringbuffer(len(A)-1)
else:
self.prev_outputs = Ringbuffer(3)
self.prev_inputs = Ringbuffer(len(B))
def filter(self, x):
"""Take one sample and filter it. Return the output.
"""
y = 0
self.prev_inputs.new_sample(x)
k =0
for b in self.B:
y = y + b * self.prev_inputs.reverse_index(k)
k = k + 1
k = 0
for a in self.A[1:]:
y = y - a * self.prev_outputs.reverse_index(k)
k = k + 1
y = y / self.A[0]
self.prev_outputs.new_sample(y)
return y
def new_sample(self, x):
return self.filter(x)
class Ringbuffer:
def __init__(self, size, init=0):
if size<1:
            raise Exception("Invalid size for a ringbuffer: must be >=1")
self.n_samples = size
self.samples = np.ones((size,))*init
self.read_head = 1
self.write_head = 0
self.sum = 0
def get_length(self):
return self.n_samples
def get_samples(self):
return np.hstack((self.samples[self.read_head-1:],self.samples[0:self.read_head-1]))
def get_sum(self):
return self.sum
def get_output(self):
#self.read_head %= self.n_samples
return self.samples[self.read_head-1]
def get_mean(self):
return self.sum / float(self.n_samples)
def forward_index(self, i):
new_index = self.read_head+i-1
new_index = new_index % self.n_samples
return self.samples[new_index]
def reverse_index(self, i):
new_index = self.write_head-i-1
while new_index<0:
new_index+=self.n_samples
return self.samples[new_index]
def new_sample(self, x):
s = self.samples[self.write_head]
self.samples[self.write_head] = x
self.sum += x
self.sum -= self.samples[self.read_head]
self.read_head += 1
self.write_head += 1
self.read_head %= self.n_samples
self.write_head %= self.n_samples
return s
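if __name__ == '__main__':
    # Minimal smoke-test sketch (not part of the original module): feed a short
    # 2-D random walk into MotionExplorer and count how many samples are kept as
    # "original" observations. Timestamps advance by 20 ms per sample.
    explorer = MotionExplorer(inputdim=2)
    position = np.zeros(2)
    kept = 0
    for k in range(200):
        position += np.random.randn(2)
        _, added = explorer.new_sample(20 * k, position)
        kept += int(added)
    print('kept %d original observations out of 200 samples' % kept)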
|
[
"numpy.dot",
"numpy.ravel",
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.sort",
"numpy.mean",
"numpy.array",
"numpy.arange",
"numpy.interp",
"numpy.eye",
"numpy.linalg.pinv",
"numpy.vstack"
] |
[((2181, 2222), 'numpy.zeros', 'np.zeros', (['(1, self.inputdim * self.order)'], {}), '((1, self.inputdim * self.order))\n', (2189, 2222), True, 'import numpy as np\n'), ((2240, 2276), 'numpy.zeros', 'np.zeros', (['(self.inputdim * self.order)'], {}), '(self.inputdim * self.order)\n', (2248, 2276), True, 'import numpy as np\n'), ((2295, 2329), 'numpy.eye', 'np.eye', (['(self.inputdim * self.order)'], {}), '(self.inputdim * self.order)\n', (2301, 2329), True, 'import numpy as np\n'), ((2575, 2611), 'numpy.zeros', 'np.zeros', (['(self.inputdim * self.order)'], {}), '(self.inputdim * self.order)\n', (2583, 2611), True, 'import numpy as np\n'), ((4240, 4285), 'numpy.mean', 'np.mean', (['distances[:self.number_of_neighbour]'], {}), '(distances[:self.number_of_neighbour])\n', (4247, 4285), True, 'import numpy as np\n'), ((5134, 5152), 'numpy.sort', 'np.sort', (['distances'], {}), '(distances)\n', (5141, 5152), True, 'import numpy as np\n'), ((5220, 5254), 'numpy.mean', 'np.mean', (['self.observations'], {'axis': '(0)'}), '(self.observations, axis=0)\n', (5227, 5254), True, 'import numpy as np\n'), ((6373, 6398), 'numpy.empty', 'np.empty', (['(0, self.order)'], {}), '((0, self.order))\n', (6381, 6398), True, 'import numpy as np\n'), ((8073, 8096), 'numpy.zeros', 'np.zeros', (['(self.order,)'], {}), '((self.order,))\n', (8081, 8096), True, 'import numpy as np\n'), ((8879, 8896), 'numpy.linalg.pinv', 'np.linalg.pinv', (['B'], {}), '(B)\n', (8893, 8896), True, 'import numpy as np\n'), ((10209, 10295), 'numpy.hstack', 'np.hstack', (['(self.samples[self.read_head - 1:], self.samples[0:self.read_head - 1])'], {}), '((self.samples[self.read_head - 1:], self.samples[0:self.read_head -\n 1]))\n', (10218, 10295), True, 'import numpy as np\n'), ((6581, 6613), 'numpy.vstack', 'np.vstack', (['(self.samples, point)'], {}), '((self.samples, point))\n', (6590, 6613), True, 'import numpy as np\n'), ((7002, 7018), 'numpy.array', 'np.array', (['[time]'], {}), '([time])\n', (7010, 7018), True, 'import numpy as np\n'), ((7050, 7067), 'numpy.array', 'np.array', (['[value]'], {}), '([value])\n', (7058, 7067), True, 'import numpy as np\n'), ((7113, 7159), 'numpy.arange', 'np.arange', (['self.last_time', 'time', 'self.stepsize'], {}), '(self.last_time, time, self.stepsize)\n', (7122, 7159), True, 'import numpy as np\n'), ((7191, 7267), 'numpy.interp', 'np.interp', (['self.time_steps', '[self.last_time, time]', '[self.last_value, value]'], {}), '(self.time_steps, [self.last_time, time], [self.last_value, value])\n', (7200, 7267), True, 'import numpy as np\n'), ((10011, 10027), 'numpy.ones', 'np.ones', (['(size,)'], {}), '((size,))\n', (10018, 10027), True, 'import numpy as np\n'), ((4535, 4573), 'numpy.vstack', 'np.vstack', (['(self.observations, sample)'], {}), '((self.observations, sample))\n', (4544, 4573), True, 'import numpy as np\n'), ((7829, 7851), 'numpy.ravel', 'np.ravel', (['sgolay[i, :]'], {}), '(sgolay[i, :])\n', (7837, 7851), True, 'import numpy as np\n'), ((5084, 5107), 'numpy.dot', 'np.dot', (['diff', 'self.icov'], {}), '(diff, self.icov)\n', (5090, 5107), True, 'import numpy as np\n')]
|
import numpy as np
import bokeh.plotting as bp
bp.output_file("bokeh1.html")
x = np.linspace(0, 2 * np.pi, 1024)
y = np.cos(x)
fig = bp.figure()
fig.line(x, y)
bp.show(fig)
|
[
"bokeh.plotting.figure",
"bokeh.plotting.output_file",
"bokeh.plotting.show",
"numpy.cos",
"numpy.linspace"
] |
[((47, 76), 'bokeh.plotting.output_file', 'bp.output_file', (['"""bokeh1.html"""'], {}), "('bokeh1.html')\n", (61, 76), True, 'import bokeh.plotting as bp\n'), ((81, 112), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1024)'], {}), '(0, 2 * np.pi, 1024)\n', (92, 112), True, 'import numpy as np\n'), ((117, 126), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (123, 126), True, 'import numpy as np\n'), ((133, 144), 'bokeh.plotting.figure', 'bp.figure', ([], {}), '()\n', (142, 144), True, 'import bokeh.plotting as bp\n'), ((161, 173), 'bokeh.plotting.show', 'bp.show', (['fig'], {}), '(fig)\n', (168, 173), True, 'import bokeh.plotting as bp\n')]
|
import pdb
import torch
import numpy as np
import time
from tools.utils import Progbar,AverageMeter
from matplotlib import pyplot as plt
from scipy.integrate import simps
def predict_set(nets, dataloader, runtime_params):
run_type = runtime_params['run_type']
#net = net.eval()
progbar = Progbar(len(dataloader.dataset), stateful_metrics=['run-type'])
batch_time = AverageMeter()
names = []
pred_landmarks = np.array([])
gt_landmarks = np.array([])
with torch.no_grad():
for i, (landmarks, imgs, img_paths) in enumerate(dataloader):
s_time = time.time()
imgs = imgs.cuda()
names.extend(img_paths)
net = nets[0]
if 'half' in runtime_params.values():
output = net(imgs.half())
else:
output = net(imgs)
output = output.cpu().numpy()
pred_landmarks = np.concatenate((pred_landmarks,output),axis=0)
gt_landmarks = np.concatenate((gt_landmarks,landmarks.data.numpy()),axis=0)
progbar.add(imgs.size(0), values=[('run-type', run_type)]) # ,('batch_time', batch_time.val)])
batch_time.update(time.time() - s_time)
if runtime_params['debug'] and i:
break
pred_landmarks = pred_landmarks.reshape((-1,28,2))
gt_landmarks = gt_landmarks.reshape((-1,28,2))
assert gt_landmarks.shape == pred_landmarks.shape
    return gt_landmarks, pred_landmarks, names
def dist(gtLandmark, dist_type='centers', left_pt=0, right_pt=8, num_eye_pts=8):
if dist_type=='centers':
normDist = np.linalg.norm(np.mean(gtLandmark[left_pt:left_pt+num_eye_pts], axis=0) -
np.mean(gtLandmark[right_pt:right_pt+num_eye_pts], axis=0))
elif dist_type=='corners':
        normDist = np.linalg.norm(gtLandmark[left_pt] - gtLandmark[right_pt+num_eye_pts//2])
elif dist_type=='diagonal':
height, width = np.max(gtLandmark, axis=0) - np.min(gtLandmark, axis=0)
normDist = np.sqrt(width**2 + height**2)
return normDist
def landmark_error(gtLandmarks, predict_Landmarks, dist_type='centers', show_results=False, verbose=False):
norm_errors = []
errors = []
for i in range(len(gtLandmarks)):
norm_dist = dist(gtLandmarks[i], dist_type=dist_type)
error = np.mean(np.sqrt(np.sum((gtLandmarks[i] - predict_Landmarks[i])**2, axis=1)))
norm_error = error/norm_dist
errors.append(error)
norm_errors.append(norm_error)
if verbose:
print('{0}: {1}'.format(i, error))
if verbose:
print("Image idxs sorted by error")
print(np.argsort(errors))
avg_error = np.mean(errors)
avg_norm_error = np.mean(norm_errors)
print("Average error: {0}".format(avg_error))
print("Average norm error: {0}".format(avg_norm_error))
return norm_errors, errors
def auc_error(errors, failure_threshold=0.03, step=0.0001, save_path='', showCurve=True):
nErrors = len(errors)
xAxis = list(np.arange(0., failure_threshold+step, step))
ced = [float(np.count_nonzero([errors <= x])) / nErrors for x in xAxis]
auc = simps(ced, x=xAxis) / failure_threshold
failure_rate = 1. - ced[-1]
print("AUC @ {0}: {1}".format(failure_threshold, auc))
print("Failure rate: {0}".format(failure_rate))
if showCurve:
plt.plot(xAxis, ced)
plt.savefig(save_path)
return auc, failure_rate
def evaluate(gt_landmarks, landmarks,th,save_path):
gt_landmarks = gt_landmarks.permute((1,0,2)).cpu().numpy()
landmarks = landmarks.permute((1, 0, 2)).cpu().numpy()
norm_errors, errors = landmark_error(gt_landmarks,landmarks)
auc, failure_rate = auc_error(errors,th,save_path=save_path)
return {'auc':auc,'failure_rate':failure_rate,"errors":errors}
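if __name__ == '__main__':
    # Synthetic sanity check (assumed data, not from the original pipeline):
    # perturb 28-point ground-truth landmarks with small Gaussian noise and run
    # the metrics without plotting.
    rng = np.random.RandomState(0)
    gt = rng.rand(10, 28, 2) * 100
    pred = gt + rng.randn(10, 28, 2)
    norm_errors, errors = landmark_error(gt, pred)
    auc, failure_rate = auc_error(norm_errors, failure_threshold=0.5, showCurve=False)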
|
[
"matplotlib.pyplot.savefig",
"numpy.sum",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.count_nonzero",
"time.time",
"numpy.argsort",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.arange",
"numpy.linalg.norm",
"numpy.min",
"torch.no_grad",
"scipy.integrate.simps",
"tools.utils.AverageMeter",
"numpy.sqrt"
] |
[((383, 397), 'tools.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (395, 397), False, 'from tools.utils import Progbar, AverageMeter\n'), ((434, 446), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (442, 446), True, 'import numpy as np\n'), ((466, 478), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (474, 478), True, 'import numpy as np\n'), ((2713, 2728), 'numpy.mean', 'np.mean', (['errors'], {}), '(errors)\n', (2720, 2728), True, 'import numpy as np\n'), ((2750, 2770), 'numpy.mean', 'np.mean', (['norm_errors'], {}), '(norm_errors)\n', (2757, 2770), True, 'import numpy as np\n'), ((488, 503), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (501, 503), False, 'import torch\n'), ((3046, 3092), 'numpy.arange', 'np.arange', (['(0.0)', '(failure_threshold + step)', 'step'], {}), '(0.0, failure_threshold + step, step)\n', (3055, 3092), True, 'import numpy as np\n'), ((3177, 3196), 'scipy.integrate.simps', 'simps', (['ced'], {'x': 'xAxis'}), '(ced, x=xAxis)\n', (3182, 3196), False, 'from scipy.integrate import simps\n'), ((3387, 3407), 'matplotlib.pyplot.plot', 'plt.plot', (['xAxis', 'ced'], {}), '(xAxis, ced)\n', (3395, 3407), True, 'from matplotlib import pyplot as plt\n'), ((3416, 3438), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (3427, 3438), True, 'from matplotlib import pyplot as plt\n'), ((596, 607), 'time.time', 'time.time', ([], {}), '()\n', (605, 607), False, 'import time\n'), ((919, 967), 'numpy.concatenate', 'np.concatenate', (['(pred_landmarks, output)'], {'axis': '(0)'}), '((pred_landmarks, output), axis=0)\n', (933, 967), True, 'import numpy as np\n'), ((1837, 1913), 'numpy.linalg.norm', 'np.linalg.norm', (['(gtLandmark[left_pt] - gtLandmark[right_pt + num_eye_pts / 2])'], {}), '(gtLandmark[left_pt] - gtLandmark[right_pt + num_eye_pts / 2])\n', (1851, 1913), True, 'import numpy as np\n'), ((2677, 2695), 'numpy.argsort', 'np.argsort', (['errors'], {}), '(errors)\n', (2687, 2695), True, 'import numpy as np\n'), ((1634, 1692), 'numpy.mean', 'np.mean', (['gtLandmark[left_pt:left_pt + num_eye_pts]'], {'axis': '(0)'}), '(gtLandmark[left_pt:left_pt + num_eye_pts], axis=0)\n', (1641, 1692), True, 'import numpy as np\n'), ((1727, 1787), 'numpy.mean', 'np.mean', (['gtLandmark[right_pt:right_pt + num_eye_pts]'], {'axis': '(0)'}), '(gtLandmark[right_pt:right_pt + num_eye_pts], axis=0)\n', (1734, 1787), True, 'import numpy as np\n'), ((2041, 2074), 'numpy.sqrt', 'np.sqrt', (['(width ** 2 + height ** 2)'], {}), '(width ** 2 + height ** 2)\n', (2048, 2074), True, 'import numpy as np\n'), ((2369, 2429), 'numpy.sum', 'np.sum', (['((gtLandmarks[i] - predict_Landmarks[i]) ** 2)'], {'axis': '(1)'}), '((gtLandmarks[i] - predict_Landmarks[i]) ** 2, axis=1)\n', (2375, 2429), True, 'import numpy as np\n'), ((3108, 3139), 'numpy.count_nonzero', 'np.count_nonzero', (['[errors <= x]'], {}), '([errors <= x])\n', (3124, 3139), True, 'import numpy as np\n'), ((1192, 1203), 'time.time', 'time.time', ([], {}), '()\n', (1201, 1203), False, 'import time\n'), ((1966, 1992), 'numpy.max', 'np.max', (['gtLandmark'], {'axis': '(0)'}), '(gtLandmark, axis=0)\n', (1972, 1992), True, 'import numpy as np\n'), ((1995, 2021), 'numpy.min', 'np.min', (['gtLandmark'], {'axis': '(0)'}), '(gtLandmark, axis=0)\n', (2001, 2021), True, 'import numpy as np\n')]
|
########################################
# MIT License
#
# Copyright (c) 2020 <NAME>
########################################
'''
Define the data types for the variables to use in the package.
'''
import ctypes
import numpy as np
__all__ = []
# Types for numpy.ndarray objects
cpu_float = np.float64 # double
cpu_complex = np.complex128 # complex double
cpu_int = np.int32 # int
# unsigned integer (char does not seem to be allowed in CUDA), and int16 is too small (must not be equal to cpu_int)
cpu_bool = np.uint32
cpu_real_bool = np.bool # bool (not allowed in PyOpenCL)
def array_float(*args, **kwargs):
return np.array(*args, dtype=cpu_float, **kwargs)
def array_int(*args, **kwargs):
return np.array(*args, dtype=cpu_int, **kwargs)
def empty_float(*args, **kwargs):
return np.empty(*args, dtype=cpu_float, **kwargs)
def empty_int(*args, **kwargs):
return np.empty(*args, dtype=cpu_int, **kwargs)
def fromiter_float(i):
return np.fromiter(i, dtype=cpu_float)
def fromiter_int(i):
return np.fromiter(i, dtype=cpu_int)
def full_float(n, f):
return np.full(n, f, dtype=cpu_float)
def full_int(n, i):
return np.full(n, i, dtype=cpu_int)
# Expose ctypes objects
c_double = ctypes.c_double
c_double_p = ctypes.POINTER(c_double)
c_int = ctypes.c_int
c_int_p = ctypes.POINTER(c_int)
py_object = ctypes.py_object
# Functions to deal with ctypes and numpy value types
def as_c_double(*args):
'''
Transform arguments to a :mod:`ctypes` "double".
'''
if len(args) == 1:
return c_double(*args)
else:
return tuple(c_double(a) for a in args)
def as_double(*args):
'''
Transform arguments to a :mod:`numpy` "double".
'''
if len(args) == 1:
return cpu_float(*args)
else:
return tuple(cpu_float(a) for a in args)
def data_as_c_double(*args):
'''
Transform arguments to a :mod:`ctypes` "double*".
'''
if len(args) == 1:
return args[0].ctypes.data_as(c_double_p)
else:
return tuple(a.ctypes.data_as(c_double_p) for a in args)
def as_c_integer(*args):
'''
    Transform arguments to a :mod:`ctypes` "int".
'''
if len(args) == 1:
return c_int(*args)
else:
return tuple(c_int(a) for a in args)
def as_integer(*args):
'''
Transform arguments to a :mod:`numpy` "integral" type.
'''
if len(args) == 1:
return cpu_int(*args)
else:
return tuple(cpu_int(a) for a in args)
def data_as_c_int(*args):
'''
Transform arguments to a :mod:`ctypes` "int*".
'''
if len(args) == 1:
return args[0].ctypes.data_as(c_int_p)
else:
return tuple(a.ctypes.data_as(c_int_p) for a in args)
def as_py_object(*args):
'''
Transform arguments to a :mod:`ctypes` "PyObject".
'''
if len(args) == 1:
return py_object(*args)
else:
return tuple(py_object(a) for a in args)
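if __name__ == '__main__':
    # Hypothetical usage sketch (not part of the original module): round-trip a
    # numpy array through the ctypes helpers, as one would before handing the
    # buffer to a C function expecting (double*, int).
    a = array_float([1.0, 2.0, 3.0])
    ptr = data_as_c_double(a)   # POINTER(c_double) view of the numpy buffer
    n = as_c_integer(len(a))    # ctypes int with the element count
    print(ptr[0], ptr[1], ptr[2], n.value)  # 1.0 2.0 3.0 3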
|
[
"numpy.full",
"numpy.empty",
"numpy.array",
"numpy.fromiter",
"ctypes.POINTER"
] |
[((1258, 1282), 'ctypes.POINTER', 'ctypes.POINTER', (['c_double'], {}), '(c_double)\n', (1272, 1282), False, 'import ctypes\n'), ((1314, 1335), 'ctypes.POINTER', 'ctypes.POINTER', (['c_int'], {}), '(c_int)\n', (1328, 1335), False, 'import ctypes\n'), ((627, 669), 'numpy.array', 'np.array', (['*args'], {'dtype': 'cpu_float'}), '(*args, dtype=cpu_float, **kwargs)\n', (635, 669), True, 'import numpy as np\n'), ((715, 755), 'numpy.array', 'np.array', (['*args'], {'dtype': 'cpu_int'}), '(*args, dtype=cpu_int, **kwargs)\n', (723, 755), True, 'import numpy as np\n'), ((803, 845), 'numpy.empty', 'np.empty', (['*args'], {'dtype': 'cpu_float'}), '(*args, dtype=cpu_float, **kwargs)\n', (811, 845), True, 'import numpy as np\n'), ((891, 931), 'numpy.empty', 'np.empty', (['*args'], {'dtype': 'cpu_int'}), '(*args, dtype=cpu_int, **kwargs)\n', (899, 931), True, 'import numpy as np\n'), ((968, 999), 'numpy.fromiter', 'np.fromiter', (['i'], {'dtype': 'cpu_float'}), '(i, dtype=cpu_float)\n', (979, 999), True, 'import numpy as np\n'), ((1034, 1063), 'numpy.fromiter', 'np.fromiter', (['i'], {'dtype': 'cpu_int'}), '(i, dtype=cpu_int)\n', (1045, 1063), True, 'import numpy as np\n'), ((1099, 1129), 'numpy.full', 'np.full', (['n', 'f'], {'dtype': 'cpu_float'}), '(n, f, dtype=cpu_float)\n', (1106, 1129), True, 'import numpy as np\n'), ((1163, 1191), 'numpy.full', 'np.full', (['n', 'i'], {'dtype': 'cpu_int'}), '(n, i, dtype=cpu_int)\n', (1170, 1191), True, 'import numpy as np\n')]
|
# bezier_curve() generates the coordinates of a Bezier curve.
import numpy as np
from skimage.draw import bezier_curve
img = np.zeros((10, 10), dtype=np.uint8)
print(img[1, 5])
rr, cc = bezier_curve(1, 5, 5, -2, 8, 8, 2)
img[rr, cc] = 1
print(img)
print(img[1, 5])
print(img[5, -2])
print(img[8, 8])
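# Note: a quadratic Bezier curve interpolates its two endpoints (1, 5) and (8, 8),
# but generally not the middle control point (5, -2); the img[5, -2] print simply
# reads column 8 of row 5 via numpy's negative indexing.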
|
[
"numpy.zeros",
"skimage.draw.bezier_curve"
] |
[((90, 124), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {'dtype': 'np.uint8'}), '((10, 10), dtype=np.uint8)\n', (98, 124), True, 'import numpy as np\n'), ((151, 185), 'skimage.draw.bezier_curve', 'bezier_curve', (['(1)', '(5)', '(5)', '(-2)', '(8)', '(8)', '(2)'], {}), '(1, 5, 5, -2, 8, 8, 2)\n', (163, 185), False, 'from skimage.draw import bezier_curve\n')]
|
import numpy as np
import cv2
import imutils
import os
import time
# This function calculates the distance between the center of two individuals in a
# single frame of the video
def Check(a, b):
dist = (((a[0] - b[0]) ** 2) + (a[1] - b[1]) ** 2) ** 0.5
calibration = (a[1] + b[1]) / 2
if 0 < dist < 0.25 * calibration:
return True
else:
return False
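# Worked example (hypothetical numbers): for centres a = (100, 200) and
# b = (130, 240), dist = sqrt(30**2 + 40**2) = 50 and
# calibration = (200 + 240) / 2 = 220, so the pair is flagged as too close
# because 0 < 50 < 0.25 * 220 = 55.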
# This function joins the path components, reads the network model stored in Darknet,
# and gets all the layers of the network model to only store the indexes of layers with
# unconnected outputs
def Setup(yolo):
global net, ln, LABELS
weights = os.path.sep.join([yolo, "/users/anshsahny/darknet/yolov3.weights"])
config = os.path.sep.join([yolo, "/users/anshsahny/darknet/cfg/yolov3.cfg"])
labelsPath = os.path.sep.join([yolo, "/users/anshsahny/darknet/data/coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")
net = cv2.dnn.readNetFromDarknet(config, weights)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# This function processes each frame of the video with the "Check" function between
# every individual and returns in to the main function
def ImageProcess(image):
global processedImg
(H, W) = (None, None)
frame = image.copy()
if W is None or H is None:
(H, W) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
net.setInput(blob)
starttime = time.time()
layerOutputs = net.forward(ln)
stoptime = time.time()
print("Video is Getting Processed at {:.4f} seconds per frame".format((stoptime - starttime)))
confidences = []
outline = []
for output in layerOutputs:
for detection in output:
scores = detection[5:]
maxi_class = np.argmax(scores)
confidence = scores[maxi_class]
if LABELS[maxi_class] == "person":
if confidence > 0.5:
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
outline.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
box_line = cv2.dnn.NMSBoxes(outline, confidences, 0.5, 0.3)
if len(box_line) > 0:
flat_box = box_line.flatten()
pairs = []
center = []
status = []
for i in flat_box:
(x, y) = (outline[i][0], outline[i][1])
(w, h) = (outline[i][2], outline[i][3])
center.append([int(x + w / 2), int(y + h / 2)])
status.append(False)
for i in range(len(center)):
for j in range(len(center)):
close = Check(center[i], center[j])
if close:
pairs.append([center[i], center[j]])
status[i] = True
status[j] = True
index = 0
for i in flat_box:
(x, y) = (outline[i][0], outline[i][1])
(w, h) = (outline[i][2], outline[i][3])
if status[index] == True:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 150), 2)
elif status[index] == False:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
index += 1
for h in pairs:
cv2.line(frame, tuple(h[0]), tuple(h[1]), (0, 0, 255), 2)
processedImg = frame.copy()
create = None
frameno = 0
filename = "input.gif"
yolo = ""
opname = "output.mp4"
cap = cv2.VideoCapture(filename)
# Main function where input video is broken into single frames and performs all the
# functions above, creates output frame and combines it to create an output video
time1 = time.time()
while True:
ret, frame = cap.read()
if not ret:
break
current_img = frame.copy()
current_img = imutils.resize(current_img, width=480)
video = current_img.shape
frameno += 1
if (frameno % 2 == 0 or frameno == 1):
Setup(yolo)
ImageProcess(current_img)
Frame = processedImg
if create is None:
fourcc = cv2.VideoWriter_fourcc(*'XVID')
create = cv2.VideoWriter(opname, fourcc, 30, (Frame.shape[1], Frame.shape[0]), True)
create.write(Frame)
if cv2.waitKey(1) & 0xFF == ord('s'):
break
time2 = time.time()
print("Completed. Total Time Taken: {} minutes".format((time2 - time1) / 60))
cap.release()
cv2.destroyAllWindows()
|
[
"cv2.dnn.NMSBoxes",
"cv2.VideoWriter_fourcc",
"numpy.argmax",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"cv2.dnn.readNetFromDarknet",
"time.time",
"cv2.VideoCapture",
"cv2.rectangle",
"numpy.array",
"cv2.VideoWriter",
"imutils.resize",
"cv2.destroyAllWindows",
"os.path.sep.join"
] |
[((3685, 3711), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (3701, 3711), False, 'import cv2\n'), ((3888, 3899), 'time.time', 'time.time', ([], {}), '()\n', (3897, 3899), False, 'import time\n'), ((4501, 4512), 'time.time', 'time.time', ([], {}), '()\n', (4510, 4512), False, 'import time\n'), ((4606, 4629), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4627, 4629), False, 'import cv2\n'), ((640, 707), 'os.path.sep.join', 'os.path.sep.join', (["[yolo, '/users/anshsahny/darknet/yolov3.weights']"], {}), "([yolo, '/users/anshsahny/darknet/yolov3.weights'])\n", (656, 707), False, 'import os\n'), ((721, 788), 'os.path.sep.join', 'os.path.sep.join', (["[yolo, '/users/anshsahny/darknet/cfg/yolov3.cfg']"], {}), "([yolo, '/users/anshsahny/darknet/cfg/yolov3.cfg'])\n", (737, 788), False, 'import os\n'), ((806, 874), 'os.path.sep.join', 'os.path.sep.join', (["[yolo, '/users/anshsahny/darknet/data/coco.names']"], {}), "([yolo, '/users/anshsahny/darknet/data/coco.names'])\n", (822, 874), False, 'import os\n'), ((942, 985), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['config', 'weights'], {}), '(config, weights)\n', (968, 985), False, 'import cv2\n'), ((1394, 1470), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1 / 255.0)', '(416, 416)'], {'swapRB': '(True)', 'crop': '(False)'}), '(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n', (1415, 1470), False, 'import cv2\n'), ((1510, 1521), 'time.time', 'time.time', ([], {}), '()\n', (1519, 1521), False, 'import time\n'), ((1572, 1583), 'time.time', 'time.time', ([], {}), '()\n', (1581, 1583), False, 'import time\n'), ((2378, 2426), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['outline', 'confidences', '(0.5)', '(0.3)'], {}), '(outline, confidences, 0.5, 0.3)\n', (2394, 2426), False, 'import cv2\n'), ((4022, 4060), 'imutils.resize', 'imutils.resize', (['current_img'], {'width': '(480)'}), '(current_img, width=480)\n', (4036, 4060), False, 'import imutils\n'), ((1847, 1864), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1856, 1864), True, 'import numpy as np\n'), ((4284, 4315), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (4306, 4315), False, 'import cv2\n'), ((4337, 4412), 'cv2.VideoWriter', 'cv2.VideoWriter', (['opname', 'fourcc', '(30)', '(Frame.shape[1], Frame.shape[0])', '(True)'], {}), '(opname, fourcc, 30, (Frame.shape[1], Frame.shape[0]), True)\n', (4352, 4412), False, 'import cv2\n'), ((4444, 4458), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4455, 4458), False, 'import cv2\n'), ((3268, 3328), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 0, 150)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 0, 150), 2)\n', (3281, 3328), False, 'import cv2\n'), ((3386, 3446), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (3399, 3446), False, 'import cv2\n'), ((2036, 2058), 'numpy.array', 'np.array', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (2044, 2058), True, 'import numpy as np\n')]
|
"""
Transformer implementation
Author: <NAME>
Date: 2021/3/7
REF: http://nlp.seas.harvard.edu/2018/04/03/attention.html
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
from torch.autograd import Variable
from torch.nn.modules.container import Sequential
from torch.nn.modules.normalization import LayerNorm
class EncoderDecoder(nn.Module):
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
return self.decode(self.encode(src, src_mask), src_mask, tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)  # tgt_mask was dropped in the original
class Generator(nn.Module):
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, X):
return F.log_softmax(self.proj(X), dim=-1)
def clones(module, N):
""" 将一个模型拷贝多次叠加 """
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
""" 编码器 """
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, X, mask):
for layer in self.layers:
X = layer(X, mask)
return self.norm(X)
class LayerNorm(nn.Module):
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, X):
mean = X.mean(-1, keepdim=True)
std = X.std(-1, keepdim=True)
return self.a_2 * (X - mean) / (std + self.eps) + self.b_2
class SubLayerConnection(nn.Module):
""" 残差连接 """
def __init__(self, size, dropout):
"""
Param
-----
:size
:dropout
"""
super(SubLayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, X, sublayer):
return X + self.dropout(sublayer(self.norm(X)))
class EncoderLayer(nn.Module):
""" 编码器的一层 """
def __init__(self, size, self_attn, feed_forward, dropout):
"""
Param
-----
:size
:self_attn
:feed_forward
:dropout
"""
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SubLayerConnection(size, dropout), 2)
self.size = size
def forward(self, X, mask):
X = self.sublayer[0](X, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](X, self.feed_forward)
class Decoder(nn.Module):
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, X, memory, src_mask, tgt_mask):
for layer in self.layers:
X = layer(X, memory, src_mask, tgt_mask)
return self.norm(X)
class DecoderLayer(nn.Module):
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SubLayerConnection(size, dropout), 3)
def forward(self, X, memory, src_mask, tgt_mask):
m = memory
X = self.sublayer[0](X, lambda x:self.self_attn(x, x, x, tgt_mask))
X = self.sublayer[1](X, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](X, self.feed_forward)
def subsequent_mask(size):
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k = 1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
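# Editor's illustration: subsequent_mask(3) yields a (1, 3, 3) boolean tensor
#   [[[ True, False, False],
#     [ True,  True, False],
#     [ True,  True,  True]]]
# i.e. position i may attend only to positions <= i.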
def attention(query, key, value, mask=None, dropout=None):
d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)  # math.sqrt: d_k is a Python int, not a tensor
if mask is not None:
scores = scores.masked_fill(mask == 0, -100)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
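# Editor's sketch of the expected shapes (2 batches, 8 heads, seq 10, d_k 64 are illustrative):
#   q = k = v = torch.randn(2, 8, 10, 64)
#   out, p = attention(q, k, v)   # out: (2, 8, 10, 64), p: (2, 8, 10, 10)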
class MultiHeadAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
super(MultiHeadAttention, self).__init__()
assert d_model % h == 0
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
if mask is not None:
mask = mask.unsqueeze(1)
nbatches = query.size(0)
query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))]
x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h*self.d_k)
return self.linears[-1](x)
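# Editor's note: forward() above reshapes each projection to
# (nbatches, h, seq_len, d_k), runs scaled dot-product attention per head, and
# concatenates the heads back to d_model before the final linear layer.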
class PositionwiseFeedForward(nn.Module):
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, X):
return self.w_2(self.dropout(F.relu(self.w_1(X))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, X):
        return self.lut(X) * math.sqrt(self.d_model)  # math.sqrt: d_model is a Python int
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout, max_len=5000):
"""
        Positional encoding
        Param
        -----
        :d_model model dimension (size of the output encoding)
        :dropout
        :max_len maximum sentence length
"""
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
# (max_len, 1)
div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
# (d_model/2)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
# (1, max_len, d_model)
self.register_buffer('pe', pe)
def forward(self, X):
X = X + Variable(self.pe[:, :X.size(1)], requires_grad=False)
return self.dropout(X)
import matplotlib.pyplot as plt
plt.figure(figsize=(15, 5))
pe = PositionalEncoding(20, 0)
y = pe.forward(Variable(torch.zeros(1, 100, 20)))
plt.plot(np.arange(100), y[0,:,4:8].data.numpy())
plt.show()
def make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
"""
    Build the model
    Param
    -----
    :src_vocab
    :tgt_vocab
    :N
    :d_model model dimension
    :d_ff
    :h number of attention heads
:dropout
"""
c = copy.deepcopy
attn = MultiHeadAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
Generator(d_model, tgt_vocab)
)
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
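# Editor's usage sketch (a toy vocabulary size; the values are illustrative):
#   tmp_model = make_model(src_vocab=11, tgt_vocab=11, N=2)
#   print(sum(p.numel() for p in tmp_model.parameters()))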
|
[
"torch.nn.Dropout",
"torch.sqrt",
"torch.nn.Embedding",
"numpy.ones",
"torch.cos",
"matplotlib.pyplot.figure",
"numpy.arange",
"torch.arange",
"torch.ones",
"torch.nn.Linear",
"torch.zeros",
"math.log",
"torch.matmul",
"torch.nn.modules.normalization.LayerNorm",
"copy.deepcopy",
"matplotlib.pyplot.show",
"torch.nn.init.xavier_uniform_",
"torch.from_numpy",
"torch.nn.functional.softmax",
"torch.sin"
] |
[((7195, 7222), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (7205, 7222), True, 'import matplotlib.pyplot as plt\n'), ((7354, 7364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7362, 7364), True, 'import matplotlib.pyplot as plt\n'), ((4623, 4648), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (4632, 4648), True, 'import torch.nn.functional as F\n'), ((7313, 7327), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (7322, 7327), True, 'import numpy as np\n'), ((1178, 1203), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'vocab'], {}), '(d_model, vocab)\n', (1187, 1203), True, 'import torch.nn as nn\n'), ((1578, 1599), 'torch.nn.modules.normalization.LayerNorm', 'LayerNorm', (['layer.size'], {}), '(layer.size)\n', (1587, 1599), False, 'from torch.nn.modules.normalization import LayerNorm\n'), ((2404, 2419), 'torch.nn.modules.normalization.LayerNorm', 'LayerNorm', (['size'], {}), '(size)\n', (2413, 2419), False, 'from torch.nn.modules.normalization import LayerNorm\n'), ((2443, 2462), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2453, 2462), True, 'import torch.nn as nn\n'), ((3349, 3370), 'torch.nn.modules.normalization.LayerNorm', 'LayerNorm', (['layer.size'], {}), '(layer.size)\n', (3358, 3370), False, 'from torch.nn.modules.normalization import LayerNorm\n'), ((4334, 4367), 'torch.from_numpy', 'torch.from_numpy', (['subsequent_mask'], {}), '(subsequent_mask)\n', (4350, 4367), False, 'import torch\n'), ((4516, 4531), 'torch.sqrt', 'torch.sqrt', (['d_k'], {}), '(d_k)\n', (4526, 4531), False, 'import torch\n'), ((4721, 4748), 'torch.matmul', 'torch.matmul', (['p_attn', 'value'], {}), '(p_attn, value)\n', (4733, 4748), False, 'import torch\n'), ((5089, 5110), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (5099, 5110), True, 'import torch.nn as nn\n'), ((5779, 5803), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (5788, 5803), True, 'import torch.nn as nn\n'), ((5823, 5847), 'torch.nn.Linear', 'nn.Linear', (['d_ff', 'd_model'], {}), '(d_ff, d_model)\n', (5832, 5847), True, 'import torch.nn as nn\n'), ((5871, 5890), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (5881, 5890), True, 'import torch.nn as nn\n'), ((6114, 6142), 'torch.nn.Embedding', 'nn.Embedding', (['vocab', 'd_model'], {}), '(vocab, d_model)\n', (6126, 6142), True, 'import torch.nn as nn\n'), ((6565, 6586), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (6575, 6586), True, 'import torch.nn as nn\n'), ((6601, 6630), 'torch.zeros', 'torch.zeros', (['max_len', 'd_model'], {}), '(max_len, d_model)\n', (6612, 6630), False, 'import torch\n'), ((6846, 6876), 'torch.sin', 'torch.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (6855, 6876), False, 'import torch\n'), ((6899, 6929), 'torch.cos', 'torch.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (6908, 6929), False, 'import torch\n'), ((7278, 7301), 'torch.zeros', 'torch.zeros', (['(1)', '(100)', '(20)'], {}), '(1, 100, 20)\n', (7289, 7301), False, 'import torch\n'), ((1360, 1381), 'copy.deepcopy', 'copy.deepcopy', (['module'], {}), '(module)\n', (1373, 1381), False, 'import copy\n'), ((1878, 1898), 'torch.ones', 'torch.ones', (['features'], {}), '(features)\n', (1888, 1898), False, 'import torch\n'), ((1932, 1953), 'torch.zeros', 'torch.zeros', (['features'], {}), '(features)\n', (1943, 1953), False, 
'import torch\n'), ((5009, 5036), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (5018, 5036), True, 'import torch.nn as nn\n'), ((6234, 6258), 'torch.sqrt', 'torch.sqrt', (['self.d_model'], {}), '(self.d_model)\n', (6244, 6258), False, 'import torch\n'), ((8186, 8212), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['p'], {}), '(p)\n', (8209, 8212), True, 'import torch.nn as nn\n'), ((4279, 4298), 'numpy.ones', 'np.ones', (['attn_shape'], {}), '(attn_shape)\n', (4286, 4298), True, 'import numpy as np\n'), ((6650, 6674), 'torch.arange', 'torch.arange', (['(0)', 'max_len'], {}), '(0, max_len)\n', (6662, 6674), False, 'import torch\n'), ((6740, 6767), 'torch.arange', 'torch.arange', (['(0)', 'd_model', '(2)'], {}), '(0, d_model, 2)\n', (6752, 6767), False, 'import torch\n'), ((6772, 6789), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (6780, 6789), False, 'import math\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import sys
from environment import MountainCar
def get_state(mode, state):
if mode == 'raw':
s = np.zeros((2, 1))
else:
        s = np.zeros((2048, 1))
    # the environment returns the state as a sparse index->value dict in both
    # modes (editor's assumption), so fill the dense vector outside the branch
    for k in state:
        s[k] = state[k]
return s
def get_q(s, w, b):
return w.T @ s + b
def train_q_learning(mode,
episodes,
max_episode_len,
epsilon,
gamma,
lr):
n_S = 2048
n_A = 3
if mode == 'raw':
n_S = 2
w = np.zeros((n_S, n_A))
b = 0
returns = []
car = MountainCar(mode)
for i in range(episodes):
s = get_state(mode, car.reset())
R = 0.
g = 1.
for t in range(max_episode_len):
assert s.shape[0] <= n_S
isGreedy = np.random.uniform(0, 1, 1) >= epsilon
if isGreedy:
a = int(np.argmax(get_q(s, w, b)))
else:
a = int(np.random.randint(0, n_A, 1))
state, r, done = car.step(a)
s_ = get_state(mode, state)
R += (r * g)
# g *= gamma
q_sa = float(get_q(s, w, b)[a])
# if done:
# max_q_s_ = 0
# else:
max_q_s_ = float(np.max(get_q(s_, w, b)))
td = q_sa - (r + gamma * max_q_s_)
w[:, a] = w[:, a] - lr * td * s.flatten()
b = b - lr * td
s = s_
if done:
break
returns.append(R)
return np.array(returns), w, b
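# Editor's note on the update above: with linear function approximation
# q(s, a) = w[:, a]^T s + b, the TD error is td = q(s, a) - (r + gamma * max_a' q(s', a')),
# and w[:, a] -= lr * td * s is the gradient step on the loss 0.5 * td^2.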
def write_array_to_file(filepath, array):
"""Write a numpy matrix to a file.
Args:
filepath (str): File path.
array (ndarray): Numpy 1D array.
"""
assert len(array.shape) < 2
out_str = '\n'.join(array.astype('U'))
with open(filepath, 'w') as f:
f.write(out_str)
print('Array written to {0} successfully!'.format(filepath))
def moving_average(a, n=3):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
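# Editor's illustration: moving_average(np.array([1., 2., 3., 4.]), n=2)
# returns array([1.5, 2.5, 3.5]) -- the mean of each length-2 window.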
if __name__ == "__main__":
assert len(sys.argv) == 1 + 8
mode = sys.argv[1]
weight_out = sys.argv[2]
returns_out = sys.argv[3]
episodes = int(sys.argv[4])
max_episode_len = int(sys.argv[5])
epsilon = float(sys.argv[6])
gamma = float(sys.argv[7])
lr = float(sys.argv[8])
print(f'{mode = }')
print(f'{weight_out = }')
print(f'{returns_out = }')
print(f'{episodes = }')
print(f'{max_episode_len = }')
print(f'{epsilon = }')
print(f'{gamma = }')
print(f'{lr = }')
returns, w, b = train_q_learning(mode,
episodes,
max_episode_len,
epsilon,
gamma,
lr)
weights = np.concatenate((np.array([b]), w.flatten()))
write_array_to_file(weight_out, weights)
write_array_to_file(returns_out, returns)
plt.plot(list(range(1, episodes + 1)), returns, 'ro-', label='returns')
plt.plot(list(range(25, episodes + 1)), moving_average(returns, 25), 'bo-', label='rolling_mean')
plt.title('Tile')
plt.legend()
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"environment.MountainCar",
"matplotlib.pyplot.legend",
"numpy.zeros",
"numpy.cumsum",
"numpy.random.randint",
"numpy.array"
] |
[((618, 638), 'numpy.zeros', 'np.zeros', (['(n_S, n_A)'], {}), '((n_S, n_A))\n', (626, 638), True, 'import numpy as np\n'), ((683, 700), 'environment.MountainCar', 'MountainCar', (['mode'], {}), '(mode)\n', (694, 700), False, 'from environment import MountainCar\n'), ((2142, 2167), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (2151, 2167), True, 'import numpy as np\n'), ((3409, 3426), 'matplotlib.pyplot.title', 'plt.title', (['"""Tile"""'], {}), "('Tile')\n", (3418, 3426), True, 'import matplotlib.pyplot as plt\n'), ((3432, 3444), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3442, 3444), True, 'import matplotlib.pyplot as plt\n'), ((3450, 3460), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3458, 3460), True, 'import matplotlib.pyplot as plt\n'), ((172, 188), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (180, 188), True, 'import numpy as np\n'), ((213, 232), 'numpy.zeros', 'np.zeros', (['(2048, 1)'], {}), '((2048, 1))\n', (221, 232), True, 'import numpy as np\n'), ((1677, 1694), 'numpy.array', 'np.array', (['returns'], {}), '(returns)\n', (1685, 1694), True, 'import numpy as np\n'), ((3098, 3111), 'numpy.array', 'np.array', (['[b]'], {}), '([b])\n', (3106, 3111), True, 'import numpy as np\n'), ((918, 944), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (935, 944), True, 'import numpy as np\n'), ((1080, 1108), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_A', '(1)'], {}), '(0, n_A, 1)\n', (1097, 1108), True, 'import numpy as np\n')]
|
# - * - coding: utf - 8 - * -
"""
This module tests the functions in dam_tol.py.
"""
__version__ = '1.0'
__author__ = '<NAME>'
import sys
import pytest
import numpy as np
sys.path.append(r'C:\LAYLA')
from src.LAYLA_V02.constraints import Constraints
from src.guidelines.dam_tol import is_dam_tol
@pytest.mark.parametrize(
"stack, constraints, expect", [
(np.array([45, 0, -45]), Constraints(dam_tol=True, dam_tol_rule=1), True),
(np.array([45, 0, 0]), Constraints(dam_tol=True, dam_tol_rule=1), False),
(np.array([45, -45, 0, 45, -45]), Constraints(dam_tol=True, dam_tol_rule=2), True),
(np.array([45, -45, 0, 90, -45]), Constraints(dam_tol=True, dam_tol_rule=2), False),
])
def test_is_dam_tol(stack, constraints, expect):
output = is_dam_tol(stack, constraints)
assert output == expect
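# Editor's note: run these cases with `pytest -q` (the C:\LAYLA path appended
# above must exist for the src.* imports to resolve).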
|
[
"sys.path.append",
"src.LAYLA_V02.constraints.Constraints",
"numpy.array",
"src.guidelines.dam_tol.is_dam_tol"
] |
[((176, 204), 'sys.path.append', 'sys.path.append', (['"""C:\\\\LAYLA"""'], {}), "('C:\\\\LAYLA')\n", (191, 204), False, 'import sys\n'), ((788, 818), 'src.guidelines.dam_tol.is_dam_tol', 'is_dam_tol', (['stack', 'constraints'], {}), '(stack, constraints)\n', (798, 818), False, 'from src.guidelines.dam_tol import is_dam_tol\n'), ((373, 395), 'numpy.array', 'np.array', (['[45, 0, -45]'], {}), '([45, 0, -45])\n', (381, 395), True, 'import numpy as np\n'), ((397, 438), 'src.LAYLA_V02.constraints.Constraints', 'Constraints', ([], {'dam_tol': '(True)', 'dam_tol_rule': '(1)'}), '(dam_tol=True, dam_tol_rule=1)\n', (408, 438), False, 'from src.LAYLA_V02.constraints import Constraints\n'), ((456, 476), 'numpy.array', 'np.array', (['[45, 0, 0]'], {}), '([45, 0, 0])\n', (464, 476), True, 'import numpy as np\n'), ((478, 519), 'src.LAYLA_V02.constraints.Constraints', 'Constraints', ([], {'dam_tol': '(True)', 'dam_tol_rule': '(1)'}), '(dam_tol=True, dam_tol_rule=1)\n', (489, 519), False, 'from src.LAYLA_V02.constraints import Constraints\n'), ((538, 569), 'numpy.array', 'np.array', (['[45, -45, 0, 45, -45]'], {}), '([45, -45, 0, 45, -45])\n', (546, 569), True, 'import numpy as np\n'), ((571, 612), 'src.LAYLA_V02.constraints.Constraints', 'Constraints', ([], {'dam_tol': '(True)', 'dam_tol_rule': '(2)'}), '(dam_tol=True, dam_tol_rule=2)\n', (582, 612), False, 'from src.LAYLA_V02.constraints import Constraints\n'), ((630, 661), 'numpy.array', 'np.array', (['[45, -45, 0, 90, -45]'], {}), '([45, -45, 0, 90, -45])\n', (638, 661), True, 'import numpy as np\n'), ((663, 704), 'src.LAYLA_V02.constraints.Constraints', 'Constraints', ([], {'dam_tol': '(True)', 'dam_tol_rule': '(2)'}), '(dam_tol=True, dam_tol_rule=2)\n', (674, 704), False, 'from src.LAYLA_V02.constraints import Constraints\n')]
|
import numpy as np
import torch
import logging
logger = logging.getLogger(__name__)
class Learner(object):
def __init__(self, config):
self.config = config
self.npr = np.random.RandomState(config.seed)
self.calibrate = config.learner.calibrate
self.semi_supervised = config.learner.semi_supervised
self.risk_thres = config.learner.risk_thres
self.use_cuda = torch.cuda.is_available()
self.early_stop_scope = config.learner.early_stop_scope
self.prototype_as_val = config.learner.prototype_as_val
def save_state(self):
raise NotImplementedError
def load_state(self):
raise NotImplementedError
def fit_and_predict(self, features, prototype_targets, belief, n_annotation, ground_truth):
raise NotImplementedError
class DummyLearner(Learner):
def __init__(self, config):
Learner.__init__(self, config)
logger.info('No learner is used')
def save_state(self):
pass
def load_state(self, state):
pass
def fit_and_predict(self, features, prototype_targets, belief, n_annotation, ground_truth):
return None
def get_learner_class(config):
from .nn_learner import LinearNNLearner
if config.learner.algo == 'dummy':
return DummyLearner
elif config.learner.algo == 'mlp':
return LinearNNLearner
else:
raise ValueError
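# Editor's usage sketch (the config object is assumed to expose the attributes
# read above, e.g. config.learner.algo and config.seed):
#   LearnerCls = get_learner_class(config)
#   learner = LearnerCls(config)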
|
[
"torch.cuda.is_available",
"numpy.random.RandomState",
"logging.getLogger"
] |
[((57, 84), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (74, 84), False, 'import logging\n'), ((190, 224), 'numpy.random.RandomState', 'np.random.RandomState', (['config.seed'], {}), '(config.seed)\n', (211, 224), True, 'import numpy as np\n'), ((413, 438), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (436, 438), False, 'import torch\n')]
|
from typing import Optional, Callable
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.serialization import save
import numpy as np
import pandas as pd
from glog import logger
import joblib as jl
from sklearn.impute import SimpleImputer
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from fire import Fire
class Baseline(nn.Module):
def __init__(self, n_features, inner_size=1024):
super().__init__()
self.features = nn.Linear(n_features, inner_size)
self.model = nn.Sequential(
nn.LeakyReLU(),
nn.Dropout(p=.5),
nn.Linear(inner_size, 14),
)
def forward(self, x):
x = self.features(x)
return self.model(x)
class PlasticcDataset(Dataset):
    def __init__(self, x_data: np.ndarray, y_data: np.ndarray, folds: tuple):
data = zip(x_data, y_data)
self.data = [x for i, x in enumerate(data) if i % 5 in folds]
logger.info(f'There are {len(self.data)} records in the dataset')
self.features_shape = x_data.shape
def __len__(self):
return len(self.data)
def __getitem__(self, item):
return self.data[item]
def map_classes(y_full):
classes = sorted(list(set(y_full)))
mapping = {}
for i, y in enumerate(classes):
mapping[y] = i
logger.info(f'Mapping is {mapping}')
return np.array([mapping[y] for y in y_full])
def read_train():
data = pd.read_csv('data/processed_training.csv', engine='c', sep=';')
data.pop('object_id')
y_full = data.pop('target').values
x_full = data.values.astype('float32')
x_full[np.isnan(x_full)] = 0
x_full[np.isinf(x_full)] = 0
return x_full, y_full
def prepare_data():
if os.path.exists('data/train.bin'):
return jl.load('data/train.bin')
x_full, y_full = read_train()
imputer = SimpleImputer()
vt = VarianceThreshold(threshold=.0001)
pipeline = make_pipeline(imputer, vt, StandardScaler())
x_full = pipeline.fit_transform(x_full)
jl.dump(pipeline, 'preprocess.bin')
y_full = map_classes(y_full)
x_full = x_full.astype('float32')
jl.dump((x_full, y_full), 'data/train.bin')
return x_full, y_full
def make_dataloaders():
x_full, y_full = prepare_data()
train = PlasticcDataset(x_data=x_full, y_data=y_full, folds=(0, 1, 2, 3))
val = PlasticcDataset(x_data=x_full, y_data=y_full, folds=(4,))
shared_params = {'batch_size': 2048, 'shuffle': True}
train = DataLoader(train, drop_last=True, **shared_params)
val = DataLoader(val, drop_last=False, **shared_params)
return train, val
class Trainer:
def __init__(self,
model: nn.Module,
train: DataLoader,
val: DataLoader,
epochs: int = 500,
optimizer: Optional[torch.optim.Optimizer] = None,
loss_fn: Optional[Callable] = None,
scheduler: Optional[ReduceLROnPlateau] = None,
reg_lambda: float = .00002,
reg_norm: int = 1,
device: str = 'cuda:0',
checkpoint: str = './model.pt',
):
self.epochs = epochs
self.model = model.to(device)
self.device = device
self.train = train
self.val = val
self.optimizer = optimizer if optimizer is not None else torch.optim.Adam(model.parameters(), lr=1e-3)
self.scheduler = scheduler if scheduler is not None else ReduceLROnPlateau(optimizer=self.optimizer,
verbose=True)
self.loss_fn = loss_fn if loss_fn is not None else F.cross_entropy
self.reg_lambda = reg_lambda
self.reg_norm = reg_norm
self.current_metric = -np.inf
self.last_improvement = 0
self.checkpoint = checkpoint
def fit_one_epoch(self, n_epoch):
self.model.train(True)
losses, reg_losses = [], []
for i, (x, y) in enumerate(self.train):
x, y = x.to(self.device), y.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(x)
loss = self.loss_fn(outputs, y)
losses.append(loss.item())
for param in self.model.model.parameters():
loss += self.reg_lambda * torch.norm(param, p=self.reg_norm)
reg_loss = loss.item()
reg_losses.append(reg_loss)
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), 1)
self.optimizer.step()
self.model.train(False)
val_losses = []
y_pred_acc, y_true_acc = [], []
with torch.no_grad():
for i, (x, y) in enumerate(self.val):
x, y = x.to(self.device), y.to(self.device)
outputs = self.model(x)
loss = self.loss_fn(outputs, y)
val_losses.append(loss.item())
y_pred_acc.append(outputs.detach().cpu().numpy())
y_true_acc.append(y.detach().cpu().numpy())
train_loss = np.mean(losses)
train_reg_loss = np.mean(reg_losses)
val_loss = np.mean(val_losses)
msg = f'Epoch {n_epoch}: train loss is {train_loss:.5f} (raw), {train_reg_loss:.5f} (reg); val loss is {val_loss:.5f}'
logger.info(msg)
self.scheduler.step(metrics=val_loss, epoch=n_epoch)
        # np.vstack would mis-stack the 1-D label batches (and fail when the
        # last val batch is shorter), so stack predictions and concatenate labels
        y_pred_acc = np.vstack(y_pred_acc)
        y_true_acc = np.concatenate(y_true_acc)
metric = self.evaluate(y_pred=y_pred_acc, y_true=y_true_acc)
if metric > self.current_metric:
self.current_metric = metric
self.last_improvement = n_epoch
save(self.model, f=self.checkpoint)
logger.info(f'Best model has been saved at {n_epoch}, accuracy is {metric:.4f}')
return train_loss, val_loss, metric
def evaluate(self, y_pred, y_true):
return (y_pred.argmax(-1) == y_true).sum() / y_true.shape[-1]
def fit(self):
for i in range(self.epochs):
self.fit_one_epoch(i)
def fit(inner_size=1024, **kwargs):
train, val = make_dataloaders()
trainer = Trainer(model=Baseline(n_features=train.dataset.features_shape[1],
inner_size=inner_size,
),
train=train,
val=val,
loss_fn=F.cross_entropy,
**kwargs
)
trainer.fit()
if __name__ == '__main__':
Fire(fit)
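# Editor's usage sketch: python-fire exposes fit()'s keyword arguments as CLI
# flags, and extra flags flow into Trainer via **kwargs (flag names are
# assumptions derived from the signatures above):
#   python baseline.py --inner_size=512 --epochs=200 --reg_lambda=1e-5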
|
[
"torch.nn.Dropout",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"joblib.dump",
"numpy.isnan",
"numpy.mean",
"torch.no_grad",
"sklearn.impute.SimpleImputer",
"torch.utils.data.DataLoader",
"os.path.exists",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.Linear",
"sklearn.feature_selection.VarianceThreshold",
"torch.norm",
"numpy.isinf",
"torch.nn.LeakyReLU",
"glog.logger.info",
"fire.Fire",
"numpy.array",
"joblib.load",
"torch.serialization.save"
] |
[((1547, 1583), 'glog.logger.info', 'logger.info', (['f"""Mapping is {mapping}"""'], {}), "(f'Mapping is {mapping}')\n", (1558, 1583), False, 'from glog import logger\n'), ((1595, 1633), 'numpy.array', 'np.array', (['[mapping[y] for y in y_full]'], {}), '([mapping[y] for y in y_full])\n', (1603, 1633), True, 'import numpy as np\n'), ((1665, 1728), 'pandas.read_csv', 'pd.read_csv', (['"""data/processed_training.csv"""'], {'engine': '"""c"""', 'sep': '""";"""'}), "('data/processed_training.csv', engine='c', sep=';')\n", (1676, 1728), True, 'import pandas as pd\n'), ((1958, 1990), 'os.path.exists', 'os.path.exists', (['"""data/train.bin"""'], {}), "('data/train.bin')\n", (1972, 1990), False, 'import os\n'), ((2082, 2097), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {}), '()\n', (2095, 2097), False, 'from sklearn.impute import SimpleImputer\n'), ((2107, 2142), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {'threshold': '(0.0001)'}), '(threshold=0.0001)\n', (2124, 2142), False, 'from sklearn.feature_selection import VarianceThreshold\n'), ((2251, 2286), 'joblib.dump', 'jl.dump', (['pipeline', '"""preprocess.bin"""'], {}), "(pipeline, 'preprocess.bin')\n", (2258, 2286), True, 'import joblib as jl\n'), ((2363, 2406), 'joblib.dump', 'jl.dump', (['(x_full, y_full)', '"""data/train.bin"""'], {}), "((x_full, y_full), 'data/train.bin')\n", (2370, 2406), True, 'import joblib as jl\n'), ((2715, 2765), 'torch.utils.data.DataLoader', 'DataLoader', (['train'], {'drop_last': '(True)'}), '(train, drop_last=True, **shared_params)\n', (2725, 2765), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2776, 2825), 'torch.utils.data.DataLoader', 'DataLoader', (['val'], {'drop_last': '(False)'}), '(val, drop_last=False, **shared_params)\n', (2786, 2825), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6802, 6811), 'fire.Fire', 'Fire', (['fit'], {}), '(fit)\n', (6806, 6811), False, 'from fire import Fire\n'), ((688, 721), 'torch.nn.Linear', 'nn.Linear', (['n_features', 'inner_size'], {}), '(n_features, inner_size)\n', (697, 721), False, 'from torch import nn\n'), ((1848, 1864), 'numpy.isnan', 'np.isnan', (['x_full'], {}), '(x_full)\n', (1856, 1864), True, 'import numpy as np\n'), ((1881, 1897), 'numpy.isinf', 'np.isinf', (['x_full'], {}), '(x_full)\n', (1889, 1897), True, 'import numpy as np\n'), ((2007, 2032), 'joblib.load', 'jl.load', (['"""data/train.bin"""'], {}), "('data/train.bin')\n", (2014, 2032), True, 'import joblib as jl\n'), ((2184, 2200), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2198, 2200), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5355, 5370), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (5362, 5370), True, 'import numpy as np\n'), ((5396, 5415), 'numpy.mean', 'np.mean', (['reg_losses'], {}), '(reg_losses)\n', (5403, 5415), True, 'import numpy as np\n'), ((5435, 5454), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (5442, 5454), True, 'import numpy as np\n'), ((5590, 5606), 'glog.logger.info', 'logger.info', (['msg'], {}), '(msg)\n', (5601, 5606), False, 'from glog import logger\n'), ((770, 784), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (782, 784), False, 'from torch import nn\n'), ((798, 815), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (808, 815), False, 'from torch import nn\n'), ((828, 853), 'torch.nn.Linear', 'nn.Linear', (['inner_size', '(14)'], {}), '(inner_size, 14)\n', (837, 853), False, 'from torch 
import nn\n'), ((3727, 3784), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'optimizer': 'self.optimizer', 'verbose': '(True)'}), '(optimizer=self.optimizer, verbose=True)\n', (3744, 3784), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((4943, 4958), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4956, 4958), False, 'import torch\n'), ((5952, 5987), 'torch.serialization.save', 'save', (['self.model'], {'f': 'self.checkpoint'}), '(self.model, f=self.checkpoint)\n', (5956, 5987), False, 'from torch.serialization import save\n'), ((6000, 6085), 'glog.logger.info', 'logger.info', (['f"""Best model has been saved at {n_epoch}, accuracy is {metric:.4f}"""'], {}), "(f'Best model has been saved at {n_epoch}, accuracy is {metric:.4f}'\n )\n", (6011, 6085), False, 'from glog import logger\n'), ((4591, 4625), 'torch.norm', 'torch.norm', (['param'], {'p': 'self.reg_norm'}), '(param, p=self.reg_norm)\n', (4601, 4625), False, 'import torch\n')]
|
import numpy as np
import sympy
from enum import Enum
from detmodel.hit import Hit
from detmodel.signal import Signal, Segment
from detmodel.muon import Muon
from detmodel import util
class DetType(Enum):
MM = 'mm'
MDT = 'mdt'
STGC = 'stgc'
def asint(self):
return {
DetType.MM: 0,
DetType.MDT: 1,
DetType.STGC: 2
}.get(self)
class Plane:
## planes are aligned in z
## tilt only on x segmentation so far
## width_x: geometrical width of detector in x
## width_y: geometrical width of detector in y
## width_t: time window (in BCs = 25ns) to integrate signal
## n_?_seg: number of allowed segments in each coordinate
## segment size is then width_?/n_?_seg
def __init__(self, type, z, width_x=10, width_y=10, width_t=10,
n_x_seg=10, n_y_seg=0, n_t_seg=10,
x_res=0, y_res=0, z_res=0, t_res=0,
tilt=0, offset=0, max_hits=0, sig_eff=0):
## type
self.p_type = DetType(type)
## geometry
self.z = z
self.point = sympy.Point3D(0,0,z,evaluate=False)
self.plane = sympy.Plane(self.point, normal_vector=(0,0,1))
## noise info
self.noise_rate = 0
self.noise_type = 'constant'
## detector plane tilt and offset
self.tilt = tilt
self.offset = offset
self.max_hits = max_hits
self.sig_eff = sig_eff
## detector geometrical boundaries, assuming squares now
self.sizes = {
'x': width_x, 'y': width_y,
't': width_t
}
## detector spatial segmentation in x and y,
## and timing segmentation in t
## Note: if you have a tilt in x, you need to increase the range
## of the segmentation to ensure full coverage
tilt_width_x_min = -0.5*width_x
tilt_width_x_max = 0.5*width_x
if abs(self.tilt) > 0:
tilt_dx = width_y*np.abs( np.tan(tilt) )
tilt_width_x_max = 0.5*width_x + 0.5*tilt_dx
tilt_width_x_min = -0.5*width_x - 0.5*tilt_dx
self.segmentations = {
'x': np.linspace( tilt_width_x_min+self.offset, tilt_width_x_max+self.offset, n_x_seg+1 ),
'y': np.linspace( -0.5*width_y, 0.5*width_y, n_y_seg+1 ),
't': np.linspace( -0.5*width_t, 0.5*width_t, n_t_seg+1 )
}
self.seg_mids = {}
for coord in self.segmentations:
if len(self.segmentations[coord]) > 1:
self.seg_mids[coord] = 0.5*(self.segmentations[coord][:-1] + self.segmentations[coord][1:])
else:
self.seg_mids[coord] = self.segmentations[coord]
## plane segmentation lines (centers)
self.seg_lines = {'x':[], 'y':[]}
for this_x_center in self.seg_mids['x']:
this_p1 = sympy.Point3D(this_x_center, 0, self.z, evaluate=False)
this_p2 = sympy.Point3D(this_x_center + 0.5*width_y*np.tan(tilt), 0.5*width_y, self.z, evaluate=False)
self.seg_lines['x'].append(Segment( sympy.Line3D(this_p1, this_p2), coord='x', z=self.z ))
for this_y_center in self.seg_mids['y']:
this_p1 = sympy.Point3D(0, this_y_center, self.z, evaluate=False)
this_p2 = sympy.Point3D(0.5*width_x, this_y_center, self.z, evaluate=False)
self.seg_lines['y'].append(Segment( sympy.Line3D(this_p1, this_p2), coord='y', z=self.z ))
## keeping position resolution as 0 for now
## timing resolution of 5 BCs
## Resolution smearings are applied to muon only, since noise is random
self.resolutions = {
'x': x_res, 'y': y_res,
'z': z_res, 't': t_res
}
## raw hits
self.hits = []
def get_edge(self, edge):
# x
# __|__ y
# |
# top and bottom below refer to this orientation
if 'right' in edge:
return sympy.Line3D( sympy.Point3D(-0.5*self.sizes['x'], 0.5*self.sizes['y'], self.z, evaluate=False),
sympy.Point3D( 0.5*self.sizes['x'], 0.5*self.sizes['y'], self.z, evaluate=False) )
elif 'left' in edge:
return sympy.Line3D( sympy.Point3D(-0.5*self.sizes['x'], -0.5*self.sizes['y'], self.z, evaluate=False),
sympy.Point3D( 0.5*self.sizes['x'], -0.5*self.sizes['y'], self.z, evaluate=False) )
elif 'bottom' in edge:
return sympy.Line3D( sympy.Point3D(-0.5*self.sizes['x'], -0.5*self.sizes['y'], self.z, evaluate=False),
sympy.Point3D(-0.5*self.sizes['x'], 0.5*self.sizes['y'], self.z, evaluate=False) )
elif 'top' in edge:
return sympy.Line3D( sympy.Point3D( 0.5*self.sizes['x'], -0.5*self.sizes['y'], self.z, evaluate=False),
sympy.Point3D( 0.5*self.sizes['x'], 0.5*self.sizes['y'], self.z, evaluate=False) )
elif 'midx' in edge:
return sympy.Line3D( sympy.Point3D( 0, 0, self.z, evaluate=False),
sympy.Point3D( 1, 0, self.z, evaluate=False) )
elif 'midy' in edge:
return sympy.Line3D( sympy.Point3D( 0, 0, self.z, evaluate=False),
sympy.Point3D( 0, 1, self.z, evaluate=False) )
else:
print('Must specify: right, left, bottom, top, midx or midy')
return -1
def clear_hits(self):
self.hits = []
for slx in self.seg_lines['x']:
slx.reset()
for sly in self.seg_lines['y']:
sly.reset()
def smear(self, pos, coord):
## smear muon hit position and time
if coord not in self.resolutions:
print('Could not understand coordinate, must be x y z or t, but received', coord)
return -99
if self.resolutions[coord] > 0:
return np.random.normal(pos, self.resolutions[coord])
else:
return pos
def pass_muon(self, muon, randseed=42):
np.random.seed(int(randseed + 10*(self.z)))
## apply signal efficiency
if self.sig_eff > 0:
rnd_number_eff = np.random.uniform(0.0, 1.0)
if rnd_number_eff > self.sig_eff:
return 0 ## missed muon signal
## find intersection of muon and detector plane
pmu_intersect = self.plane.intersection(muon.line)
if len(pmu_intersect) == 0 or len(pmu_intersect) > 1:
print("There should always be one and only one muon-plane intersection. What's happening?")
print(pmu_intersect)
return -1
intersection_point = pmu_intersect[0]
mu_ip_x = self.smear(float(intersection_point.x), 'x')
mu_ip_y = self.smear(float(intersection_point.y), 'y')
mu_ip_z = self.smear(float(intersection_point.z), 'z')
mu_ip_t = self.smear(muon.time, 't')
## if muon is outside the detector fiducial volume
## or outside the time window, return 0
if np.abs(mu_ip_x) > 0.5*self.sizes['x']:
return 0
if np.abs(mu_ip_y) > 0.5*self.sizes['y']:
return 0
if np.abs(mu_ip_t) > 0.5*self.sizes['t']:
return 0
# To compute the drift radius (for MDT detector), need to find detector element (i.e. wire)
# for which this muon has the smallest distance of closest approach to the wire
# Caveat: the calculation below assumes tubes are exactly vertical
mu_ix = -9999
mu_rdrift = 9999.
if self.p_type == DetType.MDT:
for islx, slx in enumerate(self.seg_lines['x']):
wirepos = sympy.Point(slx.line.p1.x, slx.line.p1.z, evaluate=False)
muonpos1 = sympy.Point(muon.line.p1.x, muon.line.p1.z, evaluate=False)
muonpos2 = sympy.Point(muon.line.p2.x, muon.line.p2.z, evaluate=False)
muonline = sympy.Line(muonpos1, muonpos2)
rdrift = muonline.distance(wirepos)
if rdrift.evalf() < mu_rdrift:
mu_ix = islx
mu_rdrift = rdrift.evalf()
# do not record hit if closest wire further than tube radius
if mu_rdrift > 0.5*self.sizes['x']/len(self.seg_lines['x']):
return 0
muhit = Hit(mu_ip_x,
mu_ip_y,
mu_ip_z,
mu_ip_t,
mu_ix,
mu_rdrift,
True)
self.hits.append(muhit)
return 1
def set_noise(self, noise_rate, noise_type='constant'):
self.noise_rate = noise_rate
self.noise_type = noise_type
def add_noise(self, noise_scale, override_n_noise_per_plane=-1, randseed=42):
'''
        p_width_t is the time window in which to integrate the signal (in nanoseconds)
therefore, the number of noise hits is:
noise_scale * noise_rate per strip (Hz) * number of strips * p_width_t (ns) * 1e-9
'''
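        # Editor's worked example: with 100 strips, a 10 kHz per-strip rate,
        # width_t = 250 ns and noise_scale = 1, the expectation is
        # 1 * 100 * 1e4 * 250 * 1e-9 = 0.25 noise hits, which then seeds the
        # Poisson draw below.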
if 'constant' not in self.noise_type:
print('Only support constant noise now')
return -1
if self.sizes['t'] < 1e-15:
print('Time integration width must be larger than 0')
return -1
n_noise_init = noise_scale * (len(self.segmentations['x']) -1) \
* self.noise_rate * self.sizes['t'] * 1e-9
if override_n_noise_per_plane > 0:
n_noise_init = override_n_noise_per_plane
np.random.seed(int(randseed + (self.z)))
n_noise = np.random.poisson(n_noise_init)
noise_x = np.random.uniform(-0.5*self.sizes['x'], 0.5*self.sizes['x'], int(n_noise))
noise_y = np.random.uniform(-0.5*self.sizes['y'], 0.5*self.sizes['y'], int(n_noise))
noise_z = self.z*np.ones(int(n_noise))
noise_t = np.random.uniform(-0.5*self.sizes['t'], 0.5*self.sizes['t'], int(n_noise))
noise_r = np.random.uniform(0.0, 0.5*self.sizes['x']/len(self.seg_lines['x']), int(n_noise))
for inoise in range(int(n_noise)):
# find detector element (segment) closest to each noise hit along x, as needed for MDT
noise_ix = np.argmin( [ np.abs(noise_x[inoise]-xseg.line.p1.x) for xseg in self.seg_lines['x'] ] )
noise_hit = Hit(noise_x[inoise],
noise_y[inoise],
noise_z[inoise],
noise_t[inoise],
noise_ix,
noise_r[inoise],
False)
self.hits.append(noise_hit)
def find_signal(self, this_hit):
## find which detector segment this hit has activated
hit_distancex_seg = None
hit_hash_ix = -10
hit_distancey_seg = None
hit_hash_iy = -10
if self.p_type == DetType.MDT: # association between hit and detector element (segment) already done according to rdrift
hit_hash_ix = this_hit.seg_ix
else:
# if no tilt in this plane or the hit is in the middle of detector element along y,
# speed up finding of nearest element
#hit_hash_ix = np.argmin( [ xseg.line.distance(this_hit.point()) for xseg in self.seg_lines['x'] ] )
hit_hash_ix = np.argmin( [ util.distpoint2line(xseg, this_hit) for xseg in self.seg_lines['x'] ] )
#hit_hash_iy = np.argmin( [ yseg.line.distance(this_hit.point()) for yseg in self.seg_lines['y'] ] )
hit_hash_iy = np.argmin( [ util.distpoint2line(yseg, this_hit) for yseg in self.seg_lines['y'] ] )
## if segment already has signal, skip (but set to muon if new signal is from muon)
if self.seg_lines['x'][hit_hash_ix].is_sig == False or \
self.seg_lines['y'][hit_hash_iy].is_sig == False:
if self.p_type == DetType.STGC and this_hit.rdrift < -9998.: # do not promote as signal
return None
isig = Signal( hash_seg_line_x=hit_hash_ix, hash_seg_line_y=hit_hash_iy,
x=this_hit.x, y=this_hit.y, z=this_hit.z,
time=this_hit.time, seg_ix=this_hit.seg_ix, rdrift=this_hit.rdrift,
is_muon=this_hit.is_muon )
self.seg_lines['x'][hit_hash_ix].add_signal(isig)
self.seg_lines['y'][hit_hash_iy].add_signal(isig)
return isig.get_info_wrt_plane(self, display=False)
else:
if this_hit.is_muon:
if self.seg_lines['x'][hit_hash_ix].is_sig and \
self.seg_lines['y'][hit_hash_iy].is_sig:
self.seg_lines['x'][hit_hash_ix].sig.is_muon = True
self.seg_lines['y'][hit_hash_iy].sig.is_muon = True
return None
def hit_processor(self, summary=False):
## decide on overlapping hits
## sorting hits by which one arrived first
if self.p_type == DetType.MDT:
self.hits.sort(key=lambda hit: hit.rdrift)
else:
self.hits.sort(key=lambda hit: hit.time)
## apply additional position smearing by combining muon and noise hits if sTGC plane
if len(self.hits) > 1 and self.p_type == DetType.STGC:
self.combine_hits(False)
out_signals = []
if summary:
print("Total number of hits:", len(self.hits) )
for ihit in self.hits:
isig_info = self.find_signal(ihit)
if isig_info is not None:
out_signals.append(isig_info)
if self.max_hits > 0 and len(out_signals) == self.max_hits:
break
n_sigs = len(out_signals)
if n_sigs < 1:
return None
n_props = len(out_signals[0])
sig_matrix = np.zeros( (n_sigs, n_props) )
for ns in range(n_sigs):
sig_matrix[ns][:] = list( out_signals[ns].values() )
return (sig_matrix, list(out_signals[ns].keys()) )
def combine_hits(self, summary=False):
## Combine hits in the same plane into one hit
## For the time being, only do so if a muon hit exists
## Background noise hit positions are averaged with muon hit position but with a reduced weight
imu = -1
sumx = 0.
sumw = 0.
ibkg = []
list_seg_ix = [] # store detector segment with hits
for ihit, hit in enumerate(self.hits):
hit_ix = np.argmin( [ util.distpoint2line(xseg, hit) for xseg in self.seg_lines['x'] ] )
# Here we rely on the hits being ordered to avoid multiple hits on same detector segment
if hit_ix in list_seg_ix:
continue
list_seg_ix.append(hit_ix)
if hit.is_muon:
imu = ihit
weight = 1.0
else: # background noise hit
ibkg.append(ihit)
weight = 0.2
sumx += weight*hit.x
sumw += weight
## Update x position of muon hit (if one exists)
if imu >= 0:
self.hits[imu].x = sumx/sumw
## Flag background noise hits
for i in ibkg:
self.hits[i].rdrift = -9999. # use as flag not to promote hit as signal
return None
def return_signal(self, summary=False):
return self.hit_processor(summary)
|
[
"detmodel.util.distpoint2line",
"numpy.random.uniform",
"numpy.abs",
"detmodel.hit.Hit",
"sympy.Line3D",
"sympy.Point",
"numpy.zeros",
"sympy.Plane",
"sympy.Line",
"numpy.tan",
"detmodel.signal.Signal",
"numpy.random.poisson",
"numpy.linspace",
"numpy.random.normal",
"sympy.Point3D"
] |
[((1130, 1168), 'sympy.Point3D', 'sympy.Point3D', (['(0)', '(0)', 'z'], {'evaluate': '(False)'}), '(0, 0, z, evaluate=False)\n', (1143, 1168), False, 'import sympy\n'), ((1187, 1235), 'sympy.Plane', 'sympy.Plane', (['self.point'], {'normal_vector': '(0, 0, 1)'}), '(self.point, normal_vector=(0, 0, 1))\n', (1198, 1235), False, 'import sympy\n'), ((8626, 8689), 'detmodel.hit.Hit', 'Hit', (['mu_ip_x', 'mu_ip_y', 'mu_ip_z', 'mu_ip_t', 'mu_ix', 'mu_rdrift', '(True)'], {}), '(mu_ip_x, mu_ip_y, mu_ip_z, mu_ip_t, mu_ix, mu_rdrift, True)\n', (8629, 8689), False, 'from detmodel.hit import Hit\n'), ((9915, 9946), 'numpy.random.poisson', 'np.random.poisson', (['n_noise_init'], {}), '(n_noise_init)\n', (9932, 9946), True, 'import numpy as np\n'), ((14362, 14389), 'numpy.zeros', 'np.zeros', (['(n_sigs, n_props)'], {}), '((n_sigs, n_props))\n', (14370, 14389), True, 'import numpy as np\n'), ((2191, 2283), 'numpy.linspace', 'np.linspace', (['(tilt_width_x_min + self.offset)', '(tilt_width_x_max + self.offset)', '(n_x_seg + 1)'], {}), '(tilt_width_x_min + self.offset, tilt_width_x_max + self.offset,\n n_x_seg + 1)\n', (2202, 2283), True, 'import numpy as np\n'), ((2290, 2345), 'numpy.linspace', 'np.linspace', (['(-0.5 * width_y)', '(0.5 * width_y)', '(n_y_seg + 1)'], {}), '(-0.5 * width_y, 0.5 * width_y, n_y_seg + 1)\n', (2301, 2345), True, 'import numpy as np\n'), ((2356, 2411), 'numpy.linspace', 'np.linspace', (['(-0.5 * width_t)', '(0.5 * width_t)', '(n_t_seg + 1)'], {}), '(-0.5 * width_t, 0.5 * width_t, n_t_seg + 1)\n', (2367, 2411), True, 'import numpy as np\n'), ((2906, 2961), 'sympy.Point3D', 'sympy.Point3D', (['this_x_center', '(0)', 'self.z'], {'evaluate': '(False)'}), '(this_x_center, 0, self.z, evaluate=False)\n', (2919, 2961), False, 'import sympy\n'), ((3264, 3319), 'sympy.Point3D', 'sympy.Point3D', (['(0)', 'this_y_center', 'self.z'], {'evaluate': '(False)'}), '(0, this_y_center, self.z, evaluate=False)\n', (3277, 3319), False, 'import sympy\n'), ((3342, 3409), 'sympy.Point3D', 'sympy.Point3D', (['(0.5 * width_x)', 'this_y_center', 'self.z'], {'evaluate': '(False)'}), '(0.5 * width_x, this_y_center, self.z, evaluate=False)\n', (3355, 3409), False, 'import sympy\n'), ((6173, 6219), 'numpy.random.normal', 'np.random.normal', (['pos', 'self.resolutions[coord]'], {}), '(pos, self.resolutions[coord])\n', (6189, 6219), True, 'import numpy as np\n'), ((6448, 6475), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (6465, 6475), True, 'import numpy as np\n'), ((7315, 7330), 'numpy.abs', 'np.abs', (['mu_ip_x'], {}), '(mu_ip_x)\n', (7321, 7330), True, 'import numpy as np\n'), ((7386, 7401), 'numpy.abs', 'np.abs', (['mu_ip_y'], {}), '(mu_ip_y)\n', (7392, 7401), True, 'import numpy as np\n'), ((7457, 7472), 'numpy.abs', 'np.abs', (['mu_ip_t'], {}), '(mu_ip_t)\n', (7463, 7472), True, 'import numpy as np\n'), ((10654, 10763), 'detmodel.hit.Hit', 'Hit', (['noise_x[inoise]', 'noise_y[inoise]', 'noise_z[inoise]', 'noise_t[inoise]', 'noise_ix', 'noise_r[inoise]', '(False)'], {}), '(noise_x[inoise], noise_y[inoise], noise_z[inoise], noise_t[inoise],\n noise_ix, noise_r[inoise], False)\n', (10657, 10763), False, 'from detmodel.hit import Hit\n'), ((12423, 12633), 'detmodel.signal.Signal', 'Signal', ([], {'hash_seg_line_x': 'hit_hash_ix', 'hash_seg_line_y': 'hit_hash_iy', 'x': 'this_hit.x', 'y': 'this_hit.y', 'z': 'this_hit.z', 'time': 'this_hit.time', 'seg_ix': 'this_hit.seg_ix', 'rdrift': 'this_hit.rdrift', 'is_muon': 'this_hit.is_muon'}), '(hash_seg_line_x=hit_hash_ix, 
hash_seg_line_y=hit_hash_iy, x=this_hit\n .x, y=this_hit.y, z=this_hit.z, time=this_hit.time, seg_ix=this_hit.\n seg_ix, rdrift=this_hit.rdrift, is_muon=this_hit.is_muon)\n', (12429, 12633), False, 'from detmodel.signal import Signal, Segment\n'), ((4083, 4171), 'sympy.Point3D', 'sympy.Point3D', (["(-0.5 * self.sizes['x'])", "(0.5 * self.sizes['y'])", 'self.z'], {'evaluate': '(False)'}), "(-0.5 * self.sizes['x'], 0.5 * self.sizes['y'], self.z,\n evaluate=False)\n", (4096, 4171), False, 'import sympy\n'), ((4216, 4303), 'sympy.Point3D', 'sympy.Point3D', (["(0.5 * self.sizes['x'])", "(0.5 * self.sizes['y'])", 'self.z'], {'evaluate': '(False)'}), "(0.5 * self.sizes['x'], 0.5 * self.sizes['y'], self.z,\n evaluate=False)\n", (4229, 4303), False, 'import sympy\n'), ((7956, 8013), 'sympy.Point', 'sympy.Point', (['slx.line.p1.x', 'slx.line.p1.z'], {'evaluate': '(False)'}), '(slx.line.p1.x, slx.line.p1.z, evaluate=False)\n', (7967, 8013), False, 'import sympy\n'), ((8041, 8100), 'sympy.Point', 'sympy.Point', (['muon.line.p1.x', 'muon.line.p1.z'], {'evaluate': '(False)'}), '(muon.line.p1.x, muon.line.p1.z, evaluate=False)\n', (8052, 8100), False, 'import sympy\n'), ((8128, 8187), 'sympy.Point', 'sympy.Point', (['muon.line.p2.x', 'muon.line.p2.z'], {'evaluate': '(False)'}), '(muon.line.p2.x, muon.line.p2.z, evaluate=False)\n', (8139, 8187), False, 'import sympy\n'), ((8215, 8245), 'sympy.Line', 'sympy.Line', (['muonpos1', 'muonpos2'], {}), '(muonpos1, muonpos2)\n', (8225, 8245), False, 'import sympy\n'), ((11935, 11970), 'detmodel.util.distpoint2line', 'util.distpoint2line', (['yseg', 'this_hit'], {}), '(yseg, this_hit)\n', (11954, 11970), False, 'from detmodel import util\n'), ((2016, 2028), 'numpy.tan', 'np.tan', (['tilt'], {}), '(tilt)\n', (2022, 2028), True, 'import numpy as np\n'), ((3125, 3155), 'sympy.Line3D', 'sympy.Line3D', (['this_p1', 'this_p2'], {}), '(this_p1, this_p2)\n', (3137, 3155), False, 'import sympy\n'), ((3456, 3486), 'sympy.Line3D', 'sympy.Line3D', (['this_p1', 'this_p2'], {}), '(this_p1, this_p2)\n', (3468, 3486), False, 'import sympy\n'), ((4362, 4451), 'sympy.Point3D', 'sympy.Point3D', (["(-0.5 * self.sizes['x'])", "(-0.5 * self.sizes['y'])", 'self.z'], {'evaluate': '(False)'}), "(-0.5 * self.sizes['x'], -0.5 * self.sizes['y'], self.z,\n evaluate=False)\n", (4375, 4451), False, 'import sympy\n'), ((4495, 4583), 'sympy.Point3D', 'sympy.Point3D', (["(0.5 * self.sizes['x'])", "(-0.5 * self.sizes['y'])", 'self.z'], {'evaluate': '(False)'}), "(0.5 * self.sizes['x'], -0.5 * self.sizes['y'], self.z,\n evaluate=False)\n", (4508, 4583), False, 'import sympy\n'), ((10554, 10594), 'numpy.abs', 'np.abs', (['(noise_x[inoise] - xseg.line.p1.x)'], {}), '(noise_x[inoise] - xseg.line.p1.x)\n', (10560, 10594), True, 'import numpy as np\n'), ((11705, 11740), 'detmodel.util.distpoint2line', 'util.distpoint2line', (['xseg', 'this_hit'], {}), '(xseg, this_hit)\n', (11724, 11740), False, 'from detmodel import util\n'), ((15079, 15109), 'detmodel.util.distpoint2line', 'util.distpoint2line', (['xseg', 'hit'], {}), '(xseg, hit)\n', (15098, 15109), False, 'from detmodel import util\n'), ((3026, 3038), 'numpy.tan', 'np.tan', (['tilt'], {}), '(tilt)\n', (3032, 3038), True, 'import numpy as np\n'), ((4643, 4732), 'sympy.Point3D', 'sympy.Point3D', (["(-0.5 * self.sizes['x'])", "(-0.5 * self.sizes['y'])", 'self.z'], {'evaluate': '(False)'}), "(-0.5 * self.sizes['x'], -0.5 * self.sizes['y'], self.z,\n evaluate=False)\n", (4656, 4732), False, 'import sympy\n'), ((4776, 4864), 'sympy.Point3D', 
'sympy.Point3D', (["(-0.5 * self.sizes['x'])", "(0.5 * self.sizes['y'])", 'self.z'], {'evaluate': '(False)'}), "(-0.5 * self.sizes['x'], 0.5 * self.sizes['y'], self.z,\n evaluate=False)\n", (4789, 4864), False, 'import sympy\n'), ((4921, 5009), 'sympy.Point3D', 'sympy.Point3D', (["(0.5 * self.sizes['x'])", "(-0.5 * self.sizes['y'])", 'self.z'], {'evaluate': '(False)'}), "(0.5 * self.sizes['x'], -0.5 * self.sizes['y'], self.z,\n evaluate=False)\n", (4934, 5009), False, 'import sympy\n'), ((5054, 5141), 'sympy.Point3D', 'sympy.Point3D', (["(0.5 * self.sizes['x'])", "(0.5 * self.sizes['y'])", 'self.z'], {'evaluate': '(False)'}), "(0.5 * self.sizes['x'], 0.5 * self.sizes['y'], self.z,\n evaluate=False)\n", (5067, 5141), False, 'import sympy\n'), ((5200, 5243), 'sympy.Point3D', 'sympy.Point3D', (['(0)', '(0)', 'self.z'], {'evaluate': '(False)'}), '(0, 0, self.z, evaluate=False)\n', (5213, 5243), False, 'import sympy\n'), ((5296, 5339), 'sympy.Point3D', 'sympy.Point3D', (['(1)', '(0)', 'self.z'], {'evaluate': '(False)'}), '(1, 0, self.z, evaluate=False)\n', (5309, 5339), False, 'import sympy\n'), ((5405, 5448), 'sympy.Point3D', 'sympy.Point3D', (['(0)', '(0)', 'self.z'], {'evaluate': '(False)'}), '(0, 0, self.z, evaluate=False)\n', (5418, 5448), False, 'import sympy\n'), ((5501, 5544), 'sympy.Point3D', 'sympy.Point3D', (['(0)', '(1)', 'self.z'], {'evaluate': '(False)'}), '(0, 1, self.z, evaluate=False)\n', (5514, 5544), False, 'import sympy\n')]
|
"""An agent that makes random decisions using a TensorFlow policy."
This agent creates and uses a new randomly initialized
TensorFlow NN policy for each step but doesn't do any
learning.
"""
import agentos
from tensorflow import keras
import numpy as np
class Policy:
def __init__(self):
self.nn = keras.Sequential(
[
keras.layers.Dense(
4, activation="relu", input_shape=(4,), dtype="float64"
),
keras.layers.Dense(1, activation="sigmoid", dtype="float64"),
]
)
def compute_action(self, obs):
return int(round(self.nn(np.array(obs)[np.newaxis]).numpy()[0][0]))
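# Editor's note: compute_action rounds the single sigmoid output to 0 or 1,
# matching CartPole's two discrete actions; obs[np.newaxis] adds the batch
# dimension the Keras model expects.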
class RandomTFAgent(agentos.Agent):
def _init(self):
self.ret_vals = []
def advance(self):
ret = sum(self.evaluate_policy(Policy(), max_steps=2000))
self.ret_vals.append(ret)
def __del__(self):
print(
f"Agent done!\n"
f"Num rollouts: {len(self.ret_vals)}\n"
f"Avg return: {np.mean(self.ret_vals)}\n"
f"Max return: {max(self.ret_vals)}\n"
f"Median return: {np.median(self.ret_vals)}\n"
)
if __name__ == "__main__":
from gym.envs.classic_control import CartPoleEnv
agentos.run_agent(RandomTFAgent, CartPoleEnv, max_iters=5)
|
[
"tensorflow.keras.layers.Dense",
"numpy.median",
"numpy.mean",
"numpy.array",
"agentos.run_agent"
] |
[((1280, 1338), 'agentos.run_agent', 'agentos.run_agent', (['RandomTFAgent', 'CartPoleEnv'], {'max_iters': '(5)'}), '(RandomTFAgent, CartPoleEnv, max_iters=5)\n', (1297, 1338), False, 'import agentos\n'), ((361, 436), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(4)'], {'activation': '"""relu"""', 'input_shape': '(4,)', 'dtype': '"""float64"""'}), "(4, activation='relu', input_shape=(4,), dtype='float64')\n", (379, 436), False, 'from tensorflow import keras\n'), ((492, 552), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""', 'dtype': '"""float64"""'}), "(1, activation='sigmoid', dtype='float64')\n", (510, 552), False, 'from tensorflow import keras\n'), ((1047, 1069), 'numpy.mean', 'np.mean', (['self.ret_vals'], {}), '(self.ret_vals)\n', (1054, 1069), True, 'import numpy as np\n'), ((1154, 1178), 'numpy.median', 'np.median', (['self.ret_vals'], {}), '(self.ret_vals)\n', (1163, 1178), True, 'import numpy as np\n'), ((647, 660), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (655, 660), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## For Testing Matrix2vec on dataset MNIST
## PCA, Kernel PCA, ISOMAP, NMDS, LLE, LE
# import tensorflow as ts
import logging
import os.path
import sys
import multiprocessing
import numpy as np
import argparse
import scipy.io
import datetime
import matrix2vec
from sklearn import datasets as ds
from sklearn.datasets import load_digits
from sklearn.manifold import LocallyLinearEmbedding
# from keras.datasets import mnist
from sklearn.manifold import SpectralEmbedding
from sklearn.decomposition import PCA
from sklearn.manifold import MDS, Isomap
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import svm, metrics
from sklearn.preprocessing import scale
#import cPickle as pickle
import pickle
from scipy import misc
import matplotlib.image as mpimg
def unpickle(file):
with open(file, 'rb') as fo:
        d = pickle.load(fo)  # renamed to avoid shadowing the built-in dict
    return np.array(d['data']), np.array(d['labels'])
def load_data(path):
x_train, y_train = ds.load_svmlight_file(path)
    x_train = x_train.todense()  # todense() returns a new dense matrix, so the result must be assigned
return x_train,y_train
def read_data(data_file):
import gzip
f = gzip.open(data_file, "rb")
# train, val, test = pickle.load(f)
train, val, test = pickle.load(f, encoding='bytes')
f.close()
train_x = train[0]
train_y = train[1]
test_x = test[0]
test_y = test[1]
return train_x, train_y, test_x, test_y
def resizeSVHDShape(matrix):
svhd = np.zeros((5000,3072))
[rows, cols] = svhd.shape
for r in range(rows):
for c in range(cols):
            svhd[r][c] = matrix[(c % 1024) // 32][(c % 1024) % 32][c // 1024][r]  # use // so the indices stay integers in Python 3
return svhd
if __name__ == "__main__":
#args = parse_args()
# CoilData = scipy.io.loadmat("D:/NSFC/project/data/coil20/COIL20.mat") # Loading coil20.mat
# coil_x = CoilData['X']
# coil_y = CoilData['Y']
# x_train = coil_x
# y_train = []
# for item in coil_y:
# y_train.append(item[0])
# print("Load the COIL20 dataset finished...")
# SVHDData = scipy.io.loadmat("D:/NSFC/project/data/SVHN/train_32x32.mat") # Loading SVHN
# svhd_x = SVHDData['X']
# x_train = resizeSVHDShape(svhd_x)
# svhd_y = SVHDData['y']
# y_train = []
# for item in svhd_y:
# y_train.append(item[0])
# print("Load dataset finished...")
#data,label =load_data("D:/NSFC/project/data/movie/train.bow")
# data, label = load_data(args.input)
x_train, y_train, x_test, y_test = read_data("D:/NSFC/project/data/MNIST/origData/mnistpklgz/mnist.pkl.gz")
print("Load dataset MNIST finished...")
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
x_train=x_train[0:5000, :]
y_train = y_train[0:5000]
print(x_train.shape)
print(x_train)
models = []
emb_size=64
num_neighbors=32
for emb_size in (32,64):
print("********************* emb_size="+str(emb_size)+" ***************")
models=[]
models.append(LocallyLinearEmbedding(n_neighbors=num_neighbors,n_components=emb_size,n_jobs=multiprocessing.cpu_count()))
models.append(SpectralEmbedding(n_neighbors=num_neighbors,n_components=emb_size,n_jobs=multiprocessing.cpu_count()))
models.append(PCA(n_components=emb_size))
models.append(MDS(n_components=emb_size,n_jobs=multiprocessing.cpu_count()))
models.append(Isomap(n_neighbors=num_neighbors, n_components=emb_size, n_jobs=multiprocessing.cpu_count()))
models.append('matrix2vec')
model_names = ['lle', 'le', 'pca', 'MDS', 'ISOMAP', 'matrix2vec'] # names corresponding to model
for index, embedding in enumerate(models):
print('Start running model '+model_names[index]+"...")
start = datetime.datetime.now()
X_transformed= np.zeros((x_train.shape[0],emb_size))
if(index<=4):
# X_transformed = embedding.fit_transform(x_train)
X_transformed = embedding.fit_transform(x_train)
else:
X_transformed=matrix2vec.matrix2vec(x_train,emb_size,topk=5,num_iter=10)
end = datetime.datetime.now()
#scale
X_transformed=scale(X_transformed)
print('Model '+model_names[index]+' Finished in '+str(end-start)+" s.")
#Using KNN classifier to test the result with cross_validation
knn = KNeighborsClassifier()
param = {"n_neighbors": [1, 3, 5, 7, 11]} # 构造一些参数的值进行搜索 (字典类型,可以有多个参数)
gc = GridSearchCV(knn, param_grid=param, cv=4)
gc.fit(X_transformed, y_train)
knn = gc.best_estimator_
print("The best parameter: n_neighbors=" + str(knn.n_neighbors))
scores = cross_val_score(knn, X_transformed, y_train, cv=4)
print("交叉验证Accuracy: ", scores)
print("Accuracy: %0.4f (+/- %0.4f)" % (scores.mean(), scores.std() * 2))
|
[
"sklearn.model_selection.GridSearchCV",
"gzip.open",
"sklearn.preprocessing.scale",
"sklearn.model_selection.cross_val_score",
"numpy.zeros",
"sklearn.neighbors.KNeighborsClassifier",
"pickle.load",
"numpy.array",
"sklearn.datasets.load_svmlight_file",
"sklearn.decomposition.PCA",
"matrix2vec.matrix2vec",
"datetime.datetime.now",
"logging.getLogger",
"multiprocessing.cpu_count"
] |
[((1143, 1170), 'sklearn.datasets.load_svmlight_file', 'ds.load_svmlight_file', (['path'], {}), '(path)\n', (1164, 1170), True, 'from sklearn import datasets as ds\n'), ((1273, 1299), 'gzip.open', 'gzip.open', (['data_file', '"""rb"""'], {}), "(data_file, 'rb')\n", (1282, 1299), False, 'import gzip\n'), ((1363, 1395), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (1374, 1395), False, 'import pickle\n'), ((1583, 1605), 'numpy.zeros', 'np.zeros', (['(5000, 3072)'], {}), '((5000, 3072))\n', (1591, 1605), True, 'import numpy as np\n'), ((2776, 2802), 'logging.getLogger', 'logging.getLogger', (['program'], {}), '(program)\n', (2793, 2802), False, 'import logging\n'), ((1022, 1037), 'pickle.load', 'pickle.load', (['fo'], {}), '(fo)\n', (1033, 1037), False, 'import pickle\n'), ((1049, 1071), 'numpy.array', 'np.array', (["dict['data']"], {}), "(dict['data'])\n", (1057, 1071), True, 'import numpy as np\n'), ((1073, 1097), 'numpy.array', 'np.array', (["dict['labels']"], {}), "(dict['labels'])\n", (1081, 1097), True, 'import numpy as np\n'), ((3372, 3398), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'emb_size'}), '(n_components=emb_size)\n', (3375, 3398), False, 'from sklearn.decomposition import PCA\n'), ((3884, 3907), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3905, 3907), False, 'import datetime\n'), ((3935, 3973), 'numpy.zeros', 'np.zeros', (['(x_train.shape[0], emb_size)'], {}), '((x_train.shape[0], emb_size))\n', (3943, 3973), True, 'import numpy as np\n'), ((4257, 4280), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4278, 4280), False, 'import datetime\n'), ((4327, 4347), 'sklearn.preprocessing.scale', 'scale', (['X_transformed'], {}), '(X_transformed)\n', (4332, 4347), False, 'from sklearn.preprocessing import scale\n'), ((4540, 4562), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (4560, 4562), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4665, 4706), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['knn'], {'param_grid': 'param', 'cv': '(4)'}), '(knn, param_grid=param, cv=4)\n', (4677, 4706), False, 'from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score\n'), ((4885, 4935), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['knn', 'X_transformed', 'y_train'], {'cv': '(4)'}), '(knn, X_transformed, y_train, cv=4)\n', (4900, 4935), False, 'from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score\n'), ((4180, 4241), 'matrix2vec.matrix2vec', 'matrix2vec.matrix2vec', (['x_train', 'emb_size'], {'topk': '(5)', 'num_iter': '(10)'}), '(x_train, emb_size, topk=5, num_iter=10)\n', (4201, 4241), False, 'import matrix2vec\n'), ((3195, 3222), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3220, 3222), False, 'import multiprocessing\n'), ((3320, 3347), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3345, 3347), False, 'import multiprocessing\n'), ((3455, 3482), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3480, 3482), False, 'import multiprocessing\n'), ((3571, 3598), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3596, 3598), False, 'import multiprocessing\n')]
|
import os
import numpy as np
import logging
log = logging.getLogger('data_utils')
def resample_ants(nii_file, nii_file_newres, new_res=(1.37, 1.37, 10, 1)):
'''
Call ANTs to resample an image to a given resolution and save a new resampled file.
:param nii_file: the path of the input file
:param nii_file_newres: the path of the output file
:param new_res: the pixel resolution to resample
'''
print('Resampling %s at resolution %s to file %s' % (nii_file, str(new_res), nii_file_newres))
os.system('~/bin/ants/bin/ResampleImage %d %s %s %s' %
(len(new_res), nii_file, nii_file_newres, 'x'.join([str(r) for r in new_res])))
def normalise(array, min_value, max_value):
array = (max_value - min_value) * (array - float(array.min())) / (array.max() - array.min()) + min_value
    assert array.max() == max_value and array.min() == min_value  # note: exact float comparison; rounding can break this for some bounds
return array
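# A minimal usage sketch (values illustrative): rescale an array so its
# extremes land exactly on the requested bounds.
def _demo_normalise():
    arr = normalise(np.array([2.0, 4.0, 6.0]), 0.0, 1.0)
    assert arr.min() == 0.0 and arr.max() == 1.0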
def crop_same(image_list, mask_list, size=(None, None), mode='equal', pad_mode='constant'):
'''
Crop the data in the image and mask lists, so that they have the same size.
:param image_list: a list of images. Each element should be 4-dimensional, (sl,h,w,chn)
:param mask_list: a list of masks. Each element should be 4-dimensional, (sl,h,w,chn)
:param size: dimensions to crop the images to.
    :param mode: one of [equal, left, right], denoting where pixels are cropped from; 'equal' crops evenly from both sides.
:param pad_mode: can be one of ['edge', 'constant']. 'edge' pads using the values of the edge pixels,
'constant' pads with a constant value
:return: the modified arrays
'''
min_w = np.min([m.shape[1] for m in mask_list]) if size[0] is None else size[0]
min_h = np.min([m.shape[2] for m in mask_list]) if size[1] is None else size[1]
# log.debug('Resizing list1 of size %s to size %s' % (str(image_list[0].shape), str((min_w, min_h))))
# log.debug('Resizing list2 of size %s to size %s' % (str(mask_list[0].shape), str((min_w, min_h))))
img_result, msk_result = [], []
for i in range(len(mask_list)):
im = image_list[i]
m = mask_list[i]
if m.shape[1] > min_w:
m = _crop(m, 1, min_w, mode)
if im.shape[1] > min_w:
im = _crop(im, 1, min_w, mode)
if m.shape[1] < min_w:
m = _pad(m, 1, min_w, pad_mode)
if im.shape[1] < min_w:
im = _pad(im, 1, min_w, pad_mode)
if m.shape[2] > min_h:
m = _crop(m, 2, min_h, mode)
if im.shape[2] > min_h:
im = _crop(im, 2, min_h, mode)
if m.shape[2] < min_h:
m = _pad(m, 2, min_h, pad_mode)
if im.shape[2] < min_h:
im = _pad(im, 2, min_h, pad_mode)
img_result.append(im)
msk_result.append(m)
return img_result, msk_result
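# A minimal sketch (shapes illustrative): two lists whose elements differ in
# height and width are cropped to the smallest common (h, w).
def _demo_crop_same():
    images = [np.zeros((1, 10, 8, 1)), np.zeros((1, 12, 6, 1))]
    masks = [np.zeros((1, 10, 8, 1)), np.zeros((1, 12, 6, 1))]
    imgs, msks = crop_same(images, masks)
    assert all(a.shape == (1, 10, 6, 1) for a in imgs + msks)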
def _crop(image, dim, nb_pixels, mode):
diff = image.shape[dim] - nb_pixels
if mode == 'equal':
l = int(np.ceil(diff / 2))
r = image.shape[dim] - l
elif mode == 'right':
l = 0
r = nb_pixels
elif mode == 'left':
l = diff
r = image.shape[dim]
else:
        raise ValueError('Unexpected mode: %s. Expected to be one of [equal, left, right].' % mode)  # raising a bare string is invalid in Python 3
if dim == 1:
return image[:, l:r, :, :]
elif dim == 2:
return image[:, :, l:r, :]
else:
return None
def _pad(image, dim, nb_pixels, mode):
diff = nb_pixels - image.shape[dim]
l = int(diff / 2)
r = int(diff - l)
if dim == 1:
pad_width = ((0, 0), (l, r), (0, 0), (0, 0))
elif dim == 2:
pad_width = ((0, 0), (0, 0), (l, r), (0, 0))
else:
return None
if mode == 'edge':
new_image = np.pad(image, pad_width, 'edge')
elif mode == 'constant':
new_image = np.pad(image, pad_width, 'constant', constant_values=0)
else:
raise Exception('Invalid pad mode: ' + mode)
return new_image
def sample(data, nb_samples, seed=-1):
if seed > -1:
np.random.seed(seed)
idx = np.random.choice(len(data), size=nb_samples, replace=False)
return np.array([data[i] for i in idx])
def generator(batch, mode, *x):
assert mode in ['overflow', 'no_overflow']
imshape = x[0].shape
for ar in x:
# case where all inputs are images
if len(ar.shape) == len(imshape):
assert ar.shape[:-1] == imshape[:-1], str(ar.shape) + ' vs ' + str(imshape)
# case where inputs might be arrays of different dimensions
else:
assert ar.shape[0] == imshape[0], str(ar.shape) + ' vs ' + str(imshape)
start = 0
while 1:
if isempty(*x): # if the arrays are empty do not process and yield empty arrays
log.info('Empty inputs. Return empty arrays')
res = []
for ar in x:
res.append(np.empty(shape=ar.shape))
if len(res) > 1:
yield res
else:
yield res[0]
else:
start, ims = generate(start, batch, mode, *x)
if len(ims) == 1:
yield ims[0]
else:
yield ims
def isempty(*x):
for ar in x:
if ar.shape[0] > 0:
return False
return True
def generate(start, batch, mode, *images):
result = []
if mode == 'no_overflow':
for ar in images:
result.append(ar[start:start + batch] + 0)
start += batch
if start >= len(images[0]):
index = np.array(range(len(images[0])))
np.random.shuffle(index)
for ar in images:
ar[:] = ar[index] # shuffle array
start = 0
return start, result
if start + batch <= len(images[0]):
for ar in images:
result.append(ar[start:start + batch] + 0)
start += batch
return start, result
else:
# shuffle images
index = np.array(range(len(images[0])))
np.random.shuffle(index)
extra = batch + start - len(images[0]) # extra images to use from the beginning
for ar in images:
ims = ar[start:] + 0 # last images of array
ar[:] = ar[index] # shuffle array
if extra > 0:
result.append(np.concatenate([ims, ar[0:extra]], axis=0))
return extra, result
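# A minimal sketch of the generator above (sizes illustrative): batches of 2
# are drawn from 5 samples; in 'overflow' mode the array is reshuffled and
# wrapped around once it is exhausted.
def _demo_generator():
    data = np.arange(5).reshape(5, 1).astype(float)
    gen = generator(2, 'overflow', data)
    first, second = next(gen), next(gen)
    assert first.shape == (2, 1) and second.shape == (2, 1)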
|
[
"numpy.pad",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.ceil",
"numpy.concatenate",
"numpy.empty",
"numpy.min",
"numpy.array",
"logging.getLogger"
] |
[((50, 81), 'logging.getLogger', 'logging.getLogger', (['"""data_utils"""'], {}), "('data_utils')\n", (67, 81), False, 'import logging\n'), ((4156, 4188), 'numpy.array', 'np.array', (['[data[i] for i in idx]'], {}), '([data[i] for i in idx])\n', (4164, 4188), True, 'import numpy as np\n'), ((1692, 1731), 'numpy.min', 'np.min', (['[m.shape[1] for m in mask_list]'], {}), '([m.shape[1] for m in mask_list])\n', (1698, 1731), True, 'import numpy as np\n'), ((1776, 1815), 'numpy.min', 'np.min', (['[m.shape[2] for m in mask_list]'], {}), '([m.shape[2] for m in mask_list])\n', (1782, 1815), True, 'import numpy as np\n'), ((3764, 3796), 'numpy.pad', 'np.pad', (['image', 'pad_width', '"""edge"""'], {}), "(image, pad_width, 'edge')\n", (3770, 3796), True, 'import numpy as np\n'), ((4054, 4074), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4068, 4074), True, 'import numpy as np\n'), ((6027, 6051), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (6044, 6051), True, 'import numpy as np\n'), ((3003, 3020), 'numpy.ceil', 'np.ceil', (['(diff / 2)'], {}), '(diff / 2)\n', (3010, 3020), True, 'import numpy as np\n'), ((3846, 3901), 'numpy.pad', 'np.pad', (['image', 'pad_width', '"""constant"""'], {'constant_values': '(0)'}), "(image, pad_width, 'constant', constant_values=0)\n", (3852, 3901), True, 'import numpy as np\n'), ((5604, 5628), 'numpy.random.shuffle', 'np.random.shuffle', (['index'], {}), '(index)\n', (5621, 5628), True, 'import numpy as np\n'), ((4899, 4923), 'numpy.empty', 'np.empty', ([], {'shape': 'ar.shape'}), '(shape=ar.shape)\n', (4907, 4923), True, 'import numpy as np\n'), ((6331, 6373), 'numpy.concatenate', 'np.concatenate', (['[ims, ar[0:extra]]'], {'axis': '(0)'}), '([ims, ar[0:extra]], axis=0)\n', (6345, 6373), True, 'import numpy as np\n')]
|
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
import mmcv
import cv2
import numpy as np
import time
import sys
import glob
import os
from datetime import datetime
def process_video_crcnn(frame_offset, frame_count, config_file, checkpoint_file, video_path):
"""
    frame_offset: skip this many frames before detection starts
frame_count: run detection on this many frames
"""
f_number = 0
frame_offset = int(frame_offset)
frame_count = int(frame_count)
video = mmcv.VideoReader(video_path)
model = init_detector(config_file, checkpoint_file, device='cuda:0')
model.cfg.data.test.pipeline[1]['img_scale'] = video.resolution
print('[config] img_scale: {}'.format(model.cfg.data.test.pipeline[1]['img_scale']))
print('[config] score threshold: {}'.format(model.cfg.test_cfg['rcnn']['score_thr']))
print('[config] iou threshold: {}'.format(model.cfg.test_cfg['rcnn']['nms']['iou_threshold']))
print('[config] rpn nms threshold: {}'.format(model.cfg.test_cfg['rpn']['nms_thr']))
now = datetime.now()
date_time = now.strftime("%m%d%Y_%H%M%S")
log_filename = './demo/dump/det.txt'
log_file = open(log_filename, 'w')
start_process = time.time()
slice_start = 0 if frame_offset == 0 else frame_offset-1
slice_end = frame_offset+frame_count
print('[DBG] processing frames from {} - {}'.format(range(slice_start,slice_end)[0], range(slice_start,slice_end)[-1]))
last_boxes = []
for index in range(slice_start,slice_end):
frame = video[index]
f_number = f_number + 1
if frame is None:
print('[DBG] Empty frame received!')
break
start_time = time.time()
result = inference_detector(model, frame)
end_time = time.time()
bbox_result, _ = result, None
bboxes = np.vstack(bbox_result)
labels = [np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)]
labels = np.concatenate(labels)
if len(bboxes) == 0 or (len(bboxes) == 1 and labels[0] != 1):
if len(last_boxes) == 0:
print('[DBG] both current & previous detection lists for frame %d are empty' % (f_number))
log_file.write(str(f_number)+","+str(100.0)+","+str(100.0)+","+str(135.0)+","+str(228.0)+","+str(0.1) + "\n")
else:
print('[DBG] received empty detection list for frame %d copying boxes from previous frame' % (f_number))
for i in range(len(last_boxes)):
box = last_boxes[i]
d = (box[0], box[1], box[2], box[3], box[4])
# cv2.rectangle(frame, (int(d[0]), int(d[1])), (int(d[2]), int(d[3])), (255,0,0), 2)
log_file.write(str(f_number)+","+str(d[0])+","+str(d[1])+","+str(d[2])+","+str(d[3])+","+str(d[4]) + "\n")
else:
for i in range(len(bboxes)):
# bb [816.4531 265.64264 832.7383 311.08356 0.99859136]
bb = bboxes[i]
if labels[i] != 1:
continue
d = (bb[0], bb[1], bb[2], bb[3], bb[4])
if (d[2]-d[0]) <= 0. or (d[3]-d[1]) <= 0.:
print ('[DBG] wrong size of a box at frame: %d' % (f_number))
continue
cv2.rectangle(frame, (int(d[0]), int(d[1])), (int(d[2]), int(d[3])), (255,0,0), 2)
log_file.write(str(f_number)+","+str(d[0])+","+str(d[1])+","+str(d[2])+","+str(d[3])+","+str(d[4]) + "\n")
last_boxes = bboxes.copy()
if f_number == 1 or f_number % 300 == 0:
end_process = time.time()
print('[DBG][{}/{}] frame inference time: {} {}, elapsed time: {} {}'.format(f_number+slice_start, slice_end-1, end_time-start_time, '.s', (end_process-start_process), '.s'))
if f_number == 1 or f_number % 3000 == 0:
dump_path = "./demo/dump/dump-%06d.jpg" % (f_number)
cv2.imwrite(dump_path, frame)
log_file.flush()
os.fsync(log_file.fileno())
print('[DBG] detection complete!')
log_file.close()
def process_jpg_crcnn(config_file, checkpoint_file, image_dir):
model = init_detector(config_file, checkpoint_file, device='cuda:0')
now = datetime.now()
date_time = now.strftime("%m%d%Y_%H%M%S")
log_filename = './demo/dump/det.txt'
log_file = open(log_filename, 'w')
start_process = time.time()
#dsort_img_path = '/home/dmitriy.khvan/dsort-gcp/bepro-data/data/img1'
frame_count = len(glob.glob(os.path.join(image_dir,'*.jpg')))
for num, filename in enumerate(sorted(glob.glob(os.path.join(image_dir,'*.jpg')))):
f_number = num + 1
frame = cv2.imread(filename)
if frame is None:
break
start_time = time.time()
result = inference_detector(model, frame)
end_time = time.time()
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
labels = [np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result)]
labels = np.concatenate(labels)
for i in range(len(bboxes)):
bb = bboxes[i]
if labels[i] != 0: continue
d = (int(bb[0]), int(bb[1]), int(bb[2]), int(bb[3]))
cv2.rectangle(frame, (d[0], d[1]), (d[2], d[3]), (255,0,0), 2)
log_file.write(str(f_number)+","+str(d[0])+","+str(d[1])+","+str(d[2])+","+str(d[3]) + "\n")
if f_number == 1 or f_number % 500 == 0:
end_process = time.time()
print('[DBG][{}/{}] frame inference time: {} {}, elapsed time: {} {}'.format(f_number, frame_count, end_time-start_time, '.s', (end_process-start_process), '.s'))
if f_number == 1 or f_number % 1000 == 0:
dump_path = "./demo/dump/dump-%06d.jpg" % (f_number)
cv2.imwrite(dump_path, frame)
log_file.flush()
os.fsync(log_file.fileno())
print('[DBG] detection complete!')
log_file.close()
if __name__ == '__main__':
data_dir = sys.argv[1]
config_file = sys.argv[2]
checkpoint_file = sys.argv[3]
frame_offset = sys.argv[4]
frame_count = sys.argv[5]
# python demo/mmdetection_demo.py PVO4R8Dh-trim.mp4 configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_bepro.py checkpoint/crcnn_r50_bepro_stitch.pth 0 87150 /home/dmitriy.khvan/tmp/
process_video_crcnn(frame_offset, frame_count, config_file, checkpoint_file, data_dir)
|
[
"numpy.full",
"numpy.concatenate",
"os.path.join",
"cv2.imwrite",
"mmdet.apis.init_detector",
"mmdet.apis.inference_detector",
"mmcv.VideoReader",
"time.time",
"cv2.imread",
"cv2.rectangle",
"datetime.datetime.now",
"numpy.vstack"
] |
[((504, 532), 'mmcv.VideoReader', 'mmcv.VideoReader', (['video_path'], {}), '(video_path)\n', (520, 532), False, 'import mmcv\n'), ((545, 605), 'mmdet.apis.init_detector', 'init_detector', (['config_file', 'checkpoint_file'], {'device': '"""cuda:0"""'}), "(config_file, checkpoint_file, device='cuda:0')\n", (558, 605), False, 'from mmdet.apis import init_detector, inference_detector, show_result_pyplot\n'), ((1057, 1071), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1069, 1071), False, 'from datetime import datetime\n'), ((1220, 1231), 'time.time', 'time.time', ([], {}), '()\n', (1229, 1231), False, 'import time\n'), ((4273, 4333), 'mmdet.apis.init_detector', 'init_detector', (['config_file', 'checkpoint_file'], {'device': '"""cuda:0"""'}), "(config_file, checkpoint_file, device='cuda:0')\n", (4286, 4333), False, 'from mmdet.apis import init_detector, inference_detector, show_result_pyplot\n'), ((4345, 4359), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4357, 4359), False, 'from datetime import datetime\n'), ((4508, 4519), 'time.time', 'time.time', ([], {}), '()\n', (4517, 4519), False, 'import time\n'), ((1718, 1729), 'time.time', 'time.time', ([], {}), '()\n', (1727, 1729), False, 'import time\n'), ((1747, 1779), 'mmdet.apis.inference_detector', 'inference_detector', (['model', 'frame'], {}), '(model, frame)\n', (1765, 1779), False, 'from mmdet.apis import init_detector, inference_detector, show_result_pyplot\n'), ((1799, 1810), 'time.time', 'time.time', ([], {}), '()\n', (1808, 1810), False, 'import time\n'), ((1875, 1897), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (1884, 1897), True, 'import numpy as np\n'), ((2015, 2037), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (2029, 2037), True, 'import numpy as np\n'), ((4798, 4818), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (4808, 4818), False, 'import cv2\n'), ((4890, 4901), 'time.time', 'time.time', ([], {}), '()\n', (4899, 4901), False, 'import time\n'), ((4919, 4951), 'mmdet.apis.inference_detector', 'inference_detector', (['model', 'frame'], {}), '(model, frame)\n', (4937, 4951), False, 'from mmdet.apis import init_detector, inference_detector, show_result_pyplot\n'), ((4971, 4982), 'time.time', 'time.time', ([], {}), '()\n', (4980, 4982), False, 'import time\n'), ((5057, 5079), 'numpy.vstack', 'np.vstack', (['bbox_result'], {}), '(bbox_result)\n', (5066, 5079), True, 'import numpy as np\n'), ((5197, 5219), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (5211, 5219), True, 'import numpy as np\n'), ((1917, 1958), 'numpy.full', 'np.full', (['bbox.shape[0]', 'i'], {'dtype': 'np.int32'}), '(bbox.shape[0], i, dtype=np.int32)\n', (1924, 1958), True, 'import numpy as np\n'), ((3709, 3720), 'time.time', 'time.time', ([], {}), '()\n', (3718, 3720), False, 'import time\n'), ((4036, 4065), 'cv2.imwrite', 'cv2.imwrite', (['dump_path', 'frame'], {}), '(dump_path, frame)\n', (4047, 4065), False, 'import cv2\n'), ((4628, 4660), 'os.path.join', 'os.path.join', (['image_dir', '"""*.jpg"""'], {}), "(image_dir, '*.jpg')\n", (4640, 4660), False, 'import os\n'), ((5099, 5140), 'numpy.full', 'np.full', (['bbox.shape[0]', 'i'], {'dtype': 'np.int32'}), '(bbox.shape[0], i, dtype=np.int32)\n', (5106, 5140), True, 'import numpy as np\n'), ((5403, 5467), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(d[0], d[1])', '(d[2], d[3])', '(255, 0, 0)', '(2)'], {}), '(frame, (d[0], d[1]), (d[2], d[3]), (255, 0, 0), 2)\n', (5416, 5467), False, 'import cv2\n'), ((5647, 5658), 'time.time', 'time.time', ([], {}), '()\n', (5656, 5658), False, 'import time\n'), ((5974, 6003), 'cv2.imwrite', 'cv2.imwrite', (['dump_path', 'frame'], {}), '(dump_path, frame)\n', (5985, 6003), False, 'import cv2\n'), ((4719, 4751), 'os.path.join', 'os.path.join', (['image_dir', '"""*.jpg"""'], {}), "(image_dir, '*.jpg')\n", (4731, 4751), False, 'import os\n')]
|
import numpy as np
def add_noise(a):
if len(a.shape) == 2:
b = np.random.rand(a.shape[0], a.shape[1])
return a + b
else:
return a
def dot_product(a, b):
if len(a.shape) == 2 and len(b.shape) == 1 and a.shape[1] == b.shape[0]:
return a.dot(b)
else:
return "Incompatible dimensions"
if __name__ == "__main__":
dim = 200
a = np.eye(dim)
b = np.ones((dim,))
res1 = add_noise(a)
res2 = dot_product(a, b)
print(res2)
    assert np.array_equal(res2, b)  # element-wise check; comparing .all() to .all() only compared two booleans
|
[
"numpy.random.rand",
"numpy.eye",
"numpy.ones"
] |
[((391, 402), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (397, 402), True, 'import numpy as np\n'), ((411, 426), 'numpy.ones', 'np.ones', (['(dim,)'], {}), '((dim,))\n', (418, 426), True, 'import numpy as np\n'), ((76, 114), 'numpy.random.rand', 'np.random.rand', (['a.shape[0]', 'a.shape[1]'], {}), '(a.shape[0], a.shape[1])\n', (90, 114), True, 'import numpy as np\n')]
|
import time
import numpy as np
import copy
import sys
sys.path.append(".")
import ai.parameters
import ai.actionplanner
import ai.energyplanner
# get/set/update/check/
# random.choice(d.keys())
class BehaviourPlanner:
def __init__(self):
self.energy = ai.energyplanner.EnergyPlanner()
self.last_behaviour = ''
self.last_behaviour_count = 0
self.last_behaviour_time_spend = 0
self.last_behaviour_time_left = 0
self.last_time = self.get_time()
def get_time(self):
return time.time()
def get_time_gap(self):
return self.get_time() - self.last_time
def get_time_cons(self, behaviour):
if behaviour in ai.parameters.TIME_MIN_CONS:
return ai.parameters.TIME_MIN_CONS[behaviour]
else:
return 0
def update_last_time(self):
self.last_time = self.get_time()
def update_last_time_left(self):
print (self.last_behaviour_time_left)
self.last_behaviour_time_left -= self.get_time_gap()
print (self.last_behaviour_time_left)
self.update_last_time()
def update_last_behaviour(self, this_behaviour):
self.last_behaviour_time_left = self.get_time_cons(this_behaviour)
if self.last_behaviour == this_behaviour:
self.last_behaviour_count += 1
else:
self.last_behaviour_count = 0
ai.actionplanner.ActionPlanner.need_stop = True
self.check_behaviour_times()
self.last_behaviour = this_behaviour
def check_behaviour_times(self):
if self.last_behaviour_count > 10:
print ("too many times")
def update_behaviour(self, input_mode, input_data):
behaviour, processed_data = self.get_behaviour_from_mode(input_mode, input_data)
        print('BP(behaviour=' + behaviour + ', data=' + str(processed_data) + ')')  # processed_data may not be a string
if self.check_behaviour_in_behaviours(behaviour, ['relax', 'move', 'play']):
#print ('2.1 in relax, move play')
if self.last_behaviour_time_left > 0:
#print ('2.11 time left ' + str(self.last_behaviour_time_left))
self.update_last_time_left()
behaviour = self.last_behaviour
else:
#print ('2.12 new behaviour ')
self.update_last_behaviour(behaviour)
else:
#print ('2.2 other move')
self.update_last_behaviour(behaviour)
print('BP -> behaviour=' + behaviour + ', time_left=' + str(self.last_behaviour_time_left))
return behaviour, processed_data
def set_last_behaviour(self, behaviour):
if (self.last_behaviour == behaviour):
self.last_behaviour_count += 1
else:
self.last_behaviour_count = 0
self.last_behaviour = copy.deepcopy(behaviour)
def get_behaviour_from_mode(self, input_mode, input_data):
_behaviour = None
_data = input_data
if input_mode == ai.parameters.MODELS[0]: # ds
_behaviour = "run_away"
elif input_mode == ai.parameters.MODELS[1]: #tc
if input_data[0] == 1:
_behaviour = "touch_head"
elif input_data[2] == 1:
_behaviour = "touch_jaw"
elif input_data[4] == 1:
_behaviour = "touch_back"
elif input_mode == ai.parameters.MODELS[2]: #voice
_behaviour = self.process_voice(input_data)
elif input_mode == ai.parameters.MODELS[3]: #vision
_behaviour,_data = self.process_vision(input_data)
else:
_behaviour = self.process_random_behaviour()
return _behaviour, _data
def process_voice(self, input_data):
'''
1. kitten
2. mars
3. cat
4. mimi
5. hello
6. how are you
7 be quiet
8 look at me
9 sit
10 run
11 walk
12 turn
13 relax
14 stop
15 come here
'''
command = input_data
_behaviour = None
if command == "MARS" or command == "KITTEN" or command == "CAT": # call
_behaviour = 'start_listen'
pass
elif command == "MIMI" or command == "HELLO" or command == "HOW ARE YOU": # hello
_behaviour = 'make_sound'
pass
elif command == "BE QUIET": # be quiet
_behaviour = 'lower_sound'
# be quite
pass
elif command == "LOOK AT ME": # look at me
_behaviour = 'stare_at'
# look at me
pass
elif command == "SIT": # Go to your charger
_behaviour = 'sit'
pass
elif command == "RUN": # Play with me
_behaviour = 'run'
pass
elif command == "WALK": # Look at me
_behaviour = 'walk'
pass
elif command == "TURN": # Go foward/ left/ right/ stop
_behaviour = 'turn'
pass
elif command == "RELAX": # Are you sleepy?
_behaviour = 'lie_down'
pass
elif command == "STOP": # Be quiet
_behaviour = 'stop'
pass
elif command == "COME HERE": # Find your toy
_behaviour = 'walk_towards'
pass
else:
_behaviour = "lower_sound"
pass
return _behaviour
def process_vision(self, input_data):
        if type(input_data) != dict or len(input_data) != 1:
return "error process vision", 0
command = ''
for i in input_data:
command = i
_behaviour = None
_data = None
if command == 'human':
_behaviour = ai.parameters.BEHAVIOURS['ita_human'][self.get_rand(4,2)]
_data = input_data[command][0][0] # coords
elif command == 'qrcode':
_behaviour = 'qrcode'
            _data = input_data[command]  # qrcode payload; indexing command itself would only pick a character of 'qrcode'
elif command == 'obj':
obj_id = input_data[command][0]
if obj_id == 0: #high and teaser
_behaviour = 'flap_obj'
else:
_behaviour = 'pre_attack'
_data = input_data[command][1] #coord
return _behaviour,_data
def process_random_behaviour(self):
_behaviour = None
_rad = self.get_rand()
_rad_2 = self.get_rand()
if _rad > 0.15: # relax
            if _rad_2 > 0.75: # lie_down
                _behaviour = 'lie_down'
            elif _rad_2 < 0.7: # sit
                _behaviour = 'sit'
else: # stand
_behaviour = 'stand'
elif _rad < 0.1: # move
if _rad_2 > 0.3: # walk
_behaviour = 'walk'
            elif _rad_2 < 0.1:
_behaviour = 'turn'
else:
_behaviour = 'run'
else: # play
_behaviour = ai.parameters.BEHAVIOURS['play'][self.get_rand(4,3)]
return _behaviour
def get_rand(self, num_type = 0, _data=0):
# 0 is for random
# 1 is for normal
        # 4 is for choice / switch
if num_type == 0:
return np.random.random() # 0~1 percentage
elif num_type == 1:
return abs(np.random.normal(0,1)) # 68% in 1; 95% in 2
elif num_type == 2:
return np.random.normal(0,1) # 68% in 1; 95% in 2
elif num_type == 3:
return np.random.gamma(1,1) # 90% 0~2; 10% bigger than 2
elif num_type == 4:
return int(np.random.random()*_data) # 0,1,2 .. data-1
else:
return 0
def check_behaviour_in_behaviours(self,_behaviour, _behaviour_group):
if type(_behaviour_group) is str:
if _behaviour in ai.parameters.BEHAVIOURS[_behaviour_group]:
return True
else:
return False
elif type(_behaviour_group) is list:
for i in _behaviour_group:
if _behaviour in ai.parameters.BEHAVIOURS[i]:
return True
return False
else:
print ('Not in the group')
return False
pass
|
[
"sys.path.append",
"copy.deepcopy",
"time.time",
"numpy.random.gamma",
"numpy.random.random",
"numpy.random.normal"
] |
[((55, 75), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (70, 75), False, 'import sys\n'), ((538, 549), 'time.time', 'time.time', ([], {}), '()\n', (547, 549), False, 'import time\n'), ((2811, 2835), 'copy.deepcopy', 'copy.deepcopy', (['behaviour'], {}), '(behaviour)\n', (2824, 2835), False, 'import copy\n'), ((7335, 7353), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7351, 7353), True, 'import numpy as np\n'), ((7425, 7447), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (7441, 7447), True, 'import numpy as np\n'), ((7519, 7541), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (7535, 7541), True, 'import numpy as np\n'), ((7612, 7633), 'numpy.random.gamma', 'np.random.gamma', (['(1)', '(1)'], {}), '(1, 1)\n', (7627, 7633), True, 'import numpy as np\n'), ((7721, 7739), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7737, 7739), True, 'import numpy as np\n')]
|
"""
This module contains mathematically focused functions
"""
import numpy as np
from math import sin, cos
def normalise(vector):
"""Return a normalised vector"""
return vector / np.linalg.norm(vector)
def rotZ(theta):
"""
    Return the rotation matrix for a rotation about the z axis by an angle theta (in radians)
"""
rot = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
return rot
def rotedEpsilon(Epsilon, theta):
"""Return an epsilon of an material rotated by theta degree with z as the rotation matrix"""
return rotZ(theta).dot(Epsilon.dot(rotZ(-theta)))
def rotVTheta(v, theta):
"""Return the rotation matrix for rotation against a unit vector v and angel theta"""
v = normalise(v) # we first normalise the vector
w = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
return np.cos(theta) * np.identity(3) + np.sin(theta) * w + (1 - np.cos(theta)) *\
np.outer(v,v)
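# rotVTheta implements Rodrigues' rotation formula, R = cos(t) I + sin(t) [v]_x + (1 - cos(t)) v v^T.
# Minimal check (values illustrative): a 90-degree rotation about z maps x-hat to y-hat.
def _demo_rotVTheta():
    R = rotVTheta(np.array([0, 0, 1]), np.pi / 2)
    assert np.allclose(R.dot([1, 0, 0]), [0, 1, 0])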
def stackDot(array):
"""
Calculate the overall transfer matrix from a stack of arrays in the increasing
    z direction, e.g. a stack starting at z=0:
Psi(zb) = P_(zb, z_{N-1}) * ... * P(z1,zf) * Psi(zf)
= P(zb,zf) * Psi(zf)
"""
product = np.identity(len(array[0]))
for i in array:
product = product.dot(i)
return product
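# Minimal check (matrices illustrative): for a two-layer stack the overall
# matrix is simply the ordered product of the layer matrices.
def _demo_stackDot():
    A = np.array([[1.0, 1.0], [0.0, 1.0]])
    B = np.array([[1.0, 0.0], [2.0, 1.0]])
    assert np.allclose(stackDot([A, B]), A.dot(B))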
def buildDeltaMatrix(eps, Kx):
"""Returns Delta matrix for given relative permitivity and reduced wave number.
'Kx' : reduced wave number, Kx = kx/k0
'eps' : relative permitivity tensor
Returns : Delta 4x4 matrix, generator of infinitesimal translations
"""
return np.array([[
-Kx * eps[2, 0] / eps[2, 2], -Kx * eps[2, 1] / eps[2, 2], 0,
1 - Kx**2 / eps[2, 2]
], [0, 0, -1, 0], [
eps[1, 2] * eps[2, 0] / eps[2, 2] - eps[1, 0],
Kx**2 - eps[1, 1] + eps[1, 2] * eps[2, 1] / eps[2, 2], 0,
Kx * eps[1, 2] / eps[2, 2]
], [
eps[0, 0] - eps[0, 2] * eps[2, 0] / eps[2, 2],
eps[0, 1] - eps[0, 2] * eps[2, 1] / eps[2, 2], 0,
-Kx * eps[0, 2] / eps[2, 2]
]])
def rotXY(theta):
"""Return a roation matrix in 2D. Used in Jones Calculus"""
R = np.array([[cos(theta), sin(theta)], [-sin(theta), cos(theta)]])
return R
def polariserJ(theta):
"""Return the Jones matrix of a linear polariser"""
R = rotXY(theta)
Ri = rotXY(-theta)
J = np.array([[1, 0], [0, 0]])
return Ri.dot(J.dot(R))
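# Minimal check (Jones calculus): crossed linear polarisers transmit nothing,
# so the product of their Jones matrices vanishes.
def _demo_polariserJ():
    crossed = polariserJ(np.pi / 2).dot(polariserJ(0))
    assert np.allclose(crossed, np.zeros((2, 2)))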
def vectorFromTheta(theta):
"""
Return a unit vector in XY plane given the value of theta
'theta' : a list of angles to be calculated
    return a 3xN array of the N vectors calculated
"""
X = np.cos(theta)
Y = np.sin(theta)
Z = np.zeros(len(theta))
return np.array([X, Y, Z])
######################DEPRECATED########################
# For the depricated Yeh's methods
def construct_epsilon_heli(epsilon_diag,
pitch,
divisions,
thickness,
handness="left"):
"""
construct the dielectric matrices of all layers
return a N*3*3 array where N is the number of layers
    We define the pitch to be the distance over which the rotation is 180 degrees,
    i.e. the apparent period in the z direction
"""
if pitch == thickness:
angles = np.linspace(0, -np.pi, divisions, endpoint=False)
elif pitch > thickness:
angles = np.linspace(
0, -np.pi * thickness / pitch, divisions, endpoint=False)
else:
        raise ValueError('Need thickness to be smaller than pitch')
return np.array(
[rotZ(i).dot(epsilon_diag.dot(rotZ(-i))) for i in angles])
def calc_c(e, a, b, u=1): # Check units
"""
calculate the z components of 4 partial waves in medium
e: dielectric tensor
a,b: components of wavevector in direction of x and y direction
return a list containting 4 roots for the z components of the partial waves
"""
# assign names
x = e * u
x11, x12, x13 = x[0]
x21, x22, x23 = x[1]
x31, x32, x33 = x[2]
# calculate the coeffciency based on symbolic expression
coef4 = x33
coef3 = a * x13 + a * x31 + b * x23 + b * x32
coef2 = a**2*x11 + a**2*x33 + a*b*x12 + a*b*x21 + b**2*x22 + b**2*x33 - \
x11*x33 + x13*x31 - x22*x33 + x23*x32
coef1 = a**3*x13 + a**3*x31 + a**2*b*x23 + a**2*b*x32 + a*b**2*x13 + \
a*b**2*x31 + a*x12*x23 - a*x13*x22 + a*x21*x32 - a*x22*x31 + b**3*x23 \
+ b**3*x32 - b*x11*x23 - b*x11*x32 + b*x12*x31 + b*x13*x21
coef0 = a**4*x11 + a**3*b*x12 + a**3*b*x21 + a**2*b**2*x11 + a**2*b**2*x22 \
- a**2*x11*x22 - a**2*x11*x33 + a**2*x12*x21 + a**2*x13*x31 + a*b**3*x12 + \
a*b**3*x21 - a*b*x12*x33 + a*b*x13*x32 - a*b*x21*x33 + a*b*x23*x31 + \
b**4*x22 - b**2*x11*x22 + b**2*x12*x21 - b**2*x22*x33 + b**2*x23*x32 + \
x11*x22*x33 - x11*x23*x32 - x12*x21*x33 + x12*x23*x31 + x13*x21*x32 - \
x13*x22*x31
# calculate the roots of the quartic equation
c = np.roots([coef4, coef3, coef2, coef1, coef0])
if len(c) == 2:
return np.append(c, c)
return c
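# Minimal check: for an isotropic medium (eps = I) at normal incidence the
# quartic collapses to (c**2 - 1)**2 = 0, giving the double roots c = +/-1.
def _demo_calc_c():
    roots = calc_c(np.identity(3), 0, 0)
    assert np.allclose(sorted(roots.real), [-1, -1, 1, 1])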
def calc_k(e, a, b, u=1):
"""
A wrapper to calcualte k vector
"""
c = calc_c(e, a, b, u)
return np.array([[a, b, c[0]], [a, b, c[1]], [a, b, c[2]], [a, b, c[3]]])
def calc_p(e, k, u=1):  # something is wrong with this function: not giving correct directions
"""
Calculate the polarisation vector based on the calculated wavevector and frequency
equation(9.7-5)
e: dielectric tensor
k: 4x3 array of 4 k vectors
"""
x = e * u
p = []
x11, x12, x13 = x[0]
x21, x22, x23 = x[1]
x31, x32, x33 = x[2]
for i in k:
a = i[0]
b = i[1]
c = i[2]
coeff_m = np.array([[x11 - b**2 - c**2, x12 + a * b, x13 + a * c],
[x21 + a * b, x22 - a**2 - c**2, x23 + b * c],
[x31 + a * c, x32 + b * c, x33 - a**2 - b**2]])
        # The function seems to return the normalised null space vector
p.append(null(coeff_m))
return np.array(p)
def calc_q(k, p, u=1):
"""
    calculate the direction of the q vectors from the given k and p vectors
k: an 4x3 array of 4 k vectors
p: an 4x3 array of 4 p vectors
return a 4x3 array of 4 q vectors
use a special unit for the magnetic field such that c/2pi/mu_0 = 1
    note these vectors are not normalised
"""
return np.cross(k, p) / u
def calc_D(p, q):
return np.array([p[:, 0], q[:, 1], p[:, 1], q[:, 0]])
def calc_P(k, t):
return np.diag(np.exp(1j * t * k[:, 2]))
def construct_D(e, a, b, omega, u=1):
"""
    Construct the dynamic matrix for one layer with a known dielectric tensor.
    """
    # calc_k/calc_p/calc_q take no omega argument in this module, so it is
    # dropped from the calls below
    k = calc_k(e, a, b, u)
    p = calc_p(e, k, u)
    q = calc_q(k, p, u)
return calc_D(p, q)
def null(A, eps=1e-14):
"""
    Return the null vector of matrix A, useful for calculating the p vector from a
    known k vector
"""
u, s, vh = np.linalg.svd(A)
null_mask = (s <= eps)
    # relax the threshold if no vanishing singular value is identified
    if not null_mask.any():
return null(A, eps * 10)
null_space = np.compress(null_mask, vh, axis=0).flatten()
return np.transpose(null_space)
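# Minimal check (matrix illustrative): the null vector of a rank-deficient
# matrix is annihilated by it.
def _demo_null():
    A = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]])
    assert np.allclose(A.dot(null(A)), 0)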
def incident_p(k):
"""
Calculate the 4 polarisation vectors based on the incident wave wave vector.
    Assuming the medium is isotropic and the polarisation is split into s and p
k is a 3-vector
return a array of ps+,ps-,pp+,pp-
"""
# For normal incidence, fix the direction of polarisation
# p is aligned with x and s is aligned with y
# note the p vector is reversed for reflected wave otherwise p,s,k don't form
# a right hand set
if k[0] == 0 and k[1] == 0:
return np.array([[0, 1, 0], [0, 1, 0], [1, 0, 0], [-1, 0, 0]])
# calculate the polarisation vectors see lab book for defined geometries
    # Note the normal vector is [0,0,-1] as the incident wave is travelling in the positive
# z direction
si = normalise(np.cross(k, [0, 0, -1]))
sr = si
pi = normalise(np.cross(si, k))
pr = normalise(np.cross(sr, [k[0], k[1], -k[2]]))
# return a 4x3 array of the four polarisation vectors
return np.array([si, sr, pi, pr])
def calc_coeff(T):
"""
Given the transfer matrix calculate the transmission and reflection coefficients
Not currently in use
"""
deno = (T[0, 0] * T[2, 2] - T[0, 2] * T[2, 0])
rss = (T[1, 0] * T[2, 2] - T[1, 2] * T[2, 0]) / deno
rsp = (T[3, 0] * T[2, 2] - T[3, 2] * T[2, 0]) / deno
rps = (T[0, 0] * T[1, 2] - T[1, 0] * T[0, 2]) / deno
rpp = (T[0, 0] * T[3, 2] - T[3, 0] * T[0, 2]) / deno
tss = T[2, 2] / deno
tsp = -T[2, 0] / deno
tps = -T[0, 2] / deno
tpp = T[0, 0] / deno
return {
"rss": rss,
"rsp": rsp,
"rps": rps,
"rpp": rpp,
"tss": tss,
"tsp": tsp,
"tps": tps,
"tpp": tpp
}
def calc_coupling_matrices(T):
"""
Calculate the coupling matrix between reflected/transmitted light and incident light
    T is the overall transfer matrix of the system. Return a dictionary of coupling matrices.
    Indices are always in the order s,p or L,R.
    Note the p direction is aligned with x and s is aligned with y in the frame in which
    the wave is travelling in the z direction. Refer to the geometry guideline in the lab
book.
"""
# Build the coupling matrice between transmitted light and incident/reflected light
T_ti = np.array([[T[0, 0], T[0, 2]], [T[2, 0], T[2, 2]]])
T_tr = np.array([[T[1, 0], T[1, 2]], [T[3, 0], T[3, 2]]])
# Connect reflected light to incident light using the coupling to transmitted light
T_ir = np.linalg.solve(T_ti, T_tr)
T_it = np.linalg.inv(T_ti)
# Switching to circular polarisation
# Coupling matrix between planar and circular polarsiation T_cp * [L,R] = [S,P]
T_cp = np.array([[1j, -1j], [1, 1]]) * np.sqrt(1 / 2)
T_ir_c = np.linalg.solve(T_cp, T_ir.dot(T_cp))
T_it_c = np.linalg.solve(T_cp, T_it.dot(T_cp))
coupling_matrices = {
"Plane_r": T_ir,
"Plane_t": T_it,
"Circular_r": T_ir_c,
"Circular_t": T_it_c
}
return coupling_matrices
if __name__ == '__main__':
e = np.diag([1, 1, 1])
print(calc_c(e, 1, 1, 1))
|
[
"numpy.roots",
"numpy.linalg.svd",
"numpy.sin",
"numpy.linalg.norm",
"numpy.exp",
"numpy.diag",
"numpy.linalg.solve",
"numpy.transpose",
"numpy.identity",
"numpy.append",
"math.cos",
"numpy.linspace",
"numpy.cross",
"math.sin",
"numpy.linalg.inv",
"numpy.cos",
"numpy.compress",
"numpy.outer",
"numpy.array",
"numpy.sqrt"
] |
[((833, 897), 'numpy.array', 'np.array', (['[[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]'], {}), '([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])\n', (841, 897), True, 'import numpy as np\n'), ((1666, 2065), 'numpy.array', 'np.array', (['[[-Kx * eps[2, 0] / eps[2, 2], -Kx * eps[2, 1] / eps[2, 2], 0, 1 - Kx ** 2 /\n eps[2, 2]], [0, 0, -1, 0], [eps[1, 2] * eps[2, 0] / eps[2, 2] - eps[1, \n 0], Kx ** 2 - eps[1, 1] + eps[1, 2] * eps[2, 1] / eps[2, 2], 0, Kx *\n eps[1, 2] / eps[2, 2]], [eps[0, 0] - eps[0, 2] * eps[2, 0] / eps[2, 2],\n eps[0, 1] - eps[0, 2] * eps[2, 1] / eps[2, 2], 0, -Kx * eps[0, 2] / eps\n [2, 2]]]'], {}), '([[-Kx * eps[2, 0] / eps[2, 2], -Kx * eps[2, 1] / eps[2, 2], 0, 1 -\n Kx ** 2 / eps[2, 2]], [0, 0, -1, 0], [eps[1, 2] * eps[2, 0] / eps[2, 2] -\n eps[1, 0], Kx ** 2 - eps[1, 1] + eps[1, 2] * eps[2, 1] / eps[2, 2], 0, \n Kx * eps[1, 2] / eps[2, 2]], [eps[0, 0] - eps[0, 2] * eps[2, 0] / eps[2,\n 2], eps[0, 1] - eps[0, 2] * eps[2, 1] / eps[2, 2], 0, -Kx * eps[0, 2] /\n eps[2, 2]]])\n', (1674, 2065), True, 'import numpy as np\n'), ((2425, 2451), 'numpy.array', 'np.array', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (2433, 2451), True, 'import numpy as np\n'), ((2692, 2705), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2698, 2705), True, 'import numpy as np\n'), ((2714, 2727), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2720, 2727), True, 'import numpy as np\n'), ((2768, 2787), 'numpy.array', 'np.array', (['[X, Y, Z]'], {}), '([X, Y, Z])\n', (2776, 2787), True, 'import numpy as np\n'), ((5035, 5080), 'numpy.roots', 'np.roots', (['[coef4, coef3, coef2, coef1, coef0]'], {}), '([coef4, coef3, coef2, coef1, coef0])\n', (5043, 5080), True, 'import numpy as np\n'), ((5263, 5329), 'numpy.array', 'np.array', (['[[a, b, c[0]], [a, b, c[1]], [a, b, c[2]], [a, b, c[3]]]'], {}), '([[a, b, c[0]], [a, b, c[1]], [a, b, c[2]], [a, b, c[3]]])\n', (5271, 5329), True, 'import numpy as np\n'), ((6123, 6134), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (6131, 6134), True, 'import numpy as np\n'), ((6539, 6585), 'numpy.array', 'np.array', (['[p[:, 0], q[:, 1], p[:, 1], q[:, 0]]'], {}), '([p[:, 0], q[:, 1], p[:, 1], q[:, 0]])\n', (6547, 6585), True, 'import numpy as np\n'), ((7058, 7074), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (7071, 7074), True, 'import numpy as np\n'), ((7305, 7329), 'numpy.transpose', 'np.transpose', (['null_space'], {}), '(null_space)\n', (7317, 7329), True, 'import numpy as np\n'), ((8297, 8323), 'numpy.array', 'np.array', (['[si, sr, pi, pr]'], {}), '([si, sr, pi, pr])\n', (8305, 8323), True, 'import numpy as np\n'), ((9580, 9630), 'numpy.array', 'np.array', (['[[T[0, 0], T[0, 2]], [T[2, 0], T[2, 2]]]'], {}), '([[T[0, 0], T[0, 2]], [T[2, 0], T[2, 2]]])\n', (9588, 9630), True, 'import numpy as np\n'), ((9642, 9692), 'numpy.array', 'np.array', (['[[T[1, 0], T[1, 2]], [T[3, 0], T[3, 2]]]'], {}), '([[T[1, 0], T[1, 2]], [T[3, 0], T[3, 2]]])\n', (9650, 9692), True, 'import numpy as np\n'), ((9792, 9819), 'numpy.linalg.solve', 'np.linalg.solve', (['T_ti', 'T_tr'], {}), '(T_ti, T_tr)\n', (9807, 9819), True, 'import numpy as np\n'), ((9831, 9850), 'numpy.linalg.inv', 'np.linalg.inv', (['T_ti'], {}), '(T_ti)\n', (9844, 9850), True, 'import numpy as np\n'), ((10344, 10362), 'numpy.diag', 'np.diag', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (10351, 10362), True, 'import numpy as np\n'), ((190, 212), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (204, 212), True, 'import numpy as np\n'), ((3358, 3407), 'numpy.linspace', 'np.linspace', (['(0)', '(-np.pi)', 'divisions'], {'endpoint': '(False)'}), '(0, -np.pi, divisions, endpoint=False)\n', (3369, 3407), True, 'import numpy as np\n'), ((5116, 5131), 'numpy.append', 'np.append', (['c', 'c'], {}), '(c, c)\n', (5125, 5131), True, 'import numpy as np\n'), ((5798, 5970), 'numpy.array', 'np.array', (['[[x11 - b ** 2 - c ** 2, x12 + a * b, x13 + a * c], [x21 + a * b, x22 - a **\n 2 - c ** 2, x23 + b * c], [x31 + a * c, x32 + b * c, x33 - a ** 2 - b ** 2]\n ]'], {}), '([[x11 - b ** 2 - c ** 2, x12 + a * b, x13 + a * c], [x21 + a * b, \n x22 - a ** 2 - c ** 2, x23 + b * c], [x31 + a * c, x32 + b * c, x33 - a **\n 2 - b ** 2]])\n', (5806, 5970), True, 'import numpy as np\n'), ((6488, 6502), 'numpy.cross', 'np.cross', (['k', 'p'], {}), '(k, p)\n', (6496, 6502), True, 'import numpy as np\n'), ((6625, 6651), 'numpy.exp', 'np.exp', (['(1.0j * t * k[:, 2])'], {}), '(1.0j * t * k[:, 2])\n', (6631, 6651), True, 'import numpy as np\n'), ((7847, 7902), 'numpy.array', 'np.array', (['[[0, 1, 0], [0, 1, 0], [1, 0, 0], [-1, 0, 0]]'], {}), '([[0, 1, 0], [0, 1, 0], [1, 0, 0], [-1, 0, 0]])\n', (7855, 7902), True, 'import numpy as np\n'), ((8101, 8124), 'numpy.cross', 'np.cross', (['k', '[0, 0, -1]'], {}), '(k, [0, 0, -1])\n', (8109, 8124), True, 'import numpy as np\n'), ((8157, 8172), 'numpy.cross', 'np.cross', (['si', 'k'], {}), '(si, k)\n', (8165, 8172), True, 'import numpy as np\n'), ((8193, 8226), 'numpy.cross', 'np.cross', (['sr', '[k[0], k[1], -k[2]]'], {}), '(sr, [k[0], k[1], -k[2]])\n', (8201, 8226), True, 'import numpy as np\n'), ((9987, 10020), 'numpy.array', 'np.array', (['[[1.0j, -1.0j], [1, 1]]'], {}), '([[1.0j, -1.0j], [1, 1]])\n', (9995, 10020), True, 'import numpy as np\n'), ((10019, 10033), 'numpy.sqrt', 'np.sqrt', (['(1 / 2)'], {}), '(1 / 2)\n', (10026, 10033), True, 'import numpy as np\n'), ((989, 1003), 'numpy.outer', 'np.outer', (['v', 'v'], {}), '(v, v)\n', (997, 1003), True, 'import numpy as np\n'), ((3453, 3522), 'numpy.linspace', 'np.linspace', (['(0)', '(-np.pi * thickness / pitch)', 'divisions'], {'endpoint': '(False)'}), '(0, -np.pi * thickness / pitch, divisions, endpoint=False)\n', (3464, 3522), True, 'import numpy as np\n'), ((7249, 7283), 'numpy.compress', 'np.compress', (['null_mask', 'vh'], {'axis': '(0)'}), '(null_mask, vh, axis=0)\n', (7260, 7283), True, 'import numpy as np\n'), ((350, 363), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (356, 363), True, 'import numpy as np\n'), ((406, 419), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (412, 419), True, 'import numpy as np\n'), ((421, 434), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (427, 434), True, 'import numpy as np\n'), ((909, 922), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (915, 922), True, 'import numpy as np\n'), ((925, 939), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (936, 939), True, 'import numpy as np\n'), ((942, 955), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (948, 955), True, 'import numpy as np\n'), ((967, 980), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (973, 980), True, 'import numpy as np\n'), ((2226, 2236), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2229, 2236), False, 'from math import sin, cos\n'), ((2238, 2248), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2241, 2248), False, 'from math import sin, cos\n'), ((2265, 2275), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2268, 2275), False, 'from math import sin, cos\n'), ((366, 379), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (372, 379), True, 'import numpy as np\n'), ((2253, 2263), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2256, 2263), False, 'from math import sin, cos\n')]
|
import io
import re
import numpy as np
class NamedPoints():
def __init__(self, fl):
data = np.genfromtxt(fl, dtype=None)
self.xyz = np.array([[l[1], l[2], l[3]] for l in data])
self.names = [l[0].decode('ascii') for l in data]
self.name_to_xyz = dict(zip(self.names, self.xyz))
class Contacts(NamedPoints):
contact_single_regex = re.compile("^([A-Za-z]+[']?)([0-9]+)$")
contact_pair_regex_1 = re.compile("^([A-Za-z]+[']?)([0-9]+)-([0-9]+)$")
contact_pair_regex_2 = re.compile("^([A-Za-z]+[']?)([0-9]+)-([A-Za-z]+[']?)([0-9]+)$")
def __init__(self, filename):
super().__init__(filename)
self.electrodes = {}
for i, name in enumerate(self.names):
match = self.contact_single_regex.match(name)
if match is None:
raise ValueError("Unexpected contact name %s" % name)
elec_name, _ = match.groups()
if elec_name not in self.electrodes:
self.electrodes[elec_name] = []
self.electrodes[elec_name].append(i)
def get_elec(self, name):
match = self.contact_single_regex.match(name)
if match is None:
return None
return match.groups()[0]
def get_coords(self, name):
"""Get the coordinates of a specified contact or contact pair. Allowed formats are:
A1 : Single contact.
A1-2 or A1-A2 : Contact pair. The indices must be adjacent.
Examples:
>>> np.set_printoptions(formatter={'float': lambda x: "{0:0.1f}".format(x)})
>>> contacts = Contacts(io.BytesIO("A1 0.0 0.0 1.0\\nA2 0.0 0.0 2.0".encode()))
>>> contacts.get_coords("A1")
array([0.0, 0.0, 1.0])
>>> contacts.get_coords("A1-2")
array([0.0, 0.0, 1.5])
>>> contacts.get_coords("A2-A1")
array([0.0, 0.0, 1.5])
"""
match = self.contact_single_regex.match(name)
if match is not None:
return self.name_to_xyz[name]
match = self.contact_pair_regex_1.match(name)
if match is not None:
assert abs(int(match.group(2)) - int(match.group(3))) == 1
contact1 = match.group(1) + match.group(2)
contact2 = match.group(1) + match.group(3)
return (self.name_to_xyz[contact1] + self.name_to_xyz[contact2])/2.
match = self.contact_pair_regex_2.match(name)
if match is not None:
assert match.group(1) == match.group(3)
assert abs(int(match.group(2)) - int(match.group(4))) == 1
contact1 = match.group(1) + match.group(2)
contact2 = match.group(3) + match.group(4)
return (self.name_to_xyz[contact1] + self.name_to_xyz[contact2])/2.
raise ValueError("Given name '%s' does not follow any expected pattern." % name)
|
[
"numpy.array",
"numpy.genfromtxt",
"re.compile"
] |
[((374, 413), 're.compile', 're.compile', (['"""^([A-Za-z]+[\']?)([0-9]+)$"""'], {}), '("^([A-Za-z]+[\']?)([0-9]+)$")\n', (384, 413), False, 'import re\n'), ((441, 489), 're.compile', 're.compile', (['"""^([A-Za-z]+[\']?)([0-9]+)-([0-9]+)$"""'], {}), '("^([A-Za-z]+[\']?)([0-9]+)-([0-9]+)$")\n', (451, 489), False, 'import re\n'), ((517, 580), 're.compile', 're.compile', (['"""^([A-Za-z]+[\']?)([0-9]+)-([A-Za-z]+[\']?)([0-9]+)$"""'], {}), '("^([A-Za-z]+[\']?)([0-9]+)-([A-Za-z]+[\']?)([0-9]+)$")\n', (527, 580), False, 'import re\n'), ((106, 135), 'numpy.genfromtxt', 'np.genfromtxt', (['fl'], {'dtype': 'None'}), '(fl, dtype=None)\n', (119, 135), True, 'import numpy as np\n'), ((155, 199), 'numpy.array', 'np.array', (['[[l[1], l[2], l[3]] for l in data]'], {}), '([[l[1], l[2], l[3]] for l in data])\n', (163, 199), True, 'import numpy as np\n')]
|
import math
import numpy as np
from numerical_analysis.splines.bezier import Bezier
from numerical_analysis.dependencies import Polynomial
from numerical_analysis.root_finding import newton_raphson_2x2
from numerical_analysis.dependencies.geometry import StraightLine, Circle
from output_lib.csv_lib import ScvExporter
from output_lib.plot_lib import PlotExporter
from output_lib.screen_lib import ScreenPrinter
# Provides a more appropriate parameterization for the specific application
class CustomCircle(Circle):
def x_t(self, t): return self.R * math.cos(math.pi - 2 * math.pi * t) + self.C[0]
def y_t(self, t): return self.R * math.sin(math.pi - 2 * math.pi * t) + self.C[1]
class CircleApproacher:
def __init__(self, circle_parameters, initial_parameters, num_of_parameters=5):
self.r = circle_parameters["radius"]
self.c = circle_parameters["center"]
self.circle = CustomCircle(self.c, self.r)
self.circle_graph = self.circle.graph(0.01)
self.line = StraightLine([[0, [0, 0]], [1, [1, 1]]])
self.parameters = initial_parameters
self.bezier = self.initialize_bezier()
self.num_of_parameters = num_of_parameters
def initialize_bezier(self):
a, b, c, d, e = self.parameters
cp = np.array([[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]])
return Bezier(cp)
def refresh_bezier(self):
a, b, c, d, e = self.parameters
CP = np.array([[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]])
self.bezier.refresh_control_points(CP)
def refresh_parameters(self, new_parameters):
self.parameters = new_parameters
def least_squares(self, point_pairs):
def p0(t): return Polynomial(np.array([1, -5, 10, -10, 5])).value(t)
def p1(t): return Polynomial(np.array([0, 1, -4, 6, -3])).value(t)
def p2(t): return Polynomial(np.array([0, 0, 1, -2, 1])).value(t)
def p3(t): return Polynomial(np.array([0, 1, -4, 6, -5, 2])).value(t)
def p4(t): return Polynomial(np.array([0, 0, 1, -4, 5, -2])).value(t)
def sigma1(f, g, table): return sum([f(table[i][0]) * g(table[i][0]) for i in range(len(table))])
def sigma2(f, index, table): return sum([table[i][1][0][index] * f(table[i][0]) for i in range(len(table))])
if self.num_of_parameters == 5:
A11 = sigma1(p0, p0, point_pairs)
A12 = sigma1(p0, p1, point_pairs)
A13 = sigma1(p0, p2, point_pairs)
A21 = A12
A22 = sigma1(p1, p1, point_pairs)
A23 = sigma1(p1, p2, point_pairs)
A31 = A13
A32 = A23
A33 = sigma1(p2, p2, point_pairs)
A = [[A11, A12, A13],
[A21, A22, A23],
[A31, A32, A33]]
B11 = sigma2(p0, 0, point_pairs)
B21 = sigma2(p1, 0, point_pairs)
B31 = sigma2(p2, 0, point_pairs)
B = [B11, B21, B31]
solution_0 = np.linalg.solve(A, B)
a_new = solution_0[0]
b_new = solution_0[1] / 5
d_new = solution_0[2] / 10
elif self.num_of_parameters == 3:
a_new = 0
b_new = 0
d_new = 0.1 * sigma2(p2, 0, point_pairs) / sigma1(p2, p2, point_pairs)
else: return
C11 = sigma1(p3, p3, point_pairs)
C12 = sigma1(p3, p4, point_pairs)
C21 = C12
C22 = sigma1(p4, p4, point_pairs)
C = [[C11, C12], [C21, C22]]
D11 = sigma2(p3, 1, point_pairs)
D21 = sigma2(p4, 1, point_pairs)
D = [D11, D21]
solution_1 = np.linalg.solve(C, D)
c_new = solution_1[0] / 5
e_new = solution_1[1] / 10
return [a_new, b_new, c_new, d_new, e_new]
def calculate_point_pairs(self, d_phi):
def dx(tb, tl): return self.bezier.x_t(tb) - self.line.x_t(tl)
def dy(tb, tl): return self.bezier.y_t(tb) - self.line.y_t(tl)
def dxb(tb, tl): return Polynomial(self.bezier.c[0]).derivative().value(tb)
def dxl(tb, tl): return - Polynomial(self.line.c[0]).derivative().value(tl)
def dyb(tb, tl): return Polynomial(self.bezier.c[1]).derivative().value(tb)
def dyl(tb, tl): return - Polynomial(self.line.c[1]).derivative().value(tl)
point_pairs = []
dt = d_phi / (2 * math.pi)
for t in np.arange(0., 1., dt):
self.line.modify_points([[0, [self.r, 0]], [1, self.circle.point_t(t)]])
tb, tl = newton_raphson_2x2(dx, dy, dxb, dxl, dyb, dyl, t, 1, 1.e-12)
K = self.line.point_t(1)
Q = self.bezier.point_t(tb)
point_pairs.append([tb, [K, Q]])
return point_pairs
@staticmethod
def error_function(point_pairs, divisions):
# 1st Approach
# Ex = sum([(pair[1][0][0] - pair[1][1][0]) ** 2 for pair in point_pairs])
# Ey = sum([(pair[1][0][1] - pair[1][1][1]) ** 2 for pair in point_pairs])
# return math.sqrt(Ex + Ey)
# 2nd Approach
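        # (mean Euclidean distance over all circle/Bezier point pairs)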
return sum([math.sqrt((pair[1][0][0] - pair[1][1][0]) ** 2 + (pair[1][0][1] - pair[1][1][1]) ** 2)
for pair in point_pairs]) / divisions
def solve(self, d_phi, iterations=3000, error=1e-12, csv_fname=None, plots_path=None):
def convergence():
nonlocal new_parameters
for i in range(len(self.parameters)):
if abs(self.parameters[i] - new_parameters[i]) > error:
return False
return True
def create_csv():
nonlocal csv_exporter
csv_exporter.create_csv()
csv_exporter.write_headers("Iter", "Parameter a", "Parameter b", "Parameter c", "Parameter d",
"Parameter e", "Error Function")
def give_output(i, error_f):
screen_printer.print_results(i, self.parameters, error_f)
if csv_fname:
csv_exporter.append_row(i, *self.parameters, error_f)
            if plots_path and (i < 20 or i % 10 == 0):
bezier_graph = self.bezier.graph(0.01)
plot_exporter.create_plot(self.circle_graph, bezier_graph,
title="Approach Circle w/ Bezier (Iteration {})".format(i), axes_equal=True)
plot_exporter.export_plot("gen_{}".format(str(i).zfill(4)))
divisions = 2 * math.pi / d_phi
csv_exporter = None
if csv_fname:
csv_exporter = ScvExporter(csv_fname, r"results/")
create_csv()
screen_printer = ScreenPrinter()
plot_exporter = None
if plots_path:
plot_exporter = PlotExporter(plots_path)
for i in range(iterations):
point_pairs = self.calculate_point_pairs(d_phi)
error_f = self.error_function(point_pairs, divisions)
give_output(i, error_f)
new_parameters = self.least_squares(point_pairs)
if convergence():
                self.refresh_parameters(new_parameters)
                self.refresh_bezier()  # rebuild the curve before the final evaluation
point_pairs = self.calculate_point_pairs(d_phi)
error_f = self.error_function(point_pairs, divisions)
give_output(i, error_f)
break
else:
self.refresh_parameters(new_parameters)
self.refresh_bezier()
|
[
"numerical_analysis.root_finding.newton_raphson_2x2",
"numerical_analysis.dependencies.Polynomial",
"output_lib.plot_lib.PlotExporter",
"output_lib.csv_lib.ScvExporter",
"math.sqrt",
"numerical_analysis.splines.bezier.Bezier",
"output_lib.screen_lib.ScreenPrinter",
"math.sin",
"numerical_analysis.dependencies.geometry.StraightLine",
"numpy.array",
"numpy.arange",
"math.cos",
"numpy.linalg.solve"
] |
[((1016, 1056), 'numerical_analysis.dependencies.geometry.StraightLine', 'StraightLine', (['[[0, [0, 0]], [1, [1, 1]]]'], {}), '([[0, [0, 0]], [1, [1, 1]]])\n', (1028, 1056), False, 'from numerical_analysis.dependencies.geometry import StraightLine, Circle\n'), ((1287, 1347), 'numpy.array', 'np.array', (['[[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]]'], {}), '([[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]])\n', (1295, 1347), True, 'import numpy as np\n'), ((1363, 1373), 'numerical_analysis.splines.bezier.Bezier', 'Bezier', (['cp'], {}), '(cp)\n', (1369, 1373), False, 'from numerical_analysis.splines.bezier import Bezier\n'), ((1458, 1518), 'numpy.array', 'np.array', (['[[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]]'], {}), '([[a, 0], [b, c], [d, e], [d, -e], [b, -c], [a, 0]])\n', (1466, 1518), True, 'import numpy as np\n'), ((3629, 3650), 'numpy.linalg.solve', 'np.linalg.solve', (['C', 'D'], {}), '(C, D)\n', (3644, 3650), True, 'import numpy as np\n'), ((4376, 4399), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', 'dt'], {}), '(0.0, 1.0, dt)\n', (4385, 4399), True, 'import numpy as np\n'), ((6601, 6616), 'output_lib.screen_lib.ScreenPrinter', 'ScreenPrinter', ([], {}), '()\n', (6614, 6616), False, 'from output_lib.screen_lib import ScreenPrinter\n'), ((2990, 3011), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (3005, 3011), True, 'import numpy as np\n'), ((4505, 4564), 'numerical_analysis.root_finding.newton_raphson_2x2', 'newton_raphson_2x2', (['dx', 'dy', 'dxb', 'dxl', 'dyb', 'dyl', 't', '(1)', '(1e-12)'], {}), '(dx, dy, dxb, dxl, dyb, dyl, t, 1, 1e-12)\n', (4523, 4564), False, 'from numerical_analysis.root_finding import newton_raphson_2x2\n'), ((6515, 6549), 'output_lib.csv_lib.ScvExporter', 'ScvExporter', (['csv_fname', '"""results/"""'], {}), "(csv_fname, 'results/')\n", (6526, 6549), False, 'from output_lib.csv_lib import ScvExporter\n'), ((6698, 6722), 'output_lib.plot_lib.PlotExporter', 'PlotExporter', (['plots_path'], {}), '(plots_path)\n', (6710, 6722), False, 'from output_lib.plot_lib import PlotExporter\n'), ((558, 593), 'math.cos', 'math.cos', (['(math.pi - 2 * math.pi * t)'], {}), '(math.pi - 2 * math.pi * t)\n', (566, 593), False, 'import math\n'), ((644, 679), 'math.sin', 'math.sin', (['(math.pi - 2 * math.pi * t)'], {}), '(math.pi - 2 * math.pi * t)\n', (652, 679), False, 'import math\n'), ((5053, 5144), 'math.sqrt', 'math.sqrt', (['((pair[1][0][0] - pair[1][1][0]) ** 2 + (pair[1][0][1] - pair[1][1][1]) ** 2)'], {}), '((pair[1][0][0] - pair[1][1][0]) ** 2 + (pair[1][0][1] - pair[1][1\n ][1]) ** 2)\n', (5062, 5144), False, 'import math\n'), ((1739, 1768), 'numpy.array', 'np.array', (['[1, -5, 10, -10, 5]'], {}), '([1, -5, 10, -10, 5])\n', (1747, 1768), True, 'import numpy as np\n'), ((1816, 1843), 'numpy.array', 'np.array', (['[0, 1, -4, 6, -3]'], {}), '([0, 1, -4, 6, -3])\n', (1824, 1843), True, 'import numpy as np\n'), ((1891, 1917), 'numpy.array', 'np.array', (['[0, 0, 1, -2, 1]'], {}), '([0, 0, 1, -2, 1])\n', (1899, 1917), True, 'import numpy as np\n'), ((1965, 1995), 'numpy.array', 'np.array', (['[0, 1, -4, 6, -5, 2]'], {}), '([0, 1, -4, 6, -5, 2])\n', (1973, 1995), True, 'import numpy as np\n'), ((2043, 2073), 'numpy.array', 'np.array', (['[0, 0, 1, -4, 5, -2]'], {}), '([0, 0, 1, -4, 5, -2])\n', (2051, 2073), True, 'import numpy as np\n'), ((3993, 4021), 'numerical_analysis.dependencies.Polynomial', 'Polynomial', (['self.bezier.c[0]'], {}), '(self.bezier.c[0])\n', (4003, 4021), False, 'from numerical_analysis.dependencies import Polynomial\n'), ((4161, 4189), 'numerical_analysis.dependencies.Polynomial', 'Polynomial', (['self.bezier.c[1]'], {}), '(self.bezier.c[1])\n', (4171, 4189), False, 'from numerical_analysis.dependencies import Polynomial\n'), ((4079, 4105), 'numerical_analysis.dependencies.Polynomial', 'Polynomial', (['self.line.c[0]'], {}), '(self.line.c[0])\n', (4089, 4105), False, 'from numerical_analysis.dependencies import Polynomial\n'), ((4247, 4273), 'numerical_analysis.dependencies.Polynomial', 'Polynomial', (['self.line.c[1]'], {}), '(self.line.c[1])\n', (4257, 4273), False, 'from numerical_analysis.dependencies import Polynomial\n')]
|
from __future__ import division
import numpy as np
import numpy.matlib  # needed so that np.matlib.repmat in _add_constant resolves
from sklearn import preprocessing as skpp
__all__ = ['pre', 'post', '_remove_constant', '_add_constant']
def pre(matrix):
"""
Take the training data and put everything needed to undo this operation later into a dictionary.
    :param matrix: training data matrix
    :return: matrix: scaled matrix with constant features removed
             preprocDict: everything needed to undo the preprocessing
"""
preprocDict = {}
# constants
matrix, index, constant = _remove_constant(matrix)
preprocDict.update({'index': index, 'constant': constant})
# scale
scaler = skpp.StandardScaler().fit(matrix)
matrix = scaler.transform(matrix)
preprocDict.update({'scaler': scaler})
return matrix, preprocDict
def post(matrix, preprocDict):
"""
Given a reduced matrix, find the full and unnormalised matrix.
    :param matrix: reduced matrix
           preprocDict: dictionary produced by pre()
    :return: matrix: the full, unnormalised matrix
"""
# scale
scaler = preprocDict['scaler']
matrix = scaler.inverse_transform(matrix)
# constants
matrix = _add_constant(matrix, preprocDict['index'], preprocDict['constant'])
return matrix
def _remove_constant(matrix):
"""
Remove constant features.
:param matrix: matrix with constant features
:return: matrixR: matrix with constant features removed
index: vector of indexes where True == keep, False == constant and removed.
constants: matrix of constants removed for adding back later
"""
index = (np.var(matrix, 0) != 0)
matrix_r = np.zeros([np.shape(matrix)[0], sum(index)])
i = 0
for n in range(len(index)):
if index[n]:
matrix_r[:, i] = matrix[:, n]
i += 1
constants = matrix[0, np.invert(index)]
return matrix_r, index, constants
def _add_constant(matrix, index, constants):
"""
Add constant features back in.
:param matrix: matrix with constant features removed
index: vector of indexes where True == kept, False == constant and removed.
constants: vector of constants previously removed
:return: matrixF matrix with constant features added back in.
"""
# tile the constants for each data point
constants = np.matlib.repmat(constants, np.shape(matrix)[0], 1)
# add constants back in to a full matrix
matrixF = np.zeros((np.shape(matrix)[0], np.shape(index)[0]))
matrixF[:, index] = matrix
matrixF[:, np.invert(index)] = constants
return matrixF
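# Minimal round-trip sketch (illustrative only; assumes a 2-D float matrix):
#   X = np.random.rand(100, 5)
#   X[:, 2] = 3.0               # inject a constant feature
#   Xp, info = pre(X)           # scaled matrix, constant column removed
#   X_rec = post(Xp, info)      # recovers X up to floating-point error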
|
[
"numpy.shape",
"numpy.var",
"sklearn.preprocessing.StandardScaler",
"numpy.invert"
] |
[((1521, 1538), 'numpy.var', 'np.var', (['matrix', '(0)'], {}), '(matrix, 0)\n', (1527, 1538), True, 'import numpy as np\n'), ((567, 588), 'sklearn.preprocessing.StandardScaler', 'skpp.StandardScaler', ([], {}), '()\n', (586, 588), True, 'from sklearn import preprocessing as skpp\n'), ((1756, 1772), 'numpy.invert', 'np.invert', (['index'], {}), '(index)\n', (1765, 1772), True, 'import numpy as np\n'), ((2325, 2341), 'numpy.shape', 'np.shape', (['matrix'], {}), '(matrix)\n', (2333, 2341), True, 'import numpy as np\n'), ((2507, 2523), 'numpy.invert', 'np.invert', (['index'], {}), '(index)\n', (2516, 2523), True, 'import numpy as np\n'), ((1571, 1587), 'numpy.shape', 'np.shape', (['matrix'], {}), '(matrix)\n', (1579, 1587), True, 'import numpy as np\n'), ((2419, 2435), 'numpy.shape', 'np.shape', (['matrix'], {}), '(matrix)\n', (2427, 2435), True, 'import numpy as np\n'), ((2440, 2455), 'numpy.shape', 'np.shape', (['index'], {}), '(index)\n', (2448, 2455), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
from small_text.utils.data import list_length
class DataUtilsTest(unittest.TestCase):
def test_list_length(self):
self.assertEqual(10, list_length(list(range(10))))
self.assertEqual(10, list_length(np.random.rand(10, 2)))
|
[
"numpy.random.rand"
] |
[((257, 278), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (271, 278), True, 'import numpy as np\n')]
|
import os
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import cm
from matplotlib import rcParams
from sklearn import metrics
from sklearn import tree
rcParams["font.serif"] = "Times New Roman"
rcParams["font.family"] = "serif"
dirs = dict(main="F:\\Masterarbeit\\DLR\\project\\1_truck_detection")
dirs["plots"] = os.path.join("F:" + os.sep + "Masterarbeit", "THESIS", "general", "plots")
dirs["truth"] = os.path.join(dirs["main"], "truth")
rf_file = os.path.join(dirs["main"], "code", "detect_trucks", "rf_model.pickle")
rf = pickle.load(open(rf_file, "rb"))
# read test variables and labels in order to calculate metrics
variables_list = pickle.load(open(os.path.join(dirs["truth"], "validation_variables.pickle"), "rb"))
labels_list = pickle.load(open(os.path.join(dirs["truth"], "validation_labels.pickle"), "rb"))
def plot_random_forest(rf_model, test_variables, test_labels):
    test_pred = rf_model.predict(test_variables)
plot_confusion_matrix(metrics.confusion_matrix(test_labels, test_pred, labels=[2, 3, 4, 1]))
accuracy = metrics.accuracy_score(test_labels, test_pred)
report = metrics.classification_report(test_labels, test_pred)
labels = np.unique(test_labels)
summary = np.zeros((len(labels) + 3, 4), dtype=np.float16)
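    # summary layout: one row per class, then macro avg., weighted avg. and
    # accuracy; columns are precision, recall, F1-score and support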
for i, label in enumerate(labels):
for j, fun in enumerate([metrics.precision_score, metrics.recall_score, metrics.f1_score]):
summary[i, j] = fun(test_labels, test_pred, average="micro", labels=[label])
summary[-3, j] = fun(test_labels, test_pred, average="macro")
summary[-2, j] = fun(test_labels, test_pred, average="weighted")
summary[i, 3] = np.count_nonzero(np.int8(test_labels) == label)
summary[-3, 3] = len(test_labels)
summary[-2, 3] = len(test_labels)
summary[-1, 3] = len(test_labels)
summary[-1, 2] = metrics.accuracy_score(test_labels, test_pred)
columns = ["Precision", "Recall", "F1-score", "Support"]
shape = summary.shape
fig, ax = plt.subplots()
summary_altered = summary.copy() # copy in order to set n label column to 0 for imshow
summary_altered[:, -1] = 0 # np.min(summary[0:-1, 0:3]) - 0.1
summary_altered[summary_altered == 0] = np.nan
cmap = cm.Greens.__copy__()
im = ax.imshow(summary_altered.astype(np.float32), cmap=cmap, aspect=0.3)
ax.set_xticks(np.arange(shape[1]))
ax.set_yticks(np.arange(shape[0]))
ax.set_yticklabels(["Background", "Blue", "Green", "Red", "Macro avg.", "Weighted avg.", "Accuracy"])
ax.set_xticklabels(columns)
ax.xaxis.set_tick_params(labelsize=10)
ax.yaxis.set_tick_params(labelsize=10)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
plt.subplots_adjust(bottom=0.2)
for i in range(shape[0]):
for j in range(shape[1]):
value = summary[i, j]
value = np.round(value, 2) if value <= 1 else np.int32(value)
if value != 0:
text = ax.text(j, i, value, ha="center", va="center", color="black")
fig.tight_layout()
plt.savefig(os.path.join(dirs["plots"], "rf_classification_summary_heatmap.png"), dpi=500)
def plot_confusion_matrix(conf_matrix):
labels = ["blue", "green", "red", "background"]
fig, ax = plt.subplots(figsize=(3.5, 3.5))
cmap = cm.YlGn.__copy__()
im = plt.imshow(conf_matrix, cmap=cmap)
shape = conf_matrix.shape
ax.xaxis.tick_top()
ax.set_xticks(np.arange(shape[1]))
ax.set_yticks(np.arange(shape[0]))
ax.set_yticklabels(labels)
ax.set_xticklabels(labels)
ax.xaxis.set_tick_params(labelsize=11)
ax.yaxis.set_tick_params(labelsize=11)
plt.subplots_adjust(bottom=0.25, left=0.25)
# add numeric labels inside plot
for i in range(shape[0]):
for j in range(shape[1]):
value = str(conf_matrix[i, j])
if len(value) == 2:
value = " %s" % value
            elif len(value) == 1:
                value = "  %s" % value
plt.text(i - 0.2, j + 0.11, value, fontsize=11)
plt.text(1.2, -1.2, "True", fontsize=12, fontweight="bold")
plt.text(-2.8, 2, "Predicted", fontsize=12, fontweight="bold", rotation=90)
plt.tight_layout()
plt.savefig(os.path.join(dirs["plots"], "confusion_matrix.png"), dpi=500)
plt.close()
def plot_feature_importance(rf_model):
fig, ax = plt.subplots(figsize=(10, 1))
left = 0
feature_importances = np.round(rf_model.feature_importances_, 2)
argsort = np.argsort(feature_importances)[::-1]
labels = np.array(["reflectance_variance", "B04_B02_ratio", "B03_B02_ratio", "B04_centered", "B03_centered",
"B02_centered", "B08_centered"])[argsort]
colors = np.array(["#757575", "#dc4ff0", "#39e7ad", "#ff0000", "#00ff00", "#0000ff", "#7c0912"])[argsort]
feature_importances = feature_importances[argsort]
offsets = [0.18, 0.12, 0, -0.1, -0.1, -0.5, -0.3]
for c, importance, label, idx in zip(colors, feature_importances, labels, range(len(labels))):
ax.barh(0, importance, height=0.2, color=c, left=left, edgecolor="black", label="label")
text = ax.text(left + importance * 0.5, -0.01, "%s" % importance, ha="center",
va="center", color="w", weight="bold", fontsize=16)
text = ax.text(left + importance * offsets[idx], [-0.24, 0.16][int(int(idx / 2) == idx / 2)], label, fontsize=16)
left += importance
text = ax.text(-0.015, -0.05, "0", fontsize=16)
text = ax.text(1.005, -0.05, "1", fontsize=16)
ax.set_xlabel("")
plt.ylabel("")
plt.subplots_adjust(bottom=0.8)
plt.subplots_adjust(top=0.9)
plt.xlim(0, left)
positions = feature_importances.copy()
for i in range(len(feature_importances)):
positions[i] = np.sum(feature_importances[:i])
ax.set_xticks([])
ax.set_yticks([])
ax.set_yticklabels("")
plt.tight_layout()
plt.subplots_adjust(left=0.05, bottom=0.3)
plt.savefig(os.path.join(dirs["plots"], "rf_feature_importances_barplot.png"), dpi=500)
plt.close()
if __name__ == "__main__":
#plot_random_forest(rf, variables_list, labels_list)
plot_feature_importance(rf)
|
[
"numpy.sum",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.classification_report",
"numpy.argsort",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.round",
"os.path.join",
"numpy.unique",
"numpy.int8",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.close",
"numpy.int32",
"matplotlib.pyplot.subplots",
"matplotlib.cm.YlGn.__copy__",
"matplotlib.cm.Greens.__copy__",
"matplotlib.pyplot.text",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"numpy.array",
"sklearn.metrics.confusion_matrix"
] |
[((393, 467), 'os.path.join', 'os.path.join', (["('F:' + os.sep + 'Masterarbeit')", '"""THESIS"""', '"""general"""', '"""plots"""'], {}), "('F:' + os.sep + 'Masterarbeit', 'THESIS', 'general', 'plots')\n", (405, 467), False, 'import os\n'), ((484, 519), 'os.path.join', 'os.path.join', (["dirs['main']", '"""truth"""'], {}), "(dirs['main'], 'truth')\n", (496, 519), False, 'import os\n'), ((531, 601), 'os.path.join', 'os.path.join', (["dirs['main']", '"""code"""', '"""detect_trucks"""', '"""rf_model.pickle"""'], {}), "(dirs['main'], 'code', 'detect_trucks', 'rf_model.pickle')\n", (543, 601), False, 'import os\n'), ((1120, 1166), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['test_labels', 'test_pred'], {}), '(test_labels, test_pred)\n', (1142, 1166), False, 'from sklearn import metrics\n'), ((1180, 1233), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['test_labels', 'test_pred'], {}), '(test_labels, test_pred)\n', (1209, 1233), False, 'from sklearn import metrics\n'), ((1247, 1269), 'numpy.unique', 'np.unique', (['test_labels'], {}), '(test_labels)\n', (1256, 1269), True, 'import numpy as np\n'), ((1919, 1965), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['test_labels', 'test_pred'], {}), '(test_labels, test_pred)\n', (1941, 1965), False, 'from sklearn import metrics\n'), ((2067, 2081), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2079, 2081), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2323), 'matplotlib.cm.Greens.__copy__', 'cm.Greens.__copy__', ([], {}), '()\n', (2321, 2323), False, 'from matplotlib import cm\n'), ((2792, 2823), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.2)'}), '(bottom=0.2)\n', (2811, 2823), True, 'import matplotlib.pyplot as plt\n'), ((3334, 3366), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3.5, 3.5)'}), '(figsize=(3.5, 3.5))\n', (3346, 3366), True, 'import matplotlib.pyplot as plt\n'), ((3378, 3396), 'matplotlib.cm.YlGn.__copy__', 'cm.YlGn.__copy__', ([], {}), '()\n', (3394, 3396), False, 'from matplotlib import cm\n'), ((3406, 3440), 'matplotlib.pyplot.imshow', 'plt.imshow', (['conf_matrix'], {'cmap': 'cmap'}), '(conf_matrix, cmap=cmap)\n', (3416, 3440), True, 'import matplotlib.pyplot as plt\n'), ((3725, 3768), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.25)', 'left': '(0.25)'}), '(bottom=0.25, left=0.25)\n', (3744, 3768), True, 'import matplotlib.pyplot as plt\n'), ((4120, 4179), 'matplotlib.pyplot.text', 'plt.text', (['(1.2)', '(-1.2)', '"""True"""'], {'fontsize': '(12)', 'fontweight': '"""bold"""'}), "(1.2, -1.2, 'True', fontsize=12, fontweight='bold')\n", (4128, 4179), True, 'import matplotlib.pyplot as plt\n'), ((4184, 4259), 'matplotlib.pyplot.text', 'plt.text', (['(-2.8)', '(2)', '"""Predicted"""'], {'fontsize': '(12)', 'fontweight': '"""bold"""', 'rotation': '(90)'}), "(-2.8, 2, 'Predicted', fontsize=12, fontweight='bold', rotation=90)\n", (4192, 4259), True, 'import matplotlib.pyplot as plt\n'), ((4264, 4282), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4280, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4365, 4376), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4374, 4376), True, 'import matplotlib.pyplot as plt\n'), ((4432, 4461), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 1)'}), '(figsize=(10, 1))\n', (4444, 4461), True, 'import matplotlib.pyplot as plt\n'), ((4501, 4543), 'numpy.round', 'np.round', (['rf_model.feature_importances_', '(2)'], {}), '(rf_model.feature_importances_, 2)\n', (4509, 4543), True, 'import numpy as np\n'), ((5629, 5643), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (5639, 5643), True, 'import matplotlib.pyplot as plt\n'), ((5648, 5679), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.8)'}), '(bottom=0.8)\n', (5667, 5679), True, 'import matplotlib.pyplot as plt\n'), ((5684, 5712), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (5703, 5712), True, 'import matplotlib.pyplot as plt\n'), ((5717, 5734), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'left'], {}), '(0, left)\n', (5725, 5734), True, 'import matplotlib.pyplot as plt\n'), ((5954, 5972), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5970, 5972), True, 'import matplotlib.pyplot as plt\n'), ((5977, 6019), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.05)', 'bottom': '(0.3)'}), '(left=0.05, bottom=0.3)\n', (5996, 6019), True, 'import matplotlib.pyplot as plt\n'), ((6116, 6127), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6125, 6127), True, 'import matplotlib.pyplot as plt\n'), ((737, 795), 'os.path.join', 'os.path.join', (["dirs['truth']", '"""validation_variables.pickle"""'], {}), "(dirs['truth'], 'validation_variables.pickle')\n", (749, 795), False, 'import os\n'), ((835, 890), 'os.path.join', 'os.path.join', (["dirs['truth']", '"""validation_labels.pickle"""'], {}), "(dirs['truth'], 'validation_labels.pickle')\n", (847, 890), False, 'import os\n'), ((1034, 1103), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['test_labels', 'test_pred'], {'labels': '[2, 3, 4, 1]'}), '(test_labels, test_pred, labels=[2, 3, 4, 1])\n', (1058, 1103), False, 'from sklearn import metrics\n'), ((2420, 2439), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (2429, 2439), True, 'import numpy as np\n'), ((2459, 2478), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (2468, 2478), True, 'import numpy as np\n'), ((3147, 3215), 'os.path.join', 'os.path.join', (["dirs['plots']", '"""rf_classification_summary_heatmap.png"""'], {}), "(dirs['plots'], 'rf_classification_summary_heatmap.png')\n", (3159, 3215), False, 'import os\n'), ((3513, 3532), 'numpy.arange', 'np.arange', (['shape[1]'], {}), '(shape[1])\n', (3522, 3532), True, 'import numpy as np\n'), ((3552, 3571), 'numpy.arange', 'np.arange', (['shape[0]'], {}), '(shape[0])\n', (3561, 3571), True, 'import numpy as np\n'), ((4299, 4350), 'os.path.join', 'os.path.join', (["dirs['plots']", '"""confusion_matrix.png"""'], {}), "(dirs['plots'], 'confusion_matrix.png')\n", (4311, 4350), False, 'import os\n'), ((4558, 4589), 'numpy.argsort', 'np.argsort', (['feature_importances'], {}), '(feature_importances)\n', (4568, 4589), True, 'import numpy as np\n'), ((4609, 4745), 'numpy.array', 'np.array', (["['reflectance_variance', 'B04_B02_ratio', 'B03_B02_ratio', 'B04_centered',\n 'B03_centered', 'B02_centered', 'B08_centered']"], {}), "(['reflectance_variance', 'B04_B02_ratio', 'B03_B02_ratio',\n 'B04_centered', 'B03_centered', 'B02_centered', 'B08_centered'])\n", (4617, 4745), True, 'import numpy as np\n'), ((4787, 4878), 'numpy.array', 'np.array', (["['#757575', '#dc4ff0', '#39e7ad', '#ff0000', '#00ff00', '#0000ff', '#7c0912']"], {}), "(['#757575', '#dc4ff0', '#39e7ad', '#ff0000', '#00ff00', '#0000ff',\n '#7c0912'])\n", (4795, 4878), True, 'import numpy as np\n'), ((5847, 5878), 'numpy.sum', 'np.sum', (['feature_importances[:i]'], {}), '(feature_importances[:i])\n', (5853, 5878), True, 'import numpy as np\n'), ((6036, 6101), 'os.path.join', 'os.path.join', (["dirs['plots']", '"""rf_feature_importances_barplot.png"""'], {}), "(dirs['plots'], 'rf_feature_importances_barplot.png')\n", (6048, 6101), False, 'import os\n'), ((4068, 4115), 'matplotlib.pyplot.text', 'plt.text', (['(i - 0.2)', '(j + 0.11)', 'value'], {'fontsize': '(11)'}), '(i - 0.2, j + 0.11, value, fontsize=11)\n', (4076, 4115), True, 'import matplotlib.pyplot as plt\n'), ((1753, 1773), 'numpy.int8', 'np.int8', (['test_labels'], {}), '(test_labels)\n', (1760, 1773), True, 'import numpy as np\n'), ((2942, 2960), 'numpy.round', 'np.round', (['value', '(2)'], {}), '(value, 2)\n', (2950, 2960), True, 'import numpy as np\n'), ((2980, 2995), 'numpy.int32', 'np.int32', (['value'], {}), '(value)\n', (2988, 2995), True, 'import numpy as np\n')]
|
import string
import numpy as np
import sys
import random
import os
from shutil import copyfile
import subprocess
from rpt_ele import rpt_ele
import update_process_model_input_file as up
import swmm_mpc as sm
def get_flood_cost_from_dict(rpt, node_flood_weight_dict):
node_flood_costs = []
    for nodeid, weight in node_flood_weight_dict.items():
# if user put "Node J3" for nodeid instead of just "J3" make \
# nodeid "J3"
        if len(nodeid.split()) > 1:
nodeid = nodeid.split()[-1]
# try/except used here in case there is no flooding for one or \
# more of the nodes
if nodeid not in rpt.node_ids:
print("warning node {} is not in model".format(nodeid))
try:
            # flood volume is in column 5
node_flood_volume = float(rpt.flooding_df.loc[nodeid, 5])
node_flood_cost = (weight*node_flood_volume)
node_flood_costs.append(node_flood_cost)
except:
pass
return sum(node_flood_costs)
def get_flood_cost(rpt, node_flood_weight_dict):
if rpt.total_flooding > 0 and node_flood_weight_dict:
return get_flood_cost_from_dict(rpt, node_flood_weight_dict)
else:
return rpt.total_flooding
def get_deviation_cost(rpt, target_depth_dict):
node_deviation_costs = []
if target_depth_dict:
        for nodeid, data in target_depth_dict.items():
depth = rpt.get_ele_df(nodeid)['Depth']
depth_dev = abs(depth - data['target'])
avg_dev = depth_dev.sum()/len(depth_dev)
weighted_deviation = avg_dev*data['weight']
node_deviation_costs.append(weighted_deviation)
return sum(node_deviation_costs)
def get_cost(rpt_file, node_flood_weight_dict, flood_weight, target_depth_dict,
dev_weight):
# read the output file
rpt = rpt_ele('{}'.format(rpt_file))
# get flooding costs
node_fld_cost = get_flood_cost(rpt, node_flood_weight_dict)
# get deviation costs
deviation_cost = get_deviation_cost(rpt, target_depth_dict)
# convert the contents of the output file into a cost
cost = flood_weight*node_fld_cost + dev_weight*deviation_cost
return cost
def bits_to_decimal(bits):
bits_as_string = "".join(str(i) for i in bits)
return float(int(bits_as_string, 2))
def bits_max_val(bit_len):
bit_ones = [1 for i in range(bit_len)]
return bits_to_decimal(bit_ones)
def bits_to_perc(bits):
bit_dec = bits_to_decimal(bits)
max_bits = bits_max_val(len(bits))
return round(bit_dec/max_bits, 3)
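    # e.g. bits_to_perc([1, 0, 1]) == round(5.0 / 7.0, 3) == 0.714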
def bit_to_on_off(bit):
"""
convert single bit to "ON" or "OFF"
bit: [int] or [list]
"""
if type(bit) == list:
if len(bit) > 1:
raise ValueError('you passed more than one bit to this fxn')
else:
bit = bit[0]
if bit == 1:
return "ON"
elif bit == 0:
return "OFF"
else:
raise ValueError('was expecting 1 or 0 and got {}'.format(bit))
def split_gene_by_ctl_ts(gene, control_str_ids, n_steps):
"""
split a list of bits representing a gene into the bits that correspond with
each control id according to the control type for each time step
ASSUMPTION: 3 bits for ORIFICE or WEIR, 1 for PUMP
gene: [list] bits for a gene (e.g., [1, 0, 1, 1, 1, 0, 0, 1])
control_str_ids: [list] control ids (e.g., ['ORIFICE r1', 'PUMP p1'])
n_steps: [int] number of control steps (e.g., 2)
returns: [list of lists] [[[1, 0, 1], [1, 1, 0]], [[0], [1]]]
"""
split_gene = []
for control_id in control_str_ids:
# get the control type (i.e. PUMP, WEIR, ORIFICE)
control_type = control_id.split()[0]
if control_type == 'ORIFICE' or control_type == 'WEIR':
bits_per_type = 3
# get the number of control elements that are for the current ctl
elif control_type == 'PUMP':
bits_per_type = 1
# the number of bits per control structure
n_bits = bits_per_type*n_steps
# get the segment for the control
gene_seg = gene[:n_bits]
# split to get the different time steps
gene_seg_per_ts = split_list(gene_seg, n_steps)
# add the gene segment to the overall list
split_gene.append(gene_seg_per_ts)
# move the beginning of the gene to the end of the current ctl segment
gene = gene[n_bits:]
return split_gene
def split_list(a_list, n):
"""
split one list into n lists of equal size. In this case, we are splitting
    the list that represents the policy of a single control structure
so that each time step is separate
"""
    portions = len(a_list) // n
split_lists = []
for i in range(n):
split_lists.append(a_list[i*portions: (i+1)*portions])
return split_lists
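    # e.g. split_list([1, 2, 3, 4, 5, 6], 2) -> [[1, 2, 3], [4, 5, 6]]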
def gene_to_policy_dict(gene, control_str_ids, n_control_steps):
"""
converts a gene to a policy dictionary that with the format specified in
up.update_controls_and_hotstart
format a policy given the control_str_ids and splitted_gene
control_str_ids: [list] control ids (e.g., ['ORIFICE r1', 'PUMP p1'])
splitted_gene: [list of lists] [[[1, 0, 1], [1, 1, 0]], [[0], [1]]]
    returns: [dict] (e.g., {'ORIFICE r1': [0.5, 0.75], 'PUMP p1': ['ON', 'OFF']})
"""
fmted_policies = dict()
splitted_gene = split_gene_by_ctl_ts(gene, control_str_ids,
n_control_steps)
for i, control_id in enumerate(control_str_ids):
control_type = control_id.split()[0]
seg = splitted_gene[i]
if control_type == 'ORIFICE' or control_type == 'WEIR':
# change the lists of bits into percent openings
fmtd_seg = [bits_to_perc(setting) for setting in seg]
elif control_type == 'PUMP':
# change the lists of bits into on/off
fmtd_seg = [bit_to_on_off(bit[0]) for bit in seg]
fmted_policies[control_id] = fmtd_seg
return fmted_policies
def list_to_policy(policy, control_str_ids, n_control_steps):
"""
ASSUMPTION: round decimal number to BOOLEAN
"""
split_policies = split_list(policy, len(control_str_ids))
fmted_policies = dict()
for i, control_id in enumerate(control_str_ids):
control_type = control_id.split()[0]
if control_type == 'ORIFICE' or control_type == 'WEIR':
fmted_policies[control_id] = split_policies[i]
elif control_type == 'PUMP':
on_off = [bit_to_on_off(round(p)) for p in split_policies[i]]
fmted_policies[control_id] = on_off
return fmted_policies
def format_policies(policy, control_str_ids, n_control_steps, opt_method):
if opt_method == 'genetic_algorithm':
return gene_to_policy_dict(policy, control_str_ids, n_control_steps)
elif opt_method == 'bayesian_opt':
return list_to_policy(policy, control_str_ids, n_control_steps)
def prep_tmp_files(proc_inp, work_dir):
# make process model tmp file
rand_string = ''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(9))
# make a copy of the process model input file
tmp_proc_base = proc_inp.replace('.inp',
'_tmp_{}'.format(rand_string))
tmp_proc_inp = tmp_proc_base + '.inp'
tmp_proc_rpt = tmp_proc_base + '.rpt'
copyfile(proc_inp, tmp_proc_inp)
# make copy of hs file
hs_file_path = up.read_hs_filename(proc_inp)
hs_file_name = os.path.split(hs_file_path)[-1]
tmp_hs_file_name = hs_file_name.replace('.hsf',
'_{}.hsf'.format(rand_string))
tmp_hs_file_path = os.path.join(sm.run.work_dir, tmp_hs_file_name)
copyfile(hs_file_path, tmp_hs_file_path)
return tmp_proc_inp, tmp_proc_rpt, tmp_hs_file_path
def evaluate(*individual):
"""
evaluate the performance of an individual given the inp file of the process
model, the individual, the control params (ctl_str_ids, horizon, step),
and the cost function params (dev_weight/dict, flood weight/dict)
"""
FNULL = open(os.devnull, 'w')
# prep files
tmp_inp, tmp_rpt, tmp_hs = prep_tmp_files(sm.run.inp_process_file_path,
sm.run.work_dir)
# format policies
if sm.run.opt_method == 'genetic_algorithm':
individual = individual[0]
elif sm.run.opt_method == 'bayesian_opt':
individual = np.squeeze(individual)
fmted_policies = format_policies(individual, sm.run.ctl_str_ids,
sm.run.n_ctl_steps, sm.run.opt_method)
# update controls
up.update_controls_and_hotstart(tmp_inp,
sm.run.ctl_time_step,
fmted_policies,
tmp_hs)
# run the swmm model
if os.name == 'nt':
swmm_exe_cmd = 'swmm5.exe'
elif sys.platform.startswith('linux'):
swmm_exe_cmd = 'swmm5'
cmd = '{} {} {}'.format(swmm_exe_cmd, tmp_inp,
tmp_rpt)
subprocess.call(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
# get cost
cost = get_cost(tmp_rpt,
sm.run.node_flood_weight_dict,
sm.run.flood_weight,
sm.run.target_depth_dict,
sm.run.dev_weight)
os.remove(tmp_inp)
os.remove(tmp_rpt)
os.remove(tmp_hs)
return cost
|
[
"sys.platform.startswith",
"os.remove",
"update_process_model_input_file.update_controls_and_hotstart",
"update_process_model_input_file.read_hs_filename",
"random.choice",
"subprocess.call",
"shutil.copyfile",
"numpy.squeeze",
"os.path.split",
"os.path.join"
] |
[((7428, 7460), 'shutil.copyfile', 'copyfile', (['proc_inp', 'tmp_proc_inp'], {}), '(proc_inp, tmp_proc_inp)\n', (7436, 7460), False, 'from shutil import copyfile\n'), ((7508, 7537), 'update_process_model_input_file.read_hs_filename', 'up.read_hs_filename', (['proc_inp'], {}), '(proc_inp)\n', (7527, 7537), True, 'import update_process_model_input_file as up\n'), ((7739, 7786), 'os.path.join', 'os.path.join', (['sm.run.work_dir', 'tmp_hs_file_name'], {}), '(sm.run.work_dir, tmp_hs_file_name)\n', (7751, 7786), False, 'import os\n'), ((7791, 7831), 'shutil.copyfile', 'copyfile', (['hs_file_path', 'tmp_hs_file_path'], {}), '(hs_file_path, tmp_hs_file_path)\n', (7799, 7831), False, 'from shutil import copyfile\n'), ((8719, 8809), 'update_process_model_input_file.update_controls_and_hotstart', 'up.update_controls_and_hotstart', (['tmp_inp', 'sm.run.ctl_time_step', 'fmted_policies', 'tmp_hs'], {}), '(tmp_inp, sm.run.ctl_time_step,\n fmted_policies, tmp_hs)\n', (8750, 8809), True, 'import update_process_model_input_file as up\n'), ((9165, 9237), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)', 'stdout': 'FNULL', 'stderr': 'subprocess.STDOUT'}), '(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)\n', (9180, 9237), False, 'import subprocess\n'), ((9465, 9483), 'os.remove', 'os.remove', (['tmp_inp'], {}), '(tmp_inp)\n', (9474, 9483), False, 'import os\n'), ((9488, 9506), 'os.remove', 'os.remove', (['tmp_rpt'], {}), '(tmp_rpt)\n', (9497, 9506), False, 'import os\n'), ((9511, 9528), 'os.remove', 'os.remove', (['tmp_hs'], {}), '(tmp_hs)\n', (9520, 9528), False, 'import os\n'), ((7557, 7584), 'os.path.split', 'os.path.split', (['hs_file_path'], {}), '(hs_file_path)\n', (7570, 7584), False, 'import os\n'), ((9008, 9040), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (9031, 9040), False, 'import sys\n'), ((7094, 7147), 'random.choice', 'random.choice', (['(string.ascii_lowercase + string.digits)'], {}), '(string.ascii_lowercase + string.digits)\n', (7107, 7147), False, 'import random\n'), ((8523, 8545), 'numpy.squeeze', 'np.squeeze', (['individual'], {}), '(individual)\n', (8533, 8545), True, 'import numpy as np\n')]
|
from _context import sparse
from sparse import util
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.distributions as dist
import numpy as np
from argparse import ArgumentParser
from torch.utils.tensorboard import SummaryWriter
import random, tqdm, sys, math
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from util import d
# import warnings
# warnings.simplefilter("error")
# warnings.simplefilter("ignore", DeprecationWarning)
# from util import tic, toc
# NB, the enwik8 data contains tokens from 9 to 240
NUM_TOKENS = 256
LOG2E = math.log2(math.e)
MARGIN = 0.1
def sample(lnprobs, temperature=1.0):
if temperature == 0.0:
return lnprobs.argmax()
p = F.softmax(lnprobs / temperature, dim=0)
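    # temperatures below 1.0 sharpen the distribution, above 1.0 flatten it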
cd = dist.Categorical(p)
return cd.sample()
def mask_(matrices, maskval=0.0, mask_diagonal=True):
"""
Masks out all values in the given batch of matrices where i <= j holds,
i < j if mask_diagonal is false
In place operation
    :param matrices: the batch of matrices to mask
:return:
"""
b, h, w = matrices.size()
indices = torch.triu_indices(h, w, offset=0 if mask_diagonal else 1)
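    # indices of the upper triangle (i <= j, or strictly above the diagonal when
    # mask_diagonal is false); writing maskval there blocks attention to the future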
matrices[:, indices[0], indices[1]] = maskval
class MSparseSelfAttention(nn.Module):
"""
Masked sparse self attention (two degrees of freedom)
"""
def __init__(self, emb, k, gadditional, radditional, region, heads=8, mask=False, min_sigma=0.05, sigma_scale=1.0):
"""
:param emb:
:param k: Number of connections to the input in total
:param gadditional:
:param radditional:
:param region:
:param heads:
:param mask:
"""
super().__init__()
self.emb, self.heads, self.mask, self.min_sigma, self.sigma_scale = emb, heads, mask, min_sigma, sigma_scale
self.tokeys = nn.Linear(emb, emb * heads, bias=False)
self.toqueries = nn.Linear(emb, emb * heads, bias=False)
self.tovalues = nn.Linear(emb, emb * heads, bias=False)
self.unifyheads = nn.Linear(heads * emb, emb)
self.gadditional = gadditional
self.radditional = radditional
self.region = region
self.k = k
self.means = nn.Parameter(torch.randn((k, 2)))
self.sigmas = nn.Parameter(torch.randn((k, )))
self.register_buffer('mvalues', torch.ones((k, )))
def hyper(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
# generate the continuous parameters
means = self.means[None, None, :, :].expand(b, 1, k, 2)
sigmas = self.sigmas[None, None, :].expand(b, 1, k)
values = self.mvalues[None, None, :].expand(b, 1, k)
means = util.flip(means.contiguous()) # flip everything to below the diagonal of the matrix
s = (t, t)
means, sigmas = sparse.transform_means(means, s), \
sparse.transform_sigmas(sigmas, s, min_sigma=self.min_sigma) * self.sigma_scale
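        # transform_means/transform_sigmas map the unconstrained parameters onto the
        # (t, t) index grid and force the sigmas to be positive and lower-bounded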
return means, sigmas, values
def forward(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
s = (t, t)
assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
means, sigmas, mvalues = self.hyper(x)
# sample integer indices and values
indices = sparse.ngenerate(means, self.gadditional, self.radditional, rng=(t, t),
relative_range=(self.region, self.region), cuda=x.is_cuda)
indices = util.flip(indices)
indfl = indices.float()
vs = k * (4 + self.radditional + self.gadditional)
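        # vs: sampled integer index tuples per output; each continuous 2-d mean yields
        # its 4 surrounding integer corners plus the regional and global samples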
assert indices.size() == (b, 1, vs, 2), f'{indices.size()}, {(b, 1, vs, 2)}'
# Mask for duplicate indices
dups = util.nduplicates(indices).to(torch.bool)
# compute (unnormalized) densities under the given MVNs (proportions)
props = sparse.densities(indfl, means, sigmas).clone()
props[dups, :] = 0
props = props / props.sum(dim=2, keepdim=True) # normalize over all points of a given index tuple
# weight the values by the proportions
weights = mvalues[:, :, None, :].expand_as(props)
# - add a dim for the MVNs
weights = props * weights
weights = weights.sum(dim=3) # - sum out the MVNs
assert indices.size() == (b, 1, vs, 2), f'{indices.size()}, {(b, 1, vs, 2)}'
assert weights.size() == (b, 1, vs), f'{weights.size()}, {(b, 1, vs)}'
# expand for heads, fold heads into batch
indices = indices[:, None, :, :, :].expand(b, h, 1, vs, 2).contiguous().view(b*h, vs, 2)
weights = weights[:, None, :, :].expand(b, h, 1, vs).contiguous().view(b*h, vs)
# compute keys, queries, values
keys = self.tokeys(x) .view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x) .view(b, t, h, e)
# - fold heads into the batch dimension
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries / (e ** (1/4)) # b*h, t, e
keys = keys / (e ** (1/4))
# get dot product of queries and keys
# - this will be a sparse matrix with the indices we've just computed, and values
# defined by the dot product
# select the queries
indflat = indices.view(b*h*vs, 2)
ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, vs).contiguous().view(b*h*vs)
squeries = queries[ar, indflat[:, 0], :]
skeys = keys [ar, indflat[:, 1], :]
dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h, vs)
dot = sparse.logsoftmax(indices, weights * dot, s)
# - dot now has row-wise self-attention probabilities
# apply the self attention to the values
out = sparse.batchmm(indices, dot, size=(t, t), xmatrix=values)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
return self.unifyheads(out)
class ASH2DSelfAttention(nn.Module):
"""
    Masked sparse self attention. Two degrees of freedom: the receptive field is adaptive, based on the incoming
embedding vector, position embedding and coordinate.
"""
def __init__(self, emb, k, gadditional, radditional, region, heads=8, mask=False, min_sigma=0.05,
sigma_scale=0.1, mmult = 1.0):
"""
:param emb:
:param k: Number of connections to the input for each output
:param gadditional:
:param radditional:
:param region:
:param heads:
:param mask:
"""
super().__init__()
self.emb, self.heads, self.mask, self.min_sigma, self.sigma_scale = emb, heads, mask, min_sigma, sigma_scale
self.mmult = mmult
self.tokeys = nn.Linear(emb, emb * heads, bias=False)
self.toqueries = nn.Linear(emb, emb * heads, bias=False)
self.tovalues = nn.Linear(emb, emb * heads, bias=False)
self.unifyheads = nn.Linear(heads * emb, emb)
self.gadditional = gadditional
self.radditional = radditional
self.region = region
self.k = k
self.register_buffer('mvalues', torch.ones((k, )))
# network that generates the coordinates and sigmas
hidden = emb * 4
self.toparams = nn.Sequential(
nn.Linear(emb + 1, hidden), nn.ReLU(),
nn.Linear(hidden, k * 3) # two means, one sigma
)
def hyper(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
# Generate coords
coords = torch.arange(t, dtype=torch.float, device=d(x)) / t
coords = coords[None, :, None,].expand(b, t, 1)
input = torch.cat([x, coords], dim=2)
params = self.toparams(input) # (b, t, k*3)
assert not util.contains_nan(params), \
            f'params contain NaN\n input {input.min()} {input.max()} \n {list(self.toparams.parameters())}'
# Generate the logits that correspond to the diagonals of the matrix
diags = torch.arange(t, dtype=torch.float, device=d(x))
diags = util.inv(diags, mx=t)
diags = diags[None, :, None, None].expand(b, t, k, 2)
means = params[:, :, :k*2].view(b, t, k, 2)
sigmas = params[:, :, k*2:].view(b, t, k)
values = self.mvalues[None, None, :].expand(b, t, k)
means = diags + self.mmult * means
means = util.flip(means)
# means = util.flip(means.contiguous()) # flip everything to below the diagonal of the matrix
s = (t, t)
means, sigmas = sparse.transform_means(means, s), \
sparse.transform_sigmas(sigmas, s, min_sigma=self.min_sigma) * self.sigma_scale
return means, sigmas, values
def forward(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
s = (t, t)
assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
means, sigmas, mvalues = self.hyper(x)
# sample integer indices and values
indices = sparse.ngenerate(means, self.gadditional, self.radditional, rng=(t, t),
relative_range=(self.region, self.region), cuda=x.is_cuda)
indices = util.flip(indices)
indfl = indices.float()
vs = k * (4 + self.radditional + self.gadditional)
assert indices.size() == (b, t, vs, 2), f'{indices.size()}, {(b, t, vs, 2)}'
# Mask for duplicate indices
dups = util.nduplicates(indices).to(torch.bool)
# compute (unnormalized) densities under the given MVNs (proportions)
props = sparse.densities(indfl, means, sigmas).clone()
props[dups, :] = 0
props = props / props.sum(dim=2, keepdim=True) # normalize over all points of a given index tuple
# weight the values by the proportions
weights = mvalues[:, :, None, :].expand_as(props)
# - add a dim for the MVNs
weights = props * weights
weights = weights.sum(dim=3) # - sum out the MVNs
assert indices.size() == (b, t, vs, 2), f'{indices.size()}, {(b, t, vs, 2)}'
assert weights.size() == (b, t, vs), f'{weights.size()}, {(b, t, vs)}'
# expand for heads, fold heads into batch
indices = indices[:, None, :, :, :].expand(b, h, t, vs, 2).contiguous().view(b*h, t*vs, 2)
weights = weights[:, None, :, :].expand(b, h, t, vs).contiguous().view(b*h, t*vs)
# compute keys, queries, values
keys = self.tokeys(x) .view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x) .view(b, t, h, e)
# - fold heads into the batch dimension
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries / (e ** (1/4)) # b*h, t, e
keys = keys / (e ** (1/4))
# get dot product of queries and keys
# - this will be a sparse matrix with the indices we've just computed, and values
# defined by the dot product
# select the queries
indflat = indices.view(b*h*t*vs, 2)
ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, t*vs).contiguous().view(b*h*t*vs)
squeries = queries[ar, indflat[:, 0], :]
skeys = keys [ar, indflat[:, 1], :]
dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h,t*vs)
#print(f'dot before {dot.min()}, {dot.mean()}, {dot.max()}')
assert not util.contains_nan(dot), f'dot contains nan (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
#print(f'dot after {dot.min()}, {dot.mean()}, {dot.max()}\n')
dot = sparse.logsoftmax(indices, weights * dot, s).exp()
# - dot now has row-wise self-attention probabilities
assert not util.contains_nan(dot), f'dot contains nan (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
# apply the self attention to the values
out = sparse.batchmm(indices, dot, size=(t, t), xmatrix=values)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
out = self.unifyheads(out)
assert not util.contains_nan(out), f'output contains nan {out}'
return out
class ASH1DSelfAttention(nn.Module):
"""
Masked sparse self attention. One degree of freedom, the receptive field is adaptive, based on the incoming
embedding vector, position embedding and coordinate.
"""
def __init__(self, emb, k, gadditional, radditional, region, heads=8, mask=False, min_sigma=0.05, sigma_scale=0.1,
mmult = 1.0, norm_method='softmax', outputs=-1, clamp=True):
"""
:param emb:
:param k: Number of connections to the input for each output
:param gadditional:
:param radditional:
:param region:
:param heads:
:param outputs: The number of units (at the end of the sequence) to compute new vectors for.
:param mask:
"""
super().__init__()
self.emb, self.heads, self.mask, self.min_sigma, self.sigma_scale = emb, heads, mask, min_sigma, sigma_scale
self.mmult, self.norm_method, self.clamp = mmult, norm_method, clamp
if clamp:
self.mmult *= 3.0
self.outputs = outputs
self.tokeys = nn.Linear(emb, emb * heads, bias=False)
self.toqueries = nn.Linear(emb, emb * heads, bias=False)
self.tovalues = nn.Linear(emb, emb * heads, bias=False)
self.unifyheads = nn.Linear(heads * emb, emb)
self.gadditional = gadditional
self.radditional = radditional
self.region = region
self.k = k
self.register_buffer('mvalues', torch.ones((k, )))
# network that generates the coordinates and sigmas
hidden = emb * 4
self.toparams = nn.Sequential(
nn.Linear(emb + 1, hidden), nn.ReLU(),
nn.Linear(hidden, k * 2) # one mean, one sigma
)
def hyper(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
        o = t if self.outputs < 0 else self.outputs  # outputs == -1 means compute all positions
# Generate coords
coords = torch.arange(t, dtype=torch.float, device=d(x)) / t
coords = coords[None, :, None,].expand(b, t, 1)
input = torch.cat([x, coords], dim=2)
params = self.toparams(input) # (b, o, k*2)
assert not util.contains_nan(params), \
            f'params contain NaN\n input {input.min()} {input.max()} \n {list(self.toparams.parameters())}'
# Generate the logits that correspond to the horizontal coordinate of the current word
diags = torch.arange(t, dtype=torch.float, device=d(x))
if not self.clamp:
diags = util.inv(diags, mx=t)
diags = diags[None, :, None, None].expand(b, t, k, 1)
means = params[:, :, :k].view(b, t, k, 1)
sigmas = params[:, :, k:].view(b, t, k)
values = self.mvalues[None, None, :].expand(b, t, k)
means = diags - self.mmult * F.softplus(means)
s = (t,)
means, sigmas = sparse.transform_means(means, s, method='clamp' if self.clamp else 'sigmoid'), \
sparse.transform_sigmas(sigmas, s, min_sigma=self.min_sigma) * self.sigma_scale
return means, sigmas, values
def forward(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
s = (t, t)
assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
means, sigmas, mvalues = self.hyper(x)
# sample integer indices and values
indices = sparse.ngenerate(means, self.gadditional, self.radditional, rng=(t,),
relative_range=(self.region, ), cuda=x.is_cuda)
indfl = indices.float()
vs = k * (2 + self.radditional + self.gadditional)
assert indices.size() == (b, t, vs, 1), f'{indices.size()}, {(b, t, vs, 1)}'
m = torch.arange(t, dtype=torch.long, device=d(indices))[None, :, None, None].expand(b, t, vs, k)
props = sparse.densities(indfl, means, sigmas).clone() # (b, t, vs, k)
# Mask for duplicate indices
dups = util.nduplicates(indices).to(torch.bool)
# compute (unnormalized) densities under the given MVNs (proportions)
props[dups, :] = 0
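        # zero out any sampled index tuple that points forward in time (causal mask)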
props[indices > m] = 0
props = props / props.sum(dim=2, keepdim=True) # normalize over all points of a given index tuple
# weight the values by the proportions
weights = mvalues[:, :, None, :].expand_as(props)
# - add a dim for the MVNs
weights = props * weights
weights = weights.sum(dim=3) # - sum out the MVNs
out = torch.arange(t, device=d(indices))[None, :, None, None].expand(b, t, vs, 1)
indices = torch.cat([out, indices], dim=3)
assert indices.size() == (b, t, vs, 2), f'{indices.size()}, {(b, t, vs, 2)}'
assert weights.size() == (b, t, vs), f'{weights.size()}, {(b, t, vs)}'
# expand for heads, fold heads into batch
indices = indices[:, None, :, :, :].expand(b, h, t, vs, 2).contiguous().view(b*h, t*vs, 2)
weights = weights[:, None, :, :].expand(b, h, t, vs).contiguous().view(b*h, t*vs)
# compute keys, queries, values
keys = self.tokeys(x) .view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x) .view(b, t, h, e)
# - fold heads into the batch dimension
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries / (e ** (1/4)) # b*h, t, e
keys = keys / (e ** (1/4))
# get dot product of queries and keys
# - this will be a sparse matrix with the indices we've just computed, and values
# defined by the dot product
# select the queries
indflat = indices.view(b*h*t*vs, 2)
ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, t*vs).contiguous().view(b*h*t*vs)
squeries = queries[ar, indflat[:, 0], :]
skeys = keys [ar, indflat[:, 1], :]
dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h,t*vs)
assert not util.contains_inf(dot), f'dot contains inf (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
assert not util.contains_nan(dot), f'dot contains nan (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
if self.norm_method == 'softmax':
dot = sparse.logsoftmax(indices, weights * dot, s).exp()
else:
dot = sparse.simple_normalize(indices, weights * dot, s, method=self.norm_method)
# - dot now has row-wise self-attention probabilities
assert not util.contains_inf(dot), f'dot contains inf (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
assert not util.contains_nan(dot), f'dot contains nan (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
# apply the self attention to the values
out = sparse.batchmm(indices, dot, size=(t, t), xmatrix=values)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
out = self.unifyheads(out)
assert not util.contains_nan(out), f'output contains nan {out}, dot min/max: {dot.min()}/{dot.max()}'
return out
class StridedSparseSelfAttention(nn.Module):
"""
Masked sparse self attention. One degree of freedom, the receptive field is adaptive, based on the incoming
embedding vector, position embedding and coordinate.
"""
def __init__(self, emb, k, gadditional, radditional, region, heads=8, stride=32, mask=False, min_sigma=0.05, sigma_scale=0.1,
mmult = 1.0, norm_method='softmax', clamp=True, **kwargs):
"""
:param emb:
:param k: Number of connections to the input for each output
:param gadditional:
:param radditional:
:param region:
:param heads:
        :param stride: Distance between the fixed output positions along the sequence.
:param mask:
"""
super().__init__()
self.emb, self.heads, self.mask, self.min_sigma, self.sigma_scale = emb, heads, mask, min_sigma, sigma_scale
self.mmult, self.norm_method, self.clamp = mmult, norm_method, clamp
self.stride = stride
if clamp:
self.mmult *= 3.0
s = emb // heads
self.tokeys = nn.Linear(s, s, bias=False)
self.toqueries = nn.Linear(s, s, bias=False)
self.tovalues = nn.Linear(s, s, bias=False)
self.unifyheads = nn.Linear(s * heads, emb)
self.gadditional = gadditional
self.radditional = radditional
self.region = region
self.k = k
self.register_buffer('mvalues', torch.ones((k, )))
# network that generates the coordinates and sigmas
hidden = emb * 4
self.toparams = nn.Sequential(
nn.Linear(2 * emb + 1, hidden), nn.ReLU(),
nn.Linear(hidden, k * 2) # one mean, one sigma
)
# -- input is the current token's embedding vector, the sum of preceding embedding vectors, and the coordinate.
def hyper(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
r = self.stride
s = (t,)
# Generate input selection
selection = torch.arange(t//r, dtype=torch.long, device=d(x))
selection = (selection + 1) * r - 1
tp = selection.size(0)
# Generate coords
coords = torch.arange(tp, dtype=torch.float, device=d(x)) / tp
coords = coords[None, :, None,].expand(b, tp, 1)
summed = (x.cumsum(dim=1) - x) / torch.arange(start=1, end=t+1, device=d(), dtype=torch.float)[None, :, None]
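        # sum of the strictly preceding token embeddings, scaled by the 1-based position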
input = torch.cat([x[:, selection, :], coords, summed[:, selection, :]], dim=2)
params = self.toparams(input) # (b, tp, k*2)
assert not util.contains_nan(params), \
f'params contain NaN\n input {input.min()} {input.max()} \n {list(self.toparams.parameters())}'
assert not util.contains_inf(params), \
f'params contain inf\n input {input.min()} {input.max()} \n {list(self.toparams.parameters())}'
# Generate the logits/coordinates that correspond to the horizontal coordinate of the current word
diags = selection.to(torch.float)
if not self.clamp:
diags = util.inv(diags, mx=t)
diags = diags[None, :, None, None].expand(b, tp, k, 1)
means = params[:, :, :k].view(b, tp, k, 1)
sigmas = params[:, :, k:].view(b, tp, k)
values = self.mvalues[None, None, :].expand(b, tp, k) # all ones atm
means = diags - self.mmult * F.softplus(means)
means, sigmas = sparse.transform_means(means, s, method='clamp' if self.clamp else 'sigmoid'), \
sparse.transform_sigmas(sigmas, s, min_sigma=self.min_sigma) * self.sigma_scale
return means, sigmas, values
def forward(self, x):
b, t, e = x.size()
h, k, reg = self.heads, self.k, self.region
r = self.stride
# Generate input selection (the fixed output indices, which are 'stride' units apart)
selection = torch.arange(t//r, dtype=torch.long, device=d(x))
selection = (selection + 1) * r - 1
tp = selection.size(0)
size = (t, t)
means, sigmas, mvalues = self.hyper(x)
s = e // h
x = x.view(b, t, h, s)
# sample integer indices and values
indices = sparse.ngenerate(means, self.gadditional, self.radditional, rng=(t,),
relative_range=(self.region, ), cuda=x.is_cuda, epsilon=10e-5)
indfl = indices.float()
vs = k * (2 + self.radditional + self.gadditional) # number of sampled integer index tuples
assert indices.size() == (b, tp, vs, 1), f'{indices.size()}, {(b, tp, vs, 1)}'
m = selection[None, :, None, None].expand(b, tp, vs, k)
props = sparse.densities(indfl, means, sigmas).clone() # (b, tp, vs, k)
# Mask for duplicate indices
dups = util.nduplicates(indices).to(torch.bool)
# compute (unnormalized) densities under the given MVNs (proportions)
props[dups, :] = 0
props[indices > m] = 0 # mask out any forward connections
# -- note that while all the continuous index tuples are guaranteed to point backwards, the sampled discrete
# index tuples might point forward, so they still need to be zeroed out here.
props = props / props.sum(dim=2, keepdim=True) # normalize over all remaining points of a given index tuple
# weight the values by the proportions
weights = mvalues[:, :, None, :].expand_as(props)
# - add a dim for the MVNs
weights = props * weights
weights = weights.sum(dim=3) # - sum out the MVNs
out = selection[None, :, None, None].expand(b, tp, vs, 1) # output indices
indices = torch.cat([out, indices], dim=3)
assert indices.size() == (b, tp, vs, 2), f'{indices.size()}, {(b, tp, vs, 2)}'
assert weights.size() == (b, tp, vs), f'{weights.size()}, {(b, tp, vs)}'
assert not util.contains_inf(weights), f'weights contains inf (before norm) {weights.min()}, {weights.mean()}, {weights.max()}'
assert not util.contains_nan(weights), f'weights contains nan (before norm) {weights.min()}, {weights.mean()}, {weights.max()}'
# expand for heads, fold heads into batch
indices = indices[:, None, :, :, :].expand(b, h, tp, vs, 2).contiguous().view(b*h, tp*vs, 2)
weights = weights[:, None, :, :].expand(b, h, tp, vs).contiguous().view(b*h, tp*vs)
# compute keys, queries, values
keys = self.tokeys(x) # note: t not tp, we compute _all_ queries, keys and values
queries = self.toqueries(x)
values = self.tovalues(x)
# - fold heads into the batch dimension
keys = keys.transpose(1, 2).contiguous() .view(b * h, t, s)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, s)
values = values.transpose(1, 2).contiguous() .view(b * h, t, s)
# -- We could actually select first, and _then_ transform to kqv's. May be better for very large contexts and
# small batches
queries = queries / (e ** (1/4)) # (b*h, t, s)
keys = keys / (e ** (1/4))
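# -- scaling q and k each by e**(-1/4) is equivalent to dividing q.k by sqrt(e), the standard
# attention scaling, while keeping the intermediate tensors small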
# get dot product of queries and keys
# - this will be a sparse matrix with the indices we've just computed, and values
# defined by the dot product
# select the queries
indflat = indices.view(b*h*tp*vs, 2)
ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, tp*vs).contiguous().view(b*h*tp*vs)
squeries = queries[ar, indflat[:, 0], :]
skeys = keys [ar, indflat[:, 1], :]
dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h,tp*vs)
dot_logits = dot.data.clone()
assert not util.contains_inf(dot), f'dot contains inf (before norm) {dot.min()}, {dot.mean()}, {dot.max()}'
assert not util.contains_nan(dot), f'dot contains nan (before norm) {dot.min()}, {dot.mean()}, {dot.max()}'
if self.norm_method == 'softmax':
dot = sparse.logsoftmax(indices, weights * dot, size).exp()
else:
dot = sparse.simple_normalize(indices, weights * dot, size, method=self.norm_method)
# - dot now has row-wise self-attention probabilities
assert not util.contains_inf(dot), f'dot contains inf (after norm) {dot.min()}, {dot.mean()}, {dot.max()}'
try:
assert not util.contains_nan(dot), f'dot contains nan (after norm) {dot.min()}, {dot.mean()}, {dot.max()}'
except AssertionError:
print(dot.sum(dim=1))
print('\n\n\n')
for i in range(b*h):
print(f'*** {i}')
print(indices[i])
print(dot_logits[i])
print((weights * dot_logits)[i])
print('\n\n\n')
sys.exit()
# apply the self attention to the values
out = sparse.batchmm(indices, dot, size=size, xmatrix=values)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, h * s)
out = self.unifyheads(out)
assert not util.contains_nan(out), f'output contains nan {out}, dot min/max: {dot.min()}/{dot.max()}'
return out
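# -- minimal usage sketch for StridedSparseSelfAttention above (constructor keywords inferred from
# the TransformerBlock call further down; treat the exact signature as an assumption):
#
# layer = StridedSparseSelfAttention(emb=128, heads=8, k=8, gadditional=2, radditional=2,
# region=16, stride=32, norm_method='softmax', clamp=False)
# x = torch.randn(4, 256, 128) # (batch, time, emb)
# y = layer(x) # same shape; sparse, causal self-attention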
class ConvSelfAttention(nn.Module):
"""
Self-attention with a hardwired convolutional sparsity pattern. That is, each output attends to its own
position and the k-1 positions before it.
Wiring is always "causal" (i.e. the layer only looks into the past).
Padding is added to the input to ensure the input and output have the same length.
"""
def __init__(self, emb, heads=8, norm_method='softmax', k=32, **kwargs):
"""
:param emb:
:param k: Number of connections to the input for each output
:param gadditional:
:param radditional:
:param region:
:param heads:
:param outputs: The number of units (at the end of the sequence) to compute new vectors for.
:param mask:
"""
super().__init__()
self.emb, self.heads = emb, heads
self.norm_method = norm_method
s = emb // heads
self.tokeys = nn.Linear(s, s, bias=False)
self.toqueries = nn.Linear(s, s, bias=False)
self.tovalues = nn.Linear(s, s, bias=False)
self.unifyheads = nn.Linear(s * heads, emb)
self.k = k
def forward(self, x):
b, t, e = x.size()
h, k = self.heads, self.k
s = e // h
x = x.view(b, t, h, s)
tp = t + k - 1
size = (t, tp)
xp = F.pad(x, [0, 0, 0, 0, k-1, 0, 0, 0]) # zero pad the beginning of x
assert xp.size() == (b, tp, h, s), f'{xp.size()} vs {(b, tp, h, s)}'
# compute keys, queries, values (note that the self attention matrix is slightly rectangular)
queries = self.toqueries(x)
keys = self.tokeys(xp)
values = self.tovalues(xp)
# - fold heads into the batch dimension
queries = queries.transpose(1, 2) .contiguous().view(b * h, t, s)
keys = keys.transpose(1, 2) .contiguous().view(b * h, tp, s)
values = values.transpose(1, 2) .contiguous().view(b * h, tp, s)
queries = queries / (e ** (1/4)) # should this be s?
keys = keys / (e ** (1/4))
# Get dot product of queries and keys
# - this will be a sparse matrix with the indices we've just computed, and values
# defined by the dot product
# generate the indices (t*k pairs of integers per attention head)
indices = torch.arange(t, dtype=torch.long, device=d(x))[:, None, None].expand(t, k, 2).contiguous()
deltas = torch.arange(k, dtype=torch.long, device=d(x))[None, :, None].expand(t, k, 1)
indices[:, :, 1:] += deltas
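# -- row i now points at padded positions i..i+k-1, which map back to original positions
# i-k+1..i: each token attends to itself and the k-1 tokens before it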
indices = indices[None, None, :, :, :].expand(b, h, t, k, 2).contiguous()
indflat = indices.view(b*h*t*k, 2)
# select the queries and the keys (left and right column of index matrix) and take their dot
# product (note that they are already scaled)
ar = torch.arange(b*h, dtype=torch.long, device=d(x))[:, None].expand(b*h, t*k).contiguous().view(b*h*t*k)
squeries = queries[ar, indflat[:, 0], :]
skeys = keys [ar, indflat[:, 1], :]
dot = torch.bmm(squeries[:, None, :], skeys[:, :, None]).view(b*h,t*k)
indices = indices.view(b*h, t*k, 2)
# assert not util.contains_inf(dot), f'dot contains inf (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
# assert not util.contains_nan(dot), f'dot contains nan (before softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
if self.norm_method == 'softmax':
dot = sparse.logsoftmax(indices, dot, size).exp()
else:
dot = sparse.simple_normalize(indices, dot, size, method=self.norm_method)
# - dot now has row-wise self-attention probabilities
# assert not util.contains_inf(dot), f'dot contains inf (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
# assert not util.contains_nan(dot), f'dot contains nan (after softmax) {dot.min()}, {dot.mean()}, {dot.max()}'
# apply the self attention to the values
out = sparse.batchmm(indices, dot, size=size, xmatrix=values)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, h * s)
out = self.unifyheads(out)
assert not util.contains_nan(out), f'output contains nan {out}, dot min/max: {dot.min()}/{dot.max()}'
return out
class SelfAttention(nn.Module):
"""
Plain, dense self attention
"""
def __init__(self, emb, heads=8, mask=False):
"""
:param emb:
:param heads:
:param mask:
"""
super().__init__()
self.emb = emb
self.heads = heads
self.mask = mask
self.tokeys = nn.Linear(emb, emb * heads, bias=False)
self.toqueries = nn.Linear(emb, emb * heads, bias=False)
self.tovalues = nn.Linear(emb, emb * heads, bias=False)
self.unifyheads = nn.Linear(heads * emb, emb)
def forward(self, x):
b, t, e = x.size()
h = self.heads
assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
keys = self.tokeys(x) .view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x) .view(b, t, h, e)
# compute scaled dot-product self-attention
# - fold heads into the batch dimension
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries / (e ** (1/4))
keys = keys / (e ** (1/4))
# - Instead of dividing the dot products by sqrt(e), we scale the keys and values.
# This should be more memory efficient
# - get dot product of queries and keys, and scale
dot = torch.bmm(queries, keys.transpose(1, 2))
assert dot.size() == (b*h, t, t), f'Matrix has size {dot.size()}, expected {(b*h, t, t)}.'
if self.mask: # mask out the upper half of the dot matrix, excluding the diagonal
mask_(dot, maskval=float('-inf'), mask_diagonal=False)
dot = F.softmax(dot, dim=2) # dot now has row-wise self-attention probabilities
assert not util.contains_nan(dot[:, 1:, :]) # only the first row may contain nan
if self.mask == 'first':
dot = dot.clone()
dot[:, :1, :] = 0.0
# - The first row of the first attention matrix is entirely masked out, so the softmax operation results
# in a division by zero. We set this row to zero by hand to get rid of the NaNs
# apply the self attention to the values
out = torch.bmm(dot, values).view(b, h, t, e)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
return self.unifyheads(out)
class TransformerBlock(nn.Module):
def __init__(self, emb, heads, mask, ff_hidden_mult=4, dropout=0.0, type='dense', oned=True, **kwargs):
super().__init__()
if type == 'sparse':
if mask:
if oned:
self.attention = ASH1DSelfAttention(emb, heads=heads, **kwargs)
else:
self.attention = ASH2DSelfAttention(emb, heads=heads, **kwargs)
else:
raise Exception('Not implemented yet')
elif type == 'strided':
self.attention = StridedSparseSelfAttention(emb, heads=heads, **kwargs)
elif type == 'conv':
self.attention = ConvSelfAttention(emb, heads, **kwargs)
elif type == 'dense':
self.attention = SelfAttention(emb, heads=heads, mask=mask)
elif type == 'mixed':
layers = []
for ltype in kwargs['mixture']: # renamed from `type` to avoid shadowing the argument and the builtin
if ltype == 'c':
layers.append(ConvSelfAttention(emb, heads, **kwargs))
elif ltype == 's':
strided = StridedSparseSelfAttention(emb, heads=heads, **kwargs)
layers.append(strided)
self.toplot = strided
else:
raise Exception(f'layer type {ltype} not recognized.')
self.attention = nn.Sequential(*layers)
else:
raise Exception('Not implemented yet')
self.norm1 = nn.LayerNorm(emb)
self.norm2 = nn.LayerNorm(emb)
self.ff = nn.Sequential(
nn.Linear(emb, ff_hidden_mult * emb),
nn.ReLU(),
nn.Linear(ff_hidden_mult * emb, emb)
)
self.do = nn.Dropout(dropout)
def forward(self, x):
b, t, e = x.size()
attended = self.attention(x)
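# -- post-norm residual wiring: layer norm is applied after each residual addition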
x = self.norm1(attended + x)
x = self.do(x)
fedforward = self.ff(x)
x = self.norm2(fedforward + x)
x = self.do(x)
return x
class GTransformer(nn.Module):
"""
Transformer for generating text (character by character).
"""
def __init__(self, emb, heads, depth, seq_length, num_tokens, sparse=False, **kwargs):
"""
:param emb:
:param heads:
:param depth:
:param seq_length:
:param num_tokens:
:param sparse:
:param kwargs: Are passed to the sparse self attention
"""
super().__init__()
self.num_tokens = num_tokens
self.token_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=num_tokens)
self.pos_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=seq_length)
tblocks = []
for i in range(depth):
tblocks.append(
TransformerBlock(emb=emb, heads=heads, seq_length=seq_length, mask=True, sparse=sparse, **kwargs))
self.tblocks = nn.Sequential(*tblocks)
self.toprobs = nn.Linear(emb, num_tokens)
def forward(self, x):
"""
:param x: A batch by sequence length integer tensor of token indices.
:return: predicted log-probability vectors for each token based on the preceding tokens.
"""
tokens = self.token_embedding(x)
b, t, e = tokens.size()
positions = self.pos_embedding(torch.arange(t, device=d(x)))[None, :, :].expand(b, t, e)
x = tokens + positions
x = self.tblocks(x)
x = self.toprobs(x.view(b*t, e)).view(b, t, self.num_tokens)
return F.log_softmax(x, dim=2)
def forward_for_plot(self, x):
"""
:param x: A batch by sequence length integer tensor of token indices.
:return: predicted log-probability vectors for each token based on the preceding tokens.
"""
means, sigmas, values = [], [], []
tokens = self.token_embedding(x)
b, t, e = tokens.size()
positions = self.pos_embedding(torch.arange(t, device=d(x)))[None, :, :].expand(b, t, e)
x = tokens + positions
for tblock in self.tblocks:
if type(tblock.attention) is not nn.Sequential:
m, s, v = tblock.attention.hyper(x)
means.append(m)
sigmas.append(s)
values.append(v)
else:
xc = x.clone()
for layer in tblock.attention: # walk through the attention layers
if type(layer) == StridedSparseSelfAttention:
m, s, v = layer.hyper(xc)
means.append(m)
sigmas.append(s)
values.append(v)
xc = layer(xc)
x = tblock(x)
return means, sigmas, values
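# -- minimal usage sketch for GTransformer above (dense configuration, shapes inferred from forward()):
#
# model = GTransformer(emb=128, heads=8, depth=4, seq_length=256, num_tokens=256)
# x = torch.randint(0, 256, (4, 256)) # (batch, seq) token indices
# logprobs = model(x) # (4, 256, 256): log-probabilities over the next token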
def enwik8(path, n_train=int(90e6), n_valid=int(5e6), n_test=int(5e6)):
"""
From https://github.com/openai/blocksparse/blob/master/examples/transformer/enwik8.py
:param path:
:param n_train:
:param n_valid:
:param n_test:
:return:
"""
# np.fromstring is deprecated (and fails on Python 3 text-mode reads): read bytes and copy so torch gets a writable array
X = np.frombuffer(open(path, 'rb').read(n_train + n_valid + n_test), dtype=np.uint8).copy()
trX, vaX, teX = np.split(X, [n_train, n_train + n_valid])
return torch.from_numpy(trX), torch.from_numpy(vaX), torch.from_numpy(teX)
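# -- usage sketch, assuming an enwik8 dump is available at the given path:
# data_train, data_val, data_test = enwik8('data/enwik8') # 90M / 5M / 5M uint8 tensors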
def go(arg):
util.makedirs('./transformer-plots/')
if arg.seed < 0:
seed = random.randint(0, 1000000)
print('random seed: ', seed)
torch.manual_seed(seed) # use the randomly drawn seed (it was previously printed but never applied)
else:
torch.manual_seed(arg.seed)
dv = 'cuda' if arg.cuda else 'cpu'
tbw = SummaryWriter(log_dir=arg.tb_dir)
# load the data
data_train, data_val, data_test = enwik8(arg.data)
data_test = data_test if arg.final else data_val
# create the model
if arg.model.startswith('sparse'):
model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context,
num_tokens=NUM_TOKENS, sparse=True, gadditional=arg.gadditional, radditional=arg.radditional,
region=arg.region, k=arg.k, min_sigma=arg.min_sigma, sigma_scale=arg.sigma_mult,
oned=(arg.model == 'sparse1d'), norm_method=arg.norm_method, clamp=arg.clamp)
elif arg.model == 'strided':
model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context,
num_tokens=NUM_TOKENS, gadditional=arg.gadditional, radditional=arg.radditional,
region=arg.region, k=arg.k, min_sigma=arg.min_sigma, sigma_scale=arg.sigma_mult,
norm_method=arg.norm_method, clamp=arg.clamp, stride=arg.stride, type='strided')
elif arg.model == 'conv':
model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context, k=arg.kconv,
num_tokens=NUM_TOKENS, type='conv', norm_method=arg.norm_method)
elif arg.model == 'dense':
model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context,
num_tokens=NUM_TOKENS)
elif arg.model == 'mixed':
model = GTransformer(emb=arg.embedding_size, heads=arg.num_heads, depth=arg.depth, seq_length=arg.context,
num_tokens=NUM_TOKENS, gadditional=arg.gadditional, radditional=arg.radditional,
region=arg.region, k=arg.k, min_sigma=arg.min_sigma, sigma_scale=arg.sigma_mult,
norm_method=arg.norm_method, clamp=arg.clamp, stride=arg.stride, type='mixed',
kconv=arg.kconv, mixture=arg.mixture)
else:
raise Exception(f'Model name unknown: {arg.model}')
if arg.cuda:
model.cuda()
opt = torch.optim.Adam(lr=arg.lr, params=model.parameters())
# training loop
for i in tqdm.trange(arg.num_batches):
if arg.lr_warmup > 0 and i < arg.lr_warmup:
lr = max( (arg.lr / arg.lr_warmup) * i, 1e-10)
opt.param_groups[0]['lr'] = lr # assigning to opt.lr would create a new attribute without changing Adam's actual learning rate
opt.zero_grad()
# sample batches
starts = torch.randint(size=(arg.batch_size, ), low=0, high=data_train.size(0) - arg.context - 1)
seqs_source = [data_train[start :start+arg.context ] for start in starts]
seqs_target = [data_train[start+1:start+arg.context+1] for start in starts]
source = torch.cat([s[None, :] for s in seqs_source ], dim=0).to(torch.long)
target = torch.cat([s[None, :] for s in seqs_target ], dim=0).to(torch.long)
if arg.cuda:
source, target = source.cuda(), target.cuda()
source, target = Variable(source), Variable(target)
output = model(source)
loss = F.nll_loss(output.transpose(2, 1), target, reduction='none')
loss = loss.mean()
tbw.add_scalar('transformer/train-loss', float(loss.item()) * LOG2E, i * arg.batch_size)
assert loss.item() == loss.item(), f'Loss is nan {loss}'
loss.backward()
assert not util.contains_nan(model.parameters()), f'Parameters have become NaN {model.parameters()}'
if arg.cuda and (i == 0 or random.random() < 0.0005): # occasionally print peak GPU memory usage
print(f'\nPeak gpu memory use is {torch.cuda.max_memory_cached() / 1e9:.2} Gb')
# clip gradients
if arg.gradient_clipping is not None:
nn.utils.clip_grad_norm_(model.parameters(), arg.gradient_clipping)
opt.step()
if (arg.model.startswith('sparse') or arg.model == 'strided' or arg.model == 'mixed') and arg.plot_every > 0 and i % arg.plot_every == 0:
shape = (arg.context, arg.context)
means, sigmas, values = model.forward_for_plot(source)
for t, (m, s, v) in enumerate(zip(means, sigmas, values)):
b, c, k, r = m.size()
m = m.view(b, c*k, r)
s = s.view(b, c*k, r)
v = v.reshape(b, c*k)
plt.figure(figsize=(7, 7))
plt.cla()
if arg.model == 'sparse1d':
ind = torch.arange(c, dtype=torch.float, device=d(m))[None, :, None].expand(b, c, k).reshape(b, c*k, 1)
m = torch.cat([ind, m], dim=2)
util.plot1d(m[0].data, s[0].data, v[0].data, shape=shape)
elif arg.model == 'strided' or arg.model == 'mixed':
r = arg.stride
ind = torch.arange(c, dtype=torch.float, device=d(m))
ind = (ind + 1) * r - 1
ind = ind[None, :, None].expand(b, c, k).reshape(b, c*k, 1)
m = torch.cat([ind, m], dim=2)
util.plot1d(m[0].data, s[0].data, v[0].data, shape=shape)
else:
util.plot(m, s, v, shape=shape)
plt.xlim((-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN)))
plt.ylim((-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN)))
plt.savefig(f'./transformer-plots/means{i:06}.{t}.pdf')
if i != 0 and (i % arg.test_every == 0 or i == arg.num_batches - 1):
upto = data_test.size(0) if i == arg.num_batches - 1 else arg.test_subset
data_sub = data_test[:upto]
with torch.no_grad():
bits, tot = 0.0, 0
batch = []
for current in range(data_sub.size(0)):
fr = max(0, current - arg.context)
to = current + 1
context = data_sub[fr:to].to(torch.long)
if context.size(0) < arg.context + 1:
pad = torch.zeros(size=(arg.context + 1 - context.size(0),), dtype=torch.long)
context = torch.cat([pad, context], dim=0)
assert context.size(0) == arg.context + 1
if arg.cuda:
context = context.cuda()
batch.append(context[None, :])
if len(batch) == arg.test_batchsize or current == data_sub.size(0) - 1:
b = len(batch)
tot += b
all = torch.cat(batch, dim=0)
source = all[:, :-1]
target = all[:, -1]
output = model(source)
lnprobs = output[torch.arange(b, device=dv), -1, target]
log2probs = lnprobs * LOG2E
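# nats -> bits: log2(p) = ln(p) * log2(e), so multiplying the natural-log probabilities by
# LOG2E yields the bit counts used for the bits-per-byte metric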
bits += - log2probs.sum()
batch = []
assert tot == data_sub.size(0)
bits_per_byte = bits / data_sub.size(0)
print(f'epoch{i}: {bits_per_byte:.4} bits per byte')
# print(f'epoch{i}: {bits:.4} total bits')
tbw.add_scalar(f'transformer/eval-loss', bits_per_byte, i * arg.batch_size)
# Generate from seed
GENSIZE = 600
TEMP = 0.5
seedfr = random.randint(0, data_test.size(0) - arg.context)
input = data_test[seedfr:seedfr + arg.context].to(torch.long)
if arg.cuda:
input = input.cuda()
input = Variable(input)
print('[', end='', flush=True)
for c in input:
print(str(chr(c)), end='', flush=True)
print(']', end='', flush=True)
for _ in range(GENSIZE):
output = model(input[None, :])
c = sample(output[0, -1, :], TEMP)
print(str(chr(max(32, c))), end='', flush=True)
input = torch.cat([input[1:], c[None]], dim=0)
print()
if __name__ == "__main__":
## Parse the command line options
parser = ArgumentParser()
parser.add_argument("-N", "--num-batches",
dest="num_batches",
help="Number of batches to train on. Each batch contains randomly sampled subsequences of the data.",
default=1_000_000, type=int)
parser.add_argument("-m", "--model",
dest="model",
help="Which model to train (dense, sparse1d, sparse2d, conv, mixed).",
default='dense', type=str)
parser.add_argument("--mixture",
dest="mixture",
help="Character string describing the sequence of convotlutions (c) and strided attentions (s).",
default='cccs', type=str)
parser.add_argument("--norm",
dest="norm_method",
help="How to normalize the attention matrix (softmax, softplus, abs).",
default='softmax', type=str)
parser.add_argument("-b", "--batch-size",
dest="batch_size",
help="The batch size.",
default=64, type=int)
parser.add_argument("-k", "--num-points",
dest="k",
help="Number of index tuples per output in the sparse transformer.",
default=32, type=int)
parser.add_argument("--k-conv",
dest="kconv",
help="Convolution kernel size.",
default=3, type=int)
parser.add_argument("-a", "--gadditional",
dest="gadditional",
help="Number of additional points sampled globally",
default=8, type=int)
parser.add_argument("-A", "--radditional",
dest="radditional",
help="Number of additional points sampled locally",
default=8, type=int)
parser.add_argument("-R", "--region",
dest="region",
help="Size of the (square) region to use for local sampling.",
default=8, type=int)
parser.add_argument("-c", "--cuda", dest="cuda",
help="Whether to use cuda.",
action="store_true")
parser.add_argument("-D", "--data", dest="data",
help="Data file",
default=None)
parser.add_argument("-l", "--learn-rate",
dest="lr",
help="Learning rate",
default=0.0001, type=float)
parser.add_argument("-S", "--sigma-mult",
dest="sigma_mult",
help="Sigma multiplier.",
default=0.1, type=float)
parser.add_argument("-M", "--min-sigma",
dest="min_sigma",
help="Minimum value of sigma.",
default=0.01, type=float)
parser.add_argument("-T", "--tb_dir", dest="tb_dir",
help="Data directory",
default=None)
parser.add_argument("-f", "--final", dest="final",
help="Whether to run on the real test set (if not included, the validation set is used).",
action="store_true")
parser.add_argument("-E", "--embedding", dest="embedding_size",
help="Size of the character embeddings.",
default=70, type=int)
parser.add_argument("-H", "--heads", dest="num_heads",
help="Number of attention heads.",
default=8, type=int)
parser.add_argument("-C", "--context", dest="context",
help="Length of the sequences extracted from the corpus (and the context used during inference).",
default=300, type=int)
parser.add_argument("-d", "--depth", dest="depth",
help="Depth of the network (nr of self-attention layers)",
default=4, type=int)
parser.add_argument("-r", "--random-seed",
dest="seed",
help="RNG seed. Negative for random",
default=1, type=int)
parser.add_argument("--stride",
dest="stride",
help="Stride length for the strided self attention",
default=32, type=int)
parser.add_argument("--test-every",
dest="test_every",
help="How many batches between tests.",
default=1000, type=int)
parser.add_argument("--plot-every",
dest="plot_every",
help="How many batches between plotting the sparse indices.",
default=1000, type=int)
parser.add_argument("--test-subset",
dest="test_subset",
help="A subset for the validation tests.",
default=100000, type=int)
parser.add_argument("--test-batchsize",
dest="test_batchsize",
help="Batch size for computing the validation loss.",
default=1024, type=int)
parser.add_argument("--gradient-clipping",
dest="gradient_clipping",
help="Gradient clipping.",
default=1.0, type=float)
parser.add_argument("--lr-warmup",
dest="lr_warmup",
help="Learning rate warmup.",
default=5000, type=int)
parser.add_argument("--clamp", dest="clamp",
help="Use the clamp operation to fit the parameters to the space of index tuples.",
action="store_true")
options = parser.parse_args()
print('OPTIONS ', options)
go(options)
|
[
"torch.nn.Dropout",
"torch.distributions.Categorical",
"argparse.ArgumentParser",
"sparse.util.inv",
"torch.bmm",
"torch.nn.Embedding",
"torch.cat",
"torch.randn",
"matplotlib.pyplot.figure",
"sparse.util.plot1d",
"torch.arange",
"torch.no_grad",
"torch.nn.functional.pad",
"sparse.util.plot",
"torch.ones",
"_context.sparse.batchmm",
"random.randint",
"torch.triu_indices",
"torch.nn.LayerNorm",
"torch.cuda.max_memory_cached",
"matplotlib.pyplot.cla",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.Linear",
"torch.nn.functional.log_softmax",
"sparse.util.contains_nan",
"_context.sparse.logsoftmax",
"util.d",
"_context.sparse.transform_sigmas",
"tqdm.trange",
"matplotlib.pyplot.ylim",
"torch.manual_seed",
"sparse.util.makedirs",
"torch.autograd.Variable",
"random.random",
"matplotlib.use",
"math.log2",
"sys.exit",
"torch.from_numpy",
"matplotlib.pyplot.xlim",
"_context.sparse.transform_means",
"torch.nn.ReLU",
"_context.sparse.ngenerate",
"sparse.util.flip",
"torch.nn.Sequential",
"_context.sparse.simple_normalize",
"torch.nn.functional.softmax",
"numpy.split",
"sparse.util.nduplicates",
"sparse.util.contains_inf",
"torch.nn.functional.softplus",
"matplotlib.pyplot.savefig",
"_context.sparse.densities"
] |
[((355, 369), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (362, 369), True, 'import matplotlib as mpl\n'), ((635, 652), 'math.log2', 'math.log2', (['math.e'], {}), '(math.e)\n', (644, 652), False, 'import random, tqdm, sys, math\n'), ((774, 813), 'torch.nn.functional.softmax', 'F.softmax', (['(lnprobs / temperature)'], {'dim': '(0)'}), '(lnprobs / temperature, dim=0)\n', (783, 813), True, 'import torch.nn.functional as F\n'), ((823, 842), 'torch.distributions.Categorical', 'dist.Categorical', (['p'], {}), '(p)\n', (839, 842), True, 'import torch.distributions as dist\n'), ((1150, 1208), 'torch.triu_indices', 'torch.triu_indices', (['h', 'w'], {'offset': '(0 if mask_diagonal else 1)'}), '(h, w, offset=0 if mask_diagonal else 1)\n', (1168, 1208), False, 'import torch\n'), ((41273, 41314), 'numpy.split', 'np.split', (['X', '[n_train, n_train + n_valid]'], {}), '(X, [n_train, n_train + n_valid])\n', (41281, 41314), True, 'import numpy as np\n'), ((41413, 41450), 'sparse.util.makedirs', 'util.makedirs', (['"""./transformer-plots/"""'], {}), "('./transformer-plots/')\n", (41426, 41450), False, 'from sparse import util\n'), ((41649, 41682), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'arg.tb_dir'}), '(log_dir=arg.tb_dir)\n', (41662, 41682), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((44009, 44037), 'tqdm.trange', 'tqdm.trange', (['arg.num_batches'], {}), '(arg.num_batches)\n', (44020, 44037), False, 'import random, tqdm, sys, math\n'), ((50009, 50025), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (50023, 50025), False, 'from argparse import ArgumentParser\n'), ((1891, 1930), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (1900, 1930), False, 'from torch import nn\n'), ((1956, 1995), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (1965, 1995), False, 'from torch import nn\n'), ((2020, 2059), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (2029, 2059), False, 'from torch import nn\n'), ((2087, 2114), 'torch.nn.Linear', 'nn.Linear', (['(heads * emb)', 'emb'], {}), '(heads * emb, emb)\n', (2096, 2114), False, 'from torch import nn\n'), ((3416, 3550), '_context.sparse.ngenerate', 'sparse.ngenerate', (['means', 'self.gadditional', 'self.radditional'], {'rng': '(t, t)', 'relative_range': '(self.region, self.region)', 'cuda': 'x.is_cuda'}), '(means, self.gadditional, self.radditional, rng=(t, t),\n relative_range=(self.region, self.region), cuda=x.is_cuda)\n', (3432, 3550), False, 'from _context import sparse\n'), ((3600, 3618), 'sparse.util.flip', 'util.flip', (['indices'], {}), '(indices)\n', (3609, 3618), False, 'from sparse import util\n'), ((5910, 5954), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', '(weights * dot)', 's'], {}), '(indices, weights * dot, s)\n', (5927, 5954), False, 'from _context import sparse\n'), ((6081, 6138), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': '(t, t)', 'xmatrix': 'values'}), '(indices, dot, size=(t, t), xmatrix=values)\n', (6095, 6138), False, 'from _context import sparse\n'), ((7084, 7123), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (7093, 7123), False, 'from torch import nn\n'), ((7149, 7188), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], 
{'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (7158, 7188), False, 'from torch import nn\n'), ((7213, 7252), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (7222, 7252), False, 'from torch import nn\n'), ((7280, 7307), 'torch.nn.Linear', 'nn.Linear', (['(heads * emb)', 'emb'], {}), '(heads * emb, emb)\n', (7289, 7307), False, 'from torch import nn\n'), ((8015, 8044), 'torch.cat', 'torch.cat', (['[x, coords]'], {'dim': '(2)'}), '([x, coords], dim=2)\n', (8024, 8044), False, 'import torch\n'), ((8414, 8435), 'sparse.util.inv', 'util.inv', (['diags'], {'mx': 't'}), '(diags, mx=t)\n', (8422, 8435), False, 'from sparse import util\n'), ((8724, 8740), 'sparse.util.flip', 'util.flip', (['means'], {}), '(means)\n', (8733, 8740), False, 'from sparse import util\n'), ((9410, 9544), '_context.sparse.ngenerate', 'sparse.ngenerate', (['means', 'self.gadditional', 'self.radditional'], {'rng': '(t, t)', 'relative_range': '(self.region, self.region)', 'cuda': 'x.is_cuda'}), '(means, self.gadditional, self.radditional, rng=(t, t),\n relative_range=(self.region, self.region), cuda=x.is_cuda)\n', (9426, 9544), False, 'from _context import sparse\n'), ((9595, 9613), 'sparse.util.flip', 'util.flip', (['indices'], {}), '(indices)\n', (9604, 9613), False, 'from sparse import util\n'), ((12473, 12530), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': '(t, t)', 'xmatrix': 'values'}), '(indices, dot, size=(t, t), xmatrix=values)\n', (12487, 12530), False, 'from _context import sparse\n'), ((13848, 13887), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (13857, 13887), False, 'from torch import nn\n'), ((13913, 13952), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (13922, 13952), False, 'from torch import nn\n'), ((13977, 14016), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (13986, 14016), False, 'from torch import nn\n'), ((14044, 14071), 'torch.nn.Linear', 'nn.Linear', (['(heads * emb)', 'emb'], {}), '(heads * emb, emb)\n', (14053, 14071), False, 'from torch import nn\n'), ((14832, 14861), 'torch.cat', 'torch.cat', (['[x, coords]'], {'dim': '(2)'}), '([x, coords], dim=2)\n', (14841, 14861), False, 'import torch\n'), ((16190, 16310), '_context.sparse.ngenerate', 'sparse.ngenerate', (['means', 'self.gadditional', 'self.radditional'], {'rng': '(t,)', 'relative_range': '(self.region,)', 'cuda': 'x.is_cuda'}), '(means, self.gadditional, self.radditional, rng=(t,),\n relative_range=(self.region,), cuda=x.is_cuda)\n', (16206, 16310), False, 'from _context import sparse\n'), ((17389, 17421), 'torch.cat', 'torch.cat', (['[out, indices]'], {'dim': '(3)'}), '([out, indices], dim=3)\n', (17398, 17421), False, 'import torch\n'), ((19750, 19807), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': '(t, t)', 'xmatrix': 'values'}), '(indices, dot, size=(t, t), xmatrix=values)\n', (19764, 19807), False, 'from _context import sparse\n'), ((21206, 21233), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (21215, 21233), False, 'from torch import nn\n'), ((21259, 21286), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (21268, 21286), False, 'from torch import nn\n'), ((21312, 21339), 'torch.nn.Linear', 'nn.Linear', 
(['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (21321, 21339), False, 'from torch import nn\n'), ((21367, 21392), 'torch.nn.Linear', 'nn.Linear', (['(s * heads)', 'emb'], {}), '(s * heads, emb)\n', (21376, 21392), False, 'from torch import nn\n'), ((22567, 22638), 'torch.cat', 'torch.cat', (['[x[:, selection, :], coords, summed[:, selection, :]]'], {'dim': '(2)'}), '([x[:, selection, :], coords, summed[:, selection, :]], dim=2)\n', (22576, 22638), False, 'import torch\n'), ((24329, 24465), '_context.sparse.ngenerate', 'sparse.ngenerate', (['means', 'self.gadditional', 'self.radditional'], {'rng': '(t,)', 'relative_range': '(self.region,)', 'cuda': 'x.is_cuda', 'epsilon': '(0.0001)'}), '(means, self.gadditional, self.radditional, rng=(t,),\n relative_range=(self.region,), cuda=x.is_cuda, epsilon=0.0001)\n', (24345, 24465), False, 'from _context import sparse\n'), ((25789, 25821), 'torch.cat', 'torch.cat', (['[out, indices]'], {'dim': '(3)'}), '([out, indices], dim=3)\n', (25798, 25821), False, 'import torch\n'), ((28985, 29040), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': 'size', 'xmatrix': 'values'}), '(indices, dot, size=size, xmatrix=values)\n', (28999, 29040), False, 'from _context import sparse\n'), ((30220, 30247), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (30229, 30247), False, 'from torch import nn\n'), ((30273, 30300), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (30282, 30300), False, 'from torch import nn\n'), ((30326, 30353), 'torch.nn.Linear', 'nn.Linear', (['s', 's'], {'bias': '(False)'}), '(s, s, bias=False)\n', (30335, 30353), False, 'from torch import nn\n'), ((30381, 30406), 'torch.nn.Linear', 'nn.Linear', (['(s * heads)', 'emb'], {}), '(s * heads, emb)\n', (30390, 30406), False, 'from torch import nn\n'), ((30629, 30667), 'torch.nn.functional.pad', 'F.pad', (['x', '[0, 0, 0, 0, k - 1, 0, 0, 0]'], {}), '(x, [0, 0, 0, 0, k - 1, 0, 0, 0])\n', (30634, 30667), True, 'import torch.nn.functional as F\n'), ((33286, 33341), '_context.sparse.batchmm', 'sparse.batchmm', (['indices', 'dot'], {'size': 'size', 'xmatrix': 'values'}), '(indices, dot, size=size, xmatrix=values)\n', (33300, 33341), False, 'from _context import sparse\n'), ((33958, 33997), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (33967, 33997), False, 'from torch import nn\n'), ((34023, 34062), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (34032, 34062), False, 'from torch import nn\n'), ((34087, 34126), 'torch.nn.Linear', 'nn.Linear', (['emb', '(emb * heads)'], {'bias': '(False)'}), '(emb, emb * heads, bias=False)\n', (34096, 34126), False, 'from torch import nn\n'), ((34154, 34181), 'torch.nn.Linear', 'nn.Linear', (['(heads * emb)', 'emb'], {}), '(heads * emb, emb)\n', (34163, 34181), False, 'from torch import nn\n'), ((35448, 35469), 'torch.nn.functional.softmax', 'F.softmax', (['dot'], {'dim': '(2)'}), '(dot, dim=2)\n', (35457, 35469), True, 'import torch.nn.functional as F\n'), ((37637, 37654), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['emb'], {}), '(emb)\n', (37649, 37654), False, 'from torch import nn\n'), ((37676, 37693), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['emb'], {}), '(emb)\n', (37688, 37693), False, 'from torch import nn\n'), ((37879, 37898), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (37889, 37898), 
False, 'from torch import nn\n'), ((38695, 38753), 'torch.nn.Embedding', 'nn.Embedding', ([], {'embedding_dim': 'emb', 'num_embeddings': 'num_tokens'}), '(embedding_dim=emb, num_embeddings=num_tokens)\n', (38707, 38753), False, 'from torch import nn\n'), ((38783, 38841), 'torch.nn.Embedding', 'nn.Embedding', ([], {'embedding_dim': 'emb', 'num_embeddings': 'seq_length'}), '(embedding_dim=emb, num_embeddings=seq_length)\n', (38795, 38841), False, 'from torch import nn\n'), ((39062, 39085), 'torch.nn.Sequential', 'nn.Sequential', (['*tblocks'], {}), '(*tblocks)\n', (39075, 39085), False, 'from torch import nn\n'), ((39110, 39136), 'torch.nn.Linear', 'nn.Linear', (['emb', 'num_tokens'], {}), '(emb, num_tokens)\n', (39119, 39136), False, 'from torch import nn\n'), ((39680, 39703), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(2)'}), '(x, dim=2)\n', (39693, 39703), True, 'import torch.nn.functional as F\n'), ((41326, 41347), 'torch.from_numpy', 'torch.from_numpy', (['trX'], {}), '(trX)\n', (41342, 41347), False, 'import torch\n'), ((41349, 41370), 'torch.from_numpy', 'torch.from_numpy', (['vaX'], {}), '(vaX)\n', (41365, 41370), False, 'import torch\n'), ((41372, 41393), 'torch.from_numpy', 'torch.from_numpy', (['teX'], {}), '(teX)\n', (41388, 41393), False, 'import torch\n'), ((41488, 41514), 'random.randint', 'random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (41502, 41514), False, 'import random, tqdm, sys, math\n'), ((41570, 41597), 'torch.manual_seed', 'torch.manual_seed', (['arg.seed'], {}), '(arg.seed)\n', (41587, 41597), False, 'import torch\n'), ((2278, 2297), 'torch.randn', 'torch.randn', (['(k, 2)'], {}), '((k, 2))\n', (2289, 2297), False, 'import torch\n'), ((2334, 2351), 'torch.randn', 'torch.randn', (['(k,)'], {}), '((k,))\n', (2345, 2351), False, 'import torch\n'), ((2394, 2410), 'torch.ones', 'torch.ones', (['(k,)'], {}), '((k,))\n', (2404, 2410), False, 'import torch\n'), ((2895, 2927), '_context.sparse.transform_means', 'sparse.transform_means', (['means', 's'], {}), '(means, s)\n', (2917, 2927), False, 'from _context import sparse\n'), ((7476, 7492), 'torch.ones', 'torch.ones', (['(k,)'], {}), '((k,))\n', (7486, 7492), False, 'import torch\n'), ((7632, 7658), 'torch.nn.Linear', 'nn.Linear', (['(emb + 1)', 'hidden'], {}), '(emb + 1, hidden)\n', (7641, 7658), False, 'from torch import nn\n'), ((7660, 7669), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7667, 7669), False, 'from torch import nn\n'), ((7683, 7707), 'torch.nn.Linear', 'nn.Linear', (['hidden', '(k * 3)'], {}), '(hidden, k * 3)\n', (7692, 7707), False, 'from torch import nn\n'), ((8117, 8142), 'sparse.util.contains_nan', 'util.contains_nan', (['params'], {}), '(params)\n', (8134, 8142), False, 'from sparse import util\n'), ((8889, 8921), '_context.sparse.transform_means', 'sparse.transform_means', (['means', 's'], {}), '(means, s)\n', (8911, 8921), False, 'from _context import sparse\n'), ((11991, 12013), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (12008, 12013), False, 'from sparse import util\n'), ((12310, 12332), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (12327, 12332), False, 'from sparse import util\n'), ((12690, 12712), 'sparse.util.contains_nan', 'util.contains_nan', (['out'], {}), '(out)\n', (12707, 12712), False, 'from sparse import util\n'), ((14240, 14256), 'torch.ones', 'torch.ones', (['(k,)'], {}), '((k,))\n', (14250, 14256), False, 'import torch\n'), ((14396, 14422), 'torch.nn.Linear', 'nn.Linear', 
(['(emb + 1)', 'hidden'], {}), '(emb + 1, hidden)\n', (14405, 14422), False, 'from torch import nn\n'), ((14424, 14433), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14431, 14433), False, 'from torch import nn\n'), ((14447, 14471), 'torch.nn.Linear', 'nn.Linear', (['hidden', '(k * 2)'], {}), '(hidden, k * 2)\n', (14456, 14471), False, 'from torch import nn\n'), ((14934, 14959), 'sparse.util.contains_nan', 'util.contains_nan', (['params'], {}), '(params)\n', (14951, 14959), False, 'from sparse import util\n'), ((15280, 15301), 'sparse.util.inv', 'util.inv', (['diags'], {'mx': 't'}), '(diags, mx=t)\n', (15288, 15301), False, 'from sparse import util\n'), ((15624, 15701), '_context.sparse.transform_means', 'sparse.transform_means', (['means', 's'], {'method': "('clamp' if self.clamp else 'sigmoid')"}), "(means, s, method='clamp' if self.clamp else 'sigmoid')\n", (15646, 15701), False, 'from _context import sparse\n'), ((18948, 18970), 'sparse.util.contains_inf', 'util.contains_inf', (['dot'], {}), '(dot)\n', (18965, 18970), False, 'from sparse import util\n'), ((19067, 19089), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (19084, 19089), False, 'from sparse import util\n'), ((19311, 19386), '_context.sparse.simple_normalize', 'sparse.simple_normalize', (['indices', '(weights * dot)', 's'], {'method': 'self.norm_method'}), '(indices, weights * dot, s, method=self.norm_method)\n', (19334, 19386), False, 'from _context import sparse\n'), ((19469, 19491), 'sparse.util.contains_inf', 'util.contains_inf', (['dot'], {}), '(dot)\n', (19486, 19491), False, 'from sparse import util\n'), ((19587, 19609), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (19604, 19609), False, 'from sparse import util\n'), ((19967, 19989), 'sparse.util.contains_nan', 'util.contains_nan', (['out'], {}), '(out)\n', (19984, 19989), False, 'from sparse import util\n'), ((21561, 21577), 'torch.ones', 'torch.ones', (['(k,)'], {}), '((k,))\n', (21571, 21577), False, 'import torch\n'), ((21717, 21747), 'torch.nn.Linear', 'nn.Linear', (['(2 * emb + 1)', 'hidden'], {}), '(2 * emb + 1, hidden)\n', (21726, 21747), False, 'from torch import nn\n'), ((21749, 21758), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (21756, 21758), False, 'from torch import nn\n'), ((21772, 21796), 'torch.nn.Linear', 'nn.Linear', (['hidden', '(k * 2)'], {}), '(hidden, k * 2)\n', (21781, 21796), False, 'from torch import nn\n'), ((22712, 22737), 'sparse.util.contains_nan', 'util.contains_nan', (['params'], {}), '(params)\n', (22729, 22737), False, 'from sparse import util\n'), ((22869, 22894), 'sparse.util.contains_inf', 'util.contains_inf', (['params'], {}), '(params)\n', (22886, 22894), False, 'from sparse import util\n'), ((23204, 23225), 'sparse.util.inv', 'util.inv', (['diags'], {'mx': 't'}), '(diags, mx=t)\n', (23212, 23225), False, 'from sparse import util\n'), ((23550, 23627), '_context.sparse.transform_means', 'sparse.transform_means', (['means', 's'], {'method': "('clamp' if self.clamp else 'sigmoid')"}), "(means, s, method='clamp' if self.clamp else 'sigmoid')\n", (23572, 23627), False, 'from _context import sparse\n'), ((26012, 26038), 'sparse.util.contains_inf', 'util.contains_inf', (['weights'], {}), '(weights)\n', (26029, 26038), False, 'from sparse import util\n'), ((26148, 26174), 'sparse.util.contains_nan', 'util.contains_nan', (['weights'], {}), '(weights)\n', (26165, 26174), False, 'from sparse import util\n'), ((27832, 27854), 'sparse.util.contains_inf', 'util.contains_inf', 
(['dot'], {}), '(dot)\n', (27849, 27854), False, 'from sparse import util\n'), ((27948, 27970), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (27965, 27970), False, 'from sparse import util\n'), ((28192, 28270), '_context.sparse.simple_normalize', 'sparse.simple_normalize', (['indices', '(weights * dot)', 'size'], {'method': 'self.norm_method'}), '(indices, weights * dot, size, method=self.norm_method)\n', (28215, 28270), False, 'from _context import sparse\n'), ((28353, 28375), 'sparse.util.contains_inf', 'util.contains_inf', (['dot'], {}), '(dot)\n', (28370, 28375), False, 'from sparse import util\n'), ((29200, 29222), 'sparse.util.contains_nan', 'util.contains_nan', (['out'], {}), '(out)\n', (29217, 29222), False, 'from sparse import util\n'), ((32850, 32918), '_context.sparse.simple_normalize', 'sparse.simple_normalize', (['indices', 'dot', 'size'], {'method': 'self.norm_method'}), '(indices, dot, size, method=self.norm_method)\n', (32873, 32918), False, 'from _context import sparse\n'), ((33501, 33523), 'sparse.util.contains_nan', 'util.contains_nan', (['out'], {}), '(out)\n', (33518, 33523), False, 'from sparse import util\n'), ((35542, 35574), 'sparse.util.contains_nan', 'util.contains_nan', (['dot[:, 1:, :]'], {}), '(dot[:, 1:, :])\n', (35559, 35574), False, 'from sparse import util\n'), ((37740, 37776), 'torch.nn.Linear', 'nn.Linear', (['emb', '(ff_hidden_mult * emb)'], {}), '(emb, ff_hidden_mult * emb)\n', (37749, 37776), False, 'from torch import nn\n'), ((37790, 37799), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (37797, 37799), False, 'from torch import nn\n'), ((37813, 37849), 'torch.nn.Linear', 'nn.Linear', (['(ff_hidden_mult * emb)', 'emb'], {}), '(ff_hidden_mult * emb, emb)\n', (37822, 37849), False, 'from torch import nn\n'), ((44777, 44793), 'torch.autograd.Variable', 'Variable', (['source'], {}), '(source)\n', (44785, 44793), False, 'from torch.autograd import Variable\n'), ((44795, 44811), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (44803, 44811), False, 'from torch.autograd import Variable\n'), ((2955, 3015), '_context.sparse.transform_sigmas', 'sparse.transform_sigmas', (['sigmas', 's'], {'min_sigma': 'self.min_sigma'}), '(sigmas, s, min_sigma=self.min_sigma)\n', (2978, 3015), False, 'from _context import sparse\n'), ((3850, 3875), 'sparse.util.nduplicates', 'util.nduplicates', (['indices'], {}), '(indices)\n', (3866, 3875), False, 'from sparse import util\n'), ((3986, 4024), '_context.sparse.densities', 'sparse.densities', (['indfl', 'means', 'sigmas'], {}), '(indfl, means, sigmas)\n', (4002, 4024), False, 'from _context import sparse\n'), ((5831, 5881), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (5840, 5881), False, 'import torch\n'), ((8392, 8396), 'util.d', 'd', (['x'], {}), '(x)\n', (8393, 8396), False, 'from util import d\n'), ((8949, 9009), '_context.sparse.transform_sigmas', 'sparse.transform_sigmas', (['sigmas', 's'], {'min_sigma': 'self.min_sigma'}), '(sigmas, s, min_sigma=self.min_sigma)\n', (8972, 9009), False, 'from _context import sparse\n'), ((9845, 9870), 'sparse.util.nduplicates', 'util.nduplicates', (['indices'], {}), '(indices)\n', (9861, 9870), False, 'from sparse import util\n'), ((9981, 10019), '_context.sparse.densities', 'sparse.densities', (['indfl', 'means', 'sigmas'], {}), '(indfl, means, sigmas)\n', (9997, 10019), False, 'from _context import sparse\n'), ((11836, 11886), 'torch.bmm', 'torch.bmm', 
(['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (11845, 11886), False, 'import torch\n'), ((12177, 12221), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', '(weights * dot)', 's'], {}), '(indices, weights * dot, s)\n', (12194, 12221), False, 'from _context import sparse\n'), ((15227, 15231), 'util.d', 'd', (['x'], {}), '(x)\n', (15228, 15231), False, 'from util import d\n'), ((15564, 15581), 'torch.nn.functional.softplus', 'F.softplus', (['means'], {}), '(means)\n', (15574, 15581), True, 'import torch.nn.functional as F\n'), ((15729, 15789), '_context.sparse.transform_sigmas', 'sparse.transform_sigmas', (['sigmas', 's'], {'min_sigma': 'self.min_sigma'}), '(sigmas, s, min_sigma=self.min_sigma)\n', (15752, 15789), False, 'from _context import sparse\n'), ((16644, 16682), '_context.sparse.densities', 'sparse.densities', (['indfl', 'means', 'sigmas'], {}), '(indfl, means, sigmas)\n', (16660, 16682), False, 'from _context import sparse\n'), ((16760, 16785), 'sparse.util.nduplicates', 'util.nduplicates', (['indices'], {}), '(indices)\n', (16776, 16785), False, 'from sparse import util\n'), ((18862, 18912), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (18871, 18912), False, 'import torch\n'), ((22195, 22199), 'util.d', 'd', (['x'], {}), '(x)\n', (22196, 22199), False, 'from util import d\n'), ((23507, 23524), 'torch.nn.functional.softplus', 'F.softplus', (['means'], {}), '(means)\n', (23517, 23524), True, 'import torch.nn.functional as F\n'), ((23655, 23715), '_context.sparse.transform_sigmas', 'sparse.transform_sigmas', (['sigmas', 's'], {'min_sigma': 'self.min_sigma'}), '(sigmas, s, min_sigma=self.min_sigma)\n', (23678, 23715), False, 'from _context import sparse\n'), ((24063, 24067), 'util.d', 'd', (['x'], {}), '(x)\n', (24064, 24067), False, 'from util import d\n'), ((24799, 24837), '_context.sparse.densities', 'sparse.densities', (['indfl', 'means', 'sigmas'], {}), '(indfl, means, sigmas)\n', (24815, 24837), False, 'from _context import sparse\n'), ((24916, 24941), 'sparse.util.nduplicates', 'util.nduplicates', (['indices'], {}), '(indices)\n', (24932, 24941), False, 'from sparse import util\n'), ((27707, 27757), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (27716, 27757), False, 'import torch\n'), ((28486, 28508), 'sparse.util.contains_nan', 'util.contains_nan', (['dot'], {}), '(dot)\n', (28503, 28508), False, 'from sparse import util\n'), ((28910, 28920), 'sys.exit', 'sys.exit', ([], {}), '()\n', (28918, 28920), False, 'import random, tqdm, sys, math\n'), ((32361, 32411), 'torch.bmm', 'torch.bmm', (['squeries[:, None, :]', 'skeys[:, :, None]'], {}), '(squeries[:, None, :], skeys[:, :, None])\n', (32370, 32411), False, 'import torch\n'), ((35983, 36005), 'torch.bmm', 'torch.bmm', (['dot', 'values'], {}), '(dot, values)\n', (35992, 36005), False, 'import torch\n'), ((44518, 44569), 'torch.cat', 'torch.cat', (['[s[None, :] for s in seqs_source]'], {'dim': '(0)'}), '([s[None, :] for s in seqs_source], dim=0)\n', (44527, 44569), False, 'import torch\n'), ((44603, 44654), 'torch.cat', 'torch.cat', (['[s[None, :] for s in seqs_target]'], {'dim': '(0)'}), '([s[None, :] for s in seqs_target], dim=0)\n', (44612, 44654), False, 'import torch\n'), ((46119, 46145), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (46129, 46145), True, 
'import matplotlib.pyplot as plt\n'), ((46162, 46171), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (46169, 46171), True, 'import matplotlib.pyplot as plt\n'), ((46993, 47062), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN))'], {}), '((-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN)))\n', (47001, 47062), True, 'import matplotlib.pyplot as plt\n'), ((47079, 47148), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN))'], {}), '((-MARGIN * (shape[0] - 1), (shape[0] - 1) * (1.0 + MARGIN)))\n', (47087, 47148), True, 'import matplotlib.pyplot as plt\n'), ((47166, 47221), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""./transformer-plots/means{i:06}.{t}.pdf"""'], {}), "(f'./transformer-plots/means{i:06}.{t}.pdf')\n", (47177, 47221), True, 'import matplotlib.pyplot as plt\n'), ((47445, 47460), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (47458, 47460), False, 'import torch\n'), ((49418, 49433), 'torch.autograd.Variable', 'Variable', (['input'], {}), '(input)\n', (49426, 49433), False, 'from torch.autograd import Variable\n'), ((7932, 7936), 'util.d', 'd', (['x'], {}), '(x)\n', (7933, 7936), False, 'from util import d\n'), ((14749, 14753), 'util.d', 'd', (['x'], {}), '(x)\n', (14750, 14753), False, 'from util import d\n'), ((19228, 19272), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', '(weights * dot)', 's'], {}), '(indices, weights * dot, s)\n', (19245, 19272), False, 'from _context import sparse\n'), ((22363, 22367), 'util.d', 'd', (['x'], {}), '(x)\n', (22364, 22367), False, 'from util import d\n'), ((28106, 28153), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', '(weights * dot)', 'size'], {}), '(indices, weights * dot, size)\n', (28123, 28153), False, 'from _context import sparse\n'), ((32774, 32811), '_context.sparse.logsoftmax', 'sparse.logsoftmax', (['indices', 'dot', 'size'], {}), '(indices, dot, size)\n', (32791, 32811), False, 'from _context import sparse\n'), ((45282, 45297), 'random.random', 'random.random', ([], {}), '()\n', (45295, 45297), False, 'import random, tqdm, sys, math\n'), ((46365, 46391), 'torch.cat', 'torch.cat', (['[ind, m]'], {'dim': '(2)'}), '([ind, m], dim=2)\n', (46374, 46391), False, 'import torch\n'), ((46412, 46469), 'sparse.util.plot1d', 'util.plot1d', (['m[0].data', 's[0].data', 'v[0].data'], {'shape': 'shape'}), '(m[0].data, s[0].data, v[0].data, shape=shape)\n', (46423, 46469), False, 'from sparse import util\n'), ((49865, 49903), 'torch.cat', 'torch.cat', (['[input[1:], c[None]]'], {'dim': '(0)'}), '([input[1:], c[None]], dim=0)\n', (49874, 49903), False, 'import torch\n'), ((22511, 22514), 'util.d', 'd', ([], {}), '()\n', (22512, 22514), False, 'from util import d\n'), ((46796, 46822), 'torch.cat', 'torch.cat', (['[ind, m]'], {'dim': '(2)'}), '([ind, m], dim=2)\n', (46805, 46822), False, 'import torch\n'), ((46843, 46900), 'sparse.util.plot1d', 'util.plot1d', (['m[0].data', 's[0].data', 'v[0].data'], {'shape': 'shape'}), '(m[0].data, s[0].data, v[0].data, shape=shape)\n', (46854, 46900), False, 'from sparse import util\n'), ((46944, 46975), 'sparse.util.plot', 'util.plot', (['m', 's', 'v'], {'shape': 'shape'}), '(m, s, v, shape=shape)\n', (46953, 46975), False, 'from sparse import util\n'), ((47931, 47963), 'torch.cat', 'torch.cat', (['[pad, context]'], {'dim': '(0)'}), '([pad, context], dim=0)\n', (47940, 47963), False, 'import torch\n'), ((48364, 48387), 'torch.cat', 'torch.cat', (['batch'], 
{'dim': '(0)'}), '(batch, dim=0)\n', (48373, 48387), False, 'import torch\n'), ((16574, 16584), 'util.d', 'd', (['indices'], {}), '(indices)\n', (16575, 16584), False, 'from util import d\n'), ((17318, 17328), 'util.d', 'd', (['indices'], {}), '(indices)\n', (17319, 17328), False, 'from util import d\n'), ((31778, 31782), 'util.d', 'd', (['x'], {}), '(x)\n', (31779, 31782), False, 'from util import d\n'), ((37526, 37548), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (37539, 37548), False, 'from torch import nn\n'), ((45398, 45428), 'torch.cuda.max_memory_cached', 'torch.cuda.max_memory_cached', ([], {}), '()\n', (45426, 45428), False, 'import torch\n'), ((39499, 39503), 'util.d', 'd', (['x'], {}), '(x)\n', (39500, 39503), False, 'from util import d\n'), ((40119, 40123), 'util.d', 'd', (['x'], {}), '(x)\n', (40120, 40123), False, 'from util import d\n'), ((46642, 46646), 'util.d', 'd', (['m'], {}), '(m)\n', (46643, 46646), False, 'from util import d\n'), ((48567, 48593), 'torch.arange', 'torch.arange', (['b'], {'device': 'dv'}), '(b, device=dv)\n', (48579, 48593), False, 'import torch\n'), ((31669, 31673), 'util.d', 'd', (['x'], {}), '(x)\n', (31670, 31673), False, 'from util import d\n'), ((5661, 5665), 'util.d', 'd', (['x'], {}), '(x)\n', (5662, 5665), False, 'from util import d\n'), ((11662, 11666), 'util.d', 'd', (['x'], {}), '(x)\n', (11663, 11666), False, 'from util import d\n'), ((18688, 18692), 'util.d', 'd', (['x'], {}), '(x)\n', (18689, 18692), False, 'from util import d\n'), ((27531, 27535), 'util.d', 'd', (['x'], {}), '(x)\n', (27532, 27535), False, 'from util import d\n'), ((32189, 32193), 'util.d', 'd', (['x'], {}), '(x)\n', (32190, 32193), False, 'from util import d\n'), ((46285, 46289), 'util.d', 'd', (['m'], {}), '(m)\n', (46286, 46289), False, 'from util import d\n')]
|
"""Unit tests for instrupy.radiometer_model.
References: [1] Chapters 6 and 7 in "Microwave Radar and Radiometric Remote Sensing," <NAME>, <NAME>, 2014
@TODO Include rectangular antenna tests
"""
import unittest
import json
import numpy as np
import sys, os
from instrupy.radiometer_model import PredetectionSectionParams, SystemParams
from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, \
BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, \
ScanTech, FixedScan, CrossTrackScan, ConicalScan
from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver
class TestTotalPowerRadiometerSystem(unittest.TestCase):
@classmethod
def setUpClass(cls):
# See [1] Section 7-3.1 for the source of some of the receiver specs used below. Section 7.5 lists a normalized gain variation spec of 10^-2.
cls.tpr_sys1_json = '{"tlLoss": 0.5,' \
'"tlPhyTemp": 290,' \
'"rfAmpGain": 30,' \
'"rfAmpInpNoiseTemp": 200,' \
'"rfAmpGainVariation": 10,' \
'"mixerGain": 23,' \
'"mixerInpNoiseTemp": 1200,' \
'"mixerGainVariation": 2,' \
'"ifAmpGain": 30,' \
'"ifAmpInputNoiseTemp": 100,' \
'"ifAmpGainVariation": 10,' \
'"integratorVoltageGain": 1,' \
'"integrationTime": 100e-3,' \
'"bandwidth": 10e6,' \
'"@id": 121}'
cls.tpr_sys2_json = '{"predetectionGain": 83,' \
'"predetectionInpNoiseTemp": 200,' \
'"predetectionGainVariation": 2000000,' \
'"integrationTime": 100e-3,' \
'"bandwidth": 10e6,' \
'"integratorVoltageGain": 1 }'
def test_from_json(self):
""" Test typical initialization of the total power radiometer system.
"""
o = TotalPowerRadiometerSystem.from_json(self.tpr_sys1_json)
self.assertIsInstance(o, TotalPowerRadiometerSystem)
self.assertEqual(o._id, 121)
self.assertEqual(o._type, "TotalPowerRadiometerSystem")
self.assertEqual(o.tlLoss, 0.5)
self.assertEqual(o.tlPhyTemp, 290)
self.assertEqual(o.rfAmpGain, 30)
self.assertEqual(o.rfAmpInpNoiseTemp, 200)
self.assertEqual(o.rfAmpGainVariation, 10)
self.assertEqual(o.mixerGain, 23)
self.assertEqual(o.mixerInpNoiseTemp, 1200)
self.assertEqual(o.mixerGainVariation, 2)
self.assertEqual(o.ifAmpGain, 30)
self.assertEqual(o.ifAmpInputNoiseTemp, 100)
self.assertEqual(o.ifAmpGainVariation, 10)
self.assertEqual(o.integratorVoltageGain, 1)
self.assertIsNone(o.predetectionGain)
self.assertIsNone(o.predetectionInpNoiseTemp)
self.assertIsNone(o.predetectionGainVariation)
self.assertEqual(o.integrationTime, 100e-3)
self.assertEqual(o.bandwidth, 10e6)
o = TotalPowerRadiometerSystem.from_json(self.tpr_sys2_json)
self.assertIsInstance(o, TotalPowerRadiometerSystem)
self.assertIsNone(o._id)
self.assertEqual(o._type, "TotalPowerRadiometerSystem")
self.assertIsNone(o.tlLoss)
self.assertIsNone(o.tlPhyTemp)
self.assertIsNone(o.rfAmpGain)
self.assertIsNone(o.rfAmpInpNoiseTemp)
self.assertIsNone(o.rfAmpGainVariation)
self.assertIsNone(o.mixerGain)
self.assertIsNone(o.mixerInpNoiseTemp)
self.assertIsNone(o.mixerGainVariation)
self.assertIsNone(o.ifAmpGain)
self.assertIsNone(o.ifAmpInputNoiseTemp)
self.assertIsNone(o.ifAmpGainVariation)
self.assertEqual(o.integratorVoltageGain, 1)
self.assertEqual(o.predetectionGain, 83)
self.assertEqual(o.predetectionInpNoiseTemp, 200)
self.assertEqual(o.predetectionGainVariation, 2000000)
self.assertEqual(o.integrationTime, 100e-3)
self.assertEqual(o.bandwidth, 10e6)
def test_to_dict(self):
o = TotalPowerRadiometerSystem.from_json(self.tpr_sys1_json)
self.assertEqual(o.to_dict(), {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0, 'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0,
'ifAmpGainVariation': 10.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
'predetectionGainVariation': None, 'integrationTime': 0.1, 'bandwidth': 10000000.0, '@id': 121, '@type': 'TOTAL_POWER'}
)
o = TotalPowerRadiometerSystem.from_json(self.tpr_sys2_json)
self.assertEqual(o.to_dict(), {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None, 'ifAmpGain': None, 'ifAmpInputNoiseTemp': None,
'ifAmpGainVariation': None, 'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 200.0,
'predetectionGainVariation': 2000000.0, 'integrationTime': 0.1, 'bandwidth': 10000000.0, '@id': None, '@type': 'TOTAL_POWER'}
)
def test_compute_integration_time(self):
self.assertEqual(TotalPowerRadiometerSystem.compute_integration_time(td=1.5, integration_time_spec=0.5), 0.5)
self.assertEqual(TotalPowerRadiometerSystem.compute_integration_time(td=1.5, integration_time_spec=2), 1.5)
self.assertEqual(TotalPowerRadiometerSystem.compute_integration_time(td=1.5, integration_time_spec=None), 1.5)
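    # A minimal sketch of the selection rule exercised above (inferred from the
    # expected values, not from the library source): the effective integration
    # time is the dwell time capped by the user specification,
    #   tau = min(td, integration_time_spec) if a spec is given, else tau = td.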
def test_compute_predetection_sec_params(self):
x = TotalPowerRadiometerSystem.compute_predetection_sec_params(predetectionBandwidth=10e6, tlLoss=0.5, tlPhyTemp=290,
rfAmpGain=30, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=10, mixerGainVariation=2, ifAmpGainVariation=10,
rfAmpInpNoiseTemp=200, mixerInpNoiseTemp=1200, ifAmpInputNoiseTemp=100)
self.assertIsInstance(x, PredetectionSectionParams)
self.assertAlmostEqual(x.G, 177827941.00389218)
self.assertAlmostEqual(x.G_p, 180510851.84124476)
self.assertAlmostEqual(x.G_m, 175171746.5823525)
self.assertAlmostEqual(x.T_REC_q, 261.1355769549698)
self.assertAlmostEqual(x.B, 10000000.0)
x = TotalPowerRadiometerSystem.compute_predetection_sec_params(predetectionBandwidth=15e6, predetectionGain=90, predetectionGainVariation=10000000, predetectionInpNoiseTemp=300)
self.assertIsInstance(x, PredetectionSectionParams)
self.assertAlmostEqual(x.G, 1000000000)
self.assertAlmostEqual(x.G_p, 1005000000)
self.assertAlmostEqual(x.G_m, 995000000)
self.assertAlmostEqual(x.T_REC_q, 300)
self.assertAlmostEqual(x.B, 15000000.0)
        # effectively no RF amplifier (near-unity gain and zero input noise temperature)
x = TotalPowerRadiometerSystem.compute_predetection_sec_params(predetectionBandwidth=10e6, tlLoss=0.5, tlPhyTemp=290,
rfAmpGain=1, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=0, mixerGainVariation=2, ifAmpGainVariation=10,
rfAmpInpNoiseTemp=0, mixerInpNoiseTemp=1200, ifAmpInputNoiseTemp=100)
self.assertIsInstance(x, PredetectionSectionParams)
self.assertAlmostEqual(x.G, 223872.1138568339)
self.assertAlmostEqual(x.G_p, 226119.10297269153)
self.assertAlmostEqual(x.G_m, 221636.34492551928)
self.assertAlmostEqual(x.T_REC_q, 1104.9756026018772)
self.assertAlmostEqual(x.B, 10000000.0)
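    # Sketch of the cascade arithmetic behind the truth values above, consistent
    # with the Friis formula in [1] (variable names here are illustrative only):
    #   L       = 10**(tlLoss/10)                       # transmission-line loss ratio
    #   T_tl    = (L - 1)*tlPhyTemp                     # line noise referred to its input
    #   T_REC_q = T_tl + L*T_rf + (L/G_rf)*T_mix + (L/(G_rf*G_mix))*T_if
    #   G       = (1/L)*G_rf*G_mix*G_if                 # predetection power gain
    # e.g. L = 10**0.05 ~ 1.122 gives T_REC_q ~ 35.4 + 224.4 + 1.35 + 0.0006 ~ 261.14 K
    # and G ~ 1.778e8, matching the first set of assertions above.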
def test_compute_system_params(self):
antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 270})
pd_sec_params = TotalPowerRadiometerSystem.compute_predetection_sec_params(predetectionBandwidth=10e6, tlLoss=0.5, tlPhyTemp=290,
rfAmpGain=30, mixerGain=23, ifAmpGain=30, rfAmpGainVariation=10, mixerGainVariation=2, ifAmpGainVariation=10,
rfAmpInpNoiseTemp=200, mixerInpNoiseTemp=1200, ifAmpInputNoiseTemp=100)
G = 180000000
pd_sec_params = PredetectionSectionParams(G=G, G_p=G+0.01*G, G_m=G-0.01*G, T_REC_q=260, B=10e6)
x = TotalPowerRadiometerSystem.compute_system_params(antenna, pd_sec_params, integratorVoltageGain=1000, T_A_q=290)
self.assertIsInstance(x, SystemParams)
self.assertAlmostEqual(x.G_s_delta/x.G_s_bar, 0.02)
self.assertAlmostEqual(x.T_A, 286)
self.assertAlmostEqual(x.T_SYS, 546)
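    # Sketch of the system-parameter arithmetic verified above (inferred from the
    # truth values): with radiation efficiency xi and antenna physical temperature T_phy,
    #   T_A   = xi*T_A_q + (1 - xi)*T_phy       # 0.8*290 + 0.2*270 = 286 K
    #   T_SYS = T_A + T_REC_q                   # 286 + 260 = 546 K
    # and the post-detection (system) gain variation is doubled:
    #   G_s_delta/G_s_bar = 2*(G_delta/G) = 0.02 for G_delta/G = 0.01.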
def test_compute_radiometric_resolution(self):
# system 1
antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 270})
        o = TotalPowerRadiometerSystem.from_json(self.tpr_sys1_json) # note that there is a 100 ms integration-time specification
self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=300), 16.676630237262927)
self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=600), 23.886384796495147)
self.assertAlmostEqual(o.compute_radiometric_resolution(td=500e-3, antenna=antenna, T_A_q=300), 16.676630237262927)
self.assertAlmostEqual(o.compute_radiometric_resolution(td=50e-3, antenna=antenna, T_A_q=300), 16.685867420640534)
antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 350})
self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=300), 17.15728054121174)
antenna = Antenna.from_dict({"radiationEfficiency": 0.5, "phyTemp": 270})
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=300), 16.406264441291718) # reduced radiation efficiency appears to make the radiometer more sensitive
# system 2
        o = TotalPowerRadiometerSystem.from_json(self.tpr_sys2_json) # note that there is a 100 ms integration-time specification
antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 270})
self.assertAlmostEqual(o.compute_radiometric_resolution(td=200e-3, antenna=antenna, T_A_q=300), 4.976310348842347)
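

# Illustrative sketch, not part of the instrupy API: the total-power
# radiometric resolution checked above is consistent with the standard
# relation from [1],
#   Delta_T = T_SYS * sqrt(1/(B*tau) + (dG/G)**2),
# with dG/G the normalized predetection gain variation. The hypothetical
# helper below reproduces, e.g., the 10.0005 K value asserted in the
# Dicke-comparison test further down (T_SYS = 1000 K, B = 100e6 Hz,
# tau = 1 s, dG/G = 0.01).
def _total_power_resolution_sketch(T_SYS, B, tau, dG_over_G):
    return T_SYS * np.sqrt(1.0/(B*tau) + dG_over_G**2)
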
class TestUnbalancedDikeRadiometerSystem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.udr_sys1_json = '{"tlLoss": 0.5,' \
'"tlPhyTemp": 290,' \
'"rfAmpGain": 30,' \
'"rfAmpInpNoiseTemp": 200,' \
'"rfAmpGainVariation": 10,' \
'"mixerGain": 23,' \
'"mixerInpNoiseTemp": 1200,' \
'"mixerGainVariation": 2,' \
'"ifAmpGain": 30,' \
'"ifAmpInputNoiseTemp": 100,' \
'"ifAmpGainVariation": 10,' \
'"dickeSwitchOutputNoiseTemperature": 90,' \
'"referenceTemperature": 300,' \
'"integratorVoltageGain": 1,' \
'"integrationTime": 1,' \
'"bandwidth": 100e6,' \
'"@id": "abc"}'
# See Section 7-6, end of Pg. 282.
cls.udr_sys2_json = '{"predetectionGain": 83,' \
'"predetectionInpNoiseTemp": 700,' \
'"predetectionGainVariation": 1995262.314968883,' \
'"integrationTime": 1,' \
'"bandwidth": 100e6,' \
'"referenceTemperature": 300,' \
'"integratorVoltageGain": 1 }'
def test_from_json(self):
""" Test typical initialization of the unbalanced Dicke radiometer system.
"""
o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys1_json)
self.assertIsInstance(o, UnbalancedDikeRadiometerSystem)
self.assertEqual(o._id, "abc")
self.assertEqual(o._type, "UnbalancedDikeRadiometerSystem")
self.assertEqual(o.tlLoss, 0.5)
self.assertEqual(o.tlPhyTemp, 290)
self.assertEqual(o.rfAmpGain, 30)
self.assertEqual(o.rfAmpInpNoiseTemp, 200)
self.assertEqual(o.rfAmpGainVariation, 10)
self.assertEqual(o.mixerGain, 23)
self.assertEqual(o.mixerInpNoiseTemp, 1200)
self.assertEqual(o.mixerGainVariation, 2)
self.assertEqual(o.ifAmpGain, 30)
self.assertEqual(o.ifAmpInputNoiseTemp, 100)
self.assertEqual(o.ifAmpGainVariation, 10)
self.assertEqual(o.dickeSwitchOutputNoiseTemperature, 90)
self.assertEqual(o.referenceTemperature, 300)
self.assertEqual(o.integratorVoltageGain, 1)
self.assertIsNone(o.predetectionGain)
self.assertIsNone(o.predetectionInpNoiseTemp)
self.assertIsNone(o.predetectionGainVariation)
self.assertEqual(o.integrationTime, 1)
self.assertEqual(o.bandwidth, 100e6)
o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys2_json)
self.assertIsInstance(o, UnbalancedDikeRadiometerSystem)
self.assertIsNone(o._id)
self.assertEqual(o._type, "UnbalancedDikeRadiometerSystem")
self.assertIsNone(o.tlLoss)
self.assertIsNone(o.tlPhyTemp)
self.assertIsNone(o.rfAmpGain)
self.assertIsNone(o.rfAmpInpNoiseTemp)
self.assertIsNone(o.rfAmpGainVariation)
self.assertIsNone(o.mixerGain)
self.assertIsNone(o.mixerInpNoiseTemp)
self.assertIsNone(o.mixerGainVariation)
self.assertIsNone(o.ifAmpGain)
self.assertIsNone(o.ifAmpInputNoiseTemp)
self.assertIsNone(o.ifAmpGainVariation)
self.assertIsNone(o.dickeSwitchOutputNoiseTemperature)
self.assertEqual(o.referenceTemperature, 300)
self.assertEqual(o.integratorVoltageGain, 1)
self.assertEqual(o.predetectionGain, 83)
self.assertEqual(o.predetectionInpNoiseTemp, 700)
self.assertEqual(o.predetectionGainVariation, 1995262.314968883)
self.assertEqual(o.integrationTime, 1)
self.assertEqual(o.bandwidth, 100e6)
def test_to_dict(self):
o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys1_json)
self.assertEqual(o.to_dict(), {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0, 'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0,
'ifAmpGainVariation': 10.0, 'dickeSwitchOutputNoiseTemperature':90.0, 'referenceTemperature':300.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
'predetectionGainVariation': None, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': "abc", '@type': 'UNBALANCED_DICKE'}
)
o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys2_json)
self.assertEqual(o.to_dict(), {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None, 'ifAmpGain': None, 'ifAmpInputNoiseTemp': None,
'ifAmpGainVariation': None, 'dickeSwitchOutputNoiseTemperature':None, 'referenceTemperature':300.0, 'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 700.0,
'predetectionGainVariation': 1995262.314968883, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'UNBALANCED_DICKE'}
)
def test_compute_radiometric_resolution(self):
antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 300})
#################################################### System 1 ####################################################
############# Test with T_A equal to the reference temperature #############
o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys1_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 0.13022711539099396) # note that there is a 1 s integration-time specification
#################################################### System 2 ####################################################
############# See Section 7-6, end of Pg. 282. for truth values for the below calculation. #############
############# Test with T_A equal to the reference temperature
o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys2_json)
self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 0.2)
        # Compare with a total-power radiometer.
        # Initialize a total-power radiometer with the same specifications. Note, however, that the predetection
        # noise temperature would be lower for a true total-power radiometer since it does not include the Dicke switch.
o = TotalPowerRadiometerSystem.from_json(self.udr_sys2_json)
self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 10.000499987500632)
############# Test with T_A not equal to the reference temperature
        o = UnbalancedDikeRadiometerSystem.from_json(self.udr_sys2_json) # note that there is a 1 s integration-time specification
antenna = Antenna.from_dict({"radiationEfficiency": 1, "phyTemp": 300}) # setting efficiency to 100% to remove effect of antenna physical temperature
self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=0), 3.0049625621627984)
        # Compare with a total-power radiometer.
        # Initialize a total-power radiometer with the same specifications. Note, however, that the predetection
        # noise temperature would be lower for a true total-power radiometer since it does not include the Dicke switch.
o = TotalPowerRadiometerSystem.from_json(self.udr_sys2_json)
self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=0), 7.000349991250442)
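

# Illustrative sketch, not part of the instrupy API: the unbalanced-Dicke
# truth values above are consistent with the relation from [1],
#   Delta_T = sqrt( (2*(T_A + T_REC)**2 + 2*(T_REF + T_REC)**2)/(B*tau)
#                   + (dG/G)**2 * (T_A - T_REF)**2 ),
# which reduces to 2*(T_A + T_REC)/sqrt(B*tau) when T_A = T_REF. For example
# T_A = 0, T_REC = 700, T_REF = 300, B*tau = 1e8, dG/G = 0.01 gives
# sqrt(0.0298 + 9.0) ~ 3.005 K, matching the assertion above.
def _unbalanced_dicke_resolution_sketch(T_A, T_REC, T_REF, B, tau, dG_over_G):
    radiometric_term = (2.0*(T_A + T_REC)**2 + 2.0*(T_REF + T_REC)**2) / (B*tau)
    gain_term = (dG_over_G**2) * (T_A - T_REF)**2
    return np.sqrt(radiometric_term + gain_term)
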
class TestBalancedDikeRadiometerSystem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.bdr_sys1_json = '{"tlLoss": 0.5,' \
'"tlPhyTemp": 290,' \
'"rfAmpGain": 30,' \
'"rfAmpInpNoiseTemp": 200,' \
'"rfAmpGainVariation": 10,' \
'"mixerGain": 23,' \
'"mixerInpNoiseTemp": 1200,' \
'"mixerGainVariation": 2,' \
'"ifAmpGain": 30,' \
'"ifAmpInputNoiseTemp": 100,' \
'"ifAmpGainVariation": 10,' \
'"dickeSwitchOutputNoiseTemperature": 90,' \
'"integratorVoltageGain": 1,' \
'"integrationTime": 1,' \
'"bandwidth": 100e6,' \
'"@id": "abc"}'
# See Section 7-6, end of Pg. 282.
cls.bdr_sys2_json = '{"predetectionGain": 83,' \
'"predetectionInpNoiseTemp": 700,' \
'"predetectionGainVariation": 1995262.314968883,' \
'"integrationTime": 1,' \
'"bandwidth": 100e6,' \
'"integratorVoltageGain": 1 }'
def test_from_json(self):
""" Test typical initialization of the balanced Dicke radiometer system.
"""
o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys1_json)
self.assertIsInstance(o, BalancedDikeRadiometerSystem)
self.assertEqual(o._id, "abc")
self.assertEqual(o._type, "BalancedDikeRadiometerSystem")
self.assertEqual(o.tlLoss, 0.5)
self.assertEqual(o.tlPhyTemp, 290)
self.assertEqual(o.rfAmpGain, 30)
self.assertEqual(o.rfAmpInpNoiseTemp, 200)
self.assertEqual(o.rfAmpGainVariation, 10)
self.assertEqual(o.mixerGain, 23)
self.assertEqual(o.mixerInpNoiseTemp, 1200)
self.assertEqual(o.mixerGainVariation, 2)
self.assertEqual(o.ifAmpGain, 30)
self.assertEqual(o.ifAmpInputNoiseTemp, 100)
self.assertEqual(o.ifAmpGainVariation, 10)
self.assertEqual(o.dickeSwitchOutputNoiseTemperature, 90)
self.assertEqual(o.integratorVoltageGain, 1)
self.assertIsNone(o.predetectionGain)
self.assertIsNone(o.predetectionInpNoiseTemp)
self.assertIsNone(o.predetectionGainVariation)
self.assertEqual(o.integrationTime, 1)
self.assertEqual(o.bandwidth, 100e6)
o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys2_json)
self.assertIsInstance(o, BalancedDikeRadiometerSystem)
self.assertIsNone(o._id)
self.assertEqual(o._type, "BalancedDikeRadiometerSystem")
self.assertIsNone(o.tlLoss)
self.assertIsNone(o.tlPhyTemp)
self.assertIsNone(o.rfAmpGain)
self.assertIsNone(o.rfAmpInpNoiseTemp)
self.assertIsNone(o.rfAmpGainVariation)
self.assertIsNone(o.mixerGain)
self.assertIsNone(o.mixerInpNoiseTemp)
self.assertIsNone(o.mixerGainVariation)
self.assertIsNone(o.ifAmpGain)
self.assertIsNone(o.ifAmpInputNoiseTemp)
self.assertIsNone(o.ifAmpGainVariation)
self.assertIsNone(o.dickeSwitchOutputNoiseTemperature)
self.assertEqual(o.integratorVoltageGain, 1)
self.assertEqual(o.predetectionGain, 83)
self.assertEqual(o.predetectionInpNoiseTemp, 700)
self.assertEqual(o.predetectionGainVariation, 1995262.314968883)
self.assertEqual(o.integrationTime, 1)
self.assertEqual(o.bandwidth, 100e6)
def test_to_dict(self):
o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys1_json)
self.assertEqual(o.to_dict(), {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0, 'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0,
'ifAmpGainVariation': 10.0, 'dickeSwitchOutputNoiseTemperature':90.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
'predetectionGainVariation': None, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': "abc", '@type': 'BALANCED_DICKE'}
)
o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys2_json)
self.assertEqual(o.to_dict(), {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None, 'ifAmpGain': None, 'ifAmpInputNoiseTemp': None,
'ifAmpGainVariation': None, 'dickeSwitchOutputNoiseTemperature':None, 'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 700.0,
'predetectionGainVariation': 1995262.314968883, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'BALANCED_DICKE'}
)
def test_compute_radiometric_resolution(self):
antenna = Antenna.from_dict({"radiationEfficiency": 1, "phyTemp": 300}) # setting efficiency to 100% to remove effect of antenna physical temperature
o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys1_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=0), 0.07022711539099395) # note that there is a 1 s integration-time specification
############# Test with T_A not equal to the reference temperature
        o = BalancedDikeRadiometerSystem.from_json(self.bdr_sys2_json) # note that there is a 1 s integration-time specification
self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=0), 0.14)
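

# Illustrative sketch, not part of the instrupy API: in a balanced Dicke
# system the reference is servoed to the antenna temperature, so the
# gain-variation term drops out and [1] gives
#   Delta_T = 2*(T_A + T_REC)/sqrt(B*tau).
# With T_A = 0, T_REC = 700 K and B*tau = 1e8 this is 0.14 K, matching the
# assertion above.
def _balanced_dicke_resolution_sketch(T_A, T_REC, B, tau):
    return 2.0*(T_A + T_REC)/np.sqrt(B*tau)
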
class TestNoiseAddingRadiometerSystem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.nar_sys1_json = '{"tlLoss": 0.5,' \
'"tlPhyTemp": 290,' \
'"rfAmpGain": 30,' \
'"rfAmpInpNoiseTemp": 200,' \
'"rfAmpGainVariation": 10,' \
'"mixerGain": 23,' \
'"mixerInpNoiseTemp": 1200,' \
'"mixerGainVariation": 2,' \
'"ifAmpGain": 30,' \
'"ifAmpInputNoiseTemp": 100,' \
'"ifAmpGainVariation": 10,' \
'"excessNoiseTemperature": 1000,' \
'"integratorVoltageGain": 1,' \
'"integrationTime": 1,' \
'"bandwidth": 100e6,' \
'"@id": "abc"}'
# See Section 7-6, end of Pg. 282.
cls.nar_sys2_json = '{"predetectionGain": 83,' \
'"predetectionInpNoiseTemp": 700,' \
'"predetectionGainVariation": 1995262.314968883,' \
'"excessNoiseTemperature": 10000,' \
'"integrationTime": 1,' \
'"bandwidth": 100e6,' \
'"integratorVoltageGain": 1 }'
def test_from_json(self):
""" Test typical initialization of the noise-adding radiometer system.
"""
o = NoiseAddingRadiometerSystem.from_json(self.nar_sys1_json)
self.assertIsInstance(o, NoiseAddingRadiometerSystem)
self.assertEqual(o._id, "abc")
self.assertEqual(o._type, "NoiseAddingRadiometerSystem")
self.assertEqual(o.tlLoss, 0.5)
self.assertEqual(o.tlPhyTemp, 290)
self.assertEqual(o.rfAmpGain, 30)
self.assertEqual(o.rfAmpInpNoiseTemp, 200)
self.assertEqual(o.rfAmpGainVariation, 10)
self.assertEqual(o.mixerGain, 23)
self.assertEqual(o.mixerInpNoiseTemp, 1200)
self.assertEqual(o.mixerGainVariation, 2)
self.assertEqual(o.ifAmpGain, 30)
self.assertEqual(o.ifAmpInputNoiseTemp, 100)
self.assertEqual(o.ifAmpGainVariation, 10)
self.assertEqual(o.excessNoiseTemperature, 1000)
self.assertEqual(o.integratorVoltageGain, 1)
self.assertIsNone(o.predetectionGain)
self.assertIsNone(o.predetectionInpNoiseTemp)
self.assertIsNone(o.predetectionGainVariation)
self.assertEqual(o.integrationTime, 1)
self.assertEqual(o.bandwidth, 100e6)
o = NoiseAddingRadiometerSystem.from_json(self.nar_sys2_json)
self.assertIsInstance(o, NoiseAddingRadiometerSystem)
self.assertIsNone(o._id)
self.assertEqual(o._type, "NoiseAddingRadiometerSystem")
self.assertIsNone(o.tlLoss)
self.assertIsNone(o.tlPhyTemp)
self.assertIsNone(o.rfAmpGain)
self.assertIsNone(o.rfAmpInpNoiseTemp)
self.assertIsNone(o.rfAmpGainVariation)
self.assertIsNone(o.mixerGain)
self.assertIsNone(o.mixerInpNoiseTemp)
self.assertIsNone(o.mixerGainVariation)
self.assertIsNone(o.ifAmpGain)
self.assertIsNone(o.ifAmpInputNoiseTemp)
self.assertIsNone(o.ifAmpGainVariation)
self.assertEqual(o.excessNoiseTemperature, 10000)
self.assertEqual(o.integratorVoltageGain, 1)
self.assertEqual(o.predetectionGain, 83)
self.assertEqual(o.predetectionInpNoiseTemp, 700)
self.assertEqual(o.predetectionGainVariation, 1995262.314968883)
self.assertEqual(o.integrationTime, 1)
self.assertEqual(o.bandwidth, 100e6)
def test_to_dict(self):
o = NoiseAddingRadiometerSystem.from_json(self.nar_sys1_json)
self.assertEqual(o.to_dict(), {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0, 'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0,
'ifAmpGainVariation': 10.0, 'excessNoiseTemperature':1000.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
'predetectionGainVariation': None, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': "abc", '@type': 'NOISE_ADDING'}
)
o = NoiseAddingRadiometerSystem.from_json(self.nar_sys2_json)
self.assertEqual(o.to_dict(), {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None, 'ifAmpGain': None, 'ifAmpInputNoiseTemp': None,
'ifAmpGainVariation': None, 'excessNoiseTemperature':10000.0, 'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 700.0,
'predetectionGainVariation': 1995262.314968883, 'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'NOISE_ADDING'}
)
def test_compute_radiometric_resolution(self):
antenna = Antenna.from_dict({"radiationEfficiency": 0.8, "phyTemp": 300})
o = NoiseAddingRadiometerSystem.from_json(self.nar_sys1_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 0.23817636968082867) # note that there is a 1 s integration-time specification
o = NoiseAddingRadiometerSystem.from_json(self.nar_sys2_json)
        self.assertAlmostEqual(o.compute_radiometric_resolution(td=1.5, antenna=antenna, T_A_q=300), 0.24) # note that there is a 1 s integration-time specification
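

# Illustrative sketch, not part of the instrupy API: the noise-adding truth
# values above are consistent with the relation from [1],
#   Delta_T = (2*T_SYS/sqrt(B*tau)) * (1 + 2*T_SYS/T_N),
# where T_N is the excess noise temperature. For example T_SYS = 1000 K,
# T_N = 1e4 K, B*tau = 1e8 gives 0.2*1.2 = 0.24 K, matching the second
# assertion above.
def _noise_adding_resolution_sketch(T_SYS, T_N, B, tau):
    return (2.0*T_SYS/np.sqrt(B*tau)) * (1.0 + 2.0*T_SYS/T_N)
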
class TestFixedScan(unittest.TestCase):
def test_from_json(self):
""" Test typical initialization of the FixedScan object
"""
o = FixedScan.from_json('{"@id": 123}')
self.assertIsInstance(o, FixedScan)
self.assertEqual(o._id, 123)
self.assertEqual(o._type, "FixedScan")
o = FixedScan.from_json('{"@id": "abc"}')
self.assertIsInstance(o, FixedScan)
self.assertEqual(o._id, "abc")
self.assertEqual(o._type, "FixedScan")
o = FixedScan.from_json('{}')
self.assertIsInstance(o, FixedScan)
self.assertIsNone(o._id)
self.assertEqual(o._type, "FixedScan")
def test_to_dict(self):
o = FixedScan.from_json('{"@id": 123}')
self.assertEqual(o.to_dict(), {'@id': 123, '@type': 'FIXED'})
o = FixedScan.from_json('{"@id": "abc"}')
self.assertEqual(o.to_dict(), {'@id': "abc", '@type': 'FIXED'})
o = FixedScan.from_json('{}')
self.assertEqual(o.to_dict(), {'@id': None, '@type': 'FIXED'})
def test_compute_instru_field_of_view(self):
o = FixedScan.from_json('{"@id": "abc"}')
instru_orientation = Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK","sideLookAngle":10})
antenna_fov_sph_geom = SphericalGeometry.from_dict({"shape": "CIRCULAR", "diameter": 30})
instru_fov_sph_geom = antenna_fov_sph_geom
self.assertEqual(o.compute_instru_field_of_view(antenna_fov_sph_geom=antenna_fov_sph_geom, instru_orientation=instru_orientation), ViewGeometry(orien=instru_orientation, sph_geom=instru_fov_sph_geom))
antenna_fov_sph_geom = SphericalGeometry.from_dict({"shape": "RECTANGULAR", "angleHeight": 10, "angleWidth": 20})
instru_fov_sph_geom = antenna_fov_sph_geom
self.assertEqual(o.compute_instru_field_of_view(antenna_fov_sph_geom=antenna_fov_sph_geom, instru_orientation=instru_orientation), ViewGeometry(orien=instru_orientation, sph_geom=instru_fov_sph_geom))
def test_compute_dwell_time_per_ground_pixel(self):
o = FixedScan.from_json('{"@id": 123}')
self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=1000, sat_speed_kmps=7.8), 0.1282051282051282)
def test_compute_swath_width(self):
o = FixedScan.from_json('{"@id": 123}')
antenna_fov_sph_geom = SphericalGeometry.from_dict({"shape": "CIRCULAR", "diameter": 30})
# using approximate swath formula as the truth data
self.assertAlmostEqual(o.compute_swath_width(alt_km=500, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_fov_sph_geom), 30*np.pi/180*500, delta=25)
self.assertAlmostEqual(o.compute_swath_width(alt_km=700, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_fov_sph_geom), 30*np.pi/180*700, delta=25)
self.assertAlmostEqual(o.compute_swath_width(alt_km=500, instru_look_angle_deg=15, antenna_fov_sph_geom=antenna_fov_sph_geom), 30*np.pi/180*(500/np.cos(np.deg2rad(15))), delta=25)
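

# Illustrative sketches, not part of the instrupy API: for a fixed
# (non-scanning) beam, the dwell time per ground pixel asserted above is the
# time the sub-satellite point takes to advance one along-track pixel, and the
# nadir swath is approximately the FOV angle times the altitude (the flat-earth
# approximation the tests above use as truth data, with a generous delta).
def _fixed_scan_dwell_sketch(res_AT_m, sat_speed_kmps):
    return res_AT_m / (sat_speed_kmps * 1e3)      # 1000/7800 ~ 0.12821 s

def _nadir_swath_approx_km(fov_deg, alt_km):
    return np.deg2rad(fov_deg) * alt_km          # 30 deg, 500 km -> ~262 km
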
class TestCrossTrackScan(unittest.TestCase):
def test_from_json(self):
""" Test typical initialization of the CrossTrackScan object
"""
o = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}')
self.assertIsInstance(o, CrossTrackScan)
self.assertEqual(o._id, 123)
self.assertEqual(o._type, "CrossTrackScan")
self.assertEqual(o.scanWidth, 120)
self.assertEqual(o.interScanOverheadTime, 1e-3)
def test_to_dict(self):
o = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}')
self.assertEqual(o.to_dict(), {'@id': 123, '@type': 'CROSS_TRACK', "scanWidth": 120.0, "interScanOverheadTime": 0.001})
def test_compute_instru_field_of_view(self):
o = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}')
instru_orientation = Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK","sideLookAngle":10})
antenna_fov_sph_geom = SphericalGeometry.from_dict({"shape": "CIRCULAR", "diameter": 30})
instru_fov_sph_geom = SphericalGeometry.from_dict({"shape": "RECTANGULAR", "angleHeight": 30, "angleWidth": 150})
self.assertEqual(o.compute_instru_field_of_view(antenna_fov_sph_geom=antenna_fov_sph_geom, instru_orientation=instru_orientation), ViewGeometry(orien=instru_orientation, sph_geom=instru_fov_sph_geom))
antenna_fov_sph_geom = SphericalGeometry.from_dict({"shape": "RECTANGULAR", "angleHeight": 15, "angleWidth": 60})
instru_fov_sph_geom = SphericalGeometry.from_dict({"shape": "RECTANGULAR", "angleHeight": 15, "angleWidth": 180})
self.assertEqual(o.compute_instru_field_of_view(antenna_fov_sph_geom=antenna_fov_sph_geom, instru_orientation=instru_orientation), ViewGeometry(orien=instru_orientation, sph_geom=instru_fov_sph_geom))
def test_compute_dwell_time_per_ground_pixel(self):
o = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}')
self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 0.021334188034188035)
        self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=10000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 2*0.021334188034188035, places=4) # dwell time should roughly double when the along-track pixel resolution doubles
        self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=8), 2*0.021334188034188035, places=4) # dwell time should roughly double when the cross-track iFOV doubles
o = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 10e-3}')
self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 0.021034188034188037)
def test_compute_swath_width(self):
o = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 20, "interScanOverheadTime": 1e-3}')
antenna_fov_sph_geom = SphericalGeometry.from_dict({"shape": "CIRCULAR", "diameter": 1})
# using approximate swath formula as the truth data
self.assertAlmostEqual(o.compute_swath_width(alt_km=500, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_fov_sph_geom), 20*np.pi/180*500, delta=25)
self.assertAlmostEqual(o.compute_swath_width(alt_km=700, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_fov_sph_geom), 20*np.pi/180*700, delta=25)
o = CrossTrackScan.from_json('{"@id": 123, "scanWidth": 60, "interScanOverheadTime": 1e-3}')
self.assertAlmostEqual(o.compute_swath_width(alt_km=500, instru_look_angle_deg=0, antenna_fov_sph_geom=antenna_fov_sph_geom), 60*np.pi/180*500, delta=75)
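

# Illustrative sketch, not part of the instrupy API, inferred from the truth
# values above: one cross-track scan must complete in the time the satellite
# advances one along-track pixel; after subtracting the inter-scan overhead,
# that time is shared equally among the scanWidth/iFOV_CT cross-track pixels.
# For example (5000/7800 - 0.001)/(120/4) ~ 0.0213342 s, as asserted above.
def _cross_track_dwell_sketch(res_AT_m, sat_speed_kmps, iFOV_CT_deg,
                              scan_width_deg, overhead_s):
    t_scan = res_AT_m / (sat_speed_kmps * 1e3) - overhead_s
    return t_scan / (scan_width_deg / iFOV_CT_deg)
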
class TestConicalScan(unittest.TestCase):
def test_from_json(self):
""" Test typical initialization of the ConicalScan object
"""
o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}')
self.assertIsInstance(o, ConicalScan)
self.assertEqual(o._id, "abc")
self.assertEqual(o._type, "ConicalScan")
self.assertEqual(o.offNadirAngle, 30)
self.assertEqual(o.clockAngleRange, 60)
self.assertEqual(o.interScanOverheadTime, 1e-3)
def test_to_dict(self):
o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}')
self.assertEqual(o.to_dict(), {'@id': "abc", '@type': 'CONICAL', "offNadirAngle": 30.0, "clockAngleRange": 60.0, "interScanOverheadTime": 0.001})
def test_compute_instru_field_of_view(self):
o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}')
instru_orientation = Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK","sideLookAngle":10})
antenna_fov_sph_geom = SphericalGeometry.from_dict({"shape": "CIRCULAR", "diameter": 30})
with self.assertRaises(NotImplementedError):
o.compute_instru_field_of_view(antenna_fov_sph_geom=antenna_fov_sph_geom, instru_orientation=instru_orientation)
def test_compute_dwell_time_per_ground_pixel(self):
        # results are the same as those of the CrossTrackScan (clockAngleRange plays the role of scanWidth)
o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}')
self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 0.021334188034188035)
        self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=10000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 2*0.021334188034188035, places=4) # dwell time should roughly double when the along-track pixel resolution doubles
        self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=8), 2*0.021334188034188035, places=4) # dwell time should roughly double when the cross-track iFOV doubles
o = CrossTrackScan.from_json('{"scanWidth": 120, "interScanOverheadTime": 10e-3}')
self.assertAlmostEqual(o.compute_dwell_time_per_ground_pixel(res_AT_m=5000, sat_speed_kmps=7.8, iFOV_CT_deg=4), 0.021034188034188037)
def test_compute_swath_width(self):
o = ConicalScan.from_json('{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}')
        # truth data computed from the conical-scan ground geometry (a flat-earth approximation, alt_km*tan(offNadirAngle)*clockAngleRange_rad, gives ~605 km at 500 km)
self.assertAlmostEqual(o.compute_swath_width(alt_km=500, instru_look_angle_deg=0), 612.7169711748869)
self.assertAlmostEqual(o.compute_swath_width(alt_km=700, instru_look_angle_deg=0), 862.5336432436297)
with self.assertRaises(Exception):
o.compute_swath_width(alt_km=500, instru_look_angle_deg=30) # instrument look angle is not 0 degrees
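

# Illustrative note, inferred from the matching truth values above: the conical
# scan shares the cross-track dwell-time arithmetic, with clockAngleRange
# playing the role of scanWidth, e.g.
#   _cross_track_dwell_sketch(5000, 7.8, 4, 120, 1e-3) ~ 0.0213342 s.
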
class TestRadiometerModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.radio1_json = '{"@type": "Radiometer", "name": "ray1", "mass": 50, "volume": 3, "power": 10,' \
' "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},' \
' "bitsPerPixel": 16,' \
' "operatingFrequency": 1.25e9,' \
' "antenna": {"shape": "CIRCULAR", "diameter": 1, "apertureExcitationProfile": "UNIFORM",' \
' "radiationEfficiency": 0.8, "phyTemp": 300},' \
' "system": {"tlLoss": 0.5, "tlPhyTemp": 290, ' \
' "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200, ' \
' "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200,' \
' "mixerGainVariation": 2, "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100,' \
' "ifAmpGainVariation": 10, "integratorVoltageGain": 1, "integrationTime": 100e-3,' \
' "bandwidth": 10e6, "@type": "TOTAL_POWER"},' \
' "scan": {"@type": "FIXED"},' \
' "targetBrightnessTemp": 345' \
'}'
cls.radio2_json = '{"@type": "Radiometer", "name": "ray2", "mass": 50, ' \
' "operatingFrequency": 1.25e9,' \
' "antenna": {"shape": "RECTANGULAR", "height": 1, "width": 1, "apertureExcitationProfile": "UNIFORM",' \
' "radiationEfficiency": 0.75, "phyTemp": 300},' \
' "system": { "predetectionGain": 83, "predetectionInpNoiseTemp": 700, ' \
' "predetectionGainVariation": 1995262.314968883, "integrationTime": 1, ' \
' "bandwidth": 100e6, "referenceTemperature": 300, "integratorVoltageGain": 1,' \
' "@type": "UNBALANCED_DICKE"},' \
' "scan": {"@type": "CROSS_TRACK", "scanWidth": 120, "interScanOverheadTime": 1e-3},' \
' "targetBrightnessTemp": 301' \
'}'
cls.radio3_json = '{"@type": "Radiometer", "@id": "ray3",' \
' "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"},' \
' "bitsPerPixel": 16,' \
' "operatingFrequency": 1.25e9,' \
' "antenna": {"shape": "CIRCULAR", "diameter": 3.5, "apertureExcitationProfile": "UNIFORM",' \
' "radiationEfficiency": 1, "phyTemp": 300},' \
' "system": { "tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200,' \
' "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,' \
' "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10, "dickeSwitchOutputNoiseTemperature": 90,' \
' "integratorVoltageGain": 1, "integrationTime": 1, "bandwidth": 100e6, "@type": "BALANCED_DICKE"},' \
' "scan": {"@type": "CONICAL", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3},' \
' "targetBrightnessTemp": 295' \
'}'
cls.radio4_json = '{"@type": "Radiometer", "@id": "ray4",' \
' "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-30},' \
' "operatingFrequency": 1.25e9,' \
' "antenna": {"shape": "CIRCULAR", "diameter": 1, "apertureExcitationProfile": "UNIFORM",' \
' "radiationEfficiency": 1, "phyTemp": 300},' \
' "system": { "tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200,' \
' "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,' \
' "ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10, "excessNoiseTemperature": 1000,' \
' "integratorVoltageGain": 1, "integrationTime": 1, "bandwidth": 100e6, "@type": "NOISE_ADDING"},' \
' "scan": {"@type": "FIXED"},' \
' "targetBrightnessTemp": 295' \
'}'
def test_from_json(self):
""" Test typical initializations of the RadiometerModel object
"""
o = RadiometerModel.from_json(self.radio1_json)
self.assertIsInstance(o, RadiometerModel)
self.assertIsNotNone(o._id)
self.assertEqual(o.name, "ray1")
self.assertEqual(o.mass, 50)
self.assertEqual(o.volume, 3)
self.assertEqual(o.power, 10)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}))
self.assertEqual(o.bitsPerPixel, 16)
self.assertEqual(o.operatingFrequency, 1.25e9)
self.assertEqual(o.antenna, Antenna.from_dict({"shape": "CIRCULAR", "diameter": 1, "apertureExcitationProfile": "UNIFORM", "radiationEfficiency": 0.8, "phyTemp": 300}))
self.assertEqual(o.system, TotalPowerRadiometerSystem.from_dict({"tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200, "rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,
"ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10,
"integratorVoltageGain": 1, "integrationTime": 100e-3, "bandwidth": 10e6}))
self.assertEqual(o.scan, FixedScan.from_dict({}))
self.assertEqual(o.targetBrightnessTemp, 345)
o = RadiometerModel.from_json(self.radio2_json)
self.assertIsInstance(o, RadiometerModel)
self.assertIsNotNone(o._id)
self.assertEqual(o.name, "ray2")
self.assertEqual(o.mass, 50)
self.assertIsNone(o.volume)
self.assertIsNone(o.power)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}))
self.assertIsNone(o.bitsPerPixel)
self.assertEqual(o.operatingFrequency, 1.25e9)
self.assertEqual(o.antenna, Antenna.from_dict({"shape": "RECTANGULAR", "height": 1, "width":1, "apertureExcitationProfile": "UNIFORM", "radiationEfficiency": 0.75, "phyTemp": 300}))
self.assertEqual(o.system, UnbalancedDikeRadiometerSystem.from_dict({ "predetectionGain": 83, "predetectionInpNoiseTemp": 700,
"predetectionGainVariation": 1995262.314968883, "integrationTime": 1,
"bandwidth": 100e6, "referenceTemperature": 300, "integratorVoltageGain": 1,
"@type": "UNBALANCED_DICKE"}))
self.assertEqual(o.scan, CrossTrackScan.from_dict({"scanWidth": 120, "interScanOverheadTime": 1e-3}))
self.assertEqual(o.targetBrightnessTemp, 301)
o = RadiometerModel.from_json(self.radio3_json)
self.assertIsInstance(o, RadiometerModel)
self.assertEqual(o._id, "ray3")
self.assertIsNone(o.name)
self.assertIsNone(o.mass)
self.assertIsNone(o.volume)
self.assertIsNone(o.power)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}))
self.assertEqual(o.bitsPerPixel, 16)
self.assertEqual(o.operatingFrequency, 1.25e9)
self.assertEqual(o.antenna, Antenna.from_dict({"shape": "CIRCULAR", "diameter": 3.5, "apertureExcitationProfile": "UNIFORM", "radiationEfficiency": 1, "phyTemp": 300}))
self.assertEqual(o.system, BalancedDikeRadiometerSystem.from_dict({ "tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200,
"rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,
"ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10, "dickeSwitchOutputNoiseTemperature": 90,
"integratorVoltageGain": 1, "integrationTime": 1, "bandwidth": 100e6, "@type": "BALANCED_DICKE"}))
self.assertEqual(o.scan, ConicalScan.from_dict({"offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}))
self.assertEqual(o.targetBrightnessTemp, 295)
o = RadiometerModel.from_json(self.radio4_json)
self.assertIsInstance(o, RadiometerModel)
self.assertEqual(o._id, "ray4")
self.assertIsNone(o.name)
self.assertIsNone(o.mass)
self.assertIsNone(o.volume)
self.assertIsNone(o.power)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle":-30}))
self.assertIsNone(o.bitsPerPixel)
self.assertEqual(o.operatingFrequency, 1.25e9)
self.assertEqual(o.antenna, Antenna.from_dict({"shape": "CIRCULAR", "diameter": 1, "apertureExcitationProfile": "UNIFORM", "radiationEfficiency": 1, "phyTemp": 300}))
self.assertEqual(o.system, NoiseAddingRadiometerSystem.from_dict({ "tlLoss": 0.5, "tlPhyTemp": 290, "rfAmpGain": 30, "rfAmpInpNoiseTemp": 200,
"rfAmpGainVariation": 10, "mixerGain": 23, "mixerInpNoiseTemp": 1200, "mixerGainVariation": 2,
"ifAmpGain": 30, "ifAmpInputNoiseTemp": 100, "ifAmpGainVariation": 10, "excessNoiseTemperature": 1000,
"integratorVoltageGain": 1, "integrationTime": 1, "bandwidth": 100e6, "@type": "NOISE_ADDING"}))
self.assertEqual(o.scan, FixedScan.from_dict({}))
self.assertEqual(o.targetBrightnessTemp, 295)
def test_to_dict(self):
o = RadiometerModel.from_json(self.radio1_json)
_id = o._id # id is generated randomly
self.assertEqual(o.to_dict(), {'@type': 'Radiometer', 'name': 'ray1', 'mass': 50.0, 'volume': 3.0, 'power': 10.0,
'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerAngle1': 0.0, 'eulerAngle2': 0.0, 'eulerAngle3': 0.0, 'eulerSeq1': 1, 'eulerSeq2': 2, 'eulerSeq3': 3, '@id': None}, 'fieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 13.741474058602394, '@id': None},
'sceneFieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 13.741474058602394, '@id': None},
'maneuver': None, 'pointingOption': None, 'dataRate': None, 'bitsPerPixel': 16,
'antenna': {'shape': 'CIRCULAR', 'apertureExcitationProfile': 'UNIFORM', 'diameter': 1.0, 'height': None, 'width': None, 'apertureEfficiency': None, 'radiationEfficiency': 0.8, 'phyTemp': 300.0, '@id': None},
'operatingFrequency': 1250000000.0,
'system': {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0,
'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0, 'ifAmpGainVariation': 10.0,
'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None,
'predetectionGainVariation': None, 'integrationTime': 0.1, 'bandwidth': 10000000.0, '@id': None, '@type': 'TOTAL_POWER'},
'scan': {'@id': None, '@type': 'FIXED'}, 'targetBrightnessTemp': 345.0, '@id': _id})
o = RadiometerModel.from_json(self.radio2_json)
_id = o._id # id is generated randomly
self.assertEqual(o.to_dict(), {'@type': 'Radiometer', 'name': 'ray2', 'mass': 50.0, 'volume': None, 'power': None,
'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerAngle1': 0.0, 'eulerAngle2': 0.0, 'eulerAngle3': 0.0, 'eulerSeq1': 1, 'eulerSeq2': 2, 'eulerSeq3': 3, '@id': None},
'fieldOfViewGeometry': {'shape': 'RECTANGULAR', 'angleHeight': 12.092497171570107, 'angleWidth': 12.092497171570086, '@id': None},
'sceneFieldOfViewGeometry': {'shape': 'RECTANGULAR', 'angleHeight': 12.092497171570107, 'angleWidth': 12.092497171570086, '@id': None},
'maneuver': None, 'pointingOption': None, 'dataRate': None, 'bitsPerPixel': None,
'antenna': {'shape': 'RECTANGULAR', 'apertureExcitationProfile': 'UNIFORM', 'diameter': None, 'height': 1.0, 'width': 1.0, 'apertureEfficiency': None, 'radiationEfficiency': 0.75, 'phyTemp': 300.0, '@id': None},
'operatingFrequency': 1250000000.0,
'system': {'tlLoss': None, 'tlPhyTemp': None, 'rfAmpGain': None, 'rfAmpInpNoiseTemp': None, 'rfAmpGainVariation': None,
'mixerGain,': None, 'mixerInpNoiseTemp': None, 'mixerGainVariation': None,
'ifAmpGain': None, 'ifAmpInputNoiseTemp': None, 'ifAmpGainVariation': None,
'dickeSwitchOutputNoiseTemperature': None, 'referenceTemperature': 300.0,
'integratorVoltageGain': 1.0, 'predetectionGain': 83.0, 'predetectionInpNoiseTemp': 700.0, 'predetectionGainVariation': 1995262.314968883,
'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'UNBALANCED_DICKE'},
'scan': {'scanWidth': 120.0, 'interScanOverheadTime': 0.001, '@id': None, '@type': 'CROSS_TRACK'},
'targetBrightnessTemp': 301.0, '@id': _id})
o = RadiometerModel.from_json(self.radio3_json)
self.assertEqual(o.to_dict(), {'@type': 'Radiometer', 'name': None, 'mass': None, 'volume': None, 'power': None,
'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerAngle1': 0.0, 'eulerAngle2': 0.0, 'eulerAngle3': 0.0, 'eulerSeq1': 1, 'eulerSeq2': 2, 'eulerSeq3': 3, '@id': None},
'fieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 3.9261354453149697, '@id': None},
'sceneFieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 3.9261354453149697, '@id': None},
'maneuver': None, 'pointingOption': None, 'dataRate': None, 'bitsPerPixel': 16,
'antenna': {'shape': 'CIRCULAR', 'apertureExcitationProfile': 'UNIFORM', 'diameter': 3.5, 'height': None, 'width': None, 'apertureEfficiency': None, 'radiationEfficiency': 1.0, 'phyTemp': 300.0, '@id': None},
'operatingFrequency': 1250000000.0,
'system': {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0,
'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0, 'ifAmpGainVariation': 10.0,
'dickeSwitchOutputNoiseTemperature': 90.0, 'integratorVoltageGain': 1.0,
'predetectionGain': None, 'predetectionInpNoiseTemp': None, 'predetectionGainVariation': None,
'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'BALANCED_DICKE'},
'scan': {'offNadirAngle': 30.0, 'clockAngleRange': 60.0, 'interScanOverheadTime': 0.001, '@id': None, '@type': 'CONICAL'},
'targetBrightnessTemp': 295.0, '@id': 'ray3'})
o = RadiometerModel.from_json(self.radio4_json)
self.assertEqual(o.to_dict(), {'@type': 'Radiometer', 'name': None, 'mass': None, 'volume': None, 'power': None,
'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerAngle1': 0.0, 'eulerAngle2': 330.0, 'eulerAngle3': 0.0, 'eulerSeq1': 1, 'eulerSeq2': 2, 'eulerSeq3': 3, '@id': None},
'fieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 13.741474058602394, '@id': None},
'sceneFieldOfViewGeometry': {'shape': 'CIRCULAR', 'diameter': 13.741474058602394, '@id': None},
'maneuver': None, 'pointingOption': None, 'dataRate': None, 'bitsPerPixel': None,
'antenna': {'shape': 'CIRCULAR', 'apertureExcitationProfile': 'UNIFORM', 'diameter': 1.0, 'height': None, 'width': None, 'apertureEfficiency': None, 'radiationEfficiency': 1.0, 'phyTemp': 300.0, '@id': None},
'operatingFrequency': 1250000000.0,
'system': {'tlLoss': 0.5, 'tlPhyTemp': 290.0, 'rfAmpGain': 30.0, 'rfAmpInpNoiseTemp': 200.0, 'rfAmpGainVariation': 10.0,
'mixerGain,': 23.0, 'mixerInpNoiseTemp': 1200.0, 'mixerGainVariation': 2.0,
'ifAmpGain': 30.0, 'ifAmpInputNoiseTemp': 100.0, 'ifAmpGainVariation': 10.0,
'excessNoiseTemperature': 1000.0, 'integratorVoltageGain': 1.0, 'predetectionGain': None, 'predetectionInpNoiseTemp': None, 'predetectionGainVariation': None,
'integrationTime': 1.0, 'bandwidth': 100000000.0, '@id': None, '@type': 'NOISE_ADDING'},
'scan': {'@id': None, '@type': 'FIXED'}, 'targetBrightnessTemp': 295.0, '@id': 'ray4'})
def test_calc_data_metrics_1(self):
""" ``instru_look_angle_from_target_inc_angle`` flag is set to False."""
        epoch_JDUT1 = 2458543.06088 # 2019 Feb 28 13:27:40 is the time at which the ECEF and ECI frames approximately align, hence the ECEF-to-ECI rotation is identity. See <https://www.celnav.de/longterm.htm> for an online GMST calculator.
SpacecraftOrbitState = {'time [JDUT1]':epoch_JDUT1, 'x [km]': 6878.137, 'y [km]': 0, 'z [km]': 0, 'vx [km/s]': 0, 'vy [km/s]': 7.6126, 'vz [km/s]': 0} # altitude 500 km
TargetCoords = {'lat [deg]': 0, 'lon [deg]': 0}
o = RadiometerModel.from_json(self.radio1_json)
data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=False)
self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 119917.0, 'ground pixel cross-track resolution [m]': 119917.02,
'swath-width [m]': 120565.56, 'sensitivity [K]': 17.94, 'incidence angle [deg]': 0.03, 'beam efficiency': np.nan})
o = RadiometerModel.from_json(self.radio2_json)
        TargetCoords = {'lat [deg]': 5, 'lon [deg]': 0} # target pixel somewhere along the cross-track direction
data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=False)
self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 161269.89, 'ground pixel cross-track resolution [m]': 260072.11, 'swath-width [m]': 3159185.93,
'sensitivity [K]': 0.2, 'incidence angle [deg]': 51.68, 'beam efficiency': 0.24})
o = RadiometerModel.from_json(self.radio3_json)
TargetCoords = {'lat [deg]': 0, 'lon [deg]': 2.62}
data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=False)
        # the calculated pixel resolutions are not accurate since the imaged pixel is not at a side-looking geometry
self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 40047.33, 'ground pixel cross-track resolution [m]': 47491.27,
'swath-width [m]': 306358.49, 'sensitivity [K]': 0.21, 'incidence angle [deg]': 32.51, 'beam efficiency': np.nan})
o = RadiometerModel.from_json(self.radio4_json)
TargetCoords = {'lat [deg]': -2.62, 'lon [deg]': 0}
data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=False)
self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 140198.56, 'ground pixel cross-track resolution [m]': 166301.75,
'swath-width [m]': 168745.48, 'sensitivity [K]': 0.23, 'incidence angle [deg]': 32.54, 'beam efficiency': np.nan})
def test_calc_data_metrics_2(self):
""" ``instru_look_angle_from_target_inc_angle`` flag is set to True."""
        epoch_JDUT1 = 2458543.06088 # 2019 Feb 28 13:27:40 is the time at which the ECEF and ECI frames approximately align, hence the ECEF-to-ECI rotation is identity. See <https://www.celnav.de/longterm.htm> for an online GMST calculator.
SpacecraftOrbitState = {'time [JDUT1]':epoch_JDUT1, 'x [km]': 6878.137, 'y [km]': 0, 'z [km]': 0, 'vx [km/s]': 0, 'vy [km/s]': 7.6126, 'vz [km/s]': 0} # altitude 500 km
TargetCoords = {'lat [deg]': 0, 'lon [deg]': 0.5}
o = RadiometerModel.from_json(self.radio1_json)
data_metrics = o.calc_data_metrics(sc_orbit_state=SpacecraftOrbitState, target_coords=TargetCoords, instru_look_angle_from_target_inc_angle=True)
self.assertEqual(data_metrics, {'ground pixel along-track resolution [m]': 120708.29, 'ground pixel cross-track resolution [m]': 121567.92,
'swath-width [m]': 122255.0, 'sensitivity [K]': 17.94, 'incidence angle [deg]': 6.82, 'beam efficiency': np.nan})
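

if __name__ == '__main__':
    # Standard unittest entry point so the file can be run directly; assumed
    # convenience addition, not required by the test runner.
    unittest.main()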
'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys2_json'], {}), '(self.bdr_sys2_json)\n', (21977, 21997), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((23070, 23128), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys1_json'], {}), '(self.bdr_sys1_json)\n', (23108, 23128), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((23851, 23909), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys2_json'], {}), '(self.bdr_sys2_json)\n', (23889, 23909), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((24704, 24765), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 1, 'phyTemp': 300}"], {}), "({'radiationEfficiency': 1, 'phyTemp': 300})\n", (24721, 24765), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((24857, 24915), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys1_json'], {}), '(self.bdr_sys1_json)\n', (24895, 24915), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((25189, 25247), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_json', 'BalancedDikeRadiometerSystem.from_json', (['self.bdr_sys2_json'], {}), '(self.bdr_sys2_json)\n', (25227, 25247), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((27032, 27089), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys1_json'], {}), '(self.nar_sys1_json)\n', (27069, 27089), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((28145, 28202), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys2_json'], {}), '(self.nar_sys2_json)\n', (28182, 28202), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((29268, 29325), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', 
(['self.nar_sys1_json'], {}), '(self.nar_sys1_json)\n', (29305, 29325), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((30037, 30094), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys2_json'], {}), '(self.nar_sys2_json)\n', (30074, 30094), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((30875, 30938), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'radiationEfficiency': 0.8, 'phyTemp': 300}"], {}), "({'radiationEfficiency': 0.8, 'phyTemp': 300})\n", (30892, 30938), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((30952, 31009), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys1_json'], {}), '(self.nar_sys1_json)\n', (30989, 31009), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((31201, 31258), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_json', 'NoiseAddingRadiometerSystem.from_json', (['self.nar_sys2_json'], {}), '(self.nar_sys2_json)\n', (31238, 31258), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((31588, 31623), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": 123}"""'], {}), '(\'{"@id": 123}\')\n', (31607, 31623), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((31767, 31804), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": "abc"}"""'], {}), '(\'{"@id": "abc"}\')\n', (31786, 31804), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((31950, 31975), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{}"""'], {}), "('{}')\n", (31969, 31975), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32147, 32182), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": 123}"""'], {}), '(\'{"@id": 123}\')\n', (32166, 32182), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, 
CrossTrackScan, ConicalScan\n'), ((32266, 32303), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": "abc"}"""'], {}), '(\'{"@id": "abc"}\')\n', (32285, 32303), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32389, 32414), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{}"""'], {}), "('{}')\n", (32408, 32414), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32552, 32589), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": "abc"}"""'], {}), '(\'{"@id": "abc"}\')\n', (32571, 32589), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((32619, 32729), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': 10}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': 10})\n", (32640, 32729), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((32756, 32822), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 30}"], {}), "({'shape': 'CIRCULAR', 'diameter': 30})\n", (32783, 32822), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((33115, 33209), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'RECTANGULAR', 'angleHeight': 10, 'angleWidth': 20}"], {}), "({'shape': 'RECTANGULAR', 'angleHeight': 10,\n 'angleWidth': 20})\n", (33142, 33209), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((33535, 33570), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": 123}"""'], {}), '(\'{"@id": 123}\')\n', (33554, 33570), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((33754, 33789), 'instrupy.radiometer_model.FixedScan.from_json', 'FixedScan.from_json', (['"""{"@id": 123}"""'], {}), '(\'{"@id": 123}\')\n', (33773, 33789), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((33821, 33887), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 30}"], {}), "({'shape': 'CIRCULAR', 'diameter': 30})\n", (33848, 33887), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((34674, 34768), 
'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}\')\n', (34698, 34768), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((35050, 35144), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}\')\n', (35074, 35144), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((35335, 35429), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}\')\n', (35359, 35429), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((35455, 35565), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': 10}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': 10})\n", (35476, 35565), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((35592, 35658), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 30}"], {}), "({'shape': 'CIRCULAR', 'diameter': 30})\n", (35619, 35658), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((35689, 35784), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'RECTANGULAR', 'angleHeight': 30, 'angleWidth': 150}"], {}), "({'shape': 'RECTANGULAR', 'angleHeight': 30,\n 'angleWidth': 150})\n", (35716, 35784), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((36022, 36116), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'RECTANGULAR', 'angleHeight': 15, 'angleWidth': 60}"], {}), "({'shape': 'RECTANGULAR', 'angleHeight': 15,\n 'angleWidth': 60})\n", (36049, 36116), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((36143, 36238), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'RECTANGULAR', 'angleHeight': 15, 'angleWidth': 180}"], {}), "({'shape': 'RECTANGULAR', 'angleHeight': 15,\n 'angleWidth': 180})\n", (36170, 36238), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((36513, 36607), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, 
"interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 1e-3}\')\n', (36537, 36607), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((37227, 37322), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 10e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 120, "interScanOverheadTime": 10e-3}\')\n', (37251, 37322), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((37515, 37608), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 20, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 20, "interScanOverheadTime": 1e-3}\')\n', (37539, 37608), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((37636, 37701), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 1}"], {}), "({'shape': 'CIRCULAR', 'diameter': 1})\n", (37663, 37701), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((38140, 38233), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"@id": 123, "scanWidth": 60, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": 123, "scanWidth": 60, "interScanOverheadTime": 1e-3}\')\n', (38164, 38233), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((38562, 38686), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}\'\n )\n', (38583, 38686), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((39009, 39133), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}\'\n )\n', (39030, 39133), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((39345, 39469), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, 
"clockAngleRange": 60, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 60, "interScanOverheadTime": 1e-3}\'\n )\n', (39366, 39469), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((39497, 39607), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': 10}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': 10})\n", (39518, 39607), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((39634, 39700), 'instrupy.util.SphericalGeometry.from_dict', 'SphericalGeometry.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 30}"], {}), "({'shape': 'CIRCULAR', 'diameter': 30})\n", (39661, 39700), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((40010, 40135), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}\'\n )\n', (40031, 40135), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((40742, 40820), 'instrupy.radiometer_model.CrossTrackScan.from_json', 'CrossTrackScan.from_json', (['"""{"scanWidth": 120, "interScanOverheadTime": 10e-3}"""'], {}), '(\'{"scanWidth": 120, "interScanOverheadTime": 10e-3}\')\n', (40766, 40820), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((41018, 41143), 'instrupy.radiometer_model.ConicalScan.from_json', 'ConicalScan.from_json', (['"""{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}"""'], {}), '(\n \'{"@id": "abc", "offNadirAngle": 30, "clockAngleRange": 120, "interScanOverheadTime": 1e-3}\'\n )\n', (41039, 41143), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((46556, 46599), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio1_json'], {}), '(self.radio1_json)\n', (46581, 46599), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((47916, 47959), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio2_json'], {}), '(self.radio2_json)\n', (47941, 47959), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, 
BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((49338, 49381), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio3_json'], {}), '(self.radio3_json)\n', (49363, 49381), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((50915, 50958), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio4_json'], {}), '(self.radio4_json)\n', (50940, 50958), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((52445, 52488), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio1_json'], {}), '(self.radio1_json)\n', (52470, 52488), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((54435, 54478), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio2_json'], {}), '(self.radio2_json)\n', (54460, 54478), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((56790, 56833), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio3_json'], {}), '(self.radio3_json)\n', (56815, 56833), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((58848, 58891), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio4_json'], {}), '(self.radio4_json)\n', (58873, 58891), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((61463, 61506), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio1_json'], {}), '(self.radio1_json)\n', (61488, 61506), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((61987, 62030), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio2_json'], {}), '(self.radio2_json)\n', (62012, 62030), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((62621, 62664), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', 
(['self.radio3_json'], {}), '(self.radio3_json)\n', (62646, 62664), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((63329, 63372), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio4_json'], {}), '(self.radio4_json)\n', (63354, 63372), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((64525, 64568), 'instrupy.radiometer_model.RadiometerModel.from_json', 'RadiometerModel.from_json', (['self.radio1_json'], {}), '(self.radio1_json)\n', (64550, 64568), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((6013, 6103), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_integration_time', 'TotalPowerRadiometerSystem.compute_integration_time', ([], {'td': '(1.5)', 'integration_time_spec': '(0.5)'}), '(td=1.5,\n integration_time_spec=0.5)\n', (6064, 6103), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((6131, 6219), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_integration_time', 'TotalPowerRadiometerSystem.compute_integration_time', ([], {'td': '(1.5)', 'integration_time_spec': '(2)'}), '(td=1.5,\n integration_time_spec=2)\n', (6182, 6219), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((6247, 6338), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.compute_integration_time', 'TotalPowerRadiometerSystem.compute_integration_time', ([], {'td': '(1.5)', 'integration_time_spec': 'None'}), '(td=1.5,\n integration_time_spec=None)\n', (6298, 6338), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((33013, 33081), 'instrupy.util.ViewGeometry', 'ViewGeometry', ([], {'orien': 'instru_orientation', 'sph_geom': 'instru_fov_sph_geom'}), '(orien=instru_orientation, sph_geom=instru_fov_sph_geom)\n', (33025, 33081), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((33396, 33464), 'instrupy.util.ViewGeometry', 'ViewGeometry', ([], {'orien': 'instru_orientation', 'sph_geom': 'instru_fov_sph_geom'}), '(orien=instru_orientation, sph_geom=instru_fov_sph_geom)\n', (33408, 33464), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((35920, 35988), 'instrupy.util.ViewGeometry', 'ViewGeometry', ([], {'orien': 'instru_orientation', 'sph_geom': 'instru_fov_sph_geom'}), '(orien=instru_orientation, 
sph_geom=instru_fov_sph_geom)\n', (35932, 35988), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((36374, 36442), 'instrupy.util.ViewGeometry', 'ViewGeometry', ([], {'orien': 'instru_orientation', 'sph_geom': 'instru_fov_sph_geom'}), '(orien=instru_orientation, sph_geom=instru_fov_sph_geom)\n', (36386, 36442), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((46880, 46977), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'REF_FRAME_ALIGNED'}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'REF_FRAME_ALIGNED'})\n", (46901, 46977), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((47111, 47258), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 1, 'apertureExcitationProfile': 'UNIFORM',\n 'radiationEfficiency': 0.8, 'phyTemp': 300}"], {}), "({'shape': 'CIRCULAR', 'diameter': 1,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 0.8,\n 'phyTemp': 300})\n", (47128, 47258), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((47287, 47665), 'instrupy.radiometer_model.TotalPowerRadiometerSystem.from_dict', 'TotalPowerRadiometerSystem.from_dict', (["{'tlLoss': 0.5, 'tlPhyTemp': 290, 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200,\n 'rfAmpGainVariation': 10, 'mixerGain': 23, 'mixerInpNoiseTemp': 1200,\n 'mixerGainVariation': 2, 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100,\n 'ifAmpGainVariation': 10, 'integratorVoltageGain': 1, 'integrationTime':\n 0.1, 'bandwidth': 10000000.0}"], {}), "({'tlLoss': 0.5, 'tlPhyTemp': 290,\n 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200, 'rfAmpGainVariation': 10,\n 'mixerGain': 23, 'mixerInpNoiseTemp': 1200, 'mixerGainVariation': 2,\n 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100, 'ifAmpGainVariation': 10,\n 'integratorVoltageGain': 1, 'integrationTime': 0.1, 'bandwidth': \n 10000000.0})\n", (47323, 47665), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((47824, 47847), 'instrupy.radiometer_model.FixedScan.from_dict', 'FixedScan.from_dict', (['{}'], {}), '({})\n', (47843, 47847), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((48235, 48332), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'REF_FRAME_ALIGNED'}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'REF_FRAME_ALIGNED'})\n", (48256, 48332), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((48463, 48624), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'shape': 'RECTANGULAR', 'height': 1, 'width': 1,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 0.75,\n 'phyTemp': 300}"], {}), "({'shape': 'RECTANGULAR', 'height': 1, 'width': 1,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 0.75,\n 
'phyTemp': 300})\n", (48480, 48624), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((48652, 48950), 'instrupy.radiometer_model.UnbalancedDikeRadiometerSystem.from_dict', 'UnbalancedDikeRadiometerSystem.from_dict', (["{'predetectionGain': 83, 'predetectionInpNoiseTemp': 700,\n 'predetectionGainVariation': 1995262.314968883, 'integrationTime': 1,\n 'bandwidth': 100000000.0, 'referenceTemperature': 300,\n 'integratorVoltageGain': 1, '@type': 'UNBALANCED_DICKE'}"], {}), "({'predetectionGain': 83,\n 'predetectionInpNoiseTemp': 700, 'predetectionGainVariation': \n 1995262.314968883, 'integrationTime': 1, 'bandwidth': 100000000.0,\n 'referenceTemperature': 300, 'integratorVoltageGain': 1, '@type':\n 'UNBALANCED_DICKE'})\n", (48692, 48950), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((49185, 49261), 'instrupy.radiometer_model.CrossTrackScan.from_dict', 'CrossTrackScan.from_dict', (["{'scanWidth': 120, 'interScanOverheadTime': 0.001}"], {}), "({'scanWidth': 120, 'interScanOverheadTime': 0.001})\n", (49209, 49261), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((49651, 49748), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'REF_FRAME_ALIGNED'}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'REF_FRAME_ALIGNED'})\n", (49672, 49748), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((49882, 50029), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 3.5, 'apertureExcitationProfile':\n 'UNIFORM', 'radiationEfficiency': 1, 'phyTemp': 300}"], {}), "({'shape': 'CIRCULAR', 'diameter': 3.5,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 1,\n 'phyTemp': 300})\n", (49899, 50029), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((50058, 50504), 'instrupy.radiometer_model.BalancedDikeRadiometerSystem.from_dict', 'BalancedDikeRadiometerSystem.from_dict', (["{'tlLoss': 0.5, 'tlPhyTemp': 290, 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200,\n 'rfAmpGainVariation': 10, 'mixerGain': 23, 'mixerInpNoiseTemp': 1200,\n 'mixerGainVariation': 2, 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100,\n 'ifAmpGainVariation': 10, 'dickeSwitchOutputNoiseTemperature': 90,\n 'integratorVoltageGain': 1, 'integrationTime': 1, 'bandwidth': \n 100000000.0, '@type': 'BALANCED_DICKE'}"], {}), "({'tlLoss': 0.5, 'tlPhyTemp': 290,\n 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200, 'rfAmpGainVariation': 10,\n 'mixerGain': 23, 'mixerInpNoiseTemp': 1200, 'mixerGainVariation': 2,\n 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100, 'ifAmpGainVariation': 10,\n 'dickeSwitchOutputNoiseTemperature': 90, 'integratorVoltageGain': 1,\n 'integrationTime': 1, 'bandwidth': 100000000.0, '@type': 'BALANCED_DICKE'})\n", (50096, 50504), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, 
NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((50748, 50851), 'instrupy.radiometer_model.ConicalScan.from_dict', 'ConicalScan.from_dict', (["{'offNadirAngle': 30, 'clockAngleRange': 60, 'interScanOverheadTime': 0.001}"], {}), "({'offNadirAngle': 30, 'clockAngleRange': 60,\n 'interScanOverheadTime': 0.001})\n", (50769, 50851), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((51228, 51339), 'instrupy.util.Orientation.from_dict', 'Orientation.from_dict', (["{'referenceFrame': 'SC_BODY_FIXED', 'convention': 'SIDE_LOOK',\n 'sideLookAngle': -30}"], {}), "({'referenceFrame': 'SC_BODY_FIXED', 'convention':\n 'SIDE_LOOK', 'sideLookAngle': -30})\n", (51249, 51339), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((51469, 51614), 'instrupy.util.Antenna.from_dict', 'Antenna.from_dict', (["{'shape': 'CIRCULAR', 'diameter': 1, 'apertureExcitationProfile': 'UNIFORM',\n 'radiationEfficiency': 1, 'phyTemp': 300}"], {}), "({'shape': 'CIRCULAR', 'diameter': 1,\n 'apertureExcitationProfile': 'UNIFORM', 'radiationEfficiency': 1,\n 'phyTemp': 300})\n", (51486, 51614), False, 'from instrupy.util import Antenna, Orientation, SphericalGeometry, ViewGeometry, FileUtilityFunctions, Maneuver\n'), ((51643, 52077), 'instrupy.radiometer_model.NoiseAddingRadiometerSystem.from_dict', 'NoiseAddingRadiometerSystem.from_dict', (["{'tlLoss': 0.5, 'tlPhyTemp': 290, 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200,\n 'rfAmpGainVariation': 10, 'mixerGain': 23, 'mixerInpNoiseTemp': 1200,\n 'mixerGainVariation': 2, 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100,\n 'ifAmpGainVariation': 10, 'excessNoiseTemperature': 1000,\n 'integratorVoltageGain': 1, 'integrationTime': 1, 'bandwidth': \n 100000000.0, '@type': 'NOISE_ADDING'}"], {}), "({'tlLoss': 0.5, 'tlPhyTemp': 290,\n 'rfAmpGain': 30, 'rfAmpInpNoiseTemp': 200, 'rfAmpGainVariation': 10,\n 'mixerGain': 23, 'mixerInpNoiseTemp': 1200, 'mixerGainVariation': 2,\n 'ifAmpGain': 30, 'ifAmpInputNoiseTemp': 100, 'ifAmpGainVariation': 10,\n 'excessNoiseTemperature': 1000, 'integratorVoltageGain': 1,\n 'integrationTime': 1, 'bandwidth': 100000000.0, '@type': 'NOISE_ADDING'})\n", (51680, 52077), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((52321, 52344), 'instrupy.radiometer_model.FixedScan.from_dict', 'FixedScan.from_dict', (['{}'], {}), '({})\n', (52340, 52344), False, 'from instrupy.radiometer_model import RadiometerModel, SystemType, TotalPowerRadiometerSystem, UnbalancedDikeRadiometerSystem, BalancedDikeRadiometerSystem, NoiseAddingRadiometerSystem, ScanTech, FixedScan, CrossTrackScan, ConicalScan\n'), ((34471, 34485), 'numpy.deg2rad', 'np.deg2rad', (['(15)'], {}), '(15)\n', (34481, 34485), True, 'import numpy as np\n')]
|
"""Main"""
import sys
sys.stderr = open("error.log", "w")
import time
# import msvcrt
import os
import subprocess
import cv2
import sqlite3
import numpy as np
from pyzbar import pyzbar
from easytello.tello import Tello
from httprequest import HTTPRequest
from easytello.tello_control import ControlCommand as CoCo
from easytello.tello_control import TelloControl
def main():
"""
    Decode QR codes from the video stream
    while sending HTTP requests to the Django server.
"""
    #|Parameters
    #|--Fixed-route mode
fixed_mode = True
max_height = 80 # [cm]
max_LR = 100 # [cm]
    height_step = 1 # number of height steps
    #|--HTTP requests
request_enable = True
request_url = "http://127.0.0.1/qrcodes/jsontest"
    # # Start the server
# command = [
# "python",
# "./TelloRecords/records/manage.py",
# "runserver",
# "0.0.0.0:80"
# ]
# subprocess.Popen(command)
# time.sleep(10)
    # # Open the browser
# os.system("start http://127.0.0.1/qrcodes/")
    arg = input("Enter a code: ")
drone = Tello()
drone.streamon()
controller = TelloControl()
controller.append(CoCo(lambda : drone.set_speed(10)))
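    # Note: each ControlCommand (CoCo) pairs a drone action with the
    # displacement vector it should produce; the controller presumably
    # integrates these to estimate the position later read back via
    # drone.get_position().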
    # Fetch the target position from the DB (assumed for the retrieval case)
if len(arg) > 0:
fixed_mode = False
dbname = './TelloRecords/records/db.sqlite3'
conn = sqlite3.connect(dbname)
cur = conn.cursor()
        try:
            # Parameterized query to avoid SQL injection via the entered code.
            sql = "select pos_x, pos_y, pos_z from qrcodes_qr where qr_code = ?"
            cur.execute(sql, (arg,))
            target_pos = cur.fetchall()
        except Exception as ex:
            print('SQL ERROR: {}'.format(ex))
            target_pos = []
        print(target_pos)
        if target_pos and len(target_pos[0]) == 3:
            controller.append(CoCo(drone.takeoff))
            target_x: int = target_pos[0][0]
            target_y: int = target_pos[0][1]
            target_z: int = target_pos[0][2]
            # The Tello ignores moves shorter than 20 cm, so only queue a
            # command when the distance is large enough.
            # Outbound leg: x (right/left), y (up/down), z (forward/back).
            move_flg: bool = np.abs(target_x) >= 20
            pls_flg: bool = target_x > 0
            if move_flg and pls_flg:
                controller.append(CoCo(lambda : drone.right(target_x), np.array([target_x, 0, 0])))
            if move_flg and not pls_flg:
                controller.append(CoCo(lambda : drone.left(-target_x), np.array([target_x, 0, 0])))
            move_flg = np.abs(target_y) >= 20
            pls_flg = target_y > 0
            if move_flg and pls_flg:
                controller.append(CoCo(lambda : drone.up(target_y), np.array([0, target_y, 0])))
            if move_flg and not pls_flg:
                controller.append(CoCo(lambda : drone.down(-target_y), np.array([0, target_y, 0])))
            move_flg = np.abs(target_z) >= 20
            pls_flg = target_z > 0
            if move_flg and pls_flg:
                controller.append(CoCo(lambda : drone.forward(target_z), np.array([0, 0, target_z])))
            if move_flg and not pls_flg:
                controller.append(CoCo(lambda : drone.back(-target_z), np.array([0, 0, target_z])))
            # Return leg: undo each move in reverse order (z, y, x), then land.
            move_flg = np.abs(target_z) >= 20
            pls_flg = target_z > 0
            if move_flg and pls_flg:
                controller.append(CoCo(lambda : drone.back(target_z), np.array([0, 0, -target_z])))
            if move_flg and not pls_flg:
                controller.append(CoCo(lambda : drone.forward(-target_z), np.array([0, 0, -target_z])))
            move_flg = np.abs(target_y) >= 20
            pls_flg = target_y > 0
            if move_flg and pls_flg:
                controller.append(CoCo(lambda : drone.down(target_y), np.array([0, -target_y, 0])))
            if move_flg and not pls_flg:
                controller.append(CoCo(lambda : drone.up(-target_y), np.array([0, -target_y, 0])))
            move_flg = np.abs(target_x) >= 20
            pls_flg = target_x > 0
            if move_flg and pls_flg:
                controller.append(CoCo(lambda : drone.left(target_x), np.array([-target_x, 0, 0])))
            if move_flg and not pls_flg:
                controller.append(CoCo(lambda : drone.right(-target_x), np.array([-target_x, 0, 0])))
            controller.append(CoCo(drone.land))
cur.close()
conn.close()
if fixed_mode:
# controller.append(CoCo(drone.takeoff))
        # # Lateral sweep
# controller.append(CoCo(lambda : drone.up(max_height), np.array([0, max_height, 0])))
# for i in range(height_step):
# if i % 2 == 0:
# # drone.left(max_LR)
# controller.append(CoCo(lambda : drone.left(max_LR), np.array([-max_LR, 0, 0])))
# else:
# controller.append(CoCo(lambda : drone.right(max_LR), np.array([max_LR, 0, 0])))
# controller.append(CoCo(lambda : drone.down(max_height/height_step), np.array([0, (int)(-max_height/height_step), 0])))
# controller.append(CoCo(drone.land))
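        # A hypothetical commands.txt illustrating the format parsed below
        # (one command per line, distances in cm):
        #   takeoff
        #   up 50
        #   right 100
        #   forward 60
        #   land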
        with open("commands.txt") as f:
            for line in f:
                if line.startswith("takeoff"):
                    controller.append(CoCo(drone.takeoff))
                if line.startswith("land"):
                    controller.append(CoCo(drone.land))
                # Bind the parsed distance as a default argument so each
                # lambda keeps its own value; a plain closure would see only
                # the last value assigned in the loop.
                if line.startswith("up"):
                    distance = int(line.replace("up ", ""))
                    controller.append(CoCo(lambda distance=distance: drone.up(distance), np.array([0,distance,0])))
                if line.startswith("down"):
                    distance1 = int(line.replace("down ", ""))
                    controller.append(CoCo(lambda distance1=distance1: drone.down(distance1), np.array([0,-distance1,0])))
                if line.startswith("left"):
                    distance2 = int(line.replace("left ", ""))
                    controller.append(CoCo(lambda distance2=distance2: drone.left(distance2), np.array([-distance2,0,0])))
                if line.startswith("right"):
                    distance3 = int(line.replace("right ", ""))
                    controller.append(CoCo(lambda distance3=distance3: drone.right(distance3), np.array([distance3,0,0])))
                if line.startswith("forward"):
                    distance4 = int(line.replace("forward ", ""))
                    controller.append(CoCo(lambda distance4=distance4: drone.forward(distance4), np.array([0,0,distance4])))
                if line.startswith("back"):
                    distance5 = int(line.replace("back ", ""))
                    controller.append(CoCo(lambda distance5=distance5: drone.back(distance5), np.array([0,0,-distance5])))
drone.set_controller(controller)
    # Wait N seconds after turning streaming on
time.sleep(5)
controller.start()
req = None
if request_enable:
req = HTTPRequest(request_url)
try:
while True:
frame = drone.read()
#cv2.imshow('<NAME>', frame)
            # Decode QR codes
if frame is not None:
decoded_objs = pyzbar.decode(frame)
if decoded_objs != []:
                    # Show the first decoded result
                    str_dec_obj = decoded_objs[0][0].decode('utf-8', 'ignore')
                    print(f'QR code: {str_dec_obj}')
                    # Drone position at decode time
                    pos = drone.get_position()
                    # Send over HTTP
                    if request_enable:
                        req.send_qr(str_dec_obj, pos)
            # # Keyboard input
            # if msvcrt.kbhit():
            #     kb = msvcrt.getch()
            #     key = kb.decode()
            #     if key == 't': # takeoff
            #         drone.takeoff()
            #     elif key == 'l': # land
            #         drone.land()
            #     elif key == 'w': # forward
            #         drone.forward(50)
            #     elif key == 's': # backward
            #         drone.back(50)
            #     elif key == 'a': # move left
            #         drone.left(50)
            #     elif key == 'd': # move right
            #         drone.right(50)
            #     elif key == 'q': # rotate counterclockwise
            #         drone.ccw(50)
            #     elif key == 'e': # rotate clockwise
            #         drone.cw(50)
            #     elif key == 'r': # ascend
            #         drone.up(50)
            #     elif key == 'f': # descend
            #         drone.down(50)
            #     elif key == 'p': # stop at any time
            #         #drone.send_command('stop')
            #         drone.send_command('emergency')
            # Wait between frames (fixed interval for now)
time.sleep(0.05)
except KeyboardInterrupt:
pass
drone.streamoff()
if __name__ == "__main__":
main()
|
[
"numpy.abs",
"easytello.tello_control.ControlCommand",
"httprequest.HTTPRequest",
"easytello.tello.Tello",
"pyzbar.pyzbar.decode",
"time.sleep",
"sqlite3.connect",
"numpy.array",
"easytello.tello_control.TelloControl"
] |
[((1007, 1014), 'easytello.tello.Tello', 'Tello', ([], {}), '()\n', (1012, 1014), False, 'from easytello.tello import Tello\n'), ((1058, 1072), 'easytello.tello_control.TelloControl', 'TelloControl', ([], {}), '()\n', (1070, 1072), False, 'from easytello.tello_control import TelloControl\n'), ((6642, 6655), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6652, 6655), False, 'import time\n'), ((1268, 1291), 'sqlite3.connect', 'sqlite3.connect', (['dbname'], {}), '(dbname)\n', (1283, 1291), False, 'import sqlite3\n'), ((6733, 6757), 'httprequest.HTTPRequest', 'HTTPRequest', (['request_url'], {}), '(request_url)\n', (6744, 6757), False, 'from httprequest import HTTPRequest\n'), ((8528, 8544), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (8538, 8544), False, 'import time\n'), ((6950, 6970), 'pyzbar.pyzbar.decode', 'pyzbar.decode', (['frame'], {}), '(frame)\n', (6963, 6970), False, 'from pyzbar import pyzbar\n'), ((1720, 1739), 'easytello.tello_control.ControlCommand', 'CoCo', (['drone.takeoff'], {}), '(drone.takeoff)\n', (1724, 1739), True, 'from easytello.tello_control import ControlCommand as CoCo\n'), ((1939, 1955), 'numpy.abs', 'np.abs', (['target_x'], {}), '(target_x)\n', (1945, 1955), True, 'import numpy as np\n'), ((2329, 2345), 'numpy.abs', 'np.abs', (['target_y'], {}), '(target_y)\n', (2335, 2345), True, 'import numpy as np\n'), ((2710, 2726), 'numpy.abs', 'np.abs', (['target_z'], {}), '(target_z)\n', (2716, 2726), True, 'import numpy as np\n'), ((3113, 3129), 'numpy.abs', 'np.abs', (['target_z'], {}), '(target_z)\n', (3119, 3129), True, 'import numpy as np\n'), ((3501, 3517), 'numpy.abs', 'np.abs', (['target_y'], {}), '(target_y)\n', (3507, 3517), True, 'import numpy as np\n'), ((3890, 3906), 'numpy.abs', 'np.abs', (['target_x'], {}), '(target_x)\n', (3896, 3906), True, 'import numpy as np\n'), ((4290, 4306), 'easytello.tello_control.ControlCommand', 'CoCo', (['drone.land'], {}), '(drone.land)\n', (4294, 4306), True, 'from easytello.tello_control import ControlCommand as CoCo\n'), ((5193, 5212), 'easytello.tello_control.ControlCommand', 'CoCo', (['drone.takeoff'], {}), '(drone.takeoff)\n', (5197, 5212), True, 'from easytello.tello_control import ControlCommand as CoCo\n'), ((5296, 5312), 'easytello.tello_control.ControlCommand', 'CoCo', (['drone.land'], {}), '(drone.land)\n', (5300, 5312), True, 'from easytello.tello_control import ControlCommand as CoCo\n'), ((2121, 2147), 'numpy.array', 'np.array', (['[target_x, 0, 0]'], {}), '([target_x, 0, 0])\n', (2129, 2147), True, 'import numpy as np\n'), ((2273, 2299), 'numpy.array', 'np.array', (['[target_x, 0, 0]'], {}), '([target_x, 0, 0])\n', (2281, 2299), True, 'import numpy as np\n'), ((2502, 2528), 'numpy.array', 'np.array', (['[0, target_y, 0]'], {}), '([0, target_y, 0])\n', (2510, 2528), True, 'import numpy as np\n'), ((2654, 2680), 'numpy.array', 'np.array', (['[0, target_y, 0]'], {}), '([0, target_y, 0])\n', (2662, 2680), True, 'import numpy as np\n'), ((2888, 2914), 'numpy.array', 'np.array', (['[0, 0, target_z]'], {}), '([0, 0, target_z])\n', (2896, 2914), True, 'import numpy as np\n'), ((3040, 3066), 'numpy.array', 'np.array', (['[0, 0, target_z]'], {}), '([0, 0, target_z])\n', (3048, 3066), True, 'import numpy as np\n'), ((3288, 3315), 'numpy.array', 'np.array', (['[0, 0, -target_z]'], {}), '([0, 0, -target_z])\n', (3296, 3315), True, 'import numpy as np\n'), ((3444, 3471), 'numpy.array', 'np.array', (['[0, 0, -target_z]'], {}), '([0, 0, -target_z])\n', (3452, 3471), True, 'import numpy as np\n'), ((3676, 3703), 'numpy.array', 'np.array', (['[0, -target_y, 0]'], {}), '([0, -target_y, 0])\n', (3684, 3703), True, 'import numpy as np\n'), ((3827, 3854), 'numpy.array', 'np.array', (['[0, -target_y, 0]'], {}), '([0, -target_y, 0])\n', (3835, 3854), True, 'import numpy as np\n'), ((4071, 4098), 'numpy.array', 'np.array', (['[-target_x, 0, 0]'], {}), '([-target_x, 0, 0])\n', (4079, 4098), True, 'import numpy as np\n'), ((4225, 4252), 'numpy.array', 'np.array', (['[-target_x, 0, 0]'], {}), '([-target_x, 0, 0])\n', (4233, 4252), True, 'import numpy as np\n'), ((5487, 5513), 'numpy.array', 'np.array', (['[0, distance, 0]'], {}), '([0, distance, 0])\n', (5495, 5513), True, 'import numpy as np\n'), ((5695, 5723), 'numpy.array', 'np.array', (['[0, -distance1, 0]'], {}), '([0, -distance1, 0])\n', (5703, 5723), True, 'import numpy as np\n'), ((5905, 5933), 'numpy.array', 'np.array', (['[-distance2, 0, 0]'], {}), '([-distance2, 0, 0])\n', (5913, 5933), True, 'import numpy as np\n'), ((6118, 6145), 'numpy.array', 'np.array', (['[distance3, 0, 0]'], {}), '([distance3, 0, 0])\n', (6126, 6145), True, 'import numpy as np\n'), ((6336, 6363), 'numpy.array', 'np.array', (['[0, 0, distance4]'], {}), '([0, 0, distance4])\n', (6344, 6363), True, 'import numpy as np\n'), ((6545, 6573), 'numpy.array', 'np.array', (['[0, 0, -distance5]'], {}), '([0, 0, -distance5])\n', (6553, 6573), True, 'import numpy as np\n')]
|
import numpy as np
def mean_squared_error(observed_value: np.ndarray, predicted_value: np.ndarray, axis: tuple = None) -> np.ndarray:
if axis is None:
return np.mean(np.square(np.subtract(observed_value, predicted_value)))
else:
return np.mean(np.square(np.subtract(observed_value, predicted_value)), axis=axis, keepdims=True)
def mean_squared_error_derivative(observed_value: np.ndarray, predicted_value: np.ndarray, axis: tuple = None) -> np.ndarray:
    # NB: this is the derivative with respect to observed_value; the gradient
    # with respect to predicted_value has the opposite sign.
    if axis is None:
        return np.multiply(np.mean(np.subtract(observed_value, predicted_value)), 2.0)
    else:
        return np.multiply(np.mean(np.subtract(observed_value, predicted_value), axis=axis, keepdims=True), 2.0)
class LossFunctions:
MSE = mean_squared_error
class LossFunctionDerivatives:
MSE_DERIVATIVE = mean_squared_error_derivative
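# A minimal self-check sketch; the example arrays below are illustrative
# assumptions, not part of the original module.
if __name__ == '__main__':
    _obs = np.array([[1.0, 2.0], [3.0, 4.0]])
    _pred = np.array([[1.5, 2.0], [2.0, 4.0]])
    # Squared errors are 0.25, 0, 1, 0, so the global mean is 0.3125.
    print(mean_squared_error(_obs, _pred))             # scalar MSE
    print(mean_squared_error(_obs, _pred, axis=(1,)))  # per-row MSE, dims kept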
|
[
"numpy.subtract"
] |
[((213, 257), 'numpy.subtract', 'np.subtract', (['observed_value', 'predicted_value'], {}), '(observed_value, predicted_value)\n', (224, 257), True, 'import numpy as np\n'), ((303, 347), 'numpy.subtract', 'np.subtract', (['observed_value', 'predicted_value'], {}), '(observed_value, predicted_value)\n', (314, 347), True, 'import numpy as np\n'), ((560, 604), 'numpy.subtract', 'np.subtract', (['observed_value', 'predicted_value'], {}), '(observed_value, predicted_value)\n', (571, 604), True, 'import numpy as np\n'), ((657, 701), 'numpy.subtract', 'np.subtract', (['observed_value', 'predicted_value'], {}), '(observed_value, predicted_value)\n', (668, 701), True, 'import numpy as np\n')]
|
import numpy as np
from PIL import Image
from PIL import ImageFilter
import matplotlib.pyplot as plt
import os
from itertools import permutations
from IPython.display import clear_output
from copy import deepcopy
from collections import namedtuple
# ---------------- Image utilities ----------------
def read_img(filename):
    '''Opens an image file and returns it as a PIL Image object.'''
return Image.open(filename)
def split_img(im_shuffled, nb_lines, nb_cols, margin=(0, 0)):
'''Returns a dictionary of all the pieces of the puzzle.
Use optional argument margin in order to have more smooth cuts.
Args:
- im_suffled (Image object)
- nb_lines (int)
- nb_cols (int)
- margin ((x_margin, y_margin))
Returns:
- cropped (dict)
'''
w, h = im_shuffled.size # w, h = width, height
# For one piece of the puzzle
w_piece = (w / nb_cols)
h_piece = (h / nb_lines)
cropped = {}
x_margin, y_margin = margin
    # Keys are (x, y): i indexes columns (x) and j indexes lines (y), matching
    # how the rest of the module addresses cropped[(i, j)].
    for i in range(nb_cols):
        for j in range(nb_lines):
left = i * w_piece + x_margin / 2
top = j * h_piece + y_margin / 2
right = (i + 1) * w_piece - x_margin / 2
bottom = (j + 1) * h_piece - y_margin / 2
cropped[(i,j)] = im_shuffled.crop((left, top, right, bottom))
return cropped
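# Hedged demo of the keying above (the 120x80 canvas is an assumed example):
def _demo_split_img():
    im = Image.new('RGB', (120, 80))
    pieces = split_img(im, nb_lines=2, nb_cols=3)
    # Keys are (x, y) with x in [0, nb_cols) and y in [0, nb_lines).
    assert sorted(pieces) == [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
    assert pieces[(0, 0)].size == (40, 40)  # (w / nb_cols, h / nb_lines)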
def display_image(img, nb_lines, nb_cols, title='', figsize=(5,6)):
'''Show the image with custom ticks for both x and y axis, making piece
identification easier.
Args:
- img (Image object)
- nb_lines (int)
- nb_cols (int)
Returns:
- None
'''
plt.figure(figsize=figsize)
    xticks_location = (img.width / nb_cols) / 2 + np.linspace(0, img.width, nb_cols, endpoint=False)
    yticks_location = (img.height / nb_lines) / 2 + np.linspace(0, img.height, nb_lines, endpoint=False)
plt.xticks(xticks_location, range(nb_cols))
plt.yticks(yticks_location, range(nb_lines))
if title:
plt.title(title)
plt.imshow(img)
return
def display_cropped(cropped, nb_lines, nb_cols, title='', figsize=(5,6)):
'''Show the image with custom ticks for both x and y axis, making piece
identification easier.
Args:
- cropped ({key: image})
- nb_lines (int)
- nb_cols (int)
Returns:
- None
'''
img = cropped_to_img(cropped, nb_lines, nb_cols)
    display_image(img, nb_lines, nb_cols, title=title, figsize=figsize)
return
def save_cropped(cropped):
'''Save as file all the pieces of the puzzle in the cropped directory.
The files are named accordingly to 'i-j.jpg' where i and j are the coordinates
of the pieces of the puzzle in the PIL coods system.
Args:
- cropped ({key: image})
Returns:
- None
'''
for (i,j), im in cropped.items():
filename = f'{i}-{j}.jpg'
filepath = os.path.join('cropped', filename)
im.save(filepath)
print('Images successfully saved.')
return
# ---------------- Operations on images ----------------
def get_current_permutations(cropped):
''' Generator that yields a dictionary giving the mapping from the current
configuration to the shuffled puzzle.
Args:
- cropped ({key: image})
Returns:
- generator object
'''
list_keys = list(cropped.keys())
for config in permutations(list_keys):
map_config = dict(zip(list_keys, config))
yield map_config
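# Scale check (worked arithmetic, not from the original code): a 3x3 puzzle has
# 9 pieces and hence 9! = 362880 permutations, while a 4x4 puzzle already has
# 16! ~ 2.09e13 -- the reason the brute-force solver below is flagged VERY SLOW.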
def grad_x(im1, im2):
'''Return the discrete horizontal gradient. im2 must be to the right of im1.
Args:
- im1 (Image object)
- im2 (Image object)
Returns:
- grad_x_val (float)
NB: numpy and PIL don't share the same coordinate system! '''
    ## Conversion from Image object into numpy arrays.
    # Cast to a signed type: np.array(im) yields uint8, where differences
    # such as 0 - 10 would wrap around to 246 and corrupt the gradient.
    arr1 = np.array(im1, dtype=np.int64)
    arr2 = np.array(im2, dtype=np.int64)
min_x = min(arr1.shape[0], arr2.shape[0])
min_y = min(arr1.shape[1], arr2.shape[1])
arr1 = arr1[:min_x,:min_y,:]
arr2 = arr2[:min_x,:min_y,:]
## Computation of the horizontal gradient at the frontier
return np.sum(np.square(arr1[-1,:,:] - arr2[0,:,:]))
def grad_y(im1, im2):
    '''Return the discrete vertical gradient. im2 must be below im1.
Args:
- im1 (Image object)
- im2 (Image object)
Returns:
- grad_y_val (float)
NB: numpy and PIL don't share the same coordinate system! '''
    ## Conversion into numpy arrays (signed, to avoid uint8 wrap-around).
    arr1 = np.array(im1, dtype=np.int64)
    arr2 = np.array(im2, dtype=np.int64)
min_x = min(arr1.shape[0], arr2.shape[0])
min_y = min(arr1.shape[1], arr2.shape[1])
arr1 = arr1[:min_x,:min_y,:]
arr2 = arr2[:min_x,:min_y,:]
## Computation of the vertical gradient at the frontier
return np.sum(np.square(arr1[:,0,:] - arr2[:,-1,:]))
def mean_grad(cropped, nb_lines, nb_cols):
'''Returns the mean of the gradient both horizontally and vertically.'''
res = 0
for j in range(nb_lines):
for i in range(nb_cols-1):
res += grad_x(cropped[(i,j)], cropped[(i+1,j)])
for i in range(nb_cols):
for j in range(nb_lines-1):
res += grad_y(cropped[(i,j)], cropped[(i,j+1)])
return res / 2
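# Hedged sanity check for the gradients (the solid-colour tiles are assumed examples):
def _demo_grad():
    a = Image.new('RGB', (4, 4), (0, 0, 0))
    b = Image.new('RGB', (4, 4), (10, 0, 0))
    # The shared frontier holds 4 pixels differing by 10 in one channel,
    # so the sum of squared differences is 4 * 10**2 = 400.
    assert grad_x(a, b) == 400
    assert grad_y(a, b) == 400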
def read_cropped_im(i, j):
''' Returns the given image loaded from the cropped folder
as an Image object.'''
im = Image.open(os.path.join('cropped', f'{i}-{j}.jpg'))
return im
def get_concat_h(im1, im2):
''' Returns the horizontal concatenation of im1 and im2.'''
dst = Image.new('RGB', (im1.width + im2.width, im1.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (im1.width, 0))
return dst
def get_concat_v(im1, im2):
''' Returns the vertical concatenation of im1 and im2.'''
dst = Image.new('RGB', (im1.width, im1.height + im2.height))
dst.paste(im1, (0, 0))
dst.paste(im2, (0, im1.height))
return dst
def config_to_img(map_config, nb_lines, nb_cols):
''' Returns an image according to the given configuration.
Strategy:
1) We'll start concatenate each line of the final configuration.
2) Only then are we going to concatenate those lines vertically.
Args:
- map_config ({(old_coords): (new_coords), ...}): dictionary mapping from the current configuration
to the shuffled puzzle.
- nb_lines (int)
- nb_cols (int)
Returns:
- an Image object
'''
## Step 1:
list_lines = []
for j in range(nb_lines): # We process line by line...
# We start from the left-most image.
current_im = read_cropped_im(*map_config[(0,j)]) # NB: The * allows to unpack the given tuple
for i in range(1, nb_cols): # For each piece of the line...
new_piece = read_cropped_im(*map_config[(i,j)]) # we get the juxtaposed piece just right to the previous one
current_im = get_concat_h(current_im, new_piece)
list_lines.append(current_im)
# Now we can vertically concatenate the obtained lines.
current_im = list_lines[0]
    for img_line in list_lines[1:]:
        current_im = get_concat_v(current_im, img_line)
return current_im
def cropped_to_img(cropped, nb_lines, nb_cols):
''' Returns an image according to the given configuration.
Strategy:
1) We'll start concatenate each line of the final configuration.
2) Only then are we going to concatenate those lines vertically.
Args:
- cropped ({(x, y): Image Object, ...}): dictionary mapping from the current configuration
to the shuffled puzzle.
- nb_lines (int)
- nb_cols (int)
Returns:
- an Image object
'''
## Step 1:
list_lines = []
for j in range(nb_lines): # We process line by line...
# We start from the left-most image.
        current_im = cropped[(0,j)] # left-most piece of the line
for i in range(1, nb_cols): # For each piece of the line...
new_piece = cropped[(i,j)] # we get the juxtaposed piece just right to the previous one
current_im = get_concat_h(current_im, new_piece)
list_lines.append(current_im)
# Now we can vertically concatenate the obtained lines.
current_im = list_lines[0]
    for img_line in list_lines[1:]:
        current_im = get_concat_v(current_im, img_line)
return current_im
def get_grad_orientation(im_1, im_2, orientation):
'''Returns the gradient considering im_1 as the reference image and im_2
concatenated right next to im_1 with the given orientation. The gradient is
calculated at the limit.
Orientation must be in ['N', 'E', 'W', 'S'].
Args:
- im_1 (Image object)
- im_2 (Image object)
- orientation (str)
Returns:
- grad (float)
'''
assert orientation in ['N', 'E', 'W', 'S'], 'Given input for orientation not understood.'
if orientation == 'E':
return grad_y(im_1, im_2)
elif orientation == 'W':
return grad_y(im_2, im_1)
elif orientation == 'S':
return grad_x(im_1, im_2)
elif orientation == 'N':
return grad_x(im_2, im_1)
def getBestConfig(cropped, nb_lines, nb_cols):
'''Returns a dictionary that contains another dictionary that gives
the ID of the piece with the best gradient according to the current direction
('N' for North, 'E' for East, 'W' for West and 'S' for South) which is used
as the key. Moreover, one can access the gradient value using the 'grad_N'
(or grad_E, etc...) key.
Args:
- cropped {key: image}
- nb_lines (int)
- nb_cols (int)
Returns:
- dicBestConfig {curr_piece: {'N': (x_best_N, y_best_N), ..., 'grad_N': min_grad_N, ...}}
'''
dicBestConfig = {}
orientations = ['N', 'E', 'W', 'S']
for curr_piece_ID in cropped.keys(): # For every piece of the puzzle...
# Creating an empty dict for the current piece.
dicBestConfig[curr_piece_ID] = {}
for orientation in orientations: # For every single of the 4 orientation...
# Preparing the key for storing the gradient.
grad_orientation = 'grad_' + orientation
# Variables to store the best candidate.
min_grad = np.inf
best_piece_ID = None
for piece_ID in cropped.keys(): # For every piece of the puzzle...
if piece_ID == curr_piece_ID: # We skip duplicates...
continue
else: # If we have two different pieces...
curr_grad = get_grad_orientation(
im_1=cropped[curr_piece_ID],
im_2=cropped[piece_ID],
orientation=orientation)
if curr_grad < min_grad: # If it's a better candidate...
# Overwriting the previous variables.
min_grad = curr_grad
best_piece_ID = piece_ID
dicBestConfig[curr_piece_ID][orientation] = best_piece_ID
dicBestConfig[curr_piece_ID][grad_orientation] = min_grad
return dicBestConfig
def getOrderedConfigsByConfig(dicBestConfig, orientation, reverse=False):
    '''Returns the (key, value) pairs of dicBestConfig sorted by gradient.
    Args:
        - dicBestConfig (dict)
        - orientation (str)
        - reverse (bool)
    Returns:
        - ordered_list [(key, value) pairs from dicBestConfig.items() ordered
        by the gradient according to the given orientation]
    '''
    assert orientation in ['N', 'E', 'W', 'S'], 'Given input for orientation not understood.'
grad_orientation_key = 'grad_' + orientation
return sorted(dicBestConfig.items(), key=lambda x: x[1][grad_orientation_key], reverse=reverse)
def getOrderedConfigs(dicBestConfig, reverse=False):
"""Returns a sorted list of elements from dicBestConfig.values().
We don't consider orientation in this function.
Args:
- dicBestConfig (dict)
- reverse (bool)
Returns:
- ordered_list (list): list of named tuples of the form
(start, end, orientation, score)
"""
list_temp = []
list_orientations = ['N', 'E', 'W', 'S']
# Creating a namedtuple for convenience:
Config = namedtuple('Config', ['start', 'end', 'orientation', 'score'])
for start, val in dicBestConfig.items():
for orientation in list_orientations:
end = val[orientation]
score = val['grad_' + orientation]
list_temp.append(Config(start, end, orientation, score))
ordered_list = sorted(list_temp, key=lambda x: x.score, reverse=reverse)
return ordered_list
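# Hedged usage sketch: the head of the returned list is the single best-matching
# (piece, neighbour, orientation) triple over the whole puzzle, e.g.
#     best = getOrderedConfigs(dicBestConfig)[0]
#     best.start, best.end, best.orientation, best.score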
# ---------------- Brute force ----------------
def brute_force(cropped, nb_lines, nb_cols):
''' Brute force solve. VERY SLOW!!!
    Saves all possible configurations in the 'outputs' folder.
Args:
- cropped {dict}
- nb_lines (int)
- nb_cols (int)
Returns:
- None
'''
for idx, map_config in enumerate(get_current_permutations(cropped)):
print(f'Current configuration: {idx}')
im_config = config_to_img(map_config, nb_lines, nb_cols)
filename = f'{idx}.jpg'
filepath = os.path.join('outputs', filename)
im_config.save(filepath)
clear_output(wait=True)
# ---------------- Manual solve ----------------
def config_switcher(cropped, nb_lines, nb_cols, coords_1, coords_2):
'''Switch places for two pieces and return a new cropped dictionary.
Args:
- cropped
- nb_lines
- nb_cols
- coords_1 (2-tuple): 1st piece to move
- coords_2 (2-tuple): 2nd piece to move
Returns:
- new_cropped
'''
new_cropped = deepcopy(cropped)
new_cropped[coords_1], new_cropped[coords_2] = new_cropped[coords_2], new_cropped[coords_1]
return new_cropped
def config_switcher_helper(cropped, nb_lines, nb_cols, coords_1, coords_2):
'''Show on the same plot the previous image and the new one after having
the pieces switched places.
Args:
- cropped
- nb_lines
- nb_cols
- coords_1 (2-tuple): 1st piece to move
- coords_2 (2-tuple): 2nd piece to move
Returns:
- new_cropped
'''
plt.figure(figsize=(12, 10))
plt.subplot(1, 2, 1)
old_image = cropped_to_img(cropped, nb_lines, nb_cols)
    xticks_location = (old_image.width / nb_cols) / 2 + np.linspace(0, old_image.width, nb_cols, endpoint=False)
    yticks_location = (old_image.height / nb_lines) / 2 + np.linspace(0, old_image.height, nb_lines, endpoint=False)
plt.xticks(xticks_location, range(nb_cols))
plt.yticks(yticks_location, range(nb_lines))
plt.imshow(old_image)
plt.title('Old image')
plt.subplot(1, 2, 2)
new_cropped = config_switcher(cropped, nb_lines, nb_cols, coords_1, coords_2)
new_image = cropped_to_img(new_cropped, nb_lines, nb_cols)
plt.xticks(xticks_location, range(nb_cols))
plt.yticks(yticks_location, range(nb_lines))
plt.imshow(new_image)
plt.title('New image')
return
# ---------------- Backtracking ----------------
def get_next_location(nb_pieces, nb_lines, nb_cols):
'''Returns the next coords (i,j) of the piece according to the
completion strategy.
Completion strategy:
Adds a piece with increasing x and, if the x are the same,
with increasing y. In other terms, we complete the puzzle from
left to right and from top to bottom.'''
# Get the previous coords (not trivial)
if nb_pieces % nb_cols == 0: # If we have a full line...
y = (nb_pieces // nb_cols) - 1
x = nb_cols - 1
else: #If the line isn't fully completed yet...
y = nb_pieces // nb_cols
x = (nb_pieces % nb_cols) - 1
if x == nb_cols - 1: # If we are already at the end of a line...
x_new = 0
y_new = y + 1
else: # If there is still some room on the line...
x_new = x + 1
y_new = y
print(f'Added new piece at: ({x_new}, {y_new})')
assert 0 <= x_new < nb_cols, 'Error with the x axis!'
assert 0 <= y_new < nb_lines, 'Error with the y axis!'
return (x_new, y_new)
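# Worked example of the fill order (nb_cols = 3): n = 0 -> (0, 0), n = 1 -> (1, 0),
# n = 2 -> (2, 0), n = 3 -> (0, 1), n = 4 -> (1, 1), ... -- left to right, then
# top to bottom, as the docstring describes.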
def score(config, cropped, nb_lines, nb_cols):
    '''Computes the score of the current config: the squared mean gradient
    with respect to x and y, halved (see mean_grad).'''
# In order to call the mean_grad function, we first
# have to generate a dictionary that has the same
# format as 'cropped': {(0, 0): <PIL.Image.Image>, ...}.
# Currently, 'config' has the shape {(new_coords): (old_coords), ...}.
# Next line allows to obtain the wanted dictionary.
new_cropped = get_config_mapped(config=config, cropped=cropped)
score = mean_grad(new_cropped, nb_lines, nb_cols)**2 / 2
return score
def get_config_mapped(config, cropped):
'''Converts a config dictionary to the same format as a cropped dictionary.
Args:
- config ({(new_coords): (old_coords), ...}): current configuration (not necessarily
completed)
- cropped ({(0, 0): <PIL.Image.Image>, ...}): dictionary of every single piece
of the puzzle
'''
return {new_coords: cropped[old_coords] for new_coords, old_coords in config.items()}
def partial_score(partial_config, cropped, nb_lines, nb_cols):
'''Computes the score of a partial configuration.'''
res = 0
config_mapped = get_config_mapped(config=partial_config, cropped=cropped)
# Gradient wrt to x:
for j in range(nb_lines):
for i in range(nb_cols-1):
if (i,j) in config_mapped.keys() and (i+1,j) in config_mapped.keys():
res += grad_x(config_mapped[(i,j)], config_mapped[(i+1,j)])
# Gradient wrt to y:
for i in range(nb_cols):
for j in range(nb_lines-1):
if (i,j) in config_mapped.keys() and (i,j+1) in config_mapped.keys():
res += grad_y(config_mapped[(i,j)], config_mapped[(i,j+1)])
return (res/2)**2 / (nb_lines * nb_cols)
def solve_backtracking(cropped, nb_lines, nb_cols):
'''
Applies backtracking for building the puzzle.
In what follows, 'config' is a dictionary with the
shape {(x, y): (i, j), ...}, ie that links the current
configuration to the suffled one.
Args:
- cropped ({(0, 0): <PIL.Image.Image>, ...}): dictionary of
every single piece of the puzzle
'''
bestScore = np.inf
bestSol = None
nb_pieces_total = len(cropped)
config = {}
# ------ Auxiliary functions ------
def is_terminal(config):
'''Returns True if we have generated a complete
solution for the puzzle.'''
return len(config) == nb_pieces_total
def is_promising(partial_config, bestScore):
        '''Returns True iff the gradient score of the partial configuration
        is strictly lower than bestScore.'''
current_score = partial_score(partial_config, cropped, nb_lines, nb_cols)
print(f'current_score: {current_score}')
return current_score < bestScore
def children(config, cropped, bestScore):
'''Generator for a list of configurations that have one supplementary piece
when compared to 'config'.
Args:
- config ({new_coords: old_coords, ...})
- cropped ({(i,j): Image object, ...})
Completion strategy:
Adds a piece with increasing x and, if the x are the same,
with increasing y. In other terms, we complete the puzzle from
left to right and from top to bottom.'''
# We get the location (i, j) of the next piece.
nb_pieces = len(config)
next_location = get_next_location(nb_pieces=nb_pieces, nb_lines=nb_lines, nb_cols=nb_cols)
# config.values() contains the old coords that have already been used
# cropped.keys() contains all the possible coords
remaining_pieces = [coords for coords in cropped.keys() if coords not in config.values()]
for next_piece in remaining_pieces:
config_copy = deepcopy(config)
assert next_location not in config_copy.keys(), 'issue when completing the current config'
config_copy[next_location] = next_piece
# print(f'config_copy = {config_copy}\n')
if is_promising(config_copy, bestScore):
print('Promising branch.\n')
yield config_copy
else:
print('Not promising branch.\n')
continue # get directly to next iteration
def backtracking(config, cropped, bestScore, bestSol):
'''
Backtracking for building the puzzle (recursive).
Args:
- config: dictionary giving the mapping from the current
configuration to a given configuration of the puzzle
(the dictionary doesn't have to contain alle the puzzle pieces
since it's being built on the moment)
- cropped
- bestScore (float)
- bestSol (dict)
Returns:
- new_bestScore
- new_bestSol
'''
        if is_terminal(config):
            print('is_terminal')
            current_score = score(config, cropped, nb_lines, nb_cols)
            print(f'current_score: {current_score}\n')
            if current_score < bestScore:
                bestScore = current_score
                bestSol = deepcopy(config)
                print(f'New bestScore: {bestScore}\n')
        else:
            print(f'not terminal, current nb of pieces: {len(config)}')
            for new_config in children(config, cropped, bestScore):
                # Propagate improvements so later branches are pruned against
                # the best score found so far (the original left new_bestScore
                # unbound whenever no improvement occurred).
                bestScore, bestSol = backtracking(new_config, cropped, bestScore, bestSol)
        return bestScore, bestSol
# ------ Main ------
return backtracking(config, cropped, bestScore, bestSol)
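# Hedged end-to-end sketch (file name and grid size are assumptions):
#     im = read_img('shuffled.jpg')
#     cropped = split_img(im, nb_lines=3, nb_cols=3)
#     save_cropped(cropped)  # config_to_img() reads pieces back from 'cropped/'
#     best_score, best_config = solve_backtracking(cropped, 3, 3)
#     display_image(config_to_img(best_config, 3, 3), 3, 3)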
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"PIL.Image.new",
"copy.deepcopy",
"matplotlib.pyplot.imshow",
"itertools.permutations",
"numpy.square",
"PIL.Image.open",
"matplotlib.pyplot.figure",
"numpy.array",
"collections.namedtuple",
"numpy.linspace",
"IPython.display.clear_output",
"os.path.join"
] |
[((380, 400), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (390, 400), False, 'from PIL import Image\n'), ((1631, 1658), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1641, 1658), True, 'import matplotlib.pyplot as plt\n'), ((1979, 1994), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1989, 1994), True, 'import matplotlib.pyplot as plt\n'), ((3339, 3362), 'itertools.permutations', 'permutations', (['list_keys'], {}), '(list_keys)\n', (3351, 3362), False, 'from itertools import permutations\n'), ((3779, 3792), 'numpy.array', 'np.array', (['im1'], {}), '(im1)\n', (3787, 3792), True, 'import numpy as np\n'), ((3804, 3817), 'numpy.array', 'np.array', (['im2'], {}), '(im2)\n', (3812, 3817), True, 'import numpy as np\n'), ((4422, 4435), 'numpy.array', 'np.array', (['im1'], {}), '(im1)\n', (4430, 4435), True, 'import numpy as np\n'), ((4447, 4460), 'numpy.array', 'np.array', (['im2'], {}), '(im2)\n', (4455, 4460), True, 'import numpy as np\n'), ((5463, 5516), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im1.width + im2.width, im1.height)'], {}), "('RGB', (im1.width + im2.width, im1.height))\n", (5472, 5516), False, 'from PIL import Image\n'), ((5695, 5749), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(im1.width, im1.height + im2.height)'], {}), "('RGB', (im1.width, im1.height + im2.height))\n", (5704, 5749), False, 'from PIL import Image\n'), ((12471, 12533), 'collections.namedtuple', 'namedtuple', (['"""Config"""', "['start', 'end', 'orientation', 'score']"], {}), "('Config', ['start', 'end', 'orientation', 'score'])\n", (12481, 12533), False, 'from collections import namedtuple\n'), ((13934, 13951), 'copy.deepcopy', 'deepcopy', (['cropped'], {}), '(cropped)\n', (13942, 13951), False, 'from copy import deepcopy\n'), ((14457, 14485), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (14467, 14485), True, 'import matplotlib.pyplot as plt\n'), ((14493, 14513), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (14504, 14513), True, 'import matplotlib.pyplot as plt\n'), ((14882, 14903), 'matplotlib.pyplot.imshow', 'plt.imshow', (['old_image'], {}), '(old_image)\n', (14892, 14903), True, 'import matplotlib.pyplot as plt\n'), ((14908, 14930), 'matplotlib.pyplot.title', 'plt.title', (['"""Old image"""'], {}), "('Old image')\n", (14917, 14930), True, 'import matplotlib.pyplot as plt\n'), ((14940, 14960), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (14951, 14960), True, 'import matplotlib.pyplot as plt\n'), ((15207, 15228), 'matplotlib.pyplot.imshow', 'plt.imshow', (['new_image'], {}), '(new_image)\n', (15217, 15228), True, 'import matplotlib.pyplot as plt\n'), ((15233, 15255), 'matplotlib.pyplot.title', 'plt.title', (['"""New image"""'], {}), "('New image')\n", (15242, 15255), True, 'import matplotlib.pyplot as plt\n'), ((1710, 1748), 'numpy.linspace', 'np.linspace', (['(0)', 'img.width', '(nb_cols + 1)'], {}), '(0, img.width, nb_cols + 1)\n', (1721, 1748), True, 'import numpy as np\n'), ((1799, 1839), 'numpy.linspace', 'np.linspace', (['(0)', 'img.height', '(nb_lines + 1)'], {}), '(0, img.height, nb_lines + 1)\n', (1810, 1839), True, 'import numpy as np\n'), ((1957, 1973), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1966, 1973), True, 'import matplotlib.pyplot as plt\n'), ((2851, 2884), 'os.path.join', 'os.path.join', (['"""cropped"""', 'filename'], 
{}), "('cropped', filename)\n", (2863, 2884), False, 'import os\n'), ((4071, 4112), 'numpy.square', 'np.square', (['(arr1[-1, :, :] - arr2[0, :, :])'], {}), '(arr1[-1, :, :] - arr2[0, :, :])\n', (4080, 4112), True, 'import numpy as np\n'), ((4712, 4753), 'numpy.square', 'np.square', (['(arr1[:, 0, :] - arr2[:, -1, :])'], {}), '(arr1[:, 0, :] - arr2[:, -1, :])\n', (4721, 4753), True, 'import numpy as np\n'), ((5305, 5344), 'os.path.join', 'os.path.join', (['"""cropped"""', 'f"""{i}-{j}.jpg"""'], {}), "('cropped', f'{i}-{j}.jpg')\n", (5317, 5344), False, 'import os\n'), ((13432, 13465), 'os.path.join', 'os.path.join', (['"""outputs"""', 'filename'], {}), "('outputs', filename)\n", (13444, 13465), False, 'import os\n'), ((13507, 13530), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (13519, 13530), False, 'from IPython.display import clear_output\n'), ((14630, 14674), 'numpy.linspace', 'np.linspace', (['(0)', 'old_image.width', '(nb_cols + 1)'], {}), '(0, old_image.width, nb_cols + 1)\n', (14641, 14674), True, 'import numpy as np\n'), ((14731, 14777), 'numpy.linspace', 'np.linspace', (['(0)', 'old_image.height', '(nb_lines + 1)'], {}), '(0, old_image.height, nb_lines + 1)\n', (14742, 14777), True, 'import numpy as np\n'), ((20394, 20410), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (20402, 20410), False, 'from copy import deepcopy\n'), ((22125, 22141), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (22133, 22141), False, 'from copy import deepcopy\n')]
|
from numpy import random
import numpy as np
import matplotlib.pyplot as plt
import math
### Defining theta
theta = math.pi/4
### Generates count number of random values in the range [0, 1]
def getU(count):
u = []
for i in range(count):
key = random.rand()
u.append(key)
return u
def getX(u):
x = []
for t in u:
res = -theta*(math.log(1-t))
x.append(res)
return x
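# Why this works (inverse-transform sampling): for X ~ Exp(mean = theta) the CDF
# is F(x) = 1 - exp(-x/theta), so F^{-1}(u) = -theta * log(1 - u). Feeding
# U ~ Uniform(0, 1) through F^{-1} yields samples with the target distribution.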
def getSampleMeanVariance(x):
    total = 0.00
    for i in x:
        total += i
    avg = total/(len(x))
    cnt = 0.00
    for i in x:
        cnt += (i-avg)**2
    variance = cnt/(len(x)-1)  # unbiased (n-1) sample variance
return avg, variance
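# Hedged cross-check (illustrative): the hand-rolled estimators should agree
# with numpy's, e.g.
#     xs = getX(getU(1000))
#     m, v = getSampleMeanVariance(xs)
#     np.isclose(m, np.mean(xs)) and np.isclose(v, np.var(xs, ddof=1))  # -> True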
def plotCDF(data):
data_size = len(data)
data_set = sorted(set(data))
bins = np.append(data_set, data_set[-1]+1)
counts, bin_edges = np.histogram(data, bins=bins, density=False)
counts = counts.astype(float)/data_size
cdf = np.cumsum(counts)
plt.plot(bin_edges[0:-1], cdf, linestyle='--', marker='o', color='b')
plt.ylim((0, 1))
plt.ylabel("CDF")
plt.grid(True)
plt.show()
# Plots y = 1 - e^(-x/theta)
def plotActualDistributionFunction():
a = -1
b = 1/theta
c = 1
x = np.linspace(0, 10, 256, endpoint = True)
y = (a * np.exp(-b*x)) + c
plt.plot(x, y, '-r', label=r'$y = 1 - e^{-x/theta}$')
axes = plt.gca()
axes.set_xlim([x.min(), x.max()])
axes.set_ylim([y.min(), y.max()])
plt.xlabel('x')
plt.ylabel('y')
plt.title('Actual Distribution')
plt.legend(loc='upper left')
plt.show()
def execute(cnt):
print("For input size of : " + str(cnt))
u = getU(cnt)
u.sort()
# print(u)
x = getX(u)
# print(x)
sMean, sVariance = getSampleMeanVariance(x)
# Actual Mean is theta
print("Sample Mean: " + str(sMean) + " " + "Actual Mean: " + str(theta))
print("Abs. Difference : " + str(abs(sMean-theta)))
# Actual Variance is theta^2
print("Sample Variance: " + str(sVariance) + " " + "Actual Variance: " + str(theta**2))
print("Abs. Difference : " + str(abs(sVariance-theta**2)))
print()
plotCDF(x)
def main():
plotActualDistributionFunction()
execute(10)
execute(100)
execute(1000)
execute(10000)
execute(100000)
# execute(1000000)
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.append",
"numpy.histogram",
"numpy.cumsum",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.gca",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"math.log",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((649, 686), 'numpy.append', 'np.append', (['data_set', '(data_set[-1] + 1)'], {}), '(data_set, data_set[-1] + 1)\n', (658, 686), True, 'import numpy as np\n'), ((707, 751), 'numpy.histogram', 'np.histogram', (['data'], {'bins': 'bins', 'density': '(False)'}), '(data, bins=bins, density=False)\n', (719, 751), True, 'import numpy as np\n'), ((802, 819), 'numpy.cumsum', 'np.cumsum', (['counts'], {}), '(counts)\n', (811, 819), True, 'import numpy as np\n'), ((822, 891), 'matplotlib.pyplot.plot', 'plt.plot', (['bin_edges[0:-1]', 'cdf'], {'linestyle': '"""--"""', 'marker': '"""o"""', 'color': '"""b"""'}), "(bin_edges[0:-1], cdf, linestyle='--', marker='o', color='b')\n", (830, 891), True, 'import matplotlib.pyplot as plt\n'), ((893, 909), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (901, 909), True, 'import matplotlib.pyplot as plt\n'), ((911, 928), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CDF"""'], {}), "('CDF')\n", (921, 928), True, 'import matplotlib.pyplot as plt\n'), ((930, 944), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (938, 944), True, 'import matplotlib.pyplot as plt\n'), ((947, 957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (955, 957), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1099), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(256)'], {'endpoint': '(True)'}), '(0, 10, 256, endpoint=True)\n', (1072, 1099), True, 'import numpy as np\n'), ((1132, 1185), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-r"""'], {'label': '"""$y = 1 - e^{-x/theta}$"""'}), "(x, y, '-r', label='$y = 1 - e^{-x/theta}$')\n", (1140, 1185), True, 'import matplotlib.pyplot as plt\n'), ((1196, 1205), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1203, 1205), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1293), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1288, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1295, 1310), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1305, 1310), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1344), 'matplotlib.pyplot.title', 'plt.title', (['"""Actual Distribution"""'], {}), "('Actual Distribution')\n", (1321, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1346, 1374), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1356, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1387), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1385, 1387), True, 'import matplotlib.pyplot as plt\n'), ((248, 261), 'numpy.random.rand', 'random.rand', ([], {}), '()\n', (259, 261), False, 'from numpy import random\n'), ((340, 355), 'math.log', 'math.log', (['(1 - t)'], {}), '(1 - t)\n', (348, 355), False, 'import math\n'), ((1112, 1126), 'numpy.exp', 'np.exp', (['(-b * x)'], {}), '(-b * x)\n', (1118, 1126), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
import torch.nn.functional as F
from detectron2.config import configurable
from detectron2.data.detection_utils import convert_image_to_rgb
from detectron2.structures import ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.logger import log_first_n
from ..backbone import Backbone, build_backbone
from ..postprocessing import detector_postprocess
from ..proposal_generator import build_proposal_generator
from ..roi_heads import build_roi_heads
from .build import META_ARCH_REGISTRY
__all__ = ["GeneralizedRCNN", "ProposalNetwork", "ProposalNetwork1"]
class AugmentedConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, dk, dv, Nh, shape=0, relative=False, stride=1):
super(AugmentedConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.dk = dk
self.dv = dv
self.Nh = Nh
self.shape = shape
self.relative = relative
self.stride = stride
self.padding = (self.kernel_size - 1) // 2
#print("conv param", padding, stride)
assert self.Nh != 0, "integer division or modulo by zero, Nh >= 1"
assert self.dk % self.Nh == 0, "dk should be divided by Nh. (example: out_channels: 20, dk: 40, Nh: 4)"
assert self.dv % self.Nh == 0, "dv should be divided by Nh. (example: out_channels: 20, dv: 4, Nh: 4)"
assert stride in [1, 2], str(stride) + " Up to 2 strides are allowed."
self.conv_out = nn.Conv2d(self.in_channels, self.out_channels - self.dv, self.kernel_size, stride=stride, padding=self.padding)
#self.conv_out = nn.Conv2d(self.in_channels, self.out_channels , self.kernel_size, stride=stride, padding=self.padding)
self.qkv_conv = nn.Conv2d(self.in_channels, 2 * self.dk + self.dv, kernel_size=self.kernel_size, stride=stride, padding=self.padding)
self.attn_out = nn.Conv2d(self.dv, self.dv, kernel_size=1, stride=1)
if self.relative:
self.key_rel_w = nn.Parameter(torch.randn((2 * self.shape - 1, dk // Nh), requires_grad=True))
self.key_rel_h = nn.Parameter(torch.randn((2 * self.shape - 1, dk // Nh), requires_grad=True))
def forward(self, x):
        # Input x: (batch_size * 5, channels, height, width) -- five feature
        # slices per sample, stacked along the batch dimension. Regroup the
        # slices onto the channel axis: (batch_size, 5 * channels, height, width).
        x = x.reshape(-1, 5 * x.shape[1], x.shape[2], x.shape[3])
conv_out = self.conv_out(x)
batch, _, height, width = conv_out.size()
#batch, _, height, width = x.size()
#height= width=self.shape
#print(conv_out.size())
# flat_q, flat_k, flat_v
# (batch_size, Nh, height * width, dvh or dkh)
# dvh = dv / Nh, dkh = dk / Nh
# q, k, v
# (batch_size, Nh, height, width, dv or dk)
#print("input to qkv", x.shape)
flat_q, flat_k, flat_v, q, k, v = self.compute_flat_qkv(x, self.dk, self.dv, self.Nh)
logits = torch.matmul(flat_q.transpose(2, 3), flat_k)
if self.relative:
h_rel_logits, w_rel_logits = self.relative_logits(q)
logits += h_rel_logits
logits += w_rel_logits
weights = F.softmax(logits, dim=-1)
# attn_out
# (batch, Nh, height * width, dvh)
attn_out = torch.matmul(weights, flat_v.transpose(2, 3))
attn_out = torch.reshape(attn_out, (batch, self.Nh, self.dv // self.Nh, height, width))
#print("attn",attn_out.size())
# combine_heads_2d
# (batch, out_channels, height, width)
#print("input to attn", attn_out.size())
attn_out = self.combine_heads_2d(attn_out)
attn_out = self.attn_out(attn_out)
return torch.cat((conv_out, attn_out), dim=1)
#return conv_out
def compute_flat_qkv(self, x, dk, dv, Nh):
qkv = self.qkv_conv(x)
#print("qkv",qkv.size())
N, _, H, W = qkv.size()
q, k, v = torch.split(qkv, [dk, dk, dv], dim=1)
q = self.split_heads_2d(q, Nh)
k = self.split_heads_2d(k, Nh)
v = self.split_heads_2d(v, Nh)
dkh = dk // Nh
q *= dkh ** -0.5
flat_q = torch.reshape(q, (N, Nh, dk // Nh, H * W))
flat_k = torch.reshape(k, (N, Nh, dk // Nh, H * W))
flat_v = torch.reshape(v, (N, Nh, dv // Nh, H * W))
return flat_q, flat_k, flat_v, q, k, v
def split_heads_2d(self, x, Nh):
batch, channels, height, width = x.size()
ret_shape = (batch, Nh, channels // Nh, height, width)
split = torch.reshape(x, ret_shape)
return split
def combine_heads_2d(self, x):
batch, Nh, dv, H, W = x.size()
ret_shape = (batch, Nh * dv, H, W)
return torch.reshape(x, ret_shape)
def relative_logits(self, q):
B, Nh, dk, H, W = q.size()
q = torch.transpose(q, 2, 4).transpose(2, 3)
rel_logits_w = self.relative_logits_1d(q, self.key_rel_w, H, W, Nh, "w")
rel_logits_h = self.relative_logits_1d(torch.transpose(q, 2, 3), self.key_rel_h, W, H, Nh, "h")
return rel_logits_h, rel_logits_w
def relative_logits_1d(self, q, rel_k, H, W, Nh, case):
rel_logits = torch.einsum('bhxyd,md->bhxym', q, rel_k)
rel_logits = torch.reshape(rel_logits, (-1, Nh * H, W, 2 * W - 1))
rel_logits = self.rel_to_abs(rel_logits)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H, W, W))
rel_logits = torch.unsqueeze(rel_logits, dim=3)
rel_logits = rel_logits.repeat((1, 1, 1, H, 1, 1))
if case == "w":
rel_logits = torch.transpose(rel_logits, 3, 4)
elif case == "h":
rel_logits = torch.transpose(rel_logits, 2, 4).transpose(4, 5).transpose(3, 5)
rel_logits = torch.reshape(rel_logits, (-1, Nh, H * W, H * W))
return rel_logits
def rel_to_abs(self, x):
B, Nh, L, _ = x.size()
col_pad = torch.zeros((B, Nh, L, 1)).to(x)
x = torch.cat((x, col_pad), dim=3)
flat_x = torch.reshape(x, (B, Nh, L * 2 * L))
flat_pad = torch.zeros((B, Nh, L - 1)).to(x)
flat_x_padded = torch.cat((flat_x, flat_pad), dim=2)
final_x = torch.reshape(flat_x_padded, (B, Nh, L + 1, 2 * L - 1))
final_x = final_x[:, :, :L, L - 1:]
return final_x
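# Hedged shape note (illustrative numbers, not from the original code):
# rel_to_abs maps relative-position logits (B, Nh, L, 2L - 1) to absolute
# logits (B, Nh, L, L); e.g. an input of shape (2, 4, 5, 9) is padded to
# (2, 4, 6, 9), flattened, and sliced back down to (2, 4, 5, 5).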
#augmented_conv_128 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=2, shape=64)
augmented_conv_64 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=64)
augmented_conv_32 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=32)
augmented_conv_16 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=16)
augmented_conv_8 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=8)
augmented_conv_4 = AugmentedConv(in_channels=5*256, out_channels=256, kernel_size=3, dk=40, dv=4, Nh=2, relative=True, stride=1, shape=4)
#attention with more dk dv = equal to convolution
# augmented_conv_128 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=2, shape=64)
# augmented_conv_64 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=1, shape=64)
# augmented_conv_32 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=1, shape=32)
# augmented_conv_16 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=1, shape=16)
# augmented_conv_8 = AugmentedConv(in_channels=3*256, out_channels=256, kernel_size=3, dk=64, dv=64, Nh=8, relative=True, stride=1, shape=8)
#up_sample = nn.ConvTranspose2d(256, 256, 3,stride=2, padding=1, output_padding=1)
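# Hedged shape sketch for the fusion modules above (batch size 2 is an assumption):
#     x = torch.randn(2 * 5, 256, 64, 64)   # five slices per sample on the batch axis
#     y = augmented_conv_64(x)              # forward() regroups to (2, 5*256, 64, 64)
#     y.shape                               # -> torch.Size([2, 256, 64, 64])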
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(nn.Module):
"""
    Generalized R-CNN. Any model that contains the following three components:
1. Per-image feature extraction (aka backbone)
2. Region proposal generation
3. Per-region feature extraction and prediction
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
roi_heads: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
input_format: Optional[str] = None,
vis_period: int = 0,
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
roi_heads: a ROI head that performs per-region computation
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
input_format: describe the meaning of channels of input. Needed by visualization
vis_period: the period to run visualization. Set to 0 to disable.
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.roi_heads = roi_heads
self.input_format = input_format
self.vis_period = vis_period
if vis_period > 0:
assert input_format is not None, "input_format is required for visualization!"
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
assert (
self.pixel_mean.shape == self.pixel_std.shape
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"roi_heads": build_roi_heads(cfg, backbone.output_shape()),
"input_format": cfg.INPUT.FORMAT,
"vis_period": cfg.VIS_PERIOD,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def visualize_training(self, batched_inputs, proposals):
"""
A function used to visualize images and proposals. It shows ground truth
bounding boxes on the original image and up to 20 top-scoring predicted
object proposals on the original image. Users can implement different
visualization functions for different models.
Args:
batched_inputs (list): a list that contains input to the model.
proposals (list): a list that contains predicted proposals. Both
batched_inputs and proposals should have the same length.
"""
from detectron2.utils.visualizer import Visualizer
storage = get_event_storage()
max_vis_prop = 20
for input, prop in zip(batched_inputs, proposals):
img = input["image"]
img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)
v_gt = Visualizer(img, None)
v_gt = v_gt.overlay_instances(boxes=input["instances"].gt_boxes)
anno_img = v_gt.get_image()
box_size = min(len(prop.proposal_boxes), max_vis_prop)
v_pred = Visualizer(img, None)
v_pred = v_pred.overlay_instances(
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
)
prop_img = v_pred.get_image()
vis_img = np.concatenate((anno_img, prop_img), axis=1)
vis_img = vis_img.transpose(2, 0, 1)
vis_name = "Left: GT bounding boxes; Right: Predicted proposals"
storage.put_image(vis_name, vis_img)
break # only visualize one image in a batch
def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* image: Tensor, image in (C, H, W) format.
* instances (optional): groundtruth :class:`Instances`
* proposals (optional): :class:`Instances`, precomputed proposals.
Other information that's included in the original dicts, such as:
* "height", "width" (int): the output resolution of the model, used in inference.
See :meth:`postprocess` for details.
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "instances" whose value is a :class:`Instances`.
The :class:`Instances` object has the following keys:
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
"""
if not self.training:
return self.inference(batched_inputs)
images = self.preprocess_image(batched_inputs)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
features = self.backbone(images.tensor)
if self.proposal_generator is not None:
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
proposal_losses = {}
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
if self.vis_period > 0:
storage = get_event_storage()
if storage.iter % self.vis_period == 0:
self.visualize_training(batched_inputs, proposals)
losses = {}
losses.update(detector_losses)
losses.update(proposal_losses)
return losses
def inference(
self,
batched_inputs: Tuple[Dict[str, torch.Tensor]],
detected_instances: Optional[List[Instances]] = None,
do_postprocess: bool = True,
):
"""
Run inference on the given inputs.
Args:
batched_inputs (list[dict]): same as in :meth:`forward`
detected_instances (None or list[Instances]): if not None, it
contains an `Instances` object per image. The `Instances`
object contains "pred_boxes" and "pred_classes" which are
known boxes in the image.
The inference will then skip the detection of bounding boxes,
and only predict other per-ROI outputs.
do_postprocess (bool): whether to apply post-processing on the outputs.
Returns:
When do_postprocess=True, same as in :meth:`forward`.
Otherwise, a list[Instances] containing raw network outputs.
"""
assert not self.training
images = self.preprocess_image(batched_inputs)
features = self.backbone(images.tensor)
if detected_instances is None:
if self.proposal_generator is not None:
proposals, _ = self.proposal_generator(images, features, None)
else:
assert "proposals" in batched_inputs[0]
proposals = [x["proposals"].to(self.device) for x in batched_inputs]
results, _ = self.roi_heads(images, features, proposals, None)
else:
detected_instances = [x.to(self.device) for x in detected_instances]
results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
if do_postprocess:
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
return GeneralizedRCNN._postprocess(results, batched_inputs, images.image_sizes)
else:
return results
def preprocess_image(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]):
"""
Normalize, pad and batch the input images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
return images
@staticmethod
def _postprocess(instances, batched_inputs: Tuple[Dict[str, torch.Tensor]], image_sizes):
"""
Rescale the output instances to the target size.
"""
# note: private function; subject to changes
processed_results = []
for results_per_image, input_per_image, image_size in zip(
instances, batched_inputs, image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
@META_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
"""
A meta architecture that only predicts object proposals.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
proposal_generator: nn.Module,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
):
"""
Args:
backbone: a backbone module, must follow detectron2's backbone interface
proposal_generator: a module that generates proposals using backbone features
pixel_mean, pixel_std: list or tuple with #channels element, representing
the per-channel mean and std to be used to normalize the input image
"""
super().__init__()
self.backbone = backbone
self.proposal_generator = proposal_generator
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
return {
"backbone": backbone,
"proposal_generator": build_proposal_generator(cfg, backbone.output_shape()),
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"proposals": r})
return processed_results
@META_ARCH_REGISTRY.register()
class ProposalNetwork1(nn.Module):
"""
A meta architecture that only predicts object proposals.
"""
def __init__(self, cfg):
super().__init__()
self.backbone = build_backbone(cfg)
#self.augmentedConv_128 = augmented_conv_128
self.augmentedConv_64 = augmented_conv_64
self.augmentedConv_32 = augmented_conv_32
self.augmentedConv_16 = augmented_conv_16
self.augmentedConv_8 = augmented_conv_8#AugmentedConv()
self.augmentedConv_4 = augmented_conv_4
#self.up_sample = up_sample
self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN1).view(-1, 1, 1))
self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD1).view(-1, 1, 1))
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
Same as in :class:`GeneralizedRCNN.forward`
Returns:
list[dict]:
Each dict is the output for one input image.
The dict contains one key "proposals" whose value is a
:class:`Instances` with keys "proposal_boxes" and "objectness_logits".
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
temp_images = ()
for im in images:
temp_images += im.split(3)
images = ImageList.from_tensors(temp_images, self.backbone.size_divisibility)
features = self.backbone(images.tensor)
# for key in features.keys():
# print(key,features[key].shape)
feature_fused = {}
feature_fused['p3'] = self.augmentedConv_64(features['p3'])
#print('feature_128.shape up sample',feature_fused['p2'].shape)
feature_fused['p4'] = self.augmentedConv_32(features['p4'])
feature_fused['p5'] = self.augmentedConv_16(features['p5'])
feature_fused['p6'] = self.augmentedConv_8(features['p6'])
feature_fused['p7'] = self.augmentedConv_4(features['p7'])
my_image = images.tensor[3::5] #5 slice
#my_image = images.tensor[4::9]
#print(my_image.shape)
        my_image_sizes = [(im.shape[-2], im.shape[-1]) for im in my_image]  # one (H, W) per image
#print(image_sizes)
images = ImageList(my_image,my_image_sizes)
if "instances" in batched_inputs[0]:
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
elif "targets" in batched_inputs[0]:
log_first_n(
logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
)
gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
else:
gt_instances = None
proposals, proposal_losses = self.proposal_generator(images, feature_fused, gt_instances)
# In training, the proposals are not useful at all but we generate them anyway.
# This makes RPN-only models about 5% slower.
if self.training:
return proposal_losses
processed_results = []
for results_per_image, input_per_image, image_size in zip(
proposals, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
return processed_results
|
[
"detectron2.structures.ImageList.from_tensors",
"torch.jit.is_scripting",
"torch.cat",
"torch.randn",
"detectron2.utils.logger.log_first_n",
"detectron2.utils.visualizer.Visualizer",
"detectron2.utils.events.get_event_storage",
"torch.Tensor",
"torch.zeros",
"torch.split",
"torch.nn.Conv2d",
"detectron2.structures.ImageList",
"torch.einsum",
"torch.unsqueeze",
"torch.reshape",
"numpy.concatenate",
"torch.nn.functional.softmax",
"torch.tensor",
"torch.transpose"
] |
[((1746, 1861), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.in_channels', '(self.out_channels - self.dv)', 'self.kernel_size'], {'stride': 'stride', 'padding': 'self.padding'}), '(self.in_channels, self.out_channels - self.dv, self.kernel_size,\n stride=stride, padding=self.padding)\n', (1755, 1861), False, 'from torch import nn\n'), ((2019, 2141), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.in_channels', '(2 * self.dk + self.dv)'], {'kernel_size': 'self.kernel_size', 'stride': 'stride', 'padding': 'self.padding'}), '(self.in_channels, 2 * self.dk + self.dv, kernel_size=self.\n kernel_size, stride=stride, padding=self.padding)\n', (2028, 2141), False, 'from torch import nn\n'), ((2162, 2214), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.dv', 'self.dv'], {'kernel_size': '(1)', 'stride': '(1)'}), '(self.dv, self.dv, kernel_size=1, stride=1)\n', (2171, 2214), False, 'from torch import nn\n'), ((3519, 3544), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (3528, 3544), True, 'import torch.nn.functional as F\n'), ((3692, 3768), 'torch.reshape', 'torch.reshape', (['attn_out', '(batch, self.Nh, self.dv // self.Nh, height, width)'], {}), '(attn_out, (batch, self.Nh, self.dv // self.Nh, height, width))\n', (3705, 3768), False, 'import torch\n'), ((4040, 4078), 'torch.cat', 'torch.cat', (['(conv_out, attn_out)'], {'dim': '(1)'}), '((conv_out, attn_out), dim=1)\n', (4049, 4078), False, 'import torch\n'), ((4275, 4312), 'torch.split', 'torch.split', (['qkv', '[dk, dk, dv]'], {'dim': '(1)'}), '(qkv, [dk, dk, dv], dim=1)\n', (4286, 4312), False, 'import torch\n'), ((4496, 4538), 'torch.reshape', 'torch.reshape', (['q', '(N, Nh, dk // Nh, H * W)'], {}), '(q, (N, Nh, dk // Nh, H * W))\n', (4509, 4538), False, 'import torch\n'), ((4556, 4598), 'torch.reshape', 'torch.reshape', (['k', '(N, Nh, dk // Nh, H * W)'], {}), '(k, (N, Nh, dk // Nh, H * W))\n', (4569, 4598), False, 'import torch\n'), ((4616, 4658), 'torch.reshape', 'torch.reshape', (['v', '(N, Nh, dv // Nh, H * W)'], {}), '(v, (N, Nh, dv // Nh, H * W))\n', (4629, 4658), False, 'import torch\n'), ((4873, 4900), 'torch.reshape', 'torch.reshape', (['x', 'ret_shape'], {}), '(x, ret_shape)\n', (4886, 4900), False, 'import torch\n'), ((5055, 5082), 'torch.reshape', 'torch.reshape', (['x', 'ret_shape'], {}), '(x, ret_shape)\n', (5068, 5082), False, 'import torch\n'), ((5517, 5558), 'torch.einsum', 'torch.einsum', (['"""bhxyd,md->bhxym"""', 'q', 'rel_k'], {}), "('bhxyd,md->bhxym', q, rel_k)\n", (5529, 5558), False, 'import torch\n'), ((5580, 5633), 'torch.reshape', 'torch.reshape', (['rel_logits', '(-1, Nh * H, W, 2 * W - 1)'], {}), '(rel_logits, (-1, Nh * H, W, 2 * W - 1))\n', (5593, 5633), False, 'import torch\n'), ((5705, 5749), 'torch.reshape', 'torch.reshape', (['rel_logits', '(-1, Nh, H, W, W)'], {}), '(rel_logits, (-1, Nh, H, W, W))\n', (5718, 5749), False, 'import torch\n'), ((5771, 5805), 'torch.unsqueeze', 'torch.unsqueeze', (['rel_logits'], {'dim': '(3)'}), '(rel_logits, dim=3)\n', (5786, 5805), False, 'import torch\n'), ((6087, 6136), 'torch.reshape', 'torch.reshape', (['rel_logits', '(-1, Nh, H * W, H * W)'], {}), '(rel_logits, (-1, Nh, H * W, H * W))\n', (6100, 6136), False, 'import torch\n'), ((6288, 6318), 'torch.cat', 'torch.cat', (['(x, col_pad)'], {'dim': '(3)'}), '((x, col_pad), dim=3)\n', (6297, 6318), False, 'import torch\n'), ((6337, 6373), 'torch.reshape', 'torch.reshape', (['x', '(B, Nh, L * 2 * L)'], {}), '(x, (B, Nh, L * 2 * L))\n', (6350, 6373), False, 'import torch\n'), ((6451, 6487), 
'torch.cat', 'torch.cat', (['(flat_x, flat_pad)'], {'dim': '(2)'}), '((flat_x, flat_pad), dim=2)\n', (6460, 6487), False, 'import torch\n'), ((6507, 6562), 'torch.reshape', 'torch.reshape', (['flat_x_padded', '(B, Nh, L + 1, 2 * L - 1)'], {}), '(flat_x_padded, (B, Nh, L + 1, 2 * L - 1))\n', (6520, 6562), False, 'import torch\n'), ((11513, 11532), 'detectron2.utils.events.get_event_storage', 'get_event_storage', ([], {}), '()\n', (11530, 11532), False, 'from detectron2.utils.events import get_event_storage\n'), ((16993, 17056), 'detectron2.structures.ImageList.from_tensors', 'ImageList.from_tensors', (['images', 'self.backbone.size_divisibility'], {}), '(images, self.backbone.size_divisibility)\n', (17015, 17056), False, 'from detectron2.structures import ImageList, Instances\n'), ((19763, 19826), 'detectron2.structures.ImageList.from_tensors', 'ImageList.from_tensors', (['images', 'self.backbone.size_divisibility'], {}), '(images, self.backbone.size_divisibility)\n', (19785, 19826), False, 'from detectron2.structures import ImageList, Instances\n'), ((22678, 22746), 'detectron2.structures.ImageList.from_tensors', 'ImageList.from_tensors', (['temp_images', 'self.backbone.size_divisibility'], {}), '(temp_images, self.backbone.size_divisibility)\n', (22700, 22746), False, 'from detectron2.structures import ImageList, Instances\n'), ((23572, 23607), 'detectron2.structures.ImageList', 'ImageList', (['my_image', 'my_image_sizes'], {}), '(my_image, my_image_sizes)\n', (23581, 23607), False, 'from detectron2.structures import ImageList, Instances\n'), ((5335, 5359), 'torch.transpose', 'torch.transpose', (['q', '(2)', '(3)'], {}), '(q, 2, 3)\n', (5350, 5359), False, 'import torch\n'), ((5915, 5948), 'torch.transpose', 'torch.transpose', (['rel_logits', '(3)', '(4)'], {}), '(rel_logits, 3, 4)\n', (5930, 5948), False, 'import torch\n'), ((11751, 11772), 'detectron2.utils.visualizer.Visualizer', 'Visualizer', (['img', 'None'], {}), '(img, None)\n', (11761, 11772), False, 'from detectron2.utils.visualizer import Visualizer\n'), ((11978, 11999), 'detectron2.utils.visualizer.Visualizer', 'Visualizer', (['img', 'None'], {}), '(img, None)\n', (11988, 11999), False, 'from detectron2.utils.visualizer import Visualizer\n'), ((12200, 12244), 'numpy.concatenate', 'np.concatenate', (['(anno_img, prop_img)'], {'axis': '(1)'}), '((anno_img, prop_img), axis=1)\n', (12214, 12244), True, 'import numpy as np\n'), ((14444, 14463), 'detectron2.utils.events.get_event_storage', 'get_event_storage', ([], {}), '()\n', (14461, 14463), False, 'from detectron2.utils.events import get_event_storage\n'), ((2284, 2347), 'torch.randn', 'torch.randn', (['(2 * self.shape - 1, dk // Nh)'], {'requires_grad': '(True)'}), '((2 * self.shape - 1, dk // Nh), requires_grad=True)\n', (2295, 2347), False, 'import torch\n'), ((2391, 2454), 'torch.randn', 'torch.randn', (['(2 * self.shape - 1, dk // Nh)'], {'requires_grad': '(True)'}), '((2 * self.shape - 1, dk // Nh), requires_grad=True)\n', (2402, 2454), False, 'import torch\n'), ((5165, 5189), 'torch.transpose', 'torch.transpose', (['q', '(2)', '(4)'], {}), '(q, 2, 4)\n', (5180, 5189), False, 'import torch\n'), ((6243, 6269), 'torch.zeros', 'torch.zeros', (['(B, Nh, L, 1)'], {}), '((B, Nh, L, 1))\n', (6254, 6269), False, 'import torch\n'), ((6393, 6420), 'torch.zeros', 'torch.zeros', (['(B, Nh, L - 1)'], {}), '((B, Nh, L - 1))\n', (6404, 6420), False, 'import torch\n'), ((16470, 16494), 'torch.jit.is_scripting', 'torch.jit.is_scripting', ([], {}), '()\n', (16492, 16494), False, 'import 
torch\n'), ((20062, 20161), 'detectron2.utils.logger.log_first_n', 'log_first_n', (['logging.WARN', '"""\'targets\' in the model inputs is now renamed to \'instances\'!"""'], {'n': '(10)'}), '(logging.WARN,\n "\'targets\' in the model inputs is now renamed to \'instances\'!", n=10)\n', (20073, 20161), False, 'from detectron2.utils.logger import log_first_n\n'), ((23795, 23894), 'detectron2.utils.logger.log_first_n', 'log_first_n', (['logging.WARN', '"""\'targets\' in the model inputs is now renamed to \'instances\'!"""'], {'n': '(10)'}), '(logging.WARN,\n "\'targets\' in the model inputs is now renamed to \'instances\'!", n=10)\n', (23806, 23894), False, 'from detectron2.utils.logger import log_first_n\n'), ((9958, 9982), 'torch.tensor', 'torch.tensor', (['pixel_mean'], {}), '(pixel_mean)\n', (9970, 9982), False, 'import torch\n'), ((10048, 10071), 'torch.tensor', 'torch.tensor', (['pixel_std'], {}), '(pixel_std)\n', (10060, 10071), False, 'import torch\n'), ((18662, 18686), 'torch.tensor', 'torch.tensor', (['pixel_mean'], {}), '(pixel_mean)\n', (18674, 18686), False, 'import torch\n'), ((18752, 18775), 'torch.tensor', 'torch.tensor', (['pixel_std'], {}), '(pixel_std)\n', (18764, 18775), False, 'import torch\n'), ((21810, 21845), 'torch.Tensor', 'torch.Tensor', (['cfg.MODEL.PIXEL_MEAN1'], {}), '(cfg.MODEL.PIXEL_MEAN1)\n', (21822, 21845), False, 'import torch\n'), ((21904, 21938), 'torch.Tensor', 'torch.Tensor', (['cfg.MODEL.PIXEL_STD1'], {}), '(cfg.MODEL.PIXEL_STD1)\n', (21916, 21938), False, 'import torch\n'), ((6000, 6033), 'torch.transpose', 'torch.transpose', (['rel_logits', '(2)', '(4)'], {}), '(rel_logits, 2, 4)\n', (6015, 6033), False, 'import torch\n')]
|
import sys
import numpy as np
file = sys.argv[-1]
with open(file) as f:
cnt = f.readlines()
count = []
distortion = []
calibration = []
linf = []
for line in cnt:
    if line.startswith('Adversarial Example Found Successfully:'):
        # query count and distortion are reported on the success line
        count.append(int(line.split(' ')[-2]))
        distortion.append(float(line.split(' ')[-6]))
    elif line.startswith('1 Predicted label'):
        calibration.append(float(line.split(' ')[-3]))
        linf.append(float(line.split(' ')[-2]))
print('len:', len(count))
print('count:', np.mean(count), np.median(count), np.min(count), np.max(count))
print('distortion:', np.mean(distortion), np.median(distortion), np.min(distortion), np.max(distortion))
if len(linf) != 0:
print('calibration:', np.mean(calibration), np.median(calibration), np.min(calibration), np.max(calibration))
print('linf:', np.mean(linf), np.median(linf), np.min(linf), np.max(linf))
|
[
"numpy.median",
"numpy.min",
"numpy.mean",
"numpy.max"
] |
[((493, 507), 'numpy.mean', 'np.mean', (['count'], {}), '(count)\n', (500, 507), True, 'import numpy as np\n'), ((509, 525), 'numpy.median', 'np.median', (['count'], {}), '(count)\n', (518, 525), True, 'import numpy as np\n'), ((527, 540), 'numpy.min', 'np.min', (['count'], {}), '(count)\n', (533, 540), True, 'import numpy as np\n'), ((542, 555), 'numpy.max', 'np.max', (['count'], {}), '(count)\n', (548, 555), True, 'import numpy as np\n'), ((578, 597), 'numpy.mean', 'np.mean', (['distortion'], {}), '(distortion)\n', (585, 597), True, 'import numpy as np\n'), ((599, 620), 'numpy.median', 'np.median', (['distortion'], {}), '(distortion)\n', (608, 620), True, 'import numpy as np\n'), ((622, 640), 'numpy.min', 'np.min', (['distortion'], {}), '(distortion)\n', (628, 640), True, 'import numpy as np\n'), ((642, 660), 'numpy.max', 'np.max', (['distortion'], {}), '(distortion)\n', (648, 660), True, 'import numpy as np\n'), ((707, 727), 'numpy.mean', 'np.mean', (['calibration'], {}), '(calibration)\n', (714, 727), True, 'import numpy as np\n'), ((729, 751), 'numpy.median', 'np.median', (['calibration'], {}), '(calibration)\n', (738, 751), True, 'import numpy as np\n'), ((753, 772), 'numpy.min', 'np.min', (['calibration'], {}), '(calibration)\n', (759, 772), True, 'import numpy as np\n'), ((774, 793), 'numpy.max', 'np.max', (['calibration'], {}), '(calibration)\n', (780, 793), True, 'import numpy as np\n'), ((814, 827), 'numpy.mean', 'np.mean', (['linf'], {}), '(linf)\n', (821, 827), True, 'import numpy as np\n'), ((829, 844), 'numpy.median', 'np.median', (['linf'], {}), '(linf)\n', (838, 844), True, 'import numpy as np\n'), ((846, 858), 'numpy.min', 'np.min', (['linf'], {}), '(linf)\n', (852, 858), True, 'import numpy as np\n'), ((860, 872), 'numpy.max', 'np.max', (['linf'], {}), '(linf)\n', (866, 872), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, \
reset_variable_tracker
def _runs_are_the_same(var_gen_1, var_gen_2, use_assert = False):
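    """
    Replay two variable generators as two consecutive "runs": the first run
    records the tracked variable, the second checks it against the recording
    (via assert if use_assert, otherwise by returning True/False).
    """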
delete_vars(['_test_random_var_32r5477w32'])
for run, gen in [(0, var_gen_1), (1, var_gen_2)]:
reset_variable_tracker()
for v in gen:
if use_assert:
assert_variable_matches_between_runs(v, '_test_random_var_32r5477w32')
else:
its_a_match=variable_matches_between_runs(v, '_test_random_var_32r5477w32')
if run==0:
assert its_a_match is None
else:
if not its_a_match:
return False
return True
def test_variable_matches_between_runs():
rng1 = np.random.RandomState(1234)
gen1 = (rng1.randn(3, 4) for _ in range(5))
rng2 = np.random.RandomState(1234)
gen2 = (rng2.randn(3, 4) for _ in range(5))
assert _runs_are_the_same(gen1, gen2)
rng = np.random.RandomState(1234)
gen1 = (rng.randn(3, 4) for _ in range(5))
gen2 = (rng.randn(3, 4) for _ in range(5))
assert not _runs_are_the_same(gen1, gen2)
gen1 = (i for i in range(5))
gen2 = (i for i in range(5))
assert _runs_are_the_same(gen1, gen2)
gen1 = (i for i in range(5))
gen2 = (i if i<4 else 7 for i in range(5))
assert not _runs_are_the_same(gen1, gen2)
def test_assert_variable_matches_between_runs():
rng1 = np.random.RandomState(1234)
gen1 = (rng1.randn(3, 4) for _ in range(5))
rng2 = np.random.RandomState(1234)
gen2 = (rng2.randn(3, 4) for _ in range(5))
_runs_are_the_same(gen1, gen2, use_assert=True)
rng = np.random.RandomState(1234)
gen1 = (rng.randn(3, 4) for _ in range(5))
gen2 = (rng.randn(3, 4) for _ in range(5))
with pytest.raises(AssertionError):
_runs_are_the_same(gen1, gen2, use_assert=True)
gen1 = (i for i in range(5))
gen2 = (i for i in range(5))
_runs_are_the_same(gen1, gen2, use_assert=True)
gen1 = (i for i in range(5))
gen2 = (i if i<4 else 7 for i in range(5))
with pytest.raises(AssertionError):
_runs_are_the_same(gen1, gen2, use_assert=True)
if __name__ == '__main__':
test_variable_matches_between_runs()
test_assert_variable_matches_between_runs()
|
[
"artemis.general.nondeterminism_hunting.delete_vars",
"numpy.random.RandomState",
"artemis.general.nondeterminism_hunting.variable_matches_between_runs",
"pytest.raises",
"artemis.general.nondeterminism_hunting.reset_variable_tracker",
"artemis.general.nondeterminism_hunting.assert_variable_matches_between_runs"
] |
[((268, 312), 'artemis.general.nondeterminism_hunting.delete_vars', 'delete_vars', (["['_test_random_var_32r5477w32']"], {}), "(['_test_random_var_32r5477w32'])\n", (279, 312), False, 'from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, reset_variable_tracker\n'), ((891, 918), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (912, 918), True, 'import numpy as np\n'), ((978, 1005), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (999, 1005), True, 'import numpy as np\n'), ((1107, 1134), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1128, 1134), True, 'import numpy as np\n'), ((1574, 1601), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1595, 1601), True, 'import numpy as np\n'), ((1661, 1688), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1682, 1688), True, 'import numpy as np\n'), ((1800, 1827), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (1821, 1827), True, 'import numpy as np\n'), ((375, 399), 'artemis.general.nondeterminism_hunting.reset_variable_tracker', 'reset_variable_tracker', ([], {}), '()\n', (397, 399), False, 'from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, reset_variable_tracker\n'), ((1931, 1960), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1944, 1960), False, 'import pytest\n'), ((2227, 2256), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2240, 2256), False, 'import pytest\n'), ((465, 535), 'artemis.general.nondeterminism_hunting.assert_variable_matches_between_runs', 'assert_variable_matches_between_runs', (['v', '"""_test_random_var_32r5477w32"""'], {}), "(v, '_test_random_var_32r5477w32')\n", (501, 535), False, 'from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, reset_variable_tracker\n'), ((582, 645), 'artemis.general.nondeterminism_hunting.variable_matches_between_runs', 'variable_matches_between_runs', (['v', '"""_test_random_var_32r5477w32"""'], {}), "(v, '_test_random_var_32r5477w32')\n", (611, 645), False, 'from artemis.general.nondeterminism_hunting import delete_vars, assert_variable_matches_between_runs, variable_matches_between_runs, reset_variable_tracker\n')]
|
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
import tensorflow as tf
from adnc.model.utils import layer_norm
@pytest.fixture()
def session():
with tf.Session() as sess:
yield sess
tf.reset_default_graph()
@pytest.fixture()
def np_rng():
seed = np.random.randint(1, 999)
return np.random.RandomState(seed)
def test_layer_norm(session, np_rng):
np_weights = np_rng.normal(0, 1, [64, 128])
weights = tf.constant(np_weights, dtype=tf.float32)
weights_ln = layer_norm(weights, 'test')
session.run(tf.global_variables_initializer())
weights_ln = session.run(weights_ln)
assert weights_ln.shape == (64, 128)
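    # Note: only the output shape is checked here; a stricter test could also
    # assert mean ~ 0 and variance ~ 1 along the normalized axis.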
|
[
"adnc.model.utils.layer_norm",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"pytest.fixture",
"tensorflow.Session",
"tensorflow.constant",
"numpy.random.RandomState",
"numpy.random.randint"
] |
[((751, 767), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (765, 767), False, 'import pytest\n'), ((864, 880), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (878, 880), False, 'import pytest\n'), ((837, 861), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (859, 861), True, 'import tensorflow as tf\n'), ((906, 931), 'numpy.random.randint', 'np.random.randint', (['(1)', '(999)'], {}), '(1, 999)\n', (923, 931), True, 'import numpy as np\n'), ((943, 970), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (964, 970), True, 'import numpy as np\n'), ((1073, 1114), 'tensorflow.constant', 'tf.constant', (['np_weights'], {'dtype': 'tf.float32'}), '(np_weights, dtype=tf.float32)\n', (1084, 1114), True, 'import tensorflow as tf\n'), ((1132, 1159), 'adnc.model.utils.layer_norm', 'layer_norm', (['weights', '"""test"""'], {}), "(weights, 'test')\n", (1142, 1159), False, 'from adnc.model.utils import layer_norm\n'), ((792, 804), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (802, 804), True, 'import tensorflow as tf\n'), ((1177, 1210), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1208, 1210), True, 'import tensorflow as tf\n')]
|
from scipy.integrate import quad,dblquad
import numpy as np
from scipy.special import gamma
from scipy.special import gammainc
def poisson_integrand(tau, rho, beta, fm=1, K=1, alpha=2):
#lambda = beta*f(m)*rho*tau
L = np.array([(tau*beta*rho*fm)**k/gamma(k+1) for k in range(K)])
return (1-np.exp(-tau*beta*rho*fm)*np.sum(L))*tau**(-alpha-1)
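# In poisson_integrand, 1 - exp(-lam)*sum_{k<K} lam**k/k! is P(X >= K) for
# X ~ Poisson(lam) with lam = tau*beta*rho*fm, weighted by the power-law
# factor tau**(-alpha-1).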
def exponential_integrand(tau, rho, beta, fm=1, K=1, alpha=2):
scale = tau*rho*beta*fm
return np.exp(-K/scale)*tau**(-alpha-1)
def exponential_pdf(kappa,scale=1):
return np.exp(-kappa/scale)/scale
def weibull_integrand(tau, rho, beta, fm=1, K=1, alpha=2, shape=2):
scale = tau*rho*beta*fm/gamma(1+1/shape) #mean = tau*rho*beta
return np.exp(-(K/scale)**shape)*tau**(-alpha-1)
def weibull_pdf(kappa,scale=1,shape=2):
return (shape/scale)*(kappa/scale)**(shape-1)*np.exp(-(kappa/scale)**shape)
def frechet_integrand(tau, rho, beta, fm=1, K=1, alpha=2, shape=2):
scale = tau*rho*beta*fm/gamma(1-1/shape)
return (1-np.exp(-(K/scale)**(-shape)))*tau**(-alpha-1)
def frechet_pdf(kappa,scale=1,shape=2):
return (shape/scale)*(kappa/scale)**(-shape-1)*np.exp(-(kappa/scale)**(-shape))
def gamma_special_integrand(tau, rho, beta, fm=1, K=1, alpha=2 ,z = 0.):
#play with the scale/shape instead of just the scale, such that variance != mean^2
#z = 0 is equivalent to the exponential
param = tau*rho*beta*fm
return (1 - gammainc(param**z,K/param**(1-z)))*tau**(-alpha-1)
def kernel(rho, beta, fm=1, K=1, alpha=2., tmin=1, T=np.inf,
integrand=exponential_integrand,
args=tuple()):
Z = (tmin**(-alpha)-T**(-alpha))/alpha
_args = (rho,beta,fm,K,alpha,*args)
return quad(integrand,tmin,T,args=_args)[0]/Z
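# Note: Z = (tmin**(-alpha) - T**(-alpha))/alpha is exactly the integral of
# tau**(-alpha-1) from tmin to T, so kernel() returns the integrand averaged
# over a truncated power-law distribution of tau.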
#same as kernel, but put beta first for integration and multiply by Q(beta)
def kernel2(beta, Q, rho, fm=1, K=1, alpha=2.,tmin=1, T=np.inf,
integrand=exponential_integrand,
args=tuple()):
_args = (rho,beta,fm,K,alpha, *args)
return Q(beta)*quad(integrand,tmin,T,args=_args)[0]
def kernel_het_beta(rho, fm=1, K=1, alpha=2., tmin=1, T=np.inf,
integrand=exponential_integrand,args=tuple(),
Q=lambda b: np.exp(-b),betalim=(0,np.inf)):
Z = (tmin**(-alpha)-T**(-alpha))/alpha
_args=(Q,rho,fm,K,alpha,tmin,T,integrand,args)
return quad(kernel2,betalim[0],betalim[1],args=_args)[0]/Z
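# Minimal usage sketch (parameter values are illustrative only):
#   theta = kernel_het_beta(rho=0.1, K=1, alpha=1.5)  # beta averaged over the default Q(beta)=exp(-beta)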
if __name__ == '__main__':
import matplotlib.pyplot as plt
alpha_list = [0.5,1.,1.5,2.]
rho_list = np.logspace(-3,0,100)
beta = 0.1
for alpha in alpha_list:
label=fr"$\alpha = {alpha}$"
        kernel_list = [kernel(rho, beta, K=0.1, alpha=alpha, tmin=1,
                              integrand=gamma_special_integrand,
                              args=tuple())
                       for rho in rho_list]
        plt.loglog(rho_list, kernel_list, '-', label=label)
        plt.loglog(rho_list, rho_list**alpha, '--', label=fr"$\rho^{{{alpha}}}$")
plt.legend()
plt.xlabel(r"$\rho$")
plt.ylabel(r"$\theta_m(\rho)$")
plt.show()
|
[
"matplotlib.pyplot.loglog",
"matplotlib.pyplot.show",
"numpy.sum",
"scipy.integrate.quad",
"numpy.logspace",
"matplotlib.pyplot.legend",
"scipy.special.gammainc",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"scipy.special.gamma"
] |
[((2512, 2535), 'numpy.logspace', 'np.logspace', (['(-3)', '(0)', '(100)'], {}), '(-3, 0, 100)\n', (2523, 2535), True, 'import numpy as np\n'), ((2945, 2957), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2955, 2957), True, 'import matplotlib.pyplot as plt\n'), ((2962, 2983), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\rho$"""'], {}), "('$\\\\rho$')\n", (2972, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3020), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\theta_m(\\\\rho)$"""'], {}), "('$\\\\theta_m(\\\\rho)$')\n", (2998, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3032, 3034), True, 'import matplotlib.pyplot as plt\n'), ((458, 476), 'numpy.exp', 'np.exp', (['(-K / scale)'], {}), '(-K / scale)\n', (464, 476), True, 'import numpy as np\n'), ((539, 561), 'numpy.exp', 'np.exp', (['(-kappa / scale)'], {}), '(-kappa / scale)\n', (545, 561), True, 'import numpy as np\n'), ((663, 683), 'scipy.special.gamma', 'gamma', (['(1 + 1 / shape)'], {}), '(1 + 1 / shape)\n', (668, 683), False, 'from scipy.special import gamma\n'), ((712, 741), 'numpy.exp', 'np.exp', (['(-(K / scale) ** shape)'], {}), '(-(K / scale) ** shape)\n', (718, 741), True, 'import numpy as np\n'), ((845, 878), 'numpy.exp', 'np.exp', (['(-(kappa / scale) ** shape)'], {}), '(-(kappa / scale) ** shape)\n', (851, 878), True, 'import numpy as np\n'), ((972, 992), 'scipy.special.gamma', 'gamma', (['(1 - 1 / shape)'], {}), '(1 - 1 / shape)\n', (977, 992), False, 'from scipy.special import gamma\n'), ((1141, 1175), 'numpy.exp', 'np.exp', (['(-(kappa / scale) ** -shape)'], {}), '(-(kappa / scale) ** -shape)\n', (1147, 1175), True, 'import numpy as np\n'), ((2211, 2221), 'numpy.exp', 'np.exp', (['(-b)'], {}), '(-b)\n', (2217, 2221), True, 'import numpy as np\n'), ((2827, 2878), 'matplotlib.pyplot.loglog', 'plt.loglog', (['rho_list', 'kernel_list', '"""-"""'], {'label': 'label'}), "(rho_list, kernel_list, '-', label=label)\n", (2837, 2878), True, 'import matplotlib.pyplot as plt\n'), ((2885, 2943), 'matplotlib.pyplot.loglog', 'plt.loglog', (['rho_list', '(rho_list ** alpha)', '"""--"""'], {'label': 'label'}), "(rho_list, rho_list ** alpha, '--', label=label)\n", (2895, 2943), True, 'import matplotlib.pyplot as plt\n'), ((1003, 1033), 'numpy.exp', 'np.exp', (['(-(K / scale) ** -shape)'], {}), '(-(K / scale) ** -shape)\n', (1009, 1033), True, 'import numpy as np\n'), ((1423, 1465), 'scipy.special.gammainc', 'gammainc', (['(param ** z)', '(K / param ** (1 - z))'], {}), '(param ** z, K / param ** (1 - z))\n', (1431, 1465), False, 'from scipy.special import gammainc\n'), ((1700, 1736), 'scipy.integrate.quad', 'quad', (['integrand', 'tmin', 'T'], {'args': '_args'}), '(integrand, tmin, T, args=_args)\n', (1704, 1736), False, 'from scipy.integrate import quad, dblquad\n'), ((2010, 2046), 'scipy.integrate.quad', 'quad', (['integrand', 'tmin', 'T'], {'args': '_args'}), '(integrand, tmin, T, args=_args)\n', (2014, 2046), False, 'from scipy.integrate import quad, dblquad\n'), ((2348, 2397), 'scipy.integrate.quad', 'quad', (['kernel2', 'betalim[0]', 'betalim[1]'], {'args': '_args'}), '(kernel2, betalim[0], betalim[1], args=_args)\n', (2352, 2397), False, 'from scipy.integrate import quad, dblquad\n'), ((258, 270), 'scipy.special.gamma', 'gamma', (['(k + 1)'], {}), '(k + 1)\n', (263, 270), False, 'from scipy.special import gamma\n'), ((303, 333), 'numpy.exp', 'np.exp', (['(-tau * beta * rho * fm)'], {}), '(-tau * beta * rho * fm)\n', 
(309, 333), True, 'import numpy as np\n'), ((328, 337), 'numpy.sum', 'np.sum', (['L'], {}), '(L)\n', (334, 337), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import random
import argparse
import logging
import glog as log
import os
import sys
from stcgan.shadow6.nets import *
import stcgan.shadow6.module as module
import glob
import mxnet as mx
# import pydevd
# pydevd.settrace('172.17.122.65', port=10203, stdoutToServer=True, stderrToServer=True)
def get_args(arglist=None):
parser = argparse.ArgumentParser(
description='Shadow Removel Params')
parser.add_argument('-dbprefix', type=str, default='./ISTD_Dataset/train',
help='path of generated dataset prefix')
parser.add_argument('-valprefix', type=str, default='./',
help='path of generated dataset prefix')
parser.add_argument('-logfn', type=str,
default='deshadow_train', help='path to save log file')
parser.add_argument('-gpuid', type=int, default=0,
help='gpu id, -1 for cpu')
parser.add_argument('-lr', type=float, default=2e-3, help="learning rate")
return parser.parse_args() if arglist is None else parser.parse_args(arglist)
def ferr(label, pred):
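    # Fraction of samples where the thresholded prediction (pred > 0.5)
    # disagrees with the binary label; registered as a custom MXNet metric below.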
pred = pred.ravel()
label = label.ravel()
return np.abs(label - (pred > 0.5)).sum() / label.shape[0]
if __name__ == '__main__':
args = get_args()
# environment setting
log_file_name = args.logfn + '.log'
log_file = open(log_file_name, 'w')
log_file.close()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(log_file_name)
logger.addHandler(fh)
if args.gpuid >= 0:
context = mx.gpu(args.gpuid)
else:
context = mx.cpu()
if not os.path.exists(args.dbprefix):
logging.info(
"training data not exist, pls check if the file path is correct.")
sys.exit(0)
if not os.path.exists("./result"):
os.mkdir("./result")
if not os.path.exists("./val_result"):
os.mkdir("./val_result")
if not os.path.exists("./trained_params"):
os.mkdir("./trained_params")
mstr = 'train'
train_s_dir = os.path.join(args.dbprefix, '%s_A' % mstr) # with shadow
train_m_dir = os.path.join(args.dbprefix, '%s_B' % mstr) # shadow mask
train_g_dir = os.path.join(args.dbprefix, '%s_C' % mstr) # gt
val_s_dir = os.path.join(args.valprefix, 'test')
# val_m_dir = os.path.join(args.valprefix, 'test_B')
# val_g_dir = os.path.join(args.valprefix, 'test_C')
assert os.path.exists(train_s_dir), '%s_A not exist!' % mstr
assert os.path.exists(train_m_dir), '%s_B not exist!' % mstr
assert os.path.exists(train_g_dir), '%s_C not exist!' % mstr
filenms = os.listdir(train_s_dir)
filenms_test = os.listdir(val_s_dir)
# use rec file to load image.
index = list(range(len(filenms)))
index2 = list(range(len(filenms_test)))
lr = args.lr
beta1 = 0.5
batch_size = 16
# rand_shape = (batch_size, 100)
num_epoch = 1000
width = 256
height = 256
data_g1_shape = (batch_size, 3, width, height)
data_g2_shape = (batch_size, 4, width, height)
data_d1_shape = (batch_size, 4, width, height)
data_d2_shape = (batch_size, 7, width, height)
# initialize net
gmod = module.GANModule(
shadow_det_net_G1_v2(),
shadow_removal_net_G2_v2(),
shadow_det_net_D_v2(),
bce_loss_v2(),
l1_loss_v2(),
context=context,
data_g1_shape=data_g1_shape,
data_g2_shape=data_g2_shape,
data_d1_shape=data_d1_shape,
data_d2_shape=data_d2_shape,
hw=int(width / 32)
)
gmod.init_params(mx.init.Uniform(0.2))
gmod.init_optimizer(lr)
metric_acc1 = mx.metric.CustomMetric(ferr)
metric_acc2 = mx.metric.CustomMetric(ferr)
# load data
for epoch in range(num_epoch):
metric_acc1.reset()
metric_acc2.reset()
random.shuffle(index)
random.shuffle(index2)
data_s = np.zeros((batch_size, 3, width, height))
data_m = np.zeros((batch_size, 1, width, height))
data_g = np.zeros((batch_size, 3, width, height))
for i in range(len(index) // batch_size):
for j in range(batch_size):
data_s_tmp = cv2.resize(cv2.imread(os.path.join(
train_s_dir, filenms[index[i * batch_size + j]])) / 255.0, (width, height))
data_m_tmp = cv2.resize(cv2.imread(os.path.join(
train_m_dir, filenms[index[i * batch_size + j]]), cv2.IMREAD_GRAYSCALE) / 255.0, (width, height))
data_m_tmp[data_m_tmp > 0.5] = 1.0
data_m_tmp[data_m_tmp <= 0.5] = 0.0
data_g_tmp = cv2.resize(cv2.imread(os.path.join(
train_g_dir, filenms[index[i * batch_size + j]])) / 255, (width, height))
# random crop
random_x = random.randint(0, data_s_tmp.shape[1] - height)
random_y = random.randint(0, data_s_tmp.shape[0] - width)
data_s[j, :, :, :] = np.transpose(
data_s_tmp[random_y: random_y + width, random_x: random_x + height, :], (2, 0, 1))
data_m[j, 0, :, :] = data_m_tmp[random_y: random_y +
width, random_x: random_x + height]
data_g[j, :, :, :] = np.transpose(
data_g_tmp[random_y: random_y + width, random_x: random_x + height, :], (2, 0, 1))
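        # One joint G/D update per mini-batch; judging by the configured
        # channel counts, D1 sees (image, mask) = 4 channels and D2 sees
        # (image, mask, shadow-free) = 7 channels.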
gmod.update(mx.nd.array(data_s, ctx=context), mx.nd.array(
data_m, ctx=context), mx.nd.array(data_g, ctx=context))
gmod.temp_label[:] = 0.0
metric_acc1.update([gmod.temp_label], gmod.outputs_fake1)
metric_acc2.update([gmod.temp_label], gmod.outputs_fake2)
gmod.temp_label[:] = 1.0
metric_acc1.update([gmod.temp_label], gmod.outputs_real1)
metric_acc2.update([gmod.temp_label], gmod.outputs_real2)
# training results
log.info('epoch: %d, bce_loss is %.5f, adver_d1_loss is %.5f, l1_loss is %.5f, adver_d2_loss is %.5f'%(
epoch,gmod.loss[0, 0], gmod.loss[0, 1], gmod.loss[0, 2], gmod.loss[0, 3]))
if epoch % 500 == 0 or epoch == num_epoch - 1:
gmod.modG1.save_params('G1_epoch_{}.params'.format(epoch))
gmod.modG2.save_params('G2_epoch_{}.params'.format(epoch))
gmod.modD1.save_params('D1_epoch_{}.params'.format(epoch))
gmod.modD2.save_params('D2_epoch_{}.params'.format(epoch))
img_dir = glob.glob("test/*")
img_name = []
for i in img_dir:
        value = i[i.find("test/") + 5:]  # bare filename without the 'test/' prefix
# print(value)
# img_name.append(value)
# dir_length = len(img_dir)
# for i in range(dir_length):
# img=cv2.imread(os.path.join(val_s_dir, i[i.find("test/") + 5:]))
img_gt = cv2.imread(os.path.join(
val_s_dir, i[i.find("test/") + 5:]))
# w = cv2.imread(os.path.join(val_s_dir, i[i.find("test/") + 5:]))[1]
# h = cv2.imread(os.path.join(val_s_dir, i[i.find("test/") + 5:]))[0]
data_s_tmp = cv2.resize(cv2.imread(os.path.join(
val_s_dir, i[i.find("test/") + 5:])) / 255.0, (width, height))
# data_m_tmp = cv2.resize(cv2.imread(os.path.join(val_m_dir, filenms_test[index2[i]]),
# cv2.IMREAD_GRAYSCALE), (width, height))
# data_g_tmp = cv2.resize(cv2.imread(os.path.join(
# val_g_dir, filenms_test[index2[i]])), (width, height))
# random crop
random_x = random.randint(0, data_s_tmp.shape[1] - height)
random_y = random.randint(0, data_s_tmp.shape[0] - width)
data_s[0, :, :, :] = np.transpose(
data_s_tmp[random_y: random_y + width, random_x: random_x + height, :], (2, 0, 1))
# data_m[0, 0, :, :] = data_m_tmp[random_y: random_y +
# width, random_x: random_x + height]
# data_g[0, :, :, :] = np.transpose(
# data_g_tmp[random_y: random_y + width, random_x: random_x + height, :], (2, 0, 1))
gmod.forward(mx.nd.array(data_s, ctx=context))
# cv2.imwrite('./val_result/sin_{}_{}.jpg'.format(epoch, i),
# np.round((np.transpose(data_s[0, :, :, :], (1, 2, 0))) * 255))
# cv2.imwrite('./val_result/min_{}_{}.jpg'.format(epoch, i),
# data_m_tmp)
# cv2.imwrite('./val_result/gin_{}_{}.jpg'.format(epoch, i),
# data_g_tmp)
# cv2.imwrite('./SBU/shadow_free/'+img_name[i],#shadow free
# np.clip(np.round((np.transpose(gmod.temp_outG2.asnumpy()[0, :, :, :], (1, 2, 0)) + 1) / 2 * 255), 0, 255).astype(np.uint8))
# cv2.imwrite('./SBU/shadow_mask/'+img_name[i],
# np.round((np.transpose(gmod.temp_outG1.asnumpy()[0, :, :, :], (1, 2, 0)) + 1) / 2 * 255))
# shadow_remove
# shadow_mask
img = np.clip(np.round(np.transpose(gmod.temp_outG2.asnumpy()[
0, :, :, :], (1, 2, 0)) * 255), 0, 255).astype(np.uint8)
img = cv2.resize(img, (img_gt.shape[1], img_gt.shape[0]))
img2 = np.round((np.transpose(gmod.temp_outG1.asnumpy()[0, :, :, :], (1, 2, 0)) * 255).astype(np.uint8))
img2 = cv2.resize(img2, (img_gt.shape[1], img_gt.shape[0]))
cv2.imwrite('result/shadow_remove/' + value,
img)
cv2.imwrite('result/shadow_mask/' + value,
img2)
|
[
"os.mkdir",
"numpy.abs",
"argparse.ArgumentParser",
"random.shuffle",
"glob.glob",
"mxnet.metric.CustomMetric",
"os.path.join",
"logging.FileHandler",
"random.randint",
"glog.info",
"cv2.imwrite",
"os.path.exists",
"numpy.transpose",
"mxnet.gpu",
"cv2.resize",
"mxnet.cpu",
"mxnet.nd.array",
"os.listdir",
"sys.exit",
"mxnet.init.Uniform",
"numpy.zeros",
"logging.info",
"logging.getLogger"
] |
[((369, 429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Shadow Removel Params"""'}), "(description='Shadow Removel Params')\n", (392, 429), False, 'import argparse\n'), ((1430, 1449), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1447, 1449), False, 'import logging\n'), ((1493, 1527), 'logging.FileHandler', 'logging.FileHandler', (['log_file_name'], {}), '(log_file_name)\n', (1512, 1527), False, 'import logging\n'), ((2082, 2124), 'os.path.join', 'os.path.join', (['args.dbprefix', "('%s_A' % mstr)"], {}), "(args.dbprefix, '%s_A' % mstr)\n", (2094, 2124), False, 'import os\n'), ((2158, 2200), 'os.path.join', 'os.path.join', (['args.dbprefix', "('%s_B' % mstr)"], {}), "(args.dbprefix, '%s_B' % mstr)\n", (2170, 2200), False, 'import os\n'), ((2234, 2276), 'os.path.join', 'os.path.join', (['args.dbprefix', "('%s_C' % mstr)"], {}), "(args.dbprefix, '%s_C' % mstr)\n", (2246, 2276), False, 'import os\n'), ((2299, 2335), 'os.path.join', 'os.path.join', (['args.valprefix', '"""test"""'], {}), "(args.valprefix, 'test')\n", (2311, 2335), False, 'import os\n'), ((2461, 2488), 'os.path.exists', 'os.path.exists', (['train_s_dir'], {}), '(train_s_dir)\n', (2475, 2488), False, 'import os\n'), ((2526, 2553), 'os.path.exists', 'os.path.exists', (['train_m_dir'], {}), '(train_m_dir)\n', (2540, 2553), False, 'import os\n'), ((2591, 2618), 'os.path.exists', 'os.path.exists', (['train_g_dir'], {}), '(train_g_dir)\n', (2605, 2618), False, 'import os\n'), ((2659, 2682), 'os.listdir', 'os.listdir', (['train_s_dir'], {}), '(train_s_dir)\n', (2669, 2682), False, 'import os\n'), ((2702, 2723), 'os.listdir', 'os.listdir', (['val_s_dir'], {}), '(val_s_dir)\n', (2712, 2723), False, 'import os\n'), ((3682, 3710), 'mxnet.metric.CustomMetric', 'mx.metric.CustomMetric', (['ferr'], {}), '(ferr)\n', (3704, 3710), True, 'import mxnet as mx\n'), ((3729, 3757), 'mxnet.metric.CustomMetric', 'mx.metric.CustomMetric', (['ferr'], {}), '(ferr)\n', (3751, 3757), True, 'import mxnet as mx\n'), ((1597, 1615), 'mxnet.gpu', 'mx.gpu', (['args.gpuid'], {}), '(args.gpuid)\n', (1603, 1615), True, 'import mxnet as mx\n'), ((1644, 1652), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (1650, 1652), True, 'import mxnet as mx\n'), ((1665, 1694), 'os.path.exists', 'os.path.exists', (['args.dbprefix'], {}), '(args.dbprefix)\n', (1679, 1694), False, 'import os\n'), ((1704, 1783), 'logging.info', 'logging.info', (['"""training data not exist, pls check if the file path is correct."""'], {}), "('training data not exist, pls check if the file path is correct.')\n", (1716, 1783), False, 'import logging\n'), ((1805, 1816), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1813, 1816), False, 'import sys\n'), ((1828, 1854), 'os.path.exists', 'os.path.exists', (['"""./result"""'], {}), "('./result')\n", (1842, 1854), False, 'import os\n'), ((1864, 1884), 'os.mkdir', 'os.mkdir', (['"""./result"""'], {}), "('./result')\n", (1872, 1884), False, 'import os\n'), ((1896, 1926), 'os.path.exists', 'os.path.exists', (['"""./val_result"""'], {}), "('./val_result')\n", (1910, 1926), False, 'import os\n'), ((1936, 1960), 'os.mkdir', 'os.mkdir', (['"""./val_result"""'], {}), "('./val_result')\n", (1944, 1960), False, 'import os\n'), ((1972, 2006), 'os.path.exists', 'os.path.exists', (['"""./trained_params"""'], {}), "('./trained_params')\n", (1986, 2006), False, 'import os\n'), ((2016, 2044), 'os.mkdir', 'os.mkdir', (['"""./trained_params"""'], {}), "('./trained_params')\n", (2024, 2044), False, 'import os\n'), ((3612, 3632), 
'mxnet.init.Uniform', 'mx.init.Uniform', (['(0.2)'], {}), '(0.2)\n', (3627, 3632), True, 'import mxnet as mx\n'), ((3873, 3894), 'random.shuffle', 'random.shuffle', (['index'], {}), '(index)\n', (3887, 3894), False, 'import random\n'), ((3903, 3925), 'random.shuffle', 'random.shuffle', (['index2'], {}), '(index2)\n', (3917, 3925), False, 'import random\n'), ((3943, 3983), 'numpy.zeros', 'np.zeros', (['(batch_size, 3, width, height)'], {}), '((batch_size, 3, width, height))\n', (3951, 3983), True, 'import numpy as np\n'), ((4001, 4041), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, width, height)'], {}), '((batch_size, 1, width, height))\n', (4009, 4041), True, 'import numpy as np\n'), ((4059, 4099), 'numpy.zeros', 'np.zeros', (['(batch_size, 3, width, height)'], {}), '((batch_size, 3, width, height))\n', (4067, 4099), True, 'import numpy as np\n'), ((6537, 6556), 'glob.glob', 'glob.glob', (['"""test/*"""'], {}), "('test/*')\n", (6546, 6556), False, 'import glob\n'), ((5978, 6173), 'glog.info', 'log.info', (["('epoch: %d, bce_loss is %.5f, adver_d1_loss is %.5f, l1_loss is %.5f, adver_d2_loss is %.5f'\n % (epoch, gmod.loss[0, 0], gmod.loss[0, 1], gmod.loss[0, 2], gmod.loss\n [0, 3]))"], {}), "(\n 'epoch: %d, bce_loss is %.5f, adver_d1_loss is %.5f, l1_loss is %.5f, adver_d2_loss is %.5f'\n % (epoch, gmod.loss[0, 0], gmod.loss[0, 1], gmod.loss[0, 2], gmod.loss\n [0, 3]))\n", (5986, 6173), True, 'import glog as log\n'), ((7683, 7730), 'random.randint', 'random.randint', (['(0)', '(data_s_tmp.shape[1] - height)'], {}), '(0, data_s_tmp.shape[1] - height)\n', (7697, 7730), False, 'import random\n'), ((7754, 7800), 'random.randint', 'random.randint', (['(0)', '(data_s_tmp.shape[0] - width)'], {}), '(0, data_s_tmp.shape[0] - width)\n', (7768, 7800), False, 'import random\n'), ((7834, 7931), 'numpy.transpose', 'np.transpose', (['data_s_tmp[random_y:random_y + width, random_x:random_x + height, :]', '(2, 0, 1)'], {}), '(data_s_tmp[random_y:random_y + width, random_x:random_x +\n height, :], (2, 0, 1))\n', (7846, 7931), True, 'import numpy as np\n'), ((9382, 9433), 'cv2.resize', 'cv2.resize', (['img', '(img_gt.shape[1], img_gt.shape[0])'], {}), '(img, (img_gt.shape[1], img_gt.shape[0]))\n', (9392, 9433), False, 'import cv2\n'), ((9572, 9624), 'cv2.resize', 'cv2.resize', (['img2', '(img_gt.shape[1], img_gt.shape[0])'], {}), '(img2, (img_gt.shape[1], img_gt.shape[0]))\n', (9582, 9624), False, 'import cv2\n'), ((9638, 9687), 'cv2.imwrite', 'cv2.imwrite', (["('result/shadow_remove/' + value)", 'img'], {}), "('result/shadow_remove/' + value, img)\n", (9649, 9687), False, 'import cv2\n'), ((9725, 9773), 'cv2.imwrite', 'cv2.imwrite', (["('result/shadow_mask/' + value)", 'img2'], {}), "('result/shadow_mask/' + value, img2)\n", (9736, 9773), False, 'import cv2\n'), ((1187, 1215), 'numpy.abs', 'np.abs', (['(label - (pred > 0.5))'], {}), '(label - (pred > 0.5))\n', (1193, 1215), True, 'import numpy as np\n'), ((4854, 4901), 'random.randint', 'random.randint', (['(0)', '(data_s_tmp.shape[1] - height)'], {}), '(0, data_s_tmp.shape[1] - height)\n', (4868, 4901), False, 'import random\n'), ((4929, 4975), 'random.randint', 'random.randint', (['(0)', '(data_s_tmp.shape[0] - width)'], {}), '(0, data_s_tmp.shape[0] - width)\n', (4943, 4975), False, 'import random\n'), ((5013, 5110), 'numpy.transpose', 'np.transpose', (['data_s_tmp[random_y:random_y + width, random_x:random_x + height, :]', '(2, 0, 1)'], {}), '(data_s_tmp[random_y:random_y + width, random_x:random_x +\n height, :], (2, 0, 1))\n', (5025, 5110), True, 
'import numpy as np\n'), ((5320, 5417), 'numpy.transpose', 'np.transpose', (['data_g_tmp[random_y:random_y + width, random_x:random_x + height, :]', '(2, 0, 1)'], {}), '(data_g_tmp[random_y:random_y + width, random_x:random_x +\n height, :], (2, 0, 1))\n', (5332, 5417), True, 'import numpy as np\n'), ((5461, 5493), 'mxnet.nd.array', 'mx.nd.array', (['data_s'], {'ctx': 'context'}), '(data_s, ctx=context)\n', (5472, 5493), True, 'import mxnet as mx\n'), ((5495, 5527), 'mxnet.nd.array', 'mx.nd.array', (['data_m'], {'ctx': 'context'}), '(data_m, ctx=context)\n', (5506, 5527), True, 'import mxnet as mx\n'), ((5546, 5578), 'mxnet.nd.array', 'mx.nd.array', (['data_g'], {'ctx': 'context'}), '(data_g, ctx=context)\n', (5557, 5578), True, 'import mxnet as mx\n'), ((8287, 8319), 'mxnet.nd.array', 'mx.nd.array', (['data_s'], {'ctx': 'context'}), '(data_s, ctx=context)\n', (8298, 8319), True, 'import mxnet as mx\n'), ((4242, 4303), 'os.path.join', 'os.path.join', (['train_s_dir', 'filenms[index[i * batch_size + j]]'], {}), '(train_s_dir, filenms[index[i * batch_size + j]])\n', (4254, 4303), False, 'import os\n'), ((4403, 4464), 'os.path.join', 'os.path.join', (['train_m_dir', 'filenms[index[i * batch_size + j]]'], {}), '(train_m_dir, filenms[index[i * batch_size + j]])\n', (4415, 4464), False, 'import os\n'), ((4689, 4750), 'os.path.join', 'os.path.join', (['train_g_dir', 'filenms[index[i * batch_size + j]]'], {}), '(train_g_dir, filenms[index[i * batch_size + j]])\n', (4701, 4750), False, 'import os\n')]
|
import sys,os,time,csv,getopt,cv2,argparse
import numpy, ctypes, array
import numpy as np
#import matplotlib as plt
from datetime import datetime
from ctypes import cdll, c_char_p
from skimage.transform import resize
from numpy.ctypeslib import ndpointer
from lime import lime_image
from skimage.segmentation import mark_boundaries
import ntpath
import scipy.misc
from PIL import Image
AnnInferenceLib = ctypes.cdll.LoadLibrary('/home/rajy/work/inceptionv4/build/libannmodule.so')
inf_fun = AnnInferenceLib.annRunInference
inf_fun.restype = ctypes.c_int
inf_fun.argtypes = [ctypes.c_void_p,
ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),
ctypes.c_size_t,
ndpointer(ctypes.c_float, flags="C_CONTIGUOUS"),
ctypes.c_size_t]
hdl = 0
def PreprocessImage(img, dim):
imgw = img.shape[1]
imgh = img.shape[0]
imgb = np.empty((dim[0], dim[1], 3)) #for inception v4
imgb.fill(1.0)
if imgh/imgw > dim[1]/dim[0]:
neww = int(imgw * dim[1] / imgh)
newh = dim[1]
else:
newh = int(imgh * dim[0] / imgw)
neww = dim[0]
offx = int((dim[0] - neww)/2)
offy = int((dim[1] - newh)/2)
imgc = img.copy()*(2.0/255.0) - 1.0
#print('INFO:: newW:%d newH:%d offx:%d offy: %d' % (neww, newh, offx, offy))
imgb[offy:offy+newh,offx:offx+neww,:] = resize(imgc,(newh,neww),1.0)
#im = imgb[:,:,(2,1,0)]
return imgb
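# Usage sketch (path and size are illustrative): letterbox a BGR OpenCV image
# into the network input, e.g. PreprocessImage(cv2.imread('dog.jpg'), (299, 299)).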
def runInference(img):
global hdl
imgw = img.shape[1]
imgh = img.shape[0]
#proc_images.append(im)
out_buf = bytearray(1000*4)
#out_buf = memoryview(out_buf)
out = np.frombuffer(out_buf, dtype=numpy.float32)
#im = im.astype(np.float32)
inf_fun(hdl, np.ascontiguousarray(img, dtype=np.float32), (img.shape[0]*img.shape[1]*3*4), np.ascontiguousarray(out, dtype=np.float32), len(out_buf))
return out
def predict_fn(images):
results = np.zeros(shape=(len(images), 1000))
for i in range(len(images)):
results[i] = runInference(images[i])
return results
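# predict_fn has the classifier_fn signature that lime_image expects: a batch
# of images in, an (n_samples, n_classes) score array out.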
def lime_explainer(image, preds):
for x in preds.argsort()[0][-5:]:
print (x, names[x], preds[0,x])
top_indeces.append(x)
tmp = datetime.now()
explainer = lime_image.LimeImageExplainer()
# Hide color is the color for a superpixel turned OFF. Alternatively, if it is NONE, the superpixel will be replaced by the average of its pixels
explanation = explainer.explain_instance(image, predict_fn, top_labels=5, hide_color=0, num_samples=1000)
#to see the explanation for the top class
temp, mask = explanation.get_image_and_mask(top_indeces[4], positive_only=True, num_features=5, hide_rest=True)
im_top1 = mark_boundaries(temp / 2 + 0.5, mask)
#print "iminfo",im_top1.shape, im_top1.dtype
im_top1 = im_top1[:,:,(2,1,0)] #BGR to RGB
temp1, mask1 = explanation.get_image_and_mask(top_indeces[3], positive_only=True, num_features=100, hide_rest=True)
im_top2 = mark_boundaries(temp1 / 2 + 0.5, mask1)
im_top2 = im_top2[:,:,(2,1,0)] #BGR to RGB
del top_indeces[:]
return im_top1, im_top2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--image', dest='image', type=str,
default='./images/dog.jpg', help='An image path.')
parser.add_argument('--video', dest='video', type=str,
default='./videos/car.avi', help='A video path.')
parser.add_argument('--imagefolder', dest='imagefolder', type=str,
default='./', help='A directory with images.')
parser.add_argument('--resultsfolder', dest='resultfolder', type=str,
default='./', help='A directory with images.')
parser.add_argument('--labels', dest='labelfile', type=str,
default='./labels.txt', help='file with labels')
args = parser.parse_args()
imagefile = args.image
videofile = args.video
imagedir = args.imagefolder
outputdir = args.resultfolder
synsetfile = args.labelfile
images = []
proc_images = []
AnnInferenceLib.annCreateContext.argtype = [ctypes.c_char_p]
data_folder = "/home/rajy/work/inceptionv4"
b_data_folder = data_folder.encode('utf-8')
global hdl
hdl = AnnInferenceLib.annCreateContext(b_data_folder)
top_indeces = []
#read synset names
if synsetfile:
fp = open(synsetfile, 'r')
names = fp.readlines()
names = [x.strip() for x in names]
fp.close()
if sys.argv[1] == '--image':
# image preprocess
img = cv2.imread(imagefile)
dim = (299,299)
imgb = PreprocessImage(img, dim)
images.append(imgb)
#proc_images.append(imgb)
start = datetime.now()
preds = predict_fn(images)
end = datetime.now()
elapsedTime = end-start
print ('total time for inference in milliseconds', elapsedTime.total_seconds()*1000)
if False:
for x in preds.argsort()[0][-5:]:
print (x, names[x], preds[0,x])
top_indeces.append(x)
image0 = images[0]
tmp = datetime.now()
explainer = lime_image.LimeImageExplainer()
# Hide color is the color for a superpixel turned OFF. Alternatively, if it is NONE, the superpixel will be replaced by the average of its pixels
explanation = explainer.explain_instance(image0, predict_fn, top_labels=5, hide_color=0, num_samples=1000)
elapsedTime = datetime.now()-tmp
print ('total time for lime is " milliseconds', elapsedTime.total_seconds()*1000)
#to see the explanation for the top class
temp, mask = explanation.get_image_and_mask(top_indeces[4], positive_only=True, num_features=5, hide_rest=True)
im_top1 = mark_boundaries(temp / 2 + 0.5, mask)
#print "iminfo",im_top1.shape, im_top1.dtype
im_top1_save = im_top1[:,:,(2,1,0)] #BGR to RGB
infile = ntpath.basename(imagefile)
inname,ext = infile.split('.')
cv2.imshow('top1', im_top1)
scipy.misc.imsave(outputdir + inname + '_top1.jpg', im_top1_save)
#scipy.imsave(outputdir + inname + '_1.jpg', im_top1)
#im_top1_norm.save(outputdir + inname + '_1.jpg')
temp1, mask1 = explanation.get_image_and_mask(top_indeces[3], positive_only=True, num_features=100, hide_rest=True)
#temp, mask = explanation.get_image_and_mask(top_indeces[3], positive_only=True, num_features=1000, hide_rest=False, min_weight=0.05)
#cv2.imshow('top2', mark_boundaries(temp1 / 2 + 0.5, mask1))
im_top2 = mark_boundaries(temp1 / 2 + 0.5, mask1)
im_top2 = im_top2[:,:,(2,1,0)] #BGR to RGB
scipy.misc.imsave(outputdir + inname + '_top2.jpg', im_top2)
else:
im_top1, im_top2 = lime_explainer(images[0], preds)
infile = ntpath.basename(imagefile)
inname,ext = infile.split('.')
#cv2.imshow('top1', im_top1)
scipy.misc.imsave(outputdir + inname + '_top1.jpg', im_top1)
scipy.misc.imsave(outputdir + inname + '_top2.jpg', im_top2)
#cv2.destroyAllWindows()
AnnInferenceLib.annReleaseContext(ctypes.c_void_p(hdl))
exit()
elif sys.argv[1] == '--imagefolder':
count = 0
start = datetime.now()
for image in sorted(os.listdir(imagedir)):
print('Processing Image ' + image)
img = cv2.imread(imagedir + image)
dim = (299,299)
imgb = PreprocessImage(img, dim)
images.append(imgb)
#proc_images.append(imgb)
preds = predict_fn(images)
im_top1, im_top2 = lime_explainer(images[0], preds)
inname,ext = image.split('.')
#cv2.imshow('top1', im_top1)
scipy.misc.imsave(outputdir + inname + '_top1.jpg', im_top1)
scipy.misc.imsave(outputdir + inname + '_top2.jpg', im_top2)
images.remove(imgb)
count += 1
end = datetime.now()
elapsedTime = end-start
print ('total time is " milliseconds', elapsedTime.total_seconds()*1000)
AnnInferenceLib.annReleaseContext(ctypes.c_void_p(hdl))
exit()
|
[
"skimage.segmentation.mark_boundaries",
"numpy.ctypeslib.ndpointer",
"argparse.ArgumentParser",
"ntpath.basename",
"numpy.frombuffer",
"numpy.empty",
"numpy.ascontiguousarray",
"ctypes.cdll.LoadLibrary",
"lime.lime_image.LimeImageExplainer",
"cv2.imread",
"skimage.transform.resize",
"ctypes.c_void_p",
"cv2.imshow",
"datetime.datetime.now",
"os.listdir"
] |
[((406, 482), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['"""/home/rajy/work/inceptionv4/build/libannmodule.so"""'], {}), "('/home/rajy/work/inceptionv4/build/libannmodule.so')\n", (429, 482), False, 'import numpy, ctypes, array\n'), ((609, 656), 'numpy.ctypeslib.ndpointer', 'ndpointer', (['ctypes.c_float'], {'flags': '"""C_CONTIGUOUS"""'}), "(ctypes.c_float, flags='C_CONTIGUOUS')\n", (618, 656), False, 'from numpy.ctypeslib import ndpointer\n'), ((707, 754), 'numpy.ctypeslib.ndpointer', 'ndpointer', (['ctypes.c_float'], {'flags': '"""C_CONTIGUOUS"""'}), "(ctypes.c_float, flags='C_CONTIGUOUS')\n", (716, 754), False, 'from numpy.ctypeslib import ndpointer\n'), ((888, 917), 'numpy.empty', 'np.empty', (['(dim[0], dim[1], 3)'], {}), '((dim[0], dim[1], 3))\n', (896, 917), True, 'import numpy as np\n'), ((1362, 1393), 'skimage.transform.resize', 'resize', (['imgc', '(newh, neww)', '(1.0)'], {}), '(imgc, (newh, neww), 1.0)\n', (1368, 1393), False, 'from skimage.transform import resize\n'), ((1627, 1670), 'numpy.frombuffer', 'np.frombuffer', (['out_buf'], {'dtype': 'numpy.float32'}), '(out_buf, dtype=numpy.float32)\n', (1640, 1670), True, 'import numpy as np\n'), ((2201, 2215), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2213, 2215), False, 'from datetime import datetime\n'), ((2232, 2263), 'lime.lime_image.LimeImageExplainer', 'lime_image.LimeImageExplainer', ([], {}), '()\n', (2261, 2263), False, 'from lime import lime_image\n'), ((2700, 2737), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['(temp / 2 + 0.5)', 'mask'], {}), '(temp / 2 + 0.5, mask)\n', (2715, 2737), False, 'from skimage.segmentation import mark_boundaries\n'), ((2968, 3007), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['(temp1 / 2 + 0.5)', 'mask1'], {}), '(temp1 / 2 + 0.5, mask1)\n', (2983, 3007), False, 'from skimage.segmentation import mark_boundaries\n'), ((3147, 3172), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3170, 3172), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((1720, 1763), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (1740, 1763), True, 'import numpy as np\n'), ((1798, 1841), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['out'], {'dtype': 'np.float32'}), '(out, dtype=np.float32)\n', (1818, 1841), True, 'import numpy as np\n'), ((4586, 4607), 'cv2.imread', 'cv2.imread', (['imagefile'], {}), '(imagefile)\n', (4596, 4607), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((4751, 4765), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4763, 4765), False, 'from datetime import datetime\n'), ((4815, 4829), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4827, 4829), False, 'from datetime import datetime\n'), ((5154, 5168), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5166, 5168), False, 'from datetime import datetime\n'), ((5193, 5224), 'lime.lime_image.LimeImageExplainer', 'lime_image.LimeImageExplainer', ([], {}), '()\n', (5222, 5224), False, 'from lime import lime_image\n'), ((5842, 5879), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['(temp / 2 + 0.5)', 'mask'], {}), '(temp / 2 + 0.5, mask)\n', (5857, 5879), False, 'from skimage.segmentation import mark_boundaries\n'), ((6019, 6045), 'ntpath.basename', 'ntpath.basename', (['imagefile'], {}), '(imagefile)\n', (6034, 6045), False, 'import ntpath\n'), ((6102, 6129), 'cv2.imshow', 'cv2.imshow', (['"""top1"""', 'im_top1'], {}), 
"('top1', im_top1)\n", (6112, 6129), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((6713, 6752), 'skimage.segmentation.mark_boundaries', 'mark_boundaries', (['(temp1 / 2 + 0.5)', 'mask1'], {}), '(temp1 / 2 + 0.5, mask1)\n', (6728, 6752), False, 'from skimage.segmentation import mark_boundaries\n'), ((6984, 7010), 'ntpath.basename', 'ntpath.basename', (['imagefile'], {}), '(imagefile)\n', (6999, 7010), False, 'import ntpath\n'), ((7324, 7344), 'ctypes.c_void_p', 'ctypes.c_void_p', (['hdl'], {}), '(hdl)\n', (7339, 7344), False, 'import numpy, ctypes, array\n'), ((7436, 7450), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7448, 7450), False, 'from datetime import datetime\n'), ((8147, 8161), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8159, 8161), False, 'from datetime import datetime\n'), ((5528, 5542), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5540, 5542), False, 'from datetime import datetime\n'), ((7479, 7499), 'os.listdir', 'os.listdir', (['imagedir'], {}), '(imagedir)\n', (7489, 7499), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((7567, 7595), 'cv2.imread', 'cv2.imread', (['(imagedir + image)'], {}), '(imagedir + image)\n', (7577, 7595), False, 'import sys, os, time, csv, getopt, cv2, argparse\n'), ((8317, 8337), 'ctypes.c_void_p', 'ctypes.c_void_p', (['hdl'], {}), '(hdl)\n', (8332, 8337), False, 'import numpy, ctypes, array\n')]
|
# -*- coding: utf-8 -*-
### ATTENTION ###
# Before running, install pyfirmata:
# pip install pyfirmata --user
# And flash the Arduino with the sketch from the Arduino IDE found at:
# File -> Examples -> Firmata -> StandardFirmata
### IMPORTANT ###
# The achieved sampling frequency is only approximate
# imports
import pyfirmata
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from loguru import logger
import pandas as pd
#-------------------------------#-------------------------------#-------------------------------#-------------------------------
### BEGIN USER-EDITABLE SECTION ###
#-------------------------------
# Desired controller
#controlUse = "sc" # No controller
controlUse = "cavlr1" #Cavlr 1st ord ********** Lead controller via root locus for a first-order model
#controlUse = "catlr1" #Catlr 1st ord ********** Lag controller via root locus for a first-order model
#controlUse = "cavatlr1" #Cavatlr 1st ord ********** Lead-lag controller via root locus for a first-order model
#controlUse = "cavrf1" #Cavrf 1st ord ********** Lead controller via frequency response for a first-order model
#controlUse = "catrf1" #Catrf 1st ord ********** Lag controller via frequency response for a first-order model
#controlUse = "cavlr2" #Cavlr 2nd ord ********** Lead controller via root locus for a second-order model
#controlUse = "catlr2" #Catlr 2nd ord ********** Lag controller via root locus for a second-order model
#controlUse = "cavatlr2" #Cavatlr 2nd ord ********** Lead-lag controller via root locus for a second-order model
#controlUse = "cavrf2" #Cavrf 2nd ord ********** Lead controller via frequency response for a second-order model
#controlUse = "catrf2" #Catrf 2nd ord ********** Lag controller via frequency response for a second-order model
#-------------------------------
# Arduino configuration
"""
x:n:t -> ordem de configuração dos pinos sendo:
x - a letra referente ao pino
n - numero do pino
t - tipo que sera utilizado o pino
p - PWM
i - input
o - output
"""
serialPort = '/dev/ttyACM0' # Porta que o arduino esta conectada
outPin = 'd:9:p' # Pino de escrita PWM
inPin = 'a:0:i' # Pino utilizado para ler
#-------------------------------
# image-saving settings
dpiImage = 100 # Image DPI
srcImage = './../../Controles/PRBS-FS10/ord1/real/graph-'+controlUse+'-5Xkc-zero 2Xsigma-esp 0.1.svg' # Path and name of the image to save; if set to None, nothing is saved
#srcImage = None
formatImage = "svg" # Image format to save
width = 1920 # Width in px (pixels) of the saved image
height = 1080 # Height in px (pixels) of the saved image
#-------------------------------
# csv-saving settings
srcFile = './../../Controles/PRBS-FS10/ord1/real/data-'+controlUse+'-5Xkc-zero 2Xsigma-esp 0.1.csv'# Path and name of the csv to save; if set to None, nothing is saved
#srcFile = None
#-------------------------------
# sampling frequency
freq = 10 # In samples per second (Hz)
#-------------------------------
# Total number of samples
N = 400 # Total samples
#-------------------------------
# reference vector (yr)
qtdTrocas = 8 # How many times the signal switches level
sizeStep = int(N/qtdTrocas) # Size of each window
# Build the reference vector yr as a sequence of steps
yr = np.zeros(sizeStep)
yr = np.append(yr,4*np.ones(sizeStep))
yr = np.append(yr, np.zeros(sizeStep))
yr = np.append(yr,5*np.ones(sizeStep))
yr = np.append(yr,1*np.ones(sizeStep))
yr = np.append(yr,2*np.ones(sizeStep))
yr = np.append(yr,0*np.ones(sizeStep))
yr = np.append(yr,3*np.ones(sizeStep))
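# The resulting yr is an 8-level staircase in [0, 5] V; each level lasts
# sizeStep = N/qtdTrocas = 50 samples, i.e. 5 s at freq = 10 Hz.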
#-------------------------------
# Arduino limits
maxValue = 5 # The Arduino can only read/write up to 5 V
minValue = 0 # The Arduino can only read/write from 0 V upwards
#-------------------------------
# Sampling-rate tolerance
erroAcc = 1.15 # How many times the actual frequency may exceed the desired one
#-------------------------------
# controller coefficients
if controlUse == "sc":
    controlName = "No controller"
elif controlUse == "cavlr1":
    #******* Cavlr 1st ord ********** Lead controller via root locus for a first-order model
    controlName = "Lead controller - LR"
    # Kc = Kc
    # b0 = 2.244
    # b1 = -1.964
    # b2 = 0
    # a1 = -0.4845
    # a2 = 0
    # Kc = Kc # fs = 100
    # b0 = 2.758
    # b1 = -2.722
    # b2 = 0
    # a1 = -0.9329
    # a2 = 0
    # Kc = Kc with the zero at (3/4)*sigma
    # b0 = 1.13
    # b1 = -1.022
    # b2 = 0
    # a1 = -0.6931
    # a2 = 0
    # Kc = 5*Kc
    #b0 = 11.23
    #b1 = -9.823
    #b2 = 0
    #a1 = -0.4845
    #a2 = 0
    # Kc = 10*Kc
    # b0 = 22.44
    # b1 = -19.64
    # b2 = 0
    # a1 = -0.4845
    # a2 = 0
    # Kc = 10*Kc # fs = 100
    # b0 = 27.58
    # b1 = -27.22
    # b2 = 0
    # a1 = -0.9329
    # a2 = 0
    # Kc = 10*Kc # zero = (3/4)*sigma
    # b0 = 11.3
    # b1 = -10.22
    # b2 = 0
    # a1 = -0.6931
    # a2 = 0
    # Kc = 10*Kc # zero = sigma/3
    # b0 = 4.89
    # b1 = -4.652
    # b2 = 0
    # a1 = -0.8415
    # a2 = 0
    # Kc = 5*Kc # zero = 2*sigma # expected error = 0.1
    b0 = 6.151
    b1 = -4.704
    b2 = 0
    a1 = -0.6033
    a2 = 0
elif controlUse == "cavlr2":
#******* Cavlr 2ª ord ********** Controlador em avanço por lugar das raizes para modelo de segunda ordem
controlName = "Controlador avanço - LR"
# # Colocando zero em sigma *2
# b0 = 3.882
# b1 = -1.664
# b2 = 0
# a1 = -0.0007006
# a2 = 0
# Colocando zero em sigma *3
# b0 = 4.05
# b1 = -1.012
# b2 = 0
# a1 = -0.02119
# a2 = 0
# Colocando zero em sigma *4.5
b0 = 4.061
b1 = -0.214
b2 = 0
a1 = 0.0184
a2 = 0
elif controlUse == "cavrf1":
#******* Cavrf 1ª ord ********** Controlador em avanço por resposta em frequencia para modelo de primeira ordem
controlName = "Controlador avanço - RF"
# b0 = 31.73
# b1 = 20.49
# b2 = 0
# a1 = 0.09445
# a2 = 0
# Kc = Kc /2 -> ficou mais instavel
# b0 = 12.56
# b1 = 5.048
# b2 = 0
# a1 = -0.2618
# a2 = 0
# trocando o erro esperado para 0.1
# b0 = 1.118
# b1 = -0.4326
# b2 = 0
# a1 = -0.8546
# a2 = 0
# trocando o erro esperado para 0.03
b0 = 10.7
b1 = -5.587
b2 = 0
a1 = -0.6781
a2 = 0
elif controlUse == "cavrf2":
#******* Cavrf 2ª ord ********** Controlador em avanço por resposta em frequencia para modelo de segunda ordem
controlName = "Controlador avanço - RF"
b0 = 0.4338
b1 = -0.1238
b2 = 0
a1 = -0.9367
a2 = 0
elif controlUse == "catlr1":
#******* Catlr 1ª ord ********** Controlador em atraso por lugar das raizes para modelo de primeira ordem
controlName = "Controlador atraso - LR"
b0 = 0.825
b1 = -0.651
b2 = 0
a1 = -0.997
a2 = 0
elif controlUse == "catlr2":
#******* Catlr 2ª ord ********** Controlador em atraso por lugar das raizes para modelo de segunda ordem
controlName = "Controlador atraso - LR"
b0 = 4.752
b1 = -3.447
b2 = 0
a1 = -0.996
a2 = 0
elif controlUse == "catrf1":
#******* Catrf 1ª ord ********** Controlador em atraso por resposta em frequencia para modelo de primeira ordem
# b0 = 29.22
# b1 = -15.25
# b2 = 0
# a1 = -0.7072
# a2 = 0
# alterando o erro esperado para 0.1
b0 = 1.086
b1 = -0.5667
b2 = 0
a1 = -0.8912
a2 = 0
controlName = "Controlador atraso - RF"
elif controlUse == "catrf2":
#******* Catrf 2ª ord ********** Controlador em atraso por resposta em frequencia para modelo de segunda ordem
controlName = "Controlador atraso - RF"
b0 = 13.91
b1 = 7.194
b2 = 0
a1 = -0.3594
a2 = 0
elif controlUse == "cavatlr1":
#******* Cavatlr 1ª ord ********** Controlador em avanço-atraso por lugar das raizes para modelo de primeira ordem
controlName = "Controlador avanço-atraso - LR"
# b0 = 2.823
# b1 = -4.129
# b2 = 1.452
# a1 = -1.481
# a2 = 0.483
# Colocando o zero do controlador de avanço em sigma/2
# b0 = 1.133
# b1 = -1.29
# b2 = 0.2146
# a1 = -1.79
# a2 = 0.7911
# Colocando o zero do controlador de avanço em sigma*3/4
b0 = 1.583
b1 = -2.105
b2 = 0.6091
a1 = -1.69
a2 = 0.691
elif controlUse == "cavatlr2":
#******* Cavatlr 2ª ord ********** Controlador em avanço-atraso por lugar das raizes para modelo de segunda ordem
controlName = "Controlador avanço-atraso - LR"
# colocando o zero em sigma * 4.5
b0 = 4.355
b1 = -4.072
b2 = 0.2026
a1 = -0.9776
a2 = -0.01833
elif controlUse == "cavatrf1":
#****************
#******* Cavatrf 1ª ord ********** Controlador em avanço-atraso por resposta em frequencia para modelo de primeira ordem
controlName = "Controlador avanço-atraso - RF"
elif controlUse == "cavatrf2":
#****************
#******* Cavatrf 2ª ord ********** Controlador em avanço-atraso por resposta em frequencia para modelo de segunda ordem
controlName = "Controlador avanço-atraso - RF"
else:
controlName = "Sem controlador"
### FIM MUDANÇAS PERMITIDAS ###
#-------------------------------#-------------------------------#-------------------------------#-------------------------------
# Configurando DEBUG
debugOn = False
# Configuração do arduino
logger.info(f"Configurando conexão com o arduino...")
board = pyfirmata.Arduino(serialPort)
pwmPin = board.get_pin(outPin)
readPin = board.get_pin(inPin)
it = pyfirmata.util.Iterator(board)
it.start()
readPin.enable_reporting()
 time.sleep(0.5) # wait for the settings to take effect
 # Build the zeroed output vector (y), plus the error and control vectors
logger.info(f"Inicializando vetoros utilizados...")
 y = np.zeros(len(yr)) # output vector
 e = np.zeros(len(yr)) # error vector
 u = np.zeros(len(yr)) # control vector
#--**----**----**----**----**----**----**----**----**----**--
 # Normalize the input data
yr = yr/maxValue
 # Arduino operation loop
logger.info(f"Tempo total estimado para executar as medições: {len(yr)/freq}")
 t_ini = time.time() # record the start time
 contLevel = 0 # start the counter of yr levels reached
for i in range(2,len(yr)):
  t_ini_loop = time.time() # record the iteration start time
#------------------------------
  aux = readPin.read() # read from the analog port
  if aux is not None:
   y[i] = float(aux) # store in the result vector
#------------------------------
  e[i] = yr[i] - y[i] # compute the error
#------------------------------
  # control loop
if controlName != "Sem controlador":
u[i] = b0* e[i] + b1*e[i-1] + b2*e[i-2] - a1*u[i-1] - a2*u[i-2]
else:
u[i] = yr[i]
  # keep the signal within the range accepted by the Arduino
if(u[i] > 1):
u[i] = 1
elif(u[i] < minValue):
u[i] = minValue
#------------------------------
  pwmPin.write(u[i]) # write to the PWM
#------------------------------
if debugOn:
logger.debug(f"{i}:In: {y[i]*maxValue}")
logger.debug(f"{i}:PWM: {u[i]*maxValue}")
logger.debug(f"{i}:yr: {yr[i]*maxValue}")
else:
if(i > contLevel):
contLevel += sizeStep
logger.info(f"Já foram realizados {contLevel/sizeStep}/{qtdTrocas} trocas de niveis!")
#------------------------------
try:
   time.sleep((1/freq)-(time.time() - t_ini_loop)) # delay until the next sampling instant
except:
pass
 pwmPin.write(0) # turn off the motor
 t_end = time.time() # record the finish time
#--**----**----**----**----**----**----**----**----**----**--
 board.exit() # close the connection to the Arduino
 # Display run information
logger.info(f"Tempo total gasto para executar as medições: {t_end-t_ini}")
logger.info(f"frequencia real: {len(yr)/(t_end-t_ini)}")
if len(yr)/(t_end-t_ini) > erroAcc * freq:
  logger.warning(f"frequencia real {len(yr)/(t_end-t_ini)} está superior a {erroAcc} vezes acima da desejada {freq}")
logger.warning(f"Encerrando execução")
exit()
 # Assemble the output data
yr = yr.astype(np.float64) * maxValue
u = u.astype(np.float64) * maxValue
y = y.astype(np.float64) * maxValue
e = e.astype(np.float64) * maxValue
logger.info(f"Montando data frame")
data = pd.DataFrame()
data.loc[:, 'yr'] = yr
data.loc[:, 'u'] = u
data.loc[:, 'y'] = y
data.loc[0, 'fs'] = freq
 if srcFile is not None:
logger.info(f"Salvando csv de dados...")
data.to_csv(srcFile, index=False)
 # Build the results plot
 x = [i for i,a in enumerate(yr)] # build the x axis for the plots
sizeImage = (width/dpiImage,height/dpiImage)
fig, axs = plt.subplots(3, sharex=True, figsize=sizeImage, dpi=dpiImage)
axs[0].plot(x,y , color='red', linewidth=4,label='y')
axs[0].plot(x,yr,'--', color='blue', linewidth=2, label='yr')
axs[0].set_ylim(-0.5,5.5)
axs[0].set_title('Dados Lidos - y(k)', fontsize=21)
axs[0].legend(loc="upper right")
axs[0].grid(color='gray')
axs[1].plot(x,u,'--', color='green', linewidth=4)
axs[1].set_ylim(-0.5,5.5)
axs[1].set_title('Saída controlador - u(k)', fontsize=21)
axs[1].grid(color='gray')
axs[2].plot(x,e, color='black', linewidth=4)
axs[2].set_ylim(-5.5,5.5)
axs[2].set_title('Erro - e(k)', fontsize=21)
axs[2].grid(color='gray')
plt.suptitle(controlName, fontsize=26)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
for ax in axs.flat:
ax.set_ylabel('Voltagem (V)', fontsize=16)
ax.set_xlabel('Amostras (k)', fontsize=18)
for ax in axs.flat:
ax.label_outer()
 if srcImage is not None:
logger.info(f"Salvando grafico...")
plt.savefig(srcImage, format=formatImage)
plt.show()
logger.info(f"Encerrando execução!")
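# --- Hedged aside (not part of the original script). A minimal sketch of where
# --- discrete coefficients such as b0, b1, a1 above could come from: a continuous
# --- lead compensator C(s) = Kc*(s + z)/(s + p) discretized with the bilinear
# --- (Tustin) transform. Kc, z, p and fs below are illustrative values, not the
# --- ones used in the controllers above.
from scipy import signal

def _lead_to_difference_eq(Kc=4.0, z=2.0, p=20.0, fs=50.0):
    numd, dend, _ = signal.cont2discrete(([Kc, Kc * z], [1.0, p]), 1.0 / fs,
                                         method='bilinear')
    b0, b1 = numd[0]  # feedforward coefficients
    a1 = dend[1]      # feedback coefficient (dend[0] is normalized to 1)
    # The resulting control law matches the loop above:
    # u[k] = b0*e[k] + b1*e[k-1] - a1*u[k-1]
    return b0, b1, a1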
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.suptitle",
"pyfirmata.util.Iterator",
"loguru.logger.warning",
"numpy.zeros",
"numpy.ones",
"time.sleep",
"time.time",
"loguru.logger.info",
"pyfirmata.Arduino",
"matplotlib.pyplot.subplots_adjust",
"loguru.logger.debug",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((4782, 4800), 'numpy.zeros', 'np.zeros', (['sizeStep'], {}), '(sizeStep)\n', (4790, 4800), True, 'import numpy as np\n'), ((11161, 11214), 'loguru.logger.info', 'logger.info', (['f"""Configurando conexão com o arduino..."""'], {}), "(f'Configurando conexão com o arduino...')\n", (11172, 11214), False, 'from loguru import logger\n'), ((11237, 11266), 'pyfirmata.Arduino', 'pyfirmata.Arduino', (['serialPort'], {}), '(serialPort)\n', (11254, 11266), False, 'import pyfirmata\n'), ((11376, 11406), 'pyfirmata.util.Iterator', 'pyfirmata.util.Iterator', (['board'], {}), '(board)\n', (11399, 11406), False, 'import pyfirmata\n'), ((11445, 11460), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (11455, 11460), False, 'import time\n'), ((11572, 11623), 'loguru.logger.info', 'logger.info', (['f"""Inicializando vetoros utilizados..."""'], {}), "(f'Inicializando vetoros utilizados...')\n", (11583, 11623), False, 'from loguru import logger\n'), ((12160, 12171), 'time.time', 'time.time', ([], {}), '()\n', (12169, 12171), False, 'import time\n'), ((14045, 14056), 'time.time', 'time.time', ([], {}), '()\n', (14054, 14056), False, 'import time\n'), ((14313, 14389), 'loguru.logger.info', 'logger.info', (['f"""Tempo total gasto para executar as medições: {t_end - t_ini}"""'], {}), "(f'Tempo total gasto para executar as medições: {t_end - t_ini}')\n", (14324, 14389), False, 'from loguru import logger\n'), ((14903, 14938), 'loguru.logger.info', 'logger.info', (['f"""Montando data frame"""'], {}), "(f'Montando data frame')\n", (14914, 14938), False, 'from loguru import logger\n'), ((14961, 14975), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14973, 14975), True, 'import pandas as pd\n'), ((15387, 15448), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)', 'figsize': 'sizeImage', 'dpi': 'dpiImage'}), '(3, sharex=True, figsize=sizeImage, dpi=dpiImage)\n', (15399, 15448), True, 'import matplotlib.pyplot as plt\n'), ((16009, 16047), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['controlName'], {'fontsize': '(26)'}), '(controlName, fontsize=26)\n', (16021, 16047), True, 'import matplotlib.pyplot as plt\n'), ((16048, 16143), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': 'None', 'bottom': 'None', 'right': 'None', 'top': 'None', 'wspace': 'None', 'hspace': '(0.3)'}), '(left=None, bottom=None, right=None, top=None, wspace=\n None, hspace=0.3)\n', (16067, 16143), True, 'import matplotlib.pyplot as plt\n'), ((16403, 16413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16411, 16413), True, 'import matplotlib.pyplot as plt\n'), ((16415, 16451), 'loguru.logger.info', 'logger.info', (['f"""Encerrando execução!"""'], {}), "(f'Encerrando execução!')\n", (16426, 16451), False, 'from loguru import logger\n'), ((4885, 4903), 'numpy.zeros', 'np.zeros', (['sizeStep'], {}), '(sizeStep)\n', (4893, 4903), True, 'import numpy as np\n'), ((12406, 12417), 'time.time', 'time.time', ([], {}), '()\n', (12415, 12417), False, 'import time\n'), ((14612, 14650), 'loguru.logger.warning', 'logger.warning', (['f"""Encerrando execução"""'], {}), "(f'Encerrando execução')\n", (14626, 14650), False, 'from loguru import logger\n'), ((15101, 15141), 'loguru.logger.info', 'logger.info', (['f"""Salvando csv de dados..."""'], {}), "(f'Salvando csv de dados...')\n", (15112, 15141), False, 'from loguru import logger\n'), ((16320, 16355), 'loguru.logger.info', 'logger.info', (['f"""Salvando grafico..."""'], {}), "(f'Salvando grafico...')\n", (16331, 16355), False, 'from loguru 
import logger\n'), ((16360, 16401), 'matplotlib.pyplot.savefig', 'plt.savefig', (['srcImage'], {'format': 'formatImage'}), '(srcImage, format=formatImage)\n', (16371, 16401), True, 'import matplotlib.pyplot as plt\n'), ((4834, 4851), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (4841, 4851), True, 'import numpy as np\n'), ((4938, 4955), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (4945, 4955), True, 'import numpy as np\n'), ((4990, 5007), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (4997, 5007), True, 'import numpy as np\n'), ((5042, 5059), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (5049, 5059), True, 'import numpy as np\n'), ((5094, 5111), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (5101, 5111), True, 'import numpy as np\n'), ((5146, 5163), 'numpy.ones', 'np.ones', (['sizeStep'], {}), '(sizeStep)\n', (5153, 5163), True, 'import numpy as np\n'), ((13423, 13465), 'loguru.logger.debug', 'logger.debug', (['f"""{i}:In: {y[i] * maxValue}"""'], {}), "(f'{i}:In: {y[i] * maxValue}')\n", (13435, 13465), False, 'from loguru import logger\n'), ((13472, 13515), 'loguru.logger.debug', 'logger.debug', (['f"""{i}:PWM: {u[i] * maxValue}"""'], {}), "(f'{i}:PWM: {u[i] * maxValue}')\n", (13484, 13515), False, 'from loguru import logger\n'), ((13522, 13565), 'loguru.logger.debug', 'logger.debug', (['f"""{i}:yr: {yr[i] * maxValue}"""'], {}), "(f'{i}:yr: {yr[i] * maxValue}')\n", (13534, 13565), False, 'from loguru import logger\n'), ((13647, 13745), 'loguru.logger.info', 'logger.info', (['f"""Já foram realizados {contLevel / sizeStep}/{qtdTrocas} trocas de niveis!"""'], {}), "(\n f'Já foram realizados {contLevel / sizeStep}/{qtdTrocas} trocas de niveis!'\n )\n", (13658, 13745), False, 'from loguru import logger\n'), ((13808, 13819), 'time.time', 'time.time', ([], {}), '()\n', (13817, 13819), False, 'import time\n')]
|
import os, sys
import time
import torch
import numpy as np
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal
import copy
import cv2
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from PIL import Image as pil
import pickle
def print_loss_pack(loss_pack, name):
loss_depth, loss_mask_gt, loss_mask_out, loss_normal, loss_l2reg = loss_pack['depth'], loss_pack['mask_gt'], loss_pack['mask_out'], loss_pack['normal'], loss_pack['l2reg']
if len(loss_depth.shape) == 1:
loss_mask_gt, loss_mask_out, loss_depth, loss_normal, loss_l2reg = loss_mask_gt.mean(), loss_mask_out.mean(), loss_depth.mean(), loss_normal.mean(), loss_l2reg.mean()
print('NAME = [{0}] -- loss_depth: {1:.4f}, loss_mask_gt: {2:.4f}, loss_mask_out: {3:.4f}, loss_normal: {4:.4f}, loss_l2reg: {5:.4f}'.format(name, loss_depth.detach().cpu().numpy(), loss_mask_gt.detach().cpu().numpy(), loss_mask_out.detach().cpu().numpy(), loss_normal.detach().cpu().numpy(), loss_l2reg.detach().cpu().numpy()))
def print_loss_pack_color(loss_pack, name):
loss_color, loss_depth, loss_mask_gt, loss_mask_out, loss_normal, loss_l2reg, loss_l2reg_c = loss_pack['color'], loss_pack['depth'], loss_pack['mask_gt'], loss_pack['mask_out'], loss_pack['normal'], loss_pack['l2reg'], loss_pack['l2reg_c']
print('NAME = [{0}] -- loss_color: {1:.4f}, loss_depth: {2:.4f}, loss_mask_gt: {3:.4f}, loss_mask_out: {4:.4f}, loss_normal: {5:.4f}, loss_l2reg: {6:.4f}, loss_l2re_cg: {7:.4f}'.format(name, loss_color.detach().cpu().numpy(), loss_depth.detach().cpu().numpy(), loss_mask_gt.detach().cpu().numpy(), loss_mask_out.detach().cpu().numpy(), loss_normal.detach().cpu().numpy(), loss_l2reg.detach().cpu().numpy(), loss_l2reg_c.detach().cpu().numpy()))
def demo_color_save_render_output(prefix, sdf_renderer, shape_code, color_code, camera, lighting_loc=None, profile=False):
R, T = camera.extrinsic[:,:3], camera.extrinsic[:,3]
R, T = torch.from_numpy(R).float().cuda(), torch.from_numpy(T).float().cuda()
R.requires_grad, T.requires_grad = False, False
if lighting_loc is not None:
lighting_locations = torch.from_numpy(lighting_loc).float().unsqueeze(0).cuda()
else:
lighting_locations = None
render_output = sdf_renderer.render(color_code, shape_code, R, T, profile=profile, no_grad=True, lighting_locations=lighting_locations)
depth_rendered, normal_rendered, color_rgb, valid_mask_rendered, min_sdf_sample = render_output
data = {}
data['depth'] = depth_rendered.detach().cpu().numpy()
data['normal'] = normal_rendered.detach().cpu().numpy()
data['mask'] = valid_mask_rendered.detach().cpu().numpy()
data['color'] = color_rgb.detach().cpu().numpy()
data['min_sdf_sample'] = min_sdf_sample.detach().cpu().numpy()
data['latent_tensor'] = shape_code.detach().cpu().numpy()
data['K'] = sdf_renderer.get_intrinsic()
data['RT'] = torch.cat([R, T[:,None]], 1).detach().cpu().numpy()
fname = prefix + '_info.pkl'
with open(fname, 'wb') as f:
pickle.dump(data, f)
img_hw = sdf_renderer.get_img_hw()
visualizer = Visualizer(img_hw)
print('Writing to prefix: {}'.format(prefix))
visualizer.visualize_depth(prefix + '_depth.png', depth_rendered.detach().cpu().numpy(), valid_mask_rendered.detach().cpu().numpy())
visualizer.visualize_normal(prefix + '_normal.png', normal_rendered.detach().cpu().numpy(), valid_mask_rendered.detach().cpu().numpy(), bgr2rgb=True)
visualizer.visualize_mask(prefix + '_silhouette.png', valid_mask_rendered.detach().cpu().numpy())
cv2.imwrite(prefix + '_rendered_rgb.png', color_rgb.detach().cpu().numpy() * 255)
class Visualizer(object):
def __init__(self, img_hw, dmin=0.0, dmax=10.0):
self.img_h, self.img_w = img_hw[0], img_hw[1]
self.data = {}
self.dmin, self.dmax = dmin, dmax
self.loss_counter = 0
self.loss_curve = {}
self.loss_list = []
self.chamfer_list = []
def get_data(self, data_name):
if data_name in self.data.keys():
return self.data[data_name]
else:
raise ValueError('Key {0} does not exist.'.format(data_name))
def set_data(self, data):
self.data = data
def reset_data(self):
self.data = {}
keys = ['mask_gt', 'mask_output', 'loss_mask_gt', 'loss_mask_out',
'depth_gt', 'depth_output', 'loss_depth',
'normal_gt', 'normal_output', 'loss_normal']
for key in keys:
self.data[key] = np.zeros((64, 64))
def reset_loss_curve(self):
self.loss_counter = 0
self.loss_curve = {}
def reset_all(self):
self.reset_data()
self.reset_loss_curve()
def add_loss_from_pack(self, loss_pack):
'''
potential properties:
        ['mask_gt', 'mask_out', 'depth', 'normal', 'l2reg']
'''
loss_name_list = list(loss_pack.keys())
if self.loss_curve == {}:
for loss_name in loss_name_list:
self.loss_curve[loss_name] = []
for loss_name in loss_name_list:
loss_value = loss_pack[loss_name].detach().cpu().numpy()
self.loss_curve[loss_name].append(loss_value)
self.loss_counter = self.loss_counter + 1
def add_loss(self, loss):
self.loss_list.append(loss.detach().cpu().numpy())
def add_chamfer(self, chamfer):
self.chamfer_list.append(chamfer)
def add_data(self, data_name, data_src, data_mask=None):
'''
potential properties:
mask: ['mask_gt', 'mask_output', 'loss_mask_gt', 'loss_mask_out']
depth: ['depth_gt', 'depth_output', 'loss_depth']
normal: ['normal_gt', 'normal_output', 'loss_normal']
'''
if data_mask is None:
self.data[data_name] = data_src
else:
data_map = np.zeros(data_mask.shape)
data_map[data_mask != 0] = data_src
self.data[data_name] = data_map
def save_depth(self, fname, depth_vis, cmap='magma', direct=False):
if direct:
cv2.imwrite(fname, depth_vis)
return 0
vmin, vmax = 0, 255
normalizer = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
mapper = cm.ScalarMappable(norm=normalizer, cmap=cmap)
colormapped_im = (mapper.to_rgba(depth_vis)[:,:,:3] * 255).astype(np.uint8)
im = pil.fromarray(colormapped_im)
im.save(fname)
def save_mask(self, fname, mask_vis, bgr2rgb=False):
if bgr2rgb:
mask_vis = cv2.cvtColor(mask_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, mask_vis)
def save_normal(self, fname, normal_vis, bgr2rgb=False):
if bgr2rgb:
normal_vis = cv2.cvtColor(normal_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, normal_vis)
def save_error(self, fname, error_vis, bgr2rgb=False):
self.save_depth(fname, error_vis, cmap='jet')
def visualize_depth(self, fname, depth, mask=None):
# depth_vis = get_vis_depth(depth, mask=mask, dmin=self.dmin, dmax=self.dmax)
depth_vis = get_vis_depth(depth, mask=mask)
# self.save_depth(fname, depth_vis)
cv2.imwrite(fname, depth_vis)
def visualize_normal(self, fname, normal, mask=None, bgr2rgb=False):
normal_vis = get_vis_normal(normal, mask=mask)
if bgr2rgb:
normal_vis = cv2.cvtColor(normal_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, normal_vis)
def visualize_mask(self, fname, mask, bgr2rgb=False):
mask_vis = get_vis_mask(mask)
if bgr2rgb:
mask_vis = cv2.cvtColor(mask_vis, cv2.COLOR_BGR2RGB)
cv2.imwrite(fname, mask_vis)
def imshow(self, ax, img, title=None):
ax.imshow(img)
ax.axis('off')
if title is not None:
ax.set_title(title)
def imshow_bgr2rgb(self, ax, img, title=None):
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
ax.imshow(img)
ax.axis('off')
if title is not None:
ax.set_title(title)
def show_loss_curve(self, fname):
pass
def show_all_data_3x4(self, fname):
fig, axs = plt.subplots(3, 4, figsize=(30,30))
# first row, groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[0, 0], 255 - depth_gt_vis, title='depth gt')
normal_gt_vis = get_vis_normal(self.data['normal_gt'], mask=self.data['mask_gt'])
self.imshow(axs[0, 1], normal_gt_vis, title='normal gt')
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.imshow_bgr2rgb(axs[0, 2], 255 - mask_gt_vis, title='mask gt')
axs[0, 3].axis('off')
# second row, output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[1, 0], 255 - depth_output_vis, title='depth output')
normal_output_vis = get_vis_normal(self.data['normal_output'], mask=self.data['mask_output'])
self.imshow(axs[1, 1], normal_output_vis, title='normal output')
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.imshow_bgr2rgb(axs[1, 2], 255 - mask_output_vis, title='mask output')
axs[1, 3].axis('off')
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5)
self.imshow_bgr2rgb(axs[2, 0], 255 - loss_depth_vis, title='depth loss')
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)
self.imshow_bgr2rgb(axs[2, 1], 255 - loss_normal_vis, title='normal loss')
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt']) > 0)
        self.imshow_bgr2rgb(axs[2, 2], 255 - loss_mask_gt_vis, title='gt \\ output')
loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out']) > 0)
        self.imshow_bgr2rgb(axs[2, 3], 255 - loss_mask_out_vis, title='output \\ gt')
# savefig
fig.savefig(fname)
plt.close('all')
def save_all_data(self, prefix):
# groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.save_depth(prefix + '_depth_gt.png', depth_gt_vis, cmap='magma', direct=True)
normal_gt_vis = get_vis_normal(self.data['normal_gt'], mask=self.data['mask_gt'])
self.save_normal(prefix + '_normal_gt.png', normal_gt_vis, bgr2rgb=True)
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.save_mask(prefix + '_mask_gt.png', mask_gt_vis)
# output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.save_depth(prefix + '_depth_output.png', depth_output_vis, cmap='magma', direct=True)
normal_output_vis = get_vis_normal(self.data['normal_output'], mask=self.data['mask_output'])
self.save_normal(prefix + '_normal_output.png', normal_output_vis, bgr2rgb=True)
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.save_mask(prefix + '_mask_output.png', mask_output_vis)
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5, bg_color=0)
self.save_error(prefix + '_depth_loss.png', loss_depth_vis, bgr2rgb=True)
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0, bg_color=0)
self.save_error(prefix + '_normal_loss.png', loss_normal_vis, bgr2rgb=True)
loss_mask_gt_vis = get_vis_depth(np.abs(self.data['loss_mask_gt']), bg_color=0)
self.save_error(prefix + '_mask_gt_loss.png', loss_mask_gt_vis, bgr2rgb=True)
loss_mask_out_vis = get_vis_depth(np.abs(self.data['loss_mask_out']), bg_color=0)
self.save_error(prefix + '_mask_out_loss.png', loss_mask_out_vis, bgr2rgb=True)
self.save_error(prefix + '_mask_loss.png', loss_mask_gt_vis + loss_mask_out_vis, bgr2rgb=True)
def dump_all_data(self, fname):
with open(fname, 'wb') as f:
pickle.dump({'data': self.data, 'loss_curve': self.loss_curve, 'loss_list': self.loss_list, 'chamfer_list': self.chamfer_list}, f)
def show_all_data(self, fname):
self.show_all_data_3x4(fname)
# self.save_all_data(fname[:-4])
def show_all_data_color(self, fname):
fig, axs = plt.subplots(3, 4, figsize=(30,30))
# first row, groundtruth
depth_gt_vis = get_vis_depth(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[0, 0], depth_gt_vis, title='depth gt')
normal_gt_vis = get_vis_normal(self.data['normal_gt'])
self.imshow_bgr2rgb(axs[0, 1], normal_gt_vis, title='normal gt')
mask_gt_vis = get_vis_mask(self.data['mask_gt'])
self.imshow_bgr2rgb(axs[0, 2], mask_gt_vis, title='mask gt')
self.imshow_bgr2rgb(axs[0, 3], self.data['color_gt'], title='rgb gt')
# second row, output
depth_output_vis = get_vis_depth(self.data['depth_output'], mask=self.data['mask_output'], dmin=self.dmin, dmax=self.dmax)
self.imshow_bgr2rgb(axs[1, 0], depth_output_vis, title='depth output')
normal_output_vis = get_vis_normal(self.data['normal_output'])
self.imshow_bgr2rgb(axs[1, 1], normal_output_vis, title='normal output')
mask_output_vis = get_vis_mask(self.data['mask_output'])
self.imshow_bgr2rgb(axs[1, 2], mask_output_vis, title='mask output')
self.imshow_bgr2rgb(axs[1, 3], self.data['color_output'], title='rgb output')
# third row, loss
valid_mask = np.logical_and(self.data['mask_gt'], self.data['mask_output'])
loss_depth_vis = get_vis_depth(np.abs(self.data['loss_depth']), valid_mask, dmin=0.0, dmax=0.5)
self.imshow_bgr2rgb(axs[2, 0], loss_depth_vis, title='depth loss')
loss_normal_vis = get_vis_depth(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)
self.imshow_bgr2rgb(axs[2, 1], loss_normal_vis, title='normal loss')
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt']) > 0)
loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out']) > 0)
loss_mask_gt_vis += loss_mask_out_vis
self.imshow_bgr2rgb(axs[2, 2], loss_mask_gt_vis, title='mask loss')
self.imshow_bgr2rgb(axs[2, 3], self.data['loss_color'], title='rgb loss')
# savefig
fig.savefig(fname)
plt.close('all')
def return_output_data_color(self):
return self.data['color_output'], self.data['depth_output'], self.data['normal_output'], self.data['mask_output']
def show_all_data_color_multi(self, fname, num_img=4):
fig, axs = plt.subplots(3, 2*num_img, figsize=(8*2*num_img,25))
for i in range(num_img):
# first row, ground truth
self.imshow_bgr2rgb(axs[0, 2*i], self.data['color_gt-{}'.format(i)], title='rgb gt {}'.format(i))
mask_gt_vis = get_vis_mask(self.data['mask_gt-{}'.format(i)])
self.imshow_bgr2rgb(axs[0, 2*i+1], mask_gt_vis, title='mask gt {}'.format(i))
# second row, output
self.imshow_bgr2rgb(axs[1, 2*i], self.data['color_output-{}'.format(i)], title='rgb output {}'.format(i))
mask_output_vis = get_vis_mask(self.data['mask_output-{}'.format(i)])
self.imshow_bgr2rgb(axs[1, 2*i+1], mask_output_vis, title='mask output {}'.format(i))
# third row, loss
self.imshow_bgr2rgb(axs[2, 2*i], self.data['loss_color-{}'.format(i)], title='rgb loss {}'.format(i))
loss_mask_gt_vis = get_vis_mask(np.abs(self.data['loss_mask_gt-{}'.format(i)]) > 0)
loss_mask_out_vis = get_vis_mask(np.abs(self.data['loss_mask_out-{}'.format(i)]) > 0)
loss_mask_gt_vis += loss_mask_out_vis
self.imshow_bgr2rgb(axs[2, 2*i+1], loss_mask_gt_vis, title='mask loss {}'.format(i))
# savefig
plt.subplots_adjust(top=0.95, right=0.99, left=0.01, bottom=0.01, wspace=0.05, hspace=0.1)
fig.savefig(fname)
plt.close('all')
def show_all_data_color_warp(self, fname):
fig, axs = plt.subplots(1, 5, figsize=(15, 3.4))
self.imshow_bgr2rgb(axs[0], self.data['color_gt-1'], title='view 1')
self.imshow_bgr2rgb(axs[1], self.data['color_gt-2'], title='view 2')
self.imshow_bgr2rgb(axs[2], self.data['color_valid-1'], title='valid region in view 1')
self.imshow_bgr2rgb(axs[3], self.data['color_valid-2'], title='warped color from view 2')
self.imshow_bgr2rgb(axs[4], self.data['color_valid_loss'], title='color loss')
# savefig
plt.subplots_adjust(top=0.99, right=0.99, left=0.01, bottom=0.00, wspace=0.05, hspace=0)
fig.savefig(fname)
plt.close('all')
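# --- Hedged usage sketch (not part of the original file; assumes the vis_utils
# --- helpers accept plain numpy maps). One plausible way to drive Visualizer:
# --- zero-fill the expected keys, overwrite a few with synthetic maps, render.
def _demo_visualizer(out_png='demo_vis.png'):
    rng = np.random.RandomState(0)
    vis = Visualizer(img_hw=(64, 64))
    vis.reset_data()  # zero-fills all keys show_all_data() expects
    vis.add_data('depth_gt', 5.0 * rng.rand(64, 64))
    vis.add_data('depth_output', 5.0 * rng.rand(64, 64))
    vis.add_data('normal_gt', rng.rand(64, 64, 3))
    vis.add_data('normal_output', rng.rand(64, 64, 3))
    vis.add_data('mask_gt', (rng.rand(64, 64) > 0.5).astype(np.uint8))
    vis.add_data('mask_output', (rng.rand(64, 64) > 0.5).astype(np.uint8))
    vis.show_all_data(out_png)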
|
[
"vis_utils.get_vis_depth",
"pickle.dump",
"numpy.abs",
"torch.cat",
"os.path.abspath",
"matplotlib.colors.Normalize",
"cv2.cvtColor",
"matplotlib.cm.ScalarMappable",
"cv2.imwrite",
"matplotlib.pyplot.close",
"vis_utils.get_vis_mask",
"matplotlib.pyplot.subplots",
"matplotlib.use",
"matplotlib.pyplot.subplots_adjust",
"vis_utils.get_vis_normal",
"torch.from_numpy",
"numpy.logical_and",
"numpy.zeros",
"PIL.Image.fromarray"
] |
[((233, 247), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (240, 247), True, 'import matplotlib as mpl\n'), ((91, 116), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (106, 116), False, 'import os, sys\n'), ((3133, 3153), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (3144, 3153), False, 'import pickle\n'), ((6299, 6341), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (6319, 6341), True, 'import matplotlib as mpl\n'), ((6359, 6404), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'normalizer', 'cmap': 'cmap'}), '(norm=normalizer, cmap=cmap)\n', (6376, 6404), True, 'import matplotlib.cm as cm\n'), ((6502, 6531), 'PIL.Image.fromarray', 'pil.fromarray', (['colormapped_im'], {}), '(colormapped_im)\n', (6515, 6531), True, 'from PIL import Image as pil\n'), ((6706, 6734), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'mask_vis'], {}), '(fname, mask_vis)\n', (6717, 6734), False, 'import cv2\n'), ((6894, 6924), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'normal_vis'], {}), '(fname, normal_vis)\n', (6905, 6924), False, 'import cv2\n'), ((7202, 7233), 'vis_utils.get_vis_depth', 'get_vis_depth', (['depth'], {'mask': 'mask'}), '(depth, mask=mask)\n', (7215, 7233), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((7286, 7315), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'depth_vis'], {}), '(fname, depth_vis)\n', (7297, 7315), False, 'import cv2\n'), ((7411, 7444), 'vis_utils.get_vis_normal', 'get_vis_normal', (['normal'], {'mask': 'mask'}), '(normal, mask=mask)\n', (7425, 7444), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((7542, 7572), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'normal_vis'], {}), '(fname, normal_vis)\n', (7553, 7572), False, 'import cv2\n'), ((7651, 7669), 'vis_utils.get_vis_mask', 'get_vis_mask', (['mask'], {}), '(mask)\n', (7663, 7669), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((7763, 7791), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'mask_vis'], {}), '(fname, mask_vis)\n', (7774, 7791), False, 'import cv2\n'), ((8303, 8339), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(4)'], {'figsize': '(30, 30)'}), '(3, 4, figsize=(30, 30))\n', (8315, 8339), True, 'import matplotlib.pyplot as plt\n'), ((8396, 8496), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_gt']"], {'mask': "self.data['mask_gt']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.\n dmin, dmax=self.dmax)\n", (8409, 8496), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((8593, 8658), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_gt']"], {'mask': "self.data['mask_gt']"}), "(self.data['normal_gt'], mask=self.data['mask_gt'])\n", (8607, 8658), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((8746, 8780), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_gt']"], {}), "(self.data['mask_gt'])\n", (8758, 8780), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((8943, 9050), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_output']"], {'mask': "self.data['mask_output']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_output'], mask=self.data['mask_output'],\n dmin=self.dmin, dmax=self.dmax)\n", (8956, 9050), False, 'from vis_utils import 
get_vis_depth, get_vis_mask, get_vis_normal\n'), ((9160, 9233), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_output']"], {'mask': "self.data['mask_output']"}), "(self.data['normal_output'], mask=self.data['mask_output'])\n", (9174, 9233), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((9333, 9371), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_output']"], {}), "(self.data['mask_output'])\n", (9345, 9371), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((9533, 9595), 'numpy.logical_and', 'np.logical_and', (["self.data['mask_gt']", "self.data['mask_output']"], {}), "(self.data['mask_gt'], self.data['mask_output'])\n", (9547, 9595), True, 'import numpy as np\n'), ((9807, 9879), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['loss_normal']", 'valid_mask'], {'dmin': '(-1.0)', 'dmax': '(0.0)'}), "(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)\n", (9820, 9879), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((10346, 10362), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10355, 10362), True, 'import matplotlib.pyplot as plt\n'), ((10446, 10546), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_gt']"], {'mask': "self.data['mask_gt']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.\n dmin, dmax=self.dmax)\n", (10459, 10546), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((10657, 10722), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_gt']"], {'mask': "self.data['mask_gt']"}), "(self.data['normal_gt'], mask=self.data['mask_gt'])\n", (10671, 10722), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((10826, 10860), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_gt']"], {}), "(self.data['mask_gt'])\n", (10838, 10860), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((10967, 11074), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_output']"], {'mask': "self.data['mask_output']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_output'], mask=self.data['mask_output'],\n dmin=self.dmin, dmax=self.dmax)\n", (10980, 11074), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((11198, 11271), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_output']"], {'mask': "self.data['mask_output']"}), "(self.data['normal_output'], mask=self.data['mask_output'])\n", (11212, 11271), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((11387, 11425), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_output']"], {}), "(self.data['mask_output'])\n", (11399, 11425), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((11543, 11605), 'numpy.logical_and', 'np.logical_and', (["self.data['mask_gt']", "self.data['mask_output']"], {}), "(self.data['mask_gt'], self.data['mask_output'])\n", (11557, 11605), True, 'import numpy as np\n'), ((11830, 11918), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['loss_normal']", 'valid_mask'], {'dmin': '(-1.0)', 'dmax': '(0.0)', 'bg_color': '(0)'}), "(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0,\n bg_color=0)\n", (11843, 11918), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((12850, 12886), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(3)', '(4)'], {'figsize': '(30, 30)'}), '(3, 4, figsize=(30, 30))\n', (12862, 12886), True, 'import matplotlib.pyplot as plt\n'), ((12943, 13043), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_gt']"], {'mask': "self.data['mask_gt']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_gt'], mask=self.data['mask_gt'], dmin=self.\n dmin, dmax=self.dmax)\n", (12956, 13043), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13134, 13172), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_gt']"], {}), "(self.data['normal_gt'])\n", (13148, 13172), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13268, 13302), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_gt']"], {}), "(self.data['mask_gt'])\n", (13280, 13302), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13507, 13614), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['depth_output']"], {'mask': "self.data['mask_output']", 'dmin': 'self.dmin', 'dmax': 'self.dmax'}), "(self.data['depth_output'], mask=self.data['mask_output'],\n dmin=self.dmin, dmax=self.dmax)\n", (13520, 13614), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13718, 13760), 'vis_utils.get_vis_normal', 'get_vis_normal', (["self.data['normal_output']"], {}), "(self.data['normal_output'])\n", (13732, 13760), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((13868, 13906), 'vis_utils.get_vis_mask', 'get_vis_mask', (["self.data['mask_output']"], {}), "(self.data['mask_output'])\n", (13880, 13906), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((14118, 14180), 'numpy.logical_and', 'np.logical_and', (["self.data['mask_gt']", "self.data['mask_output']"], {}), "(self.data['mask_gt'], self.data['mask_output'])\n", (14132, 14180), True, 'import numpy as np\n'), ((14386, 14458), 'vis_utils.get_vis_depth', 'get_vis_depth', (["self.data['loss_normal']", 'valid_mask'], {'dmin': '(-1.0)', 'dmax': '(0.0)'}), "(self.data['loss_normal'], valid_mask, dmin=-1.0, dmax=0.0)\n", (14399, 14458), False, 'from vis_utils import get_vis_depth, get_vis_mask, get_vis_normal\n'), ((14954, 14970), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (14963, 14970), True, 'import matplotlib.pyplot as plt\n'), ((15213, 15272), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2 * num_img)'], {'figsize': '(8 * 2 * num_img, 25)'}), '(3, 2 * num_img, figsize=(8 * 2 * num_img, 25))\n', (15225, 15272), True, 'import matplotlib.pyplot as plt\n'), ((16457, 16552), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'right': '(0.99)', 'left': '(0.01)', 'bottom': '(0.01)', 'wspace': '(0.05)', 'hspace': '(0.1)'}), '(top=0.95, right=0.99, left=0.01, bottom=0.01, wspace=\n 0.05, hspace=0.1)\n', (16476, 16552), True, 'import matplotlib.pyplot as plt\n'), ((16583, 16599), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16592, 16599), True, 'import matplotlib.pyplot as plt\n'), ((16667, 16704), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {'figsize': '(15, 3.4)'}), '(1, 5, figsize=(15, 3.4))\n', (16679, 16704), True, 'import matplotlib.pyplot as plt\n'), ((17167, 17259), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.99)', 'right': '(0.99)', 'left': '(0.01)', 'bottom': '(0.0)', 'wspace': '(0.05)', 'hspace': '(0)'}), 
'(top=0.99, right=0.99, left=0.01, bottom=0.0, wspace=\n 0.05, hspace=0)\n', (17186, 17259), True, 'import matplotlib.pyplot as plt\n'), ((17291, 17307), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (17300, 17307), True, 'import matplotlib.pyplot as plt\n'), ((4638, 4656), 'numpy.zeros', 'np.zeros', (['(64, 64)'], {}), '((64, 64))\n', (4646, 4656), True, 'import numpy as np\n'), ((5977, 6002), 'numpy.zeros', 'np.zeros', (['data_mask.shape'], {}), '(data_mask.shape)\n', (5985, 6002), True, 'import numpy as np\n'), ((6199, 6228), 'cv2.imwrite', 'cv2.imwrite', (['fname', 'depth_vis'], {}), '(fname, depth_vis)\n', (6210, 6228), False, 'import cv2\n'), ((6656, 6697), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_vis', 'cv2.COLOR_BGR2RGB'], {}), '(mask_vis, cv2.COLOR_BGR2RGB)\n', (6668, 6697), False, 'import cv2\n'), ((6842, 6885), 'cv2.cvtColor', 'cv2.cvtColor', (['normal_vis', 'cv2.COLOR_BGR2RGB'], {}), '(normal_vis, cv2.COLOR_BGR2RGB)\n', (6854, 6885), False, 'import cv2\n'), ((7490, 7533), 'cv2.cvtColor', 'cv2.cvtColor', (['normal_vis', 'cv2.COLOR_BGR2RGB'], {}), '(normal_vis, cv2.COLOR_BGR2RGB)\n', (7502, 7533), False, 'import cv2\n'), ((7713, 7754), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_vis', 'cv2.COLOR_BGR2RGB'], {}), '(mask_vis, cv2.COLOR_BGR2RGB)\n', (7725, 7754), False, 'import cv2\n'), ((8046, 8082), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (8058, 8082), False, 'import cv2\n'), ((9635, 9666), 'numpy.abs', 'np.abs', (["self.data['loss_depth']"], {}), "(self.data['loss_depth'])\n", (9641, 9666), True, 'import numpy as np\n'), ((11645, 11676), 'numpy.abs', 'np.abs', (["self.data['loss_depth']"], {}), "(self.data['loss_depth'])\n", (11651, 11676), True, 'import numpy as np\n'), ((12041, 12074), 'numpy.abs', 'np.abs', (["self.data['loss_mask_gt']"], {}), "(self.data['loss_mask_gt'])\n", (12047, 12074), True, 'import numpy as np\n'), ((12216, 12250), 'numpy.abs', 'np.abs', (["self.data['loss_mask_out']"], {}), "(self.data['loss_mask_out'])\n", (12222, 12250), True, 'import numpy as np\n'), ((12541, 12675), 'pickle.dump', 'pickle.dump', (["{'data': self.data, 'loss_curve': self.loss_curve, 'loss_list': self.\n loss_list, 'chamfer_list': self.chamfer_list}", 'f'], {}), "({'data': self.data, 'loss_curve': self.loss_curve, 'loss_list':\n self.loss_list, 'chamfer_list': self.chamfer_list}, f)\n", (12552, 12675), False, 'import pickle\n'), ((14220, 14251), 'numpy.abs', 'np.abs', (["self.data['loss_depth']"], {}), "(self.data['loss_depth'])\n", (14226, 14251), True, 'import numpy as np\n'), ((10003, 10036), 'numpy.abs', 'np.abs', (["self.data['loss_mask_gt']"], {}), "(self.data['loss_mask_gt'])\n", (10009, 10036), True, 'import numpy as np\n'), ((10167, 10201), 'numpy.abs', 'np.abs', (["self.data['loss_mask_out']"], {}), "(self.data['loss_mask_out'])\n", (10173, 10201), True, 'import numpy as np\n'), ((14576, 14609), 'numpy.abs', 'np.abs', (["self.data['loss_mask_gt']"], {}), "(self.data['loss_mask_gt'])\n", (14582, 14609), True, 'import numpy as np\n'), ((14656, 14690), 'numpy.abs', 'np.abs', (["self.data['loss_mask_out']"], {}), "(self.data['loss_mask_out'])\n", (14662, 14690), True, 'import numpy as np\n'), ((2039, 2058), 'torch.from_numpy', 'torch.from_numpy', (['R'], {}), '(R)\n', (2055, 2058), False, 'import torch\n'), ((2075, 2094), 'torch.from_numpy', 'torch.from_numpy', (['T'], {}), '(T)\n', (2091, 2094), False, 'import torch\n'), ((3007, 3036), 'torch.cat', 'torch.cat', (['[R, T[:, None]]', '(1)'], {}), 
'([R, T[:, None]], 1)\n', (3016, 3036), False, 'import torch\n'), ((2225, 2255), 'torch.from_numpy', 'torch.from_numpy', (['lighting_loc'], {}), '(lighting_loc)\n', (2241, 2255), False, 'import torch\n')]
|
import os
import time
import numpy as np
import torch
from torch import nn
from butterfly_factor import butterfly_factor_mult_intermediate
from butterfly import Block2x2DiagProduct  # needed to build the twiddle factors below
from test_factor_multiply import twiddle_list_concat
exps = np.arange(6, 14)
sizes = 1 << exps
batch_size = 256
ntrials = [100000, 100000, 10000, 10000, 10000, 10000, 10000, 10000]
dense_times = np.zeros(exps.size)
fft_times = np.zeros(exps.size)
butterfly_times = np.zeros(exps.size)
for idx_n, (n, ntrial) in enumerate(zip(sizes, ntrials)):
print(n)
    B = Block2x2DiagProduct(n).to('cuda')
L = torch.nn.Linear(n, n, bias=False).to('cuda')
x = torch.randn(batch_size, n, requires_grad=True).to('cuda')
grad = torch.randn_like(x)
    twiddle = twiddle_list_concat(B)  # flattened twiddle factors used by butterfly_factor_mult_intermediate below
# Dense multiply
output = L(x) # Do it once to initialize cuBlas handle and such
torch.autograd.grad(output, (L.weight, x), grad)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = L(x)
torch.autograd.grad(output, (L.weight, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
dense_times[idx_n] = (end - start) / ntrial
# FFT
output = torch.rfft(x, 1) # Do it once to initialize cuBlas handle and such
grad_fft = torch.randn_like(output)
torch.autograd.grad(output, x, grad_fft)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = torch.rfft(x, 1)
torch.autograd.grad(output, x, grad_fft)
torch.cuda.synchronize()
end = time.perf_counter()
fft_times[idx_n] = (end - start) / ntrial
# Butterfly
output = butterfly_factor_mult_intermediate(twiddle, x)
torch.autograd.grad(output, (twiddle, x), grad)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(ntrial):
output = butterfly_factor_mult_intermediate(twiddle, x)
torch.autograd.grad(output, (twiddle, x), grad)
torch.cuda.synchronize()
end = time.perf_counter()
butterfly_times[idx_n] = (end-start) / ntrial
print(dense_times)
print(fft_times)
print(butterfly_times)
print(dense_times / butterfly_times)
print(dense_times / fft_times)
data = {
'sizes': sizes,
'speedup_fft': dense_times / fft_times,
'speedup_butterfly': dense_times / butterfly_times,
}
import pickle
with open('speed_training_data.pkl', 'wb') as f:
pickle.dump(data, f)
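# --- Hedged aside (not part of the original benchmark): reading the saved
# --- timings back and printing the speedups as a small table.
def _print_speedups(path='speed_training_data.pkl'):
    with open(path, 'rb') as f:
        d = pickle.load(f)
    for n, s_fft, s_bfly in zip(d['sizes'], d['speedup_fft'],
                                d['speedup_butterfly']):
        print(f'n={n:5d}  dense/fft={s_fft:7.2f}x  dense/butterfly={s_bfly:7.2f}x')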
|
[
"torch.cuda.synchronize",
"pickle.dump",
"torch.randn_like",
"butterfly_factor.butterfly_factor_mult_intermediate",
"torch.autograd.grad",
"numpy.zeros",
"time.perf_counter",
"torch.randn",
"numpy.arange",
"torch.rfft",
"torch.nn.Linear"
] |
[((249, 265), 'numpy.arange', 'np.arange', (['(6)', '(14)'], {}), '(6, 14)\n', (258, 265), True, 'import numpy as np\n'), ((387, 406), 'numpy.zeros', 'np.zeros', (['exps.size'], {}), '(exps.size)\n', (395, 406), True, 'import numpy as np\n'), ((419, 438), 'numpy.zeros', 'np.zeros', (['exps.size'], {}), '(exps.size)\n', (427, 438), True, 'import numpy as np\n'), ((457, 476), 'numpy.zeros', 'np.zeros', (['exps.size'], {}), '(exps.size)\n', (465, 476), True, 'import numpy as np\n'), ((722, 741), 'torch.randn_like', 'torch.randn_like', (['x'], {}), '(x)\n', (738, 741), False, 'import torch\n'), ((876, 924), 'torch.autograd.grad', 'torch.autograd.grad', (['output', '(L.weight, x)', 'grad'], {}), '(output, (L.weight, x), grad)\n', (895, 924), False, 'import torch\n'), ((929, 953), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (951, 953), False, 'import torch\n'), ((966, 985), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (983, 985), False, 'import time\n'), ((1097, 1121), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1119, 1121), False, 'import torch\n'), ((1132, 1151), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1149, 1151), False, 'import time\n'), ((1224, 1240), 'torch.rfft', 'torch.rfft', (['x', '(1)'], {}), '(x, 1)\n', (1234, 1240), False, 'import torch\n'), ((1307, 1331), 'torch.randn_like', 'torch.randn_like', (['output'], {}), '(output)\n', (1323, 1331), False, 'import torch\n'), ((1336, 1376), 'torch.autograd.grad', 'torch.autograd.grad', (['output', 'x', 'grad_fft'], {}), '(output, x, grad_fft)\n', (1355, 1376), False, 'import torch\n'), ((1381, 1405), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1403, 1405), False, 'import torch\n'), ((1418, 1437), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1435, 1437), False, 'import time\n'), ((1553, 1577), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1575, 1577), False, 'import torch\n'), ((1588, 1607), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1605, 1607), False, 'import time\n'), ((1684, 1730), 'butterfly_factor.butterfly_factor_mult_intermediate', 'butterfly_factor_mult_intermediate', (['twiddle', 'x'], {}), '(twiddle, x)\n', (1718, 1730), False, 'from butterfly_factor import butterfly_factor_mult_intermediate\n'), ((1735, 1782), 'torch.autograd.grad', 'torch.autograd.grad', (['output', '(twiddle, x)', 'grad'], {}), '(output, (twiddle, x), grad)\n', (1754, 1782), False, 'import torch\n'), ((1787, 1811), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1809, 1811), False, 'import torch\n'), ((1824, 1843), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1841, 1843), False, 'import time\n'), ((1996, 2020), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2018, 2020), False, 'import torch\n'), ((2031, 2050), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2048, 2050), False, 'import time\n'), ((2431, 2451), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (2442, 2451), False, 'import pickle\n'), ((1044, 1092), 'torch.autograd.grad', 'torch.autograd.grad', (['output', '(L.weight, x)', 'grad'], {}), '(output, (L.weight, x), grad)\n', (1063, 1092), False, 'import torch\n'), ((1483, 1499), 'torch.rfft', 'torch.rfft', (['x', '(1)'], {}), '(x, 1)\n', (1493, 1499), False, 'import torch\n'), ((1508, 1548), 'torch.autograd.grad', 'torch.autograd.grad', (['output', 'x', 'grad_fft'], {}), '(output, x, 
grad_fft)\n', (1527, 1548), False, 'import torch\n'), ((1889, 1935), 'butterfly_factor.butterfly_factor_mult_intermediate', 'butterfly_factor_mult_intermediate', (['twiddle', 'x'], {}), '(twiddle, x)\n', (1923, 1935), False, 'from butterfly_factor import butterfly_factor_mult_intermediate\n'), ((1944, 1991), 'torch.autograd.grad', 'torch.autograd.grad', (['output', '(twiddle, x)', 'grad'], {}), '(output, (twiddle, x), grad)\n', (1963, 1991), False, 'import torch\n'), ((600, 633), 'torch.nn.Linear', 'torch.nn.Linear', (['n', 'n'], {'bias': '(False)'}), '(n, n, bias=False)\n', (615, 633), False, 'import torch\n'), ((653, 699), 'torch.randn', 'torch.randn', (['batch_size', 'n'], {'requires_grad': '(True)'}), '(batch_size, n, requires_grad=True)\n', (664, 699), False, 'import torch\n')]
|
from pandas import Series
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold, ShuffleSplit
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
def transform_labels(y) -> Series:
    if isinstance(next(iter(y)), str):
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
return Series(y)
def calc_auc(clf, test_x, test_y):
y_pred = clf.predict(test_x)
return metrics.roc_auc_score(
transform_labels(test_y),
transform_labels(y_pred.tolist())
)
def roc_plot(classifier, X, y, n_splits=3, title='', labeller=None):
cv = StratifiedKFold(n_splits=n_splits)
#if labeller:
# y = [labeller(i) for i in y]
y = transform_labels(y)
#cv = ShuffleSplit(n_splits=n_splits)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 0
for train, test in cv.split(X, y):
probas_ = classifier.fit(X.iloc[train], y.iloc[train]).predict_proba(X.iloc[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y.iloc[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic ' + title)
plt.legend(loc="lower right")
plt.show()
return plt
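# --- Hedged usage sketch (not part of the original module): driving roc_plot
# --- with a toy binary problem. Dataset and classifier choices are illustrative.
if __name__ == '__main__':
    import pandas as pd
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X_arr, y_arr = make_classification(n_samples=300, n_features=5, random_state=0)
    roc_plot(LogisticRegression(), pd.DataFrame(X_arr), pd.Series(y_arr),
             n_splits=3, title='(toy data)')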
|
[
"matplotlib.pyplot.title",
"numpy.maximum",
"numpy.mean",
"matplotlib.pyplot.fill_between",
"numpy.std",
"sklearn.preprocessing.LabelEncoder",
"numpy.linspace",
"numpy.minimum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"pandas.Series",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"sklearn.metrics.roc_curve",
"sklearn.metrics.auc",
"sklearn.model_selection.StratifiedKFold",
"scipy.interp",
"matplotlib.pyplot.xlabel"
] |
[((444, 453), 'pandas.Series', 'Series', (['y'], {}), '(y)\n', (450, 453), False, 'from pandas import Series\n'), ((720, 754), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_splits'}), '(n_splits=n_splits)\n', (735, 754), False, 'from sklearn.model_selection import StratifiedKFold, ShuffleSplit\n'), ((924, 946), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (935, 946), True, 'import numpy as np\n'), ((1471, 1559), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'linestyle': '"""--"""', 'lw': '(2)', 'color': '"""r"""', 'label': '"""Chance"""', 'alpha': '(0.8)'}), "([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance',\n alpha=0.8)\n", (1479, 1559), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1605), 'numpy.mean', 'np.mean', (['tprs'], {'axis': '(0)'}), '(tprs, axis=0)\n', (1591, 1605), True, 'import numpy as np\n'), ((1644, 1667), 'sklearn.metrics.auc', 'auc', (['mean_fpr', 'mean_tpr'], {}), '(mean_fpr, mean_tpr)\n', (1647, 1667), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1682, 1694), 'numpy.std', 'np.std', (['aucs'], {}), '(aucs)\n', (1688, 1694), True, 'import numpy as np\n'), ((1699, 1831), 'matplotlib.pyplot.plot', 'plt.plot', (['mean_fpr', 'mean_tpr'], {'color': '"""b"""', 'label': "('Mean ROC (AUC = %0.2f $\\\\pm$ %0.2f)' % (mean_auc, std_auc))", 'lw': '(2)', 'alpha': '(0.8)'}), "(mean_fpr, mean_tpr, color='b', label=\n 'Mean ROC (AUC = %0.2f $\\\\pm$ %0.2f)' % (mean_auc, std_auc), lw=2,\n alpha=0.8)\n", (1707, 1831), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1883), 'numpy.std', 'np.std', (['tprs'], {'axis': '(0)'}), '(tprs, axis=0)\n', (1869, 1883), True, 'import numpy as np\n'), ((1901, 1934), 'numpy.minimum', 'np.minimum', (['(mean_tpr + std_tpr)', '(1)'], {}), '(mean_tpr + std_tpr, 1)\n', (1911, 1934), True, 'import numpy as np\n'), ((1952, 1985), 'numpy.maximum', 'np.maximum', (['(mean_tpr - std_tpr)', '(0)'], {}), '(mean_tpr - std_tpr, 0)\n', (1962, 1985), True, 'import numpy as np\n'), ((1990, 2097), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['mean_fpr', 'tprs_lower', 'tprs_upper'], {'color': '"""grey"""', 'alpha': '(0.2)', 'label': '"""$\\\\pm$ 1 std. dev."""'}), "(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.2,\n label='$\\\\pm$ 1 std. 
dev.')\n", (2006, 2097), True, 'import matplotlib.pyplot as plt\n'), ((2119, 2142), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (2127, 2142), True, 'import matplotlib.pyplot as plt\n'), ((2147, 2170), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.05, 1.05]'], {}), '([-0.05, 1.05])\n', (2155, 2170), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (2185, 2208), True, 'import matplotlib.pyplot as plt\n'), ((2213, 2245), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (2223, 2245), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2305), 'matplotlib.pyplot.title', 'plt.title', (["('Receiver operating characteristic ' + title)"], {}), "('Receiver operating characteristic ' + title)\n", (2259, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2310, 2339), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (2320, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2352, 2354), True, 'import matplotlib.pyplot as plt\n'), ((372, 386), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (384, 386), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1166, 1204), 'sklearn.metrics.roc_curve', 'roc_curve', (['y.iloc[test]', 'probas_[:, 1]'], {}), '(y.iloc[test], probas_[:, 1])\n', (1175, 1204), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1297, 1310), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1300, 1310), False, 'from sklearn.metrics import roc_curve, auc\n'), ((1348, 1437), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'lw': '(1)', 'alpha': '(0.3)', 'label': "('ROC fold %d (AUC = %0.2f)' % (i, roc_auc))"}), "(fpr, tpr, lw=1, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (i,\n roc_auc))\n", (1356, 1437), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1251), 'scipy.interp', 'interp', (['mean_fpr', 'fpr', 'tpr'], {}), '(mean_fpr, fpr, tpr)\n', (1231, 1251), False, 'from scipy import interp\n')]
|
########################################################################################################################
# Module: tests/test_core.py
# Description: Tests for core and Sampler
#
# Web: https://github.com/SamDuffield/mocat
########################################################################################################################
import unittest
import jax.numpy as jnp
import mocat.src.sample
import numpy.testing as npt
from mocat.src import core
from mocat.src import sample
class Testcdict(unittest.TestCase):
cdict = core.cdict(test_arr=jnp.ones((10, 3)),
test_float=3.)
def test_init(self):
npt.assert_(hasattr(self.cdict, 'test_arr'))
npt.assert_array_equal(self.cdict.test_arr, jnp.ones((10, 3)))
npt.assert_(hasattr(self.cdict, 'test_float'))
npt.assert_equal(self.cdict.test_float, 3.)
def test_copy(self):
cdict2 = self.cdict.copy()
npt.assert_(isinstance(cdict2, core.cdict))
npt.assert_(isinstance(cdict2.test_arr, jnp.DeviceArray))
npt.assert_array_equal(cdict2.test_arr, jnp.ones((10, 3)))
npt.assert_(isinstance(cdict2.test_float, float))
npt.assert_equal(cdict2.test_float, 3.)
cdict2.test_arr = jnp.zeros(5)
npt.assert_array_equal(self.cdict.test_arr, jnp.ones((10, 3)))
cdict2.test_float = 9.
npt.assert_equal(self.cdict.test_float, 3.)
def test_getitem(self):
cdict_0get = self.cdict[0]
npt.assert_(isinstance(cdict_0get, core.cdict))
npt.assert_(isinstance(cdict_0get.test_arr, jnp.DeviceArray))
npt.assert_array_equal(cdict_0get.test_arr, jnp.ones(3))
npt.assert_(isinstance(cdict_0get.test_float, float))
npt.assert_equal(cdict_0get.test_float, 3.)
def test_additem(self):
cdict_other = core.cdict(test_arr=jnp.ones((2, 3)),
test_float=7.,
time=25.)
self.cdict.time = 10.
cdict_add = self.cdict + cdict_other
npt.assert_(isinstance(cdict_add, core.cdict))
npt.assert_(isinstance(cdict_add.test_arr, jnp.DeviceArray))
npt.assert_array_equal(cdict_add.test_arr, jnp.ones((12, 3)))
npt.assert_array_equal(cdict_add.time, 35.)
npt.assert_(isinstance(cdict_add.test_float, float))
npt.assert_equal(cdict_add.test_float, 3.)
npt.assert_array_equal(self.cdict.test_arr, jnp.ones((10, 3)))
npt.assert_equal(self.cdict.test_float, 3.)
npt.assert_equal(self.cdict.time, 10.)
del self.cdict.time
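# --- Hedged aside (not part of the original tests): the '+' semantics that the
# --- assertions above appear to exercise — array leaves concatenate along axis 0,
# --- 'time' entries are summed, and other scalar leaves keep the left value.
def _demo_cdict_add():
    a = core.cdict(arr=jnp.ones((2, 3)), val=3., time=10.)
    b = core.cdict(arr=jnp.ones((1, 3)), val=7., time=25.)
    c = a + b
    assert c.arr.shape == (3, 3)
    assert c.val == 3.
    assert c.time == 35.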
class TestSampler(unittest.TestCase):
sampler = sample.Sampler(name='test', other=jnp.zeros(2))
def test_init(self):
npt.assert_equal(self.sampler.name, 'test')
npt.assert_(hasattr(self.sampler, 'parameters'))
npt.assert_array_equal(self.sampler.parameters.other, jnp.zeros(2))
def test_copy(self):
sampler2 = self.sampler.deepcopy()
npt.assert_(isinstance(sampler2, sample.Sampler))
sampler2.name = 'other'
npt.assert_equal(self.sampler.name, 'test')
sampler2.parameters.other = 10.
npt.assert_array_equal(self.sampler.parameters.other, jnp.zeros(2))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.testing.assert_array_equal",
"numpy.testing.assert_equal",
"jax.numpy.ones",
"jax.numpy.zeros"
] |
[((3308, 3323), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3321, 3323), False, 'import unittest\n'), ((851, 895), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.cdict.test_float', '(3.0)'], {}), '(self.cdict.test_float, 3.0)\n', (867, 895), True, 'import numpy.testing as npt\n'), ((1209, 1249), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cdict2.test_float', '(3.0)'], {}), '(cdict2.test_float, 3.0)\n', (1225, 1249), True, 'import numpy.testing as npt\n'), ((1276, 1288), 'jax.numpy.zeros', 'jnp.zeros', (['(5)'], {}), '(5)\n', (1285, 1288), True, 'import jax.numpy as jnp\n'), ((1400, 1444), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.cdict.test_float', '(3.0)'], {}), '(self.cdict.test_float, 3.0)\n', (1416, 1444), True, 'import numpy.testing as npt\n'), ((1771, 1815), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cdict_0get.test_float', '(3.0)'], {}), '(cdict_0get.test_float, 3.0)\n', (1787, 1815), True, 'import numpy.testing as npt\n'), ((2276, 2320), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['cdict_add.time', '(35.0)'], {}), '(cdict_add.time, 35.0)\n', (2298, 2320), True, 'import numpy.testing as npt\n'), ((2390, 2433), 'numpy.testing.assert_equal', 'npt.assert_equal', (['cdict_add.test_float', '(3.0)'], {}), '(cdict_add.test_float, 3.0)\n', (2406, 2433), True, 'import numpy.testing as npt\n'), ((2513, 2557), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.cdict.test_float', '(3.0)'], {}), '(self.cdict.test_float, 3.0)\n', (2529, 2557), True, 'import numpy.testing as npt\n'), ((2565, 2604), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.cdict.time', '(10.0)'], {}), '(self.cdict.time, 10.0)\n', (2581, 2604), True, 'import numpy.testing as npt\n'), ((2768, 2811), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.sampler.name', '"""test"""'], {}), "(self.sampler.name, 'test')\n", (2784, 2811), True, 'import numpy.testing as npt\n'), ((3114, 3157), 'numpy.testing.assert_equal', 'npt.assert_equal', (['self.sampler.name', '"""test"""'], {}), "(self.sampler.name, 'test')\n", (3130, 3157), True, 'import numpy.testing as npt\n'), ((580, 597), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (588, 597), True, 'import jax.numpy as jnp\n'), ((768, 785), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (776, 785), True, 'import jax.numpy as jnp\n'), ((1123, 1140), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (1131, 1140), True, 'import jax.numpy as jnp\n'), ((1341, 1358), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (1349, 1358), True, 'import jax.numpy as jnp\n'), ((1687, 1698), 'jax.numpy.ones', 'jnp.ones', (['(3)'], {}), '(3)\n', (1695, 1698), True, 'import jax.numpy as jnp\n'), ((2249, 2266), 'jax.numpy.ones', 'jnp.ones', (['(12, 3)'], {}), '((12, 3))\n', (2257, 2266), True, 'import jax.numpy as jnp\n'), ((2486, 2503), 'jax.numpy.ones', 'jnp.ones', (['(10, 3)'], {}), '((10, 3))\n', (2494, 2503), True, 'import jax.numpy as jnp\n'), ((2720, 2732), 'jax.numpy.zeros', 'jnp.zeros', (['(2)'], {}), '(2)\n', (2729, 2732), True, 'import jax.numpy as jnp\n'), ((2931, 2943), 'jax.numpy.zeros', 'jnp.zeros', (['(2)'], {}), '(2)\n', (2940, 2943), True, 'import jax.numpy as jnp\n'), ((3261, 3273), 'jax.numpy.zeros', 'jnp.zeros', (['(2)'], {}), '(2)\n', (3270, 3273), True, 'import jax.numpy as jnp\n'), ((1886, 1902), 'jax.numpy.ones', 'jnp.ones', (['(2, 3)'], {}), '((2, 3))\n', (1894, 1902), True, 'import jax.numpy as jnp\n')]
|
import numpy as np
import diversipy
def test_distance_to_boundary():
points = np.array([[0.1, 0.2], [0.3, 0.9]])
np.testing.assert_almost_equal(
diversipy.distance.distance_to_boundary(points), np.array([0.1, 0.1])
)
np.testing.assert_almost_equal(
diversipy.distance.distance_to_boundary(points, cuboid=((-1, -1), (2, 2))),
np.array([1.1, 1.1]),
)
def test_distance_matrix():
points1 = np.array([[0.1, 0.2], [0.3, 0.9], [0.6, 0.1]])
points2 = np.array([[0.2, 0.2]])
# test L1 distance
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=1),
[[0.1], [0.1 + 0.7], [0.4 + 0.1]],
)
# test L2 distance
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=2),
[[0.1], [(0.1 ** 2 + 0.7 ** 2) ** 0.5], [(0.4 ** 2 + 0.1 ** 2) ** 0.5]],
)
    # test toroidal L1 distance
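    # On a torus with per-axis period max_dist, each axis distance wraps:
    # d_axis = min(|d|, max_dist - |d|). For the second point, dy wraps to
    # min(0.7, 1 - 0.7) = 0.3, so the wrapped L1 distance is 0.1 + 0.3 = 0.4.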
np.testing.assert_almost_equal(
diversipy.distance.distance_matrix(points1, points2, norm=1, max_dist=[1, 1]),
[[0.1], [0.1 + (1 - 0.7)], [0.4 + 0.1]],
)
import numpy as np
import os
import configparser
import tensorflow as tf
from pkg_resources import resource_filename
from pyniel.python_tools.path_tools import make_dir_if_not_exists
import crowd_sim # adds CrowdSim-v0 to gym # noqa
from crowd_sim.envs.crowd_sim import CrowdSim # reference to env code # noqa
from crowd_sim.envs.utils.robot import Robot # next line fails otherwise # noqa
from crowd_nav.policy.network_om import SDOADRL
from crowd_sim.envs.utils.state import JointState, FullState, ObservableState
from crowd_sim.envs.utils.action import ActionRot
from navrep.scripts.cross_test_navreptrain_in_ianenv import run_test_episodes
from navrep.tools.commonargs import parse_common_args
from navrep.envs.ianenv import IANEnv
TODO = None
class LuciaRawPolicy(object):
""" legacy SOADRL policy from lucia's paper, takes in agents state, local map
The problem is that in the original implementation, policy and environment are intertwined.
this class goes further into separating them by reimplementing methods from
agents.py, robots.py """
def __init__(self):
self._make_policy()
def _make_policy(self):
# Config
config_dir = resource_filename('crowd_nav', 'config')
config_file = os.path.join(config_dir, 'test_soadrl_static.config')
config = configparser.RawConfigParser()
config.read(config_file)
sess = tf.Session()
policy = SDOADRL()
policy.configure(sess, 'global', config)
policy.set_phase('test')
self.model_path = os.path.expanduser('~/soadrl/Final_models/angular_map_full_FOV/rl_model')
policy.load_model(self.model_path)
self.policy = policy
def act(self, obs):
robot_state, humans_state, local_map = obs
state = JointState(robot_state, humans_state)
action = self.policy.predict(state, local_map, None)
action = ActionRot(robot_state.v_pref * action.v, action.r) # de-normalize
return action
class IANEnvWithLegacySOADRLObs(object):
def __init__(self,
silent=False, max_episode_length=1000, collect_trajectories=False):
# Get lidar values from the SOADRL config
config_dir = resource_filename('crowd_nav', 'config')
config_file = os.path.join(config_dir, 'test_soadrl_static.config')
config = configparser.RawConfigParser()
config.read(config_file)
self.v_pref = config.getfloat('humans', 'v_pref')
# lidar scan expected by SOADRL
self.angular_map_max_range = config.getfloat('map', 'angular_map_max_range')
self.angular_map_dim = config.getint('map', 'angular_map_dim')
self.angular_map_min_angle = config.getfloat('map', 'angle_min') * np.pi
self.angular_map_max_angle = config.getfloat('map', 'angle_max') * np.pi
self.angular_map_angle_increment = (
self.angular_map_max_angle - self.angular_map_min_angle) / self.angular_map_dim
self.lidar_upsampling = 15
# create env
self.env = IANEnv(
silent=silent, max_episode_length=max_episode_length, collect_trajectories=collect_trajectories)
self.reset()
def reset(self):
""" IANEnv destroys and re-creates its iarlenv at every reset, so apply our changes here """
self.env.reset()
# we raytrace at a higher resolution, then downsample back to the original soadrl resolution
# this avoids missing small obstacles due to the small soadrl resolution
self.env.iarlenv.rlenv.virtual_peppers[0].kLidarMergedMaxAngle = self.angular_map_max_angle
self.env.iarlenv.rlenv.virtual_peppers[0].kLidarMergedMinAngle = self.angular_map_min_angle
self.env.iarlenv.rlenv.virtual_peppers[0].kLidarAngleIncrement = \
self.angular_map_angle_increment / self.lidar_upsampling
self.env.iarlenv.rlenv.kMergedScanSize = self.angular_map_dim * self.lidar_upsampling
self.episode_statistics = self.env.episode_statistics
obs, _, _, _ = self.step(ActionRot(0.,0.))
return obs
def step(self, action):
# convert lucia action to IANEnv action
ianenv_action = np.array([0., 0., 0.])
# SOADRL - rotation is dtheta
# IAN - rotation is dtheta/dt
ianenv_action[2] = action.r / self.env._get_dt()
# SOADRL - instant rot, then vel
# IAN - vel, then rot
action_vy = 0. # SOADRL outputs non-holonomic by default
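        # Since SOADRL rotates instantly before translating, rotate the
        # body-frame velocity (v, vy) by action.r (a standard 2D rotation)
        # before commanding it to IANEnv.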
ianenv_action[0] = action.v * np.cos(action.r) - action_vy * np.sin(action.r)
ianenv_action[1] = action.v * np.sin(action.r) + action_vy * np.cos(action.r)
# get obs from IANEnv
obs, rew, done, info = self.env.step(ianenv_action)
# convert to SOADRL style
robot_state = FullState(
self.env.iarlenv.rlenv.virtual_peppers[0].pos[0],
self.env.iarlenv.rlenv.virtual_peppers[0].pos[1],
self.env.iarlenv.rlenv.virtual_peppers[0].vel[0],
self.env.iarlenv.rlenv.virtual_peppers[0].vel[1],
self.env.iarlenv.rlenv.vp_radii[0],
self.env.iarlenv.rlenv.agent_goals[0][0],
self.env.iarlenv.rlenv.agent_goals[0][1],
self.v_pref,
self.env.iarlenv.rlenv.virtual_peppers[0].pos[2],)
humans_state = [ObservableState(
human.pos[0],
human.pos[1],
human.vel[0],
human.vel[1],
r,) for human, r in zip(
self.env.iarlenv.rlenv.virtual_peppers[1:], self.env.iarlenv.rlenv.vp_radii[1:])]
scan = obs[0]
# for each angular section we take the min of the returns
downsampled_scan = scan.reshape((-1, self.lidar_upsampling))
downsampled_scan = np.min(downsampled_scan, axis=1)
self.last_downsampled_scan = downsampled_scan
local_map = np.clip(downsampled_scan / self.angular_map_max_range, 0., 1.)
obs = (robot_state, humans_state, local_map)
return obs, rew, done, info
def _get_dt(self):
return self.env._get_dt()
def render(self, *args, **kwargs):
_, lidar_angles = self.env.iarlenv.rlenv.virtual_peppers[0].get_lidar_update_ijangles(
"merged", self.env.iarlenv.rlenv.kMergedScanSize
)
lidar_angles_downsampled = lidar_angles[::self.lidar_upsampling]
kwargs["lidar_angles_override"] = lidar_angles_downsampled
kwargs["lidar_scan_override"] = self.last_downsampled_scan
return self.env.render(*args, **kwargs)
if __name__ == '__main__':
args, _ = parse_common_args()
if args.n is None:
args.n = 1000
collect_trajectories = False
env = IANEnvWithLegacySOADRLObs(silent=True, collect_trajectories=collect_trajectories)
policy = LuciaRawPolicy()
S = run_test_episodes(env, policy, render=args.render, num_episodes=args.n)
DIR = os.path.expanduser("~/navrep/eval/crosstest")
if args.dry_run:
DIR = "/tmp/navrep/eval/crosstest"
make_dir_if_not_exists(DIR)
if collect_trajectories:
NAME = "lucianavreptrain_in_ianenv_{}.pckl".format(len(S))
PATH = os.path.join(DIR, NAME)
S.to_pickle(PATH)
else:
NAME = "lucianavreptrain_in_ianenv_{}.csv".format(len(S))
PATH = os.path.join(DIR, NAME)
S.to_csv(PATH)
print("{} written.".format(PATH))
from collections import OrderedDict
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
from seaborn.palettes import color_palette
import numpy as np
import os
from BatchTransNorm import BatchTransNorm2d
from datasets import (Chest_few_shot, CropDisease_few_shot, EuroSAT_few_shot,
ISIC_few_shot, miniImageNet_few_shot)
def get_visual_domain(BN_list, dataloader_list, dataset_names_list):
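    """Collect per-sample spatial mean/variance of one batch per loader.

    Each batch is passed through the BatchNorm layer fitted to its dataset;
    the per-channel spatial statistics of the output then act as a signature
    of that dataset's visual domain.
    """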
label_dataset = []
spatial_mean = []
spatial_var = []
with torch.no_grad():
for i, loader in enumerate(dataloader_list):
# loader_iter = iter(loader)
# x, _ = loader_iter.next()
for x, _ in loader:
out = BN_list[i](x)
spatial_mean += out.mean([2, 3]).tolist()
spatial_var += out.var([2, 3]).tolist()
label_dataset += [dataset_names_list[i]]*len(x)
break
return np.array(spatial_mean), np.array(spatial_var), label_dataset
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
dataset_class_list = [miniImageNet_few_shot, EuroSAT_few_shot]#, CropDisease_few_shot, Chest_few_shot, ISIC_few_shot]
dataset_names_list = ['miniImageNet', 'EuroSAT',
'CropDisease', 'ChestX', 'ISIC']
dataloader_list = []
for i, dataset_class in enumerate(dataset_class_list):
transform = dataset_class.TransformLoader(
224).get_composed_transform(aug=True)
transform_test = dataset_class.TransformLoader(
224).get_composed_transform(aug=False)
# split = 'datasets/split_seed_1/{0}_labeled_20.csv'.format(
# dataset_names_list[i])
# if dataset_names_list[i] == 'miniImageNet':
split = None
dataset = dataset_class.SimpleDataset(
transform, split=split)
loader = torch.utils.data.DataLoader(dataset, batch_size=128,
num_workers=0,
shuffle=True, drop_last=True)
dataloader_list.append(loader)
BN_list = []
btn = BatchTransNorm2d(num_features=3)
with torch.no_grad():
for i, loader in enumerate(dataloader_list):
BN_list.append(nn.BatchNorm2d(num_features=3))
BN_list[-1].train()
for epoch in range(3): # number of epoch
for x, _ in loader:
BN_list[-1](x)
# break
print('dataset {0} epoch {1}'.format(dataset_names_list[i], epoch))
btn.load_state_dict(BN_list[0].state_dict())
vd_mean, vd_var, labels = get_visual_domain(BN_list, dataloader_list, dataset_names_list)
tvd_mean, tvd_var, labels = get_visual_domain([btn]*len(BN_list), dataloader_list, dataset_names_list)
color = sns.color_palette(n_colors=len(dataloader_list))
fig = plt.figure(figsize=(20, 10))
ax = fig.subplots(1,2)
sns.kdeplot(x=vd_mean[:, 0], y=vd_var[:, 0],
hue=labels, ax=ax[0], palette=color)
sns.kdeplot(x=tvd_mean[:, 0], y=tvd_var[:, 0],
hue=labels, ax=ax[1], palette=color)
title = 'Left visual domain, Right transnormed visual domain.'
fig.suptitle(title)
plt.savefig('./lab/visual_domain/{0}.png'.format(title))
plt.savefig('./lab/visual_domain/{0}.svg'.format(title))
print(title)
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Entry/exit point for pulse simulation specified through PulseSimulator backend
"""
from warnings import warn
import numpy as np
from ..system_models.string_model_parser.string_model_parser import NoiseParser
from ..qutip_extra_lite import qobj_generators as qobj_gen
from .digest_pulse_qobj import digest_pulse_qobj
from ..de_solvers.pulse_de_options import OPoptions
from .unitary_controller import run_unitary_experiments
from .mc_controller import run_monte_carlo_experiments
def pulse_controller(qobj, system_model, backend_options):
""" Interprets PulseQobj input, runs simulations, and returns results
Parameters:
qobj (qobj): pulse qobj containing a list of pulse schedules
system_model (PulseSystemModel): contains system model information
backend_options (dict): dict of options, which overrides other parameters
Returns:
list: simulation results
Raises:
ValueError: if input is of incorrect format
Exception: for invalid ODE options
"""
pulse_sim_desc = PulseSimDescription()
if backend_options is None:
backend_options = {}
noise_model = backend_options.get('noise_model', None)
# post warnings for unsupported features
_unsupported_warnings(noise_model)
# ###############################
# ### Extract model parameters
# ###############################
# Get qubit list and number
qubit_list = system_model.subsystem_list
if qubit_list is None:
raise ValueError('Model must have a qubit list to simulate.')
n_qubits = len(qubit_list)
# get Hamiltonian
if system_model.hamiltonian is None:
raise ValueError('Model must have a Hamiltonian to simulate.')
ham_model = system_model.hamiltonian
# For now we dump this into OpSystem, though that should be refactored
pulse_sim_desc.system = ham_model._system
pulse_sim_desc.vars = ham_model._variables
pulse_sim_desc.channels = ham_model._channels
pulse_sim_desc.h_diag = ham_model._h_diag
pulse_sim_desc.evals = ham_model._evals
pulse_sim_desc.estates = ham_model._estates
dim_qub = ham_model._subsystem_dims
dim_osc = {}
# convert estates into a Qutip qobj
estates = [qobj_gen.state(state) for state in ham_model._estates.T[:]]
pulse_sim_desc.initial_state = estates[0]
pulse_sim_desc.global_data['vars'] = list(pulse_sim_desc.vars.values())
# Need this info for evaluating the hamiltonian vars in the c++ solver
pulse_sim_desc.global_data['vars_names'] = list(pulse_sim_desc.vars.keys())
# Get dt
if system_model.dt is None:
raise ValueError('Qobj must have a dt value to simulate.')
pulse_sim_desc.dt = system_model.dt
# Parse noise
if noise_model:
noise = NoiseParser(noise_dict=noise_model, dim_osc=dim_osc, dim_qub=dim_qub)
noise.parse()
pulse_sim_desc.noise = noise.compiled
if any(pulse_sim_desc.noise):
pulse_sim_desc.can_sample = False
# ###############################
# ### Parse qobj_config settings
# ###############################
digested_qobj = digest_pulse_qobj(qobj,
pulse_sim_desc.channels,
pulse_sim_desc.dt,
qubit_list,
backend_options)
# does this even need to be extracted here, or can the relevant info just be passed to the
# relevant functions?
pulse_sim_desc.global_data['shots'] = digested_qobj.shots
pulse_sim_desc.global_data['meas_level'] = digested_qobj.meas_level
pulse_sim_desc.global_data['meas_return'] = digested_qobj.meas_return
pulse_sim_desc.global_data['memory_slots'] = digested_qobj.memory_slots
pulse_sim_desc.global_data['memory'] = digested_qobj.memory
pulse_sim_desc.global_data['n_registers'] = digested_qobj.n_registers
pulse_sim_desc.global_data['pulse_array'] = digested_qobj.pulse_array
pulse_sim_desc.global_data['pulse_indices'] = digested_qobj.pulse_indices
pulse_sim_desc.pulse_to_int = digested_qobj.pulse_to_int
pulse_sim_desc.experiments = digested_qobj.experiments
# Handle qubit_lo_freq
qubit_lo_freq = digested_qobj.qubit_lo_freq
# if it wasn't specified in the PulseQobj, draw from system_model
if qubit_lo_freq is None:
qubit_lo_freq = system_model._qubit_freq_est
# if still None draw from the Hamiltonian
if qubit_lo_freq is None:
qubit_lo_freq = system_model.hamiltonian.get_qubit_lo_from_drift()
        warn('Warning: qubit_lo_freq was not specified in PulseQobj or in PulseSystemModel, ' +
             'so it is being automatically determined from the drift Hamiltonian.')
pulse_sim_desc.freqs = system_model.calculate_channel_frequencies(qubit_lo_freq=qubit_lo_freq)
pulse_sim_desc.global_data['freqs'] = list(pulse_sim_desc.freqs.values())
# ###############################
# ### Parse backend_options
# # solver-specific information should be extracted in the solver
# ###############################
pulse_sim_desc.global_data['seed'] = (int(backend_options['seed']) if 'seed' in backend_options
else None)
pulse_sim_desc.global_data['q_level_meas'] = int(backend_options.get('q_level_meas', 1))
# solver options
allowed_ode_options = ['atol', 'rtol', 'nsteps', 'max_step',
'num_cpus', 'norm_tol', 'norm_steps',
'rhs_reuse', 'rhs_filename']
ode_options = backend_options.get('ode_options', {})
for key in ode_options:
if key not in allowed_ode_options:
raise Exception('Invalid ode_option: {}'.format(key))
pulse_sim_desc.ode_options = OPoptions(**ode_options)
    # Set the ODE solver max step to be half the
# width of the smallest pulse
min_width = np.iinfo(np.int32).max
for key, val in pulse_sim_desc.pulse_to_int.items():
if key != 'pv':
stop = pulse_sim_desc.global_data['pulse_indices'][val + 1]
start = pulse_sim_desc.global_data['pulse_indices'][val]
min_width = min(min_width, stop - start)
pulse_sim_desc.ode_options.max_step = min_width / 2 * pulse_sim_desc.dt
# ########################################
# Determination of measurement operators.
# ########################################
pulse_sim_desc.global_data['measurement_ops'] = [None] * n_qubits
for exp in pulse_sim_desc.experiments:
# Add in measurement operators
# Not sure if this will work for multiple measurements
# Note: the extraction of multiple measurements works, but the simulator itself
# implicitly assumes there is only one measurement at the end
if any(exp['acquire']):
for acq in exp['acquire']:
for jj in acq[1]:
if jj > qubit_list[-1]:
continue
if not pulse_sim_desc.global_data['measurement_ops'][qubit_list.index(jj)]:
q_level_meas = pulse_sim_desc.global_data['q_level_meas']
pulse_sim_desc.global_data['measurement_ops'][qubit_list.index(jj)] = \
qobj_gen.qubit_occ_oper_dressed(jj,
estates,
h_osc=dim_osc,
h_qub=dim_qub,
level=q_level_meas
)
if not exp['can_sample']:
pulse_sim_desc.can_sample = False
op_data_config(pulse_sim_desc)
run_experiments = (run_unitary_experiments if pulse_sim_desc.can_sample
else run_monte_carlo_experiments)
exp_results, exp_times = run_experiments(pulse_sim_desc)
return format_exp_results(exp_results, exp_times, pulse_sim_desc)
def op_data_config(op_system):
""" Preps the data for the opsolver.
This should eventually be replaced by functions that construct different types of DEs
in standard formats
Everything is stored in the passed op_system.
Args:
op_system (OPSystem): An openpulse system.
"""
num_h_terms = len(op_system.system)
H = [hpart[0] for hpart in op_system.system]
op_system.global_data['num_h_terms'] = num_h_terms
# take care of collapse operators, if any
op_system.global_data['c_num'] = 0
if op_system.noise:
op_system.global_data['c_num'] = len(op_system.noise)
op_system.global_data['num_h_terms'] += 1
op_system.global_data['c_ops_data'] = []
op_system.global_data['c_ops_ind'] = []
op_system.global_data['c_ops_ptr'] = []
op_system.global_data['n_ops_data'] = []
op_system.global_data['n_ops_ind'] = []
op_system.global_data['n_ops_ptr'] = []
op_system.global_data['h_diag_elems'] = op_system.h_diag
# if there are any collapse operators
H_noise = 0
for kk in range(op_system.global_data['c_num']):
c_op = op_system.noise[kk]
n_op = c_op.dag() * c_op
# collapse ops
op_system.global_data['c_ops_data'].append(c_op.data.data)
op_system.global_data['c_ops_ind'].append(c_op.data.indices)
op_system.global_data['c_ops_ptr'].append(c_op.data.indptr)
# norm ops
op_system.global_data['n_ops_data'].append(n_op.data.data)
op_system.global_data['n_ops_ind'].append(n_op.data.indices)
op_system.global_data['n_ops_ptr'].append(n_op.data.indptr)
# Norm ops added to time-independent part of
# Hamiltonian to decrease norm
H_noise -= 0.5j * n_op
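        # (this accumulates the anti-Hermitian part of the effective
        #  Hamiltonian H_eff = H - (i/2) * sum_k C_k^dag C_k used when
        #  evolving with collapse operators)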
if H_noise:
H = H + [H_noise]
# construct data sets
op_system.global_data['h_ops_data'] = [-1.0j * hpart.data.data
for hpart in H]
op_system.global_data['h_ops_ind'] = [hpart.data.indices for hpart in H]
op_system.global_data['h_ops_ptr'] = [hpart.data.indptr for hpart in H]
# Convert inital state to flat array in global_data
op_system.global_data['initial_state'] = \
op_system.initial_state.full().ravel()
def format_exp_results(exp_results, exp_times, op_system):
""" format simulation results
Parameters:
exp_results (list): simulation results
exp_times (list): simulation times
op_system (PulseSimDescription): object containing all simulation information
Returns:
list: formatted simulation results
"""
# format the data into the proper output
all_results = []
for idx_exp, exp in enumerate(op_system.experiments):
m_lev = op_system.global_data['meas_level']
m_ret = op_system.global_data['meas_return']
# populate the results dictionary
results = {'seed_simulator': exp['seed'],
'shots': op_system.global_data['shots'],
'status': 'DONE',
'success': True,
'time_taken': exp_times[idx_exp],
'header': exp['header'],
'meas_level': m_lev,
'meas_return': m_ret,
'data': {}}
if op_system.can_sample:
memory = exp_results[idx_exp][0]
results['data']['statevector'] = []
for coef in exp_results[idx_exp][1]:
results['data']['statevector'].append([np.real(coef),
np.imag(coef)])
results['header']['ode_t'] = exp_results[idx_exp][2]
else:
memory = exp_results[idx_exp]
# meas_level 2 return the shots
if m_lev == 2:
            # convert the memory **array** into an integer
# e.g. [1,0] -> 2
int_mem = memory.dot(np.power(2.0,
np.arange(memory.shape[1]))).astype(int)
# if the memory flag is set return each shot
if op_system.global_data['memory']:
hex_mem = [hex(val) for val in int_mem]
results['data']['memory'] = hex_mem
# Get hex counts dict
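            # e.g. int_mem = [2, 2, 0] -> counts {'0x0': 1, '0x2': 2}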
unique = np.unique(int_mem, return_counts=True)
hex_dict = {}
for kk in range(unique[0].shape[0]):
key = hex(unique[0][kk])
hex_dict[key] = unique[1][kk]
results['data']['counts'] = hex_dict
# meas_level 1 returns the <n>
elif m_lev == 1:
if m_ret == 'avg':
memory = [np.mean(memory, 0)]
# convert into the right [real, complex] pair form for json
# this should be cython?
results['data']['memory'] = []
for mem_shot in memory:
results['data']['memory'].append([])
for mem_slot in mem_shot:
results['data']['memory'][-1].append(
[np.real(mem_slot), np.imag(mem_slot)])
if m_ret == 'avg':
results['data']['memory'] = results['data']['memory'][0]
all_results.append(results)
return all_results
def _unsupported_warnings(noise_model):
""" Warns the user about untested/unsupported features.
Parameters:
noise_model (dict): backend_options for simulation
Returns:
Raises:
AerError: for unsupported features
"""
# Warnings that don't stop execution
warning_str = '{} are an untested feature, and therefore may not behave as expected.'
if noise_model is not None:
warn(warning_str.format('Noise models'))
class PulseSimDescription():
""" Object for holding any/all information required for simulation.
Needs to be refactored into different pieces.
"""
def __init__(self):
# The system Hamiltonian in numerical format
self.system = None
# The noise (if any) in numerical format
self.noise = None
# System variables
self.vars = None
# The initial state of the system
self.initial_state = None
# Channels in the Hamiltonian string
# these tell the order in which the channels
# are evaluated in the RHS solver.
self.channels = None
# options of the ODE solver
self.ode_options = None
# time between pulse sample points.
self.dt = None
# Array containing all pulse samples
self.pulse_array = None
# Array of indices indicating where a pulse starts in the self.pulse_array
self.pulse_indices = None
# A dict that translates pulse names to integers for use in self.pulse_indices
self.pulse_to_int = None
# Holds the parsed experiments
self.experiments = []
# Can experiments be simulated once then sampled
self.can_sample = True
# holds global data
self.global_data = {}
# holds frequencies for the channels
self.freqs = {}
# diagonal elements of the hamiltonian
self.h_diag = None
# eigenvalues of the time-independent hamiltonian
self.evals = None
# eigenstates of the time-independent hamiltonian
self.estates = None
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
atmospheres.py includes functions to calculate atmospheric quantities.
Created on Tue Nov 29 11:45:15 2016
@author: tr1010 (<NAME>)
"""
import sys
sys.path.append('atmosphere_models/Python-NRLMSISE-00-master')
from nrlmsise_00_header import *
from nrlmsise_00 import *
import numpy as np
def nrlmsise00(doy,year,sec,alt,g_lat,g_long,lst,f107A,f107,ap):
"""
nrlmsise00 calculates atmospheric quantities using the NRLMSISE-00
atmosphere published in 2001 by <NAME>, <NAME>, and <NAME>.
Originally written in FORTRAN, it was later implemented in C by Dominik
Brodowski.
This function calls a Python port of Brodowski's C implementation originally
written by <NAME> in 2013. This software was released under an MIT
license (see the license file in the atmosphere_models directory).
The NRLMSISE-00 model uses a number of switches (contained in the flags
class) to modify the model output. At the moment, these defaults are hard-
wired into PETra. Later revisions will give the user the ability to select
these switches. For more detailed information about the inputs/outputs/switches
    used in this model, the user is directed to the docstrings of the functions
contained in the model files (norlmsise_00_header.py and nrlmsise_00.py).
Inputs:
doy: day of year
year: year (currently ignored)
sec: seconds in day
alt: altitude
g_lat: geodetic latitude
g_long: geodetic longitude
lst: local apparent solar time (hours)
f107A: 81 day average of F10.7 flux (centred on doy)
f107: daily f10.7 flux (for previous day)
ap: magnetic index (daily)
Outputs:
rho: density at the requested altitude
pressure_mixture: pressure at the requested altitude
temperature: temperature at the requested altitude
R_mixture: the gas constant of the mixture
mean_free_path: mean free path of the air at the requested altitude.
In contrast to the other outputs of this function, the
mean free path calculation assumes a single molecule
gas (assumed to be an 'average' air molecule)
        eta: viscosity (calculated using Sutherland's law)
molecular_weight_mixture: the molecular weight of the air at the
requested altitude
SoS: speed of sound (assume ratio of specific heats is constant 1.4
everywhere in the atmosphere)
"""
output = nrlmsise_output()
Input = nrlmsise_input()
# output = [nrlmsise_output() for _ in range(17)]
# Input = [nrlmsise_input() for _ in range(17)]
flags = nrlmsise_flags()
aph = ap_array() # For more detailed ap data (i.e more than daily)
flags.switches[0] = 1 # to have results in m rather than cm
for i in range(1,24):
flags.switches[i]=1
# below 80 km solar & magnetic effects not well established so set to defaults
if alt < 80e3:
f107 = 150.
f107A = 150.
ap = 4.
# fill out Input class
Input.year=year
Input.doy=doy
Input.sec=sec
Input.alt=alt*1e-3 #change input to km
Input.g_lat=g_lat*180/np.pi
Input.g_long=g_long*180/np.pi
Input.lst=lst
Input.f107A=f107A
Input.f107=f107
Input.ap=ap
if alt > 500e3:
gtd7d(Input, flags, output)
else:
gtd7(Input, flags, output)
d = output.d
t = output.t
"""
DEFAULT OUTPUT VARIABLES:
d[0] - HE NUMBER DENSITY(CM-3)
d[1] - O NUMBER DENSITY(CM-3)
d[2] - N2 NUMBER DENSITY(CM-3)
d[3] - O2 NUMBER DENSITY(CM-3)
d[4] - AR NUMBER DENSITY(CM-3)
d[5] - TOTAL MASS DENSITY(GM/CM3) [includes d[8] in td7d]
d[6] - H NUMBER DENSITY(CM-3)
d[7] - N NUMBER DENSITY(CM-3)
d[8] - Anomalous oxygen NUMBER DENSITY(CM-3)
t[0] - EXOSPHERIC TEMPERATURE
t[1] - TEMPERATURE AT ALT
"""
#Now process output to get required values
kb = 1.38064852e-23 # Boltzmann constant (m**2 kg)/(s**2 K)
Na = 6.022140857e26 # avogadro number (molecules per kilomole)
R0 = kb * Na # universal gas constant
#Molecular weights of different components (kg/kmole)
molecular_weights = np.zeros(8)
molecular_weights[0] = 4.002602 #He
molecular_weights[1] = 15.9994 #O
molecular_weights[2] = 28.0134 #N2
molecular_weights[3] = 31.9988 #O2
molecular_weights[4] = 39.948 #AR
molecular_weights[5] = 1.00794 #H
molecular_weights[6] = 14.0067 #N
molecular_weights[7] = 15.9994 #anomalous O
# Calculate partial pressures
partial_p = np.zeros(8)
partial_p[0] = d[0]*kb*t[1] #He
partial_p[1] = d[1]*kb*t[1] #O
partial_p[2] = d[2]*kb*t[1] #N2
partial_p[3] = d[3]*kb*t[1] #O2
partial_p[4] = d[4]*kb*t[1] #AR
partial_p[5] = d[6]*kb*t[1] #H
partial_p[6] = d[7]*kb*t[1] #N
partial_p[7] = d[8]*kb*t[1] #anomalous O
#Assuming perfect gas, calculate atmospheric pressure
pressure_mixture = np.sum(partial_p)
temperature = t[1]
mole_fraction = np.divide(partial_p,pressure_mixture)
molecular_weight_mixture = np.sum(np.multiply(mole_fraction,molecular_weights)) #kg/kmol
mass_fractions = np.multiply(mole_fraction,
np.divide(molecular_weights,molecular_weight_mixture))
specific_gas_constants = R0/molecular_weights
R_mixture = np.sum(np.multiply(mass_fractions,specific_gas_constants))
number_density_mixture = np.sum(d) - d[5]
mean_free_path = (np.sqrt(2)*np.pi*4.15e-10**2*number_density_mixture)**-1
eta = np.float64(1.458e-6*temperature**1.5/(temperature + 110.4)) # dynamic viscosity via sutherland law
SoS = np.float64(np.sqrt(1.4*R_mixture*temperature))
rho = d[5]
return rho, pressure_mixture, temperature, R_mixture, mean_free_path, eta, molecular_weight_mixture, SoS
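# Minimal usage sketch (illustrative values, not tied to any dataset):
# latitude/longitude are passed in radians, altitude in metres.
# rho, p, T, R, mfp, eta, molw, sos = nrlmsise00(
#     doy=172, year=2019, sec=29000., alt=400e3, g_lat=0.7, g_long=-1.2,
#     lst=16., f107A=150., f107=150., ap=4.)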
# US "mutant" atmosphere: US76 standard atmosphere below 80 km, US62 above
def US62_76(r,RE):
"""
US62_76 is a very simple atmosphere model that uses the US76 standard
atmosphere below 80 km and the US62 standard atmosphere above 80km
Inputs:
r: altitude
RE: radius of the Earth
Outputs:
rho: density
P: pressure
T: temperature
mfp: mean free path
eta: viscosity (sutherland's law)
MolW: molecular weight
SoS: speed of sound
"""
#Some constants:
#RE = 6378.137e3
Na = np.float64(6.0220978e23)
sig = np.float64(3.65e-10)
# Sea level standard values:
P0 = 101325.0 #Pa
T0 = 288.15 #K
M = np.array([28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.9644, 28.962, 28.962,
28.88, 28.56, 28.07, 26.92, 26.66, 26.4, 25.85,
24.70, 22.66, 19.94, 17.94, 16.84, 16.17]) # Molecular masses with altitude g/mol
R0 = 8.31432 # J/mol-K
g0 = 9.806658 # m/s2
GM_R = g0*M/R0 # GM/R K/km
Z = (r - RE)*1e-3 # convert radius in m to altitude in km
H = me2po(RE,Z) # geopotential altitude
BLH = np.array([0., 11., 20., 32., 47., 51., 71., me2po(RE,86.),
me2po(RE,100.), me2po(RE,110.), me2po(RE,120.), me2po(RE,150.),
me2po(RE,160.), me2po(RE,170.), me2po(RE,190.), me2po(RE,230.),
me2po(RE,300.), me2po(RE,400.), me2po(RE,500.), me2po(RE,600.),
me2po(RE,700.)])
L = np.array([0., -6.5, 0., 1., 2.8, 0., -2.8, -2., 1.693, 5., 10., 20., 15.,
10., 7., 5., 4., 3.3, 2.6, 1.7, 1.1])
BLT = np.zeros((21,))
BLP = np.zeros((21,))
BLT[0] = T0
BLP[0] = P0
for i in range(0, 20):
# Calculate base temperatures
BLT[i+1] = BLT[i] + L[i+1]*(BLH[i+1] - BLH[i])
# Calculate base pressures
if (i+1 == 0) or (i+1 == 2) or (i+1 == 5):
BLP[i+1] = BLP[i]*np.exp(-GM_R[i+1]*(BLH[i+1] - BLH[i])/BLT[i])
else:
BLP[i+1] = BLP[i]*((BLT[i] + L[i+1]*(BLH[i+1] - BLH[i]))/BLT[i])**(-GM_R[i+1]/L[i+1])
# Calculate values at requested altitude
if H > BLH[i] and H <= BLH[i+1]:
            # Molecular weight (interpolate)
MolW = M[i] + (M[i+1] - M[i])*(H - BLH[i])/(BLH[i+1] - BLH[i])
gmrtemp = g0*MolW/R0
# Molecular scale Temperature
T = np.float64(BLT[i] + L[i+1]*(H - BLH[i]))
T = MolW*T/M[0] # Convert molecular scale temperature to kinetic temperature
# Pressure
if i+1 == 0 or i+1 == 2 or i+1 == 5:
P = np.float64(BLP[i]*np.exp(-gmrtemp*(H - BLH[i])/BLT[i]))
else:
P = np.float64(BLP[i]*((BLT[i] + L[i+1]*(H - BLH[i]))/BLT[i])**(-gmrtemp/L[i+1]))
# Density
rho = np.float64(MolW*1e-3*P/(R0*T))
mfp = np.float64(MolW*1e-3/(2**0.5*np.pi*sig**2*rho*Na)) # mean free path
eta = np.float64(1.458e-6*T**1.5/(T + 110.4)) # dynamic viscosity via sutherland law
SoS = np.float64(np.sqrt(1.4*287.085*T))
return rho, P, T, mfp, eta, MolW, SoS
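# Minimal usage sketch: conditions 50 km above a spherical Earth.
# RE = 6378.137e3
# rho, P, T, mfp, eta, MolW, SoS = US62_76(RE + 50e3, RE)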
def me2po(RE,Z):
"""
me2po converts geometric altitude to geopotential altitude -- the US
standard atmosphere works in geopotential altitudes, which approximates the
altitude of a pressure surface above the mean sea level.
The reasoning for this is as follows: A change in geometric altitude will
create a change in gravitational potential energy per unit mass (as the
effects of gravity become smaller as two objects move away from each other)
Inputs:
RE: Earth radius
Z: Geometric altitude
Outputs:
H: Geopotential altitude
"""
H = RE*Z/(RE + Z)
return H
import numpy as np
import matplotlib.pyplot as plt
from nt_toolbox.signal import imageplot
def plot_levelset(Z, level=0, f=[]):
"""
f is supposed to be of the same shape as Z
"""
if len(f) == 0:
f = np.copy(Z)
    n, p = np.shape(Z)
    X, Y = np.meshgrid(np.arange(0, n), np.arange(0, p))
    plt.contour(X, Y, Z, [level], linewidths=2, colors="red")
imageplot(f)
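# Minimal usage sketch (assumes a signed-distance-like array):
# Z = np.linalg.norm(np.indices((100, 100)) - 50, axis=0) - 30
# plot_levelset(Z)  # draws the zero level set, a circle of radius 30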
# python3
#
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example using TF Lite to classify objects with the Raspberry Pi camera."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import argparse
#import io
#import time
import numpy as np
#import picamera
from PIL import Image
from tflite_runtime.interpreter import Interpreter
#from datetime import datetime
#from time import sleep
def saveImageSimple(cropImage):
filePath = "./test/224.jpg"
cropImage.save(filePath, quality=100, subsampling=0)
print("saved", filePath) # log DEBUG
return True
def load_labels(path):
with open(path, 'r') as f:
return {i: line.strip() for i, line in enumerate(f.readlines())}
def set_input_tensor(interpreter, image):
tensor_index = interpreter.get_input_details()[0]['index']
input_tensor = interpreter.tensor(tensor_index)()[0]
input_tensor[:, :] = image
def classify_image(interpreter, image, top_k=1):
"""Returns a sorted array of classification results."""
set_input_tensor(interpreter, image)
interpreter.invoke()
output_details = interpreter.get_output_details()[0]
output = np.squeeze(interpreter.get_tensor(output_details['index']))
# If the model is quantized (uint8 data), then dequantize the results
if output_details['dtype'] == np.uint8:
scale, zero_point = output_details['quantization']
# "output" is list of probablilities, in the same order as labels are in dict.txt
output = scale * (output - zero_point)
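        # e.g. a uint8 output of 200 with scale = 1/255 and zero_point = 0
        # dequantizes to 200/255 ~= 0.784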
# "ordered" is list of numbers that show the order of each probability in "output"
ordered = np.argpartition(-output, top_k)
# print("ordered ", ordered)
# print("output", output)
# best = ordered[0]
# all = [(labels[i], output[i]) for i in ordered[:top_k]]
# print(best, all)
return output
# return ordered # labels
# return output
def formatOutput(output, labels):
    # Map each output probability to its label name (labels is {index: name})
    all = {labels[i]: prob for i, prob in enumerate(output)}
    bestKey = max(all, key=lambda key: all[key])
    bestVal = all[bestKey]
    return bestKey, bestVal, all
# Main function
def classify(cropFrame):
print("Here")
# width = 224
# height = 224
# Hardcoded args
# model = './models/tflite-plumps1_20210328/model.tflite'
# labels = './models/tflite-plumps1_20210328/dict.txt'
model = './models/tflite-plumps2_20210330/model.tflite'
labels = './models/tflite-plumps2_20210330/dict.txt'
# TODO: Do this only once, pass to the function?
labels = load_labels(labels)
interpreter = Interpreter(model)
interpreter.allocate_tensors()
_, height, width, _ = interpreter.get_input_details()[0]['shape']
cropImage = Image.fromarray(cropFrame)
cropImage = cropImage.resize((width, height), Image.ANTIALIAS)
# success = saveImageSimple(cropImage) # test
results = classify_image(interpreter, cropImage, 1)
# print("Results array ", results)
bestKey, bestVal, all = formatOutput(results, labels)
print("res: ", bestKey, bestVal, all)
# label_id, prob = results[0]
# print(labels[label_id], prob)
# return labels[label_id], prob
return bestKey, bestVal, all
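# Minimal usage sketch (hypothetical frame): any HxWx3 uint8 array works,
# since classify() resizes it to the model's input shape.
# frame = np.zeros((480, 640, 3), dtype=np.uint8)
# label, prob, scores = classify(frame)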
"""
Context class for the pushing task as used in the paper
"How to Train Your Differentiable Filter".
"""
# this code only works with tensorflow 1
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_probability as tfp
import numpy as np
import os
import csv
from matplotlib.patches import Ellipse
from matplotlib.patches import Polygon
import matplotlib.pyplot as plt
import pickle
from differentiable_filters.contexts import paper_base_context as base
from differentiable_filters.utils.base_layer import BaseLayer
from differentiable_filters.utils import recordio as tfr
from differentiable_filters.utils import push_utils as utils
from differentiable_filters.utils import tensorflow_compatability as compat
class Context(base.PaperBaseContext):
def __init__(self, param, mode):
"""
Context class for the pushing task as used in the paper.
Parameters
----------
param : dict
A dictionary of arguments
mode : string
determines which parts of the model are trained. Use "filter" for
the whole model, "pretrain_obs" for pretraining the observation
related functions of the context in isolation or "pretrain_proc"
for pretrainign the process-related functions of the context.
"""
base.PaperBaseContext.__init__(self, param, mode)
if 'normalize' in param.keys():
self.normalize = param['normalize']
else:
self.normalize = 'layer'
# the state size
self.dim_x = 10
self.dim_u = 2
self.dim_z = 8
# dimension names
self.x_names = ['x', 'y', 'theta', 'l', 'mu', 'rx', 'ry', 'nx', 'ny',
's']
self.z_names = ['x', 'y', 'theta', 'rx', 'ry', 'nx', 'ny', 's']
# load the points on the outline of the butter object for visualization
path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
with open(os.path.join(path, 'resources',
'butter_points.pkl'), 'rb') as bf:
butter_points = pickle.load(bf)
self.butter_points = np.array(butter_points)
# define initial values for the process noise q and observation noise r
# diagonals
# Important: All values are standard-deviations, so they are
# squared for forming the covariance matrices
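        # (e.g. a standard deviation entry of 0.1 contributes 0.1**2 = 0.01
        #  to the covariance diagonal)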
if param['q_diag'] is not None:
cov_string = param['q_diag']
cov = list(map(lambda x: float(x), cov_string.split(' ')))
self.q_diag = np.array(cov).astype(np.float32)
else:
self.q_diag = np.ones((self.dim_x)).astype(np.float32)
self.q_diag = self.q_diag.astype(np.float32) / self.scale
if param['r_diag'] is not None:
cov_string = param['r_diag']
cov = list(map(lambda x: float(x), cov_string.split(' ')))
self.r_diag = np.array(cov).astype(np.float32)
else:
self.r_diag = np.ones((self.dim_z)).astype(np.float32)
self.r_diag = self.r_diag.astype(np.float32) / self.scale
# if the noise matrices are not learned, we construct the fixed
# covariance matrices here
q = np.diag(np.square(self.q_diag))
self.Q = tf.convert_to_tensor(q, dtype=tf.float32)
r = np.diag(np.square(self.r_diag))
self.R = tf.convert_to_tensor(r, dtype=tf.float32)
# for state in mm/deg,
# c = np.array([50, 50, 1e-2, 5, 5, 50, 50, 0.5, 0.5, 0.5])
self.noise_list = \
[np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
np.array([49.8394116, -2.3510439, 0, 2.5196417, 1.93745247,
27.6656989, 67.1287098, 0.03124815, -0.18917632,
-0.14730855]),
np.array([27.9914853, -30.3366791, 0, -4.6963326, -2.96631439,
3.6698755, -14.5376077, -0.49956926, 0.56362964,
0.54478971])]
for i, n in enumerate(self.noise_list):
self.noise_list[i] = n.astype(np.float32)
if mode == 'filter':
train_sensor_model = param['train_sensor_model']
train_process_model = param['train_process_model']
train_q = param['train_q']
train_r = param['train_r']
if param['filter'] == 'lstm':
train_process_model = False
train_q = False
train_r = False
# tensorflow does not allow summaries inside rnn-loops
summary = False
else:
train_sensor_model = True
train_process_model = True
train_q = True
train_r = True
summary = True
# all layers used in the context need to be instantiated here, but we
# cannot instantiate layers that will not be used
if mode == 'filter' or mode == 'pretrain_obs':
            # don't train the segmentation model if we use a pretrained
# sensor network
self.segmentation_layer = \
SegmentationLayer(self.batch_size, self.normalize, summary,
train_sensor_model)
self.sensor_model_layer = \
SensorLayer(self.batch_size, self.normalize, self.scale,
summary, train_sensor_model)
self.observation_model_layer = ObservationModel(self.dim_z,
self.batch_size)
# group the layers for easier checkpoint restoring
self.observation_models = {'sensor': [self.segmentation_layer,
self.sensor_model_layer],
'obs': self.observation_model_layer}
self.update_ops += self.segmentation_layer.updateable
self.update_ops += self.sensor_model_layer.updateable
else:
self.observation_models = {}
lstm_no_noise = param['filter'] == 'lstm' and \
not param['lstm_structure'] == 'full'
self.observation_noise_models = {}
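        # The four branches below cover {heteroscedastic, constant} x
        # {diagonal, full} observation noise; during filtering only the
        # variant selected by param is built, while 'pretrain_obs' builds
        # all of them so they can be trained jointly.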
if param['learn_r'] and param['hetero_r'] and \
param['diagonal_covar'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.observation_noise_hetero_diag = \
ObservationNoise(self.batch_size, self.dim_z, self.r_diag,
self.scale,
hetero=True, diag=True, trainable=train_r,
summary=summary)
self.observation_noise_models['het_diag'] = \
self.observation_noise_hetero_diag
if param['learn_r'] and param['hetero_r'] and \
not param['diagonal_covar'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.observation_noise_hetero_full = \
ObservationNoise(self.batch_size, self.dim_z, self.r_diag,
self.scale, hetero=True, diag=False,
trainable=train_r, summary=summary)
self.observation_noise_models['het_full'] = \
self.observation_noise_hetero_full
if param['learn_r'] and not param['hetero_r'] and \
param['diagonal_covar'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.observation_noise_const_diag = \
ObservationNoise(self.batch_size, self.dim_z, self.r_diag,
self.scale, hetero=False, diag=True,
trainable=train_r, summary=summary)
self.observation_noise_models['const_diag'] = \
self.observation_noise_const_diag
if param['learn_r'] and not param['hetero_r'] and \
not param['diagonal_covar'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.observation_noise_const_full = \
ObservationNoise(self.batch_size, self.dim_z, self.r_diag,
self.scale, hetero=False, diag=False,
trainable=train_r, summary=summary)
self.observation_noise_models['const_full'] = \
self.observation_noise_const_full
if param['learned_likelihood'] and mode == 'filter' and \
not lstm_no_noise or mode == 'pretrain_obs':
self.likelihood_layer = Likelihood(self.dim_z, trainable=train_r,
summary=summary)
self.observation_noise_models['like'] = self.likelihood_layer
self.process_models = {}
lstm_unstructured = param['filter'] == 'lstm' and \
(param['lstm_structure'] == 'none' or
param['lstm_structure'] == 'lstm' or
param['lstm_structure'] == 'lstm1')
if mode == 'filter' and not lstm_unstructured and \
param['learn_process'] or mode == 'pretrain_process':
self.process_model_learned_layer = \
ProcessModel(self.batch_size, self.dim_x, self.scale,
learned=True, jacobian=param['filter'] == 'ekf',
trainable=train_process_model, summary=summary)
self.process_models['learned'] = self.process_model_learned_layer
if mode == 'filter' and not lstm_unstructured and \
not param['learn_process'] or mode == 'pretrain_process':
self.process_model_analytical_layer = \
ProcessModel(self.batch_size, self.dim_x, self.scale,
learned=False, jacobian=param['filter'] == 'ekf',
trainable=train_process_model, summary=summary)
self.process_models['ana'] = self.process_model_analytical_layer
self.process_noise_models = {}
process_noise = (param['learn_q'] and not lstm_no_noise and
mode == 'filter')
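        # Analogous to the observation noise above: {learned, analytical}
        # process model x {heteroscedastic, constant} x {diagonal, full}
        # yields the eight process-noise variants below; 'pretrain_process'
        # builds all of them.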
if process_noise and param['learn_process'] and param['hetero_q'] and \
param['diagonal_covar'] or mode == 'pretrain_process':
self.process_noise_hetero_diag_lrn = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=True, diag=True, learned=True,
trainable=train_q, summary=summary)
self.process_noise_models['het_diag_lrn'] = \
self.process_noise_hetero_diag_lrn
if process_noise and param['learn_process'] and param['hetero_q'] and \
not param['diagonal_covar'] or mode == 'pretrain_process':
self.process_noise_hetero_full_lrn = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=True, diag=False, learned=True,
trainable=train_q, summary=summary)
self.process_noise_models['het_full_lrn'] = \
self.process_noise_hetero_full_lrn
if process_noise and param['learn_process'] and \
not param['hetero_q'] and param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_const_diag_lrn = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=False, diag=True, learned=True,
trainable=train_q, summary=summary)
self.process_noise_models['const_diag_lrn'] = \
self.process_noise_const_diag_lrn
if process_noise and param['learn_process'] and \
not param['hetero_q'] and not param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_const_full_lrn = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=False, diag=False,
learned=True, trainable=train_q, summary=summary)
self.process_noise_models['const_full_lrn'] = \
self.process_noise_const_full_lrn
if process_noise and not param['learn_process'] and \
param['hetero_q'] and param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_hetero_diag_ana = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=True, diag=True, learned=False,
trainable=train_q, summary=summary)
self.process_noise_models['het_diag_ana'] = \
self.process_noise_hetero_diag_ana
if process_noise and not param['learn_process'] and \
param['hetero_q'] and not param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_hetero_full_ana = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=True, diag=False,
learned=False, trainable=train_q, summary=summary)
self.process_noise_models['het_full_ana'] = \
self.process_noise_hetero_full_ana
if process_noise and not param['learn_process'] and \
not param['hetero_q'] and param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_const_diag_ana = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=False, diag=True,
learned=False, trainable=train_q, summary=summary)
self.process_noise_models['const_diag_ana'] = \
self.process_noise_const_diag_ana
if process_noise and not param['learn_process'] and \
not param['hetero_q'] and not param['diagonal_covar'] or \
mode == 'pretrain_process':
self.process_noise_const_full_ana = \
ProcessNoise(self.batch_size, self.dim_x, self.q_diag,
self.scale, hetero=False, diag=False,
learned=False, trainable=train_q, summary=summary)
self.process_noise_models['const_full_ana'] = \
self.process_noise_const_full_ana
###########################################################################
# observation models
###########################################################################
def run_sensor_model(self, raw_observations, training):
"""
Process raw observations and return an encoding and
predicted observations z for the filter
"""
images, tip_pos, tip_pos_pix, tip_end_pix, start_glimpse = \
raw_observations
seg_out, pix = self.segmentation_layer(images, training)
z, enc = self.sensor_model_layer([images, tip_pos, tip_pos_pix,
tip_end_pix, start_glimpse] +
seg_out, training)
enc = list(enc) + [pix]
return z, enc
def run_process_model(self, old_state, action, learned, training):
"""
Predict the next state from the old state and the action and returns
the jacobian
"""
if learned:
new_state, F = \
self.process_model_learned_layer([old_state, action, self.ob],
training)
else:
new_state, F = \
self.process_model_analytical_layer([old_state, action,
self.ob], training)
new_state = self.correct_state(new_state, diff=False)
return new_state, F
def get_initial_glimpse(self, image, training):
"""
Process the observations for the initial state and return a segmented
glimpse of the object in its initial position
"""
seg_out, pix = self.segmentation_layer(image, training)
mask, pos, glimpse_rot = seg_out
return glimpse_rot, pix, mask
def initial_from_observed(self, base_state, init_z, base_covar, init_R):
state = tf.concat([init_z[:, :3], base_state[:, 3:5], init_z[:, 3:]],
axis=-1)
covar = \
tf.concat([tf.concat([base_covar[:, :3, :3], init_R[:, :3, :3]],
axis=-1),
base_covar[:, 3:5, :],
tf.concat([base_covar[:, 5:, 5:], init_R[:, 3:, 3:]],
axis=-1)],
axis=1)
return state, covar
###########################################################################
# loss functions
###########################################################################
def get_filter_loss(self, prediction, label, step, training):
"""
Compute the loss for the filtering application - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: boolean tensor, indicates if we compute a loss for
training or testing
Returns:
loss: the total loss for training the filtering application
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
particles, weights, states, covars, init_s, init_c, z, r, q = \
prediction
states = tf.reshape(states, [self.batch_size, -1, self.dim_x])
covars = tf.reshape(covars, [self.batch_size, -1, self.dim_x,
self.dim_x])
seq_label, mv_tr, mv_rot, vis = label
diff = seq_label - states
diff = self.correct_state(diff)
# get the likelihood
if self.param['filter'] == 'pf' and self.param['mixture_likelihood']:
num = particles.get_shape()[2].value
seq_label_tiled = tf.tile(seq_label[:, :, None, :], [1, 1, num, 1])
particle_diff = self.correct_state(seq_label_tiled - particles)
likelihood = self._mixture_likelihood(particle_diff, weights)
else:
likelihood = self._likelihood(diff, covars, reduce_mean=False)
# compensate for scaling
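        # the 1/scale normalization of the state shrinks the covariance
        # determinant by scale^(2*dim_x), which removes dim_x*log(scale)
        # from the Gaussian negative log-likelihood; adding
        # 0.5 * 2 * dim_x * log(scale) restores it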
offset = tf.ones_like(likelihood)*tf.math.log(self.scale)*2*self.dim_x
likelihood += 0.5 * offset
# compute the errors of the predicted states
total_mse, total_dist = self._mse(diff, reduce_mean=False)
total_mse *= self.scale**2
total_dist *= self.scale
# compute component-wise distances
dists = []
for i in range(self.dim_x):
_, dist = self._mse(diff[:, :, i:i+1], reduce_mean=False)
dists += [dist*self.scale]
# position and orientation error
_, dist_tr = self._mse(diff[:, :, 0:2], reduce_mean=False)
_, dist_rot = self._mse(diff[:, :, 2:3], reduce_mean=False)
# compute the error in the predicted observations (only for monitoring)
diff_obs = tf.concat([seq_label[:, :, :3] - z[:, :, 0:3],
seq_label[:, :, 5:] - z[:, :, 3:]], axis=-1)
diff_obs = self.correct_observation_diff(diff_obs)
# rsme
_, dist_ob = self._mse(diff_obs, reduce_mean=False)
dist_ob *= self.scale
# component-wise
dist_obs = []
for i in range(self.dim_z):
_, dist = self._mse(diff_obs[:, :, i:i+1], reduce_mean=False)
dist = dist*self.scale
dist_obs += [dist]
# compute the correlation between predicted observation noise and
# the number of visible object pixels
# this only makes sense for the heteroscedastic noise
diag_r = tf.linalg.diag_part(r)
diag_r = tf.sqrt(tf.abs(diag_r + 1e-5))
diag_r = tf.reshape(diag_r, [-1, self.dim_z])
corr = []
for i in range(self.dim_z):
corr += \
[tfp.stats.correlation(diag_r[:, i:i+1],
tf.reshape(vis, [-1, 1]),
sample_axis=0, event_axis=-1)]
corr_r = tf.add_n(corr)/self.dim_z
# correlation between noise and contact
corr_r_cont = []
for i in range(self.dim_z):
crs = \
tfp.stats.correlation(diag_r[:, i:i+1],
tf.reshape(seq_label[:, :, 9:], [-1, 1]),
sample_axis=0, event_axis=-1)
corr_r_cont += [crs]
corr_r_cont = tf.add_n(corr_r_cont)/self.dim_z
# same for q
diag_q = tf.linalg.diag_part(q)
diag_q = tf.sqrt(tf.abs(diag_q + 1e-5))
diag_q = tf.reshape(diag_q, [-1, self.dim_x])
corr_q = []
for i in range(self.dim_x-1):
cqs = \
tfp.stats.correlation(diag_q[:, i:i+1],
tf.reshape(seq_label[:, :, 9:], [-1, 1]),
sample_axis=0, event_axis=-1)
corr_q += [cqs]
corr_q = tf.add_n(corr_q)/(self.dim_x-1)
# compute the output metric
m_per_tr, deg_per_deg = \
self._output_loss(states[:, :, :3], seq_label[:, :, :3],
mv_tr, mv_rot)
tf.summary.scalar('out/m_per_tr', m_per_tr)
tf.summary.scalar('out/deg_per_deg', deg_per_deg)
tf.summary.scalar('out/tr_total', tf.reduce_mean(mv_tr))
tf.summary.scalar('out/rot_total', tf.reduce_mean(mv_rot))
tf.summary.scalar('out/tr_error', tf.reduce_mean(dist_tr))
tf.summary.scalar('out/rot_error', tf.reduce_mean(dist_rot))
# get the weight decay
wd = []
for la in self.observation_models.values():
wd += la.losses
for la in self.observation_noise_models.values():
wd += la.losses
for la in self.process_models.values():
wd += la.losses
for la in self.process_noise_models.values():
wd += la.losses
wd = tf.add_n(wd)
        # add a bias to all losses that use the likelihood, to offset
        # possible negative values of the likelihood
total_tracking = tf.reduce_mean(total_mse)
total_obs = tf.reduce_mean(dist_ob)
if self.loss == 'like':
total_loss = tf.reduce_mean(likelihood)
elif self.loss == 'error':
total_loss = total_tracking
elif self.loss == 'mixed':
total_loss = (total_tracking + tf.reduce_mean(likelihood)) / 2.
elif self.loss == 'mixed_error':
total_loss = total_tracking * 0.75 + \
tf.reduce_mean(likelihood) * 0.25
elif self.loss == 'mixed_like':
total_loss = total_tracking * 0.25 + \
tf.reduce_mean(likelihood) * 0.75
elif self.loss == 'mixed_curr':
total_loss = tf.cond(tf.less(step, self.epoch_size * 3),
lambda: total_tracking,
lambda: tf.reduce_mean(likelihood))
if self.loss == 'mixed_curr':
total_loss_val = tf.reduce_mean(likelihood)
else:
total_loss_val = total_loss
if self.loss != 'error':
total_loss_val += 1000
total = tf.cond(training,
lambda: total_loss + wd, lambda: total_loss_val)
# add summaries
tf.summary.scalar('loss/total', total)
tf.summary.scalar('loss/wd', wd)
tf.summary.scalar('loss/likelihood', tf.reduce_mean(likelihood))
tf.summary.scalar('loss/tracking', total_tracking)
tf.summary.scalar('loss/observations', total_obs)
tf.summary.scalar('loss/corr_r_vis', tf.squeeze(corr_r))
tf.summary.scalar('loss/corr_r_cont', tf.squeeze(corr_r_cont))
tf.summary.scalar('loss/corr_q_cont', tf.squeeze(corr_q))
for i, name in enumerate(self.x_names):
tf.summary.scalar('tracking_loss/' + name,
tf.reduce_mean(dists[i]))
for i, name in enumerate(self.z_names):
tf.summary.scalar('observation_loss/' + name,
tf.reduce_mean(dist_obs[i]))
return total, [likelihood, total_dist, dist_ob, total_mse,
dist_tr, dist_rot, m_per_tr, deg_per_deg, vis,
seq_label[:, :, 9], diag_r, diag_q, wd] +\
dists, ['likelihood', 'dist', 'dist_obs', 'mse', 'dist_tr',
'dist_rot', 'm_tr', 'deg_rot', 'vis', 'cont', 'r_pred',
'q_pred', 'wd'] + \
self.x_names
def _output_loss(self, pred, label, mv_tr, mv_rot):
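        # endpoint error of the final step, normalized by how far the object
        # actually moved: meters of error per meter translated and degrees of
        # error per degree rotated (the tf.where guards against division by
        # zero for sequences without motion)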
endpoint_error = self._compute_sq_distance(pred[:, -1, 0:2],
label[:, -1, 0:2])
endpoint_error_rot = self._compute_sq_distance(pred[:, -1, 2:3],
label[:, -1, 2:3], True)
m_per_tr = tf.where(tf.greater(mv_tr, 0),
endpoint_error**0.5/mv_tr, endpoint_error)
deg_per_deg = tf.where(tf.greater(mv_rot, 0),
endpoint_error_rot**0.5/mv_rot,
endpoint_error_rot)
return tf.reduce_mean(m_per_tr), tf.reduce_mean(deg_per_deg)
def _compute_sq_distance(self, pred, label, rotation=False):
diff = pred - label
if rotation:
diff = self._adapt_orientation(diff, self.ob, 1)
diff = tf.square(diff)
diff = tf.reduce_sum(diff, axis=-1)
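        # take the sqrt only where the sum of squares is strictly positive;
        # this avoids the infinite gradient of sqrt at exactly zero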
diff = tf.where(tf.greater(diff, 0), tf.sqrt(diff), diff)
return diff
def get_observation_loss(self, prediction, labels, step, training):
"""
Compute the loss for the observation functions - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: are we doing training or validation
Returns:
loss: the total loss for training the observation preprocessing
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
z, pix_pred, seg_pred, initial_pix_pred, initial_seg_pred, \
R_const_diag, R_const_tri, R_het_diag, R_het_tri, \
like_good, like_bad = prediction
label, pix_pos, initial_pix_pos, seg, initial_seg, vis = labels
diff = self.correct_observation_diff(label - z)
likelihood_const_diag = self._likelihood(tf.stop_gradient(diff),
R_const_diag,
reduce_mean=False)
likelihood_const_tri = self._likelihood(tf.stop_gradient(diff),
R_const_tri,
reduce_mean=False)
likelihood_het_diag = self._likelihood(diff, R_het_diag,
reduce_mean=False)
likelihood_het_tri = self._likelihood(diff, R_het_tri,
reduce_mean=False)
likelihood = (likelihood_const_diag + likelihood_const_tri +
likelihood_het_diag + likelihood_het_tri) / 4.
# compute the correlation between predicted observation noise and
# the number of visible object pixels
# this only makes sense for the heteroscedastic noise
diag_r_het_diag = tf.linalg.diag_part(R_het_diag)
diag_r_het_diag = tf.sqrt(tf.abs(diag_r_het_diag + 1e-5))
diag_r_het_diag = tf.reshape(diag_r_het_diag, [-1, self.dim_z])
diag_r_het_tri = tf.linalg.diag_part(R_het_tri)
diag_r_het_tri = tf.sqrt(tf.abs(diag_r_het_tri + 1e-5))
diag_r_het_tri = tf.reshape(diag_r_het_tri, [-1, self.dim_z])
corr_diag = []
corr_full = []
for i in range(self.dim_z):
corr_diag += \
[tfp.stats.correlation(diag_r_het_diag[:, i:i+1],
tf.reshape(vis, [-1, 1]),
sample_axis=0, event_axis=-1)]
corr_full += \
[tfp.stats.correlation(diag_r_het_tri[:, i:i+1],
tf.reshape(vis, [-1, 1]),
sample_axis=0, event_axis=-1)]
corr_r_diag = tf.add_n(corr_diag)/self.dim_z
corr_r_full = tf.add_n(corr_full)/self.dim_z
# compute the errors of the predicted observations
dist_obs = []
mses = []
cont = label[:, 7:8]
for i in range(self.dim_z):
mse, dist = self._mse(diff[:, i:i+1], reduce_mean=False)
# undo the overall scaling for dist and mse, but only undo the
# component-wise scaling for dist
scale_dist = self.scale
scale_mse = self.scale**2
# mask out non-contact cases for contact point and normal
if i in [3, 4, 5, 6]:
dist_obs += [tf.reduce_mean(dist*scale_dist*cont)]
mses += [tf.reduce_sum(mse*scale_mse*cont)]
else:
dist_obs += [tf.reduce_mean(dist*scale_dist)]
mses += [tf.reduce_sum(mse*scale_mse)]
mse = tf.add_n(mses)
        # segmentation error
height = seg.get_shape()[1]
width = seg.get_shape()[2]
seg_pred = tf.image.resize(seg_pred, [height, width])
initial_seg_pred = tf.image.resize(initial_seg_pred, [height, width])
seg_loss = tf.nn.sigmoid_cross_entropy_with_logits(
logits=tf.squeeze(seg_pred, axis=-1),
labels=tf.squeeze(seg, axis=-1))
seg_loss = tf.reduce_mean(tf.reduce_sum(seg_loss, axis=[1, 2]))
seg_loss2 = tf.nn.sigmoid_cross_entropy_with_logits(
logits=tf.squeeze(initial_seg_pred, axis=-1),
labels=tf.squeeze(initial_seg, axis=-1))
seg_loss += tf.reduce_mean(tf.reduce_sum(seg_loss2, axis=[1, 2]))
# get the pixel prediction error for the position
pix_diff = pix_pred - pix_pos
pix_mse, pix_dist = self._mse(pix_diff, reduce_mean=False)
pix_mse = tf.reduce_mean(pix_mse)
_, dist_3d = self._mse(diff[:, :2], reduce_mean=False)
initial_pix_diff = initial_pix_pred - initial_pix_pos
initial_pix_mse, initial_pix_dist = self._mse(initial_pix_diff,
reduce_mean=False)
initial_pix_mse = tf.reduce_mean(initial_pix_mse)
# compute the angle-loss of the normals
norm_pred = z[:, 5:7]
norm_label = label[:, 5:7]
normal_ang = self.normal_loss(norm_pred, norm_label)
# compute the contact loss
contact_loss, ce = self.contact_loss(z[:, 7:8], label[:, 7:8])
# compute the loss for the learned likelihood model of the pf
good_loss = tf.reduce_mean(-tf.math.log(tf.maximum(like_good, 1e-6)))
bad_loss = \
tf.reduce_mean(-tf.math.log(tf.maximum(1.0 - like_bad, 1e-6)))
like_loss = good_loss + bad_loss
        # add a penalty term for predicted rotations greater than 180 degrees
rot_pred = tf.abs(z[:, 2])
rot_penalty = tf.where(tf.greater(rot_pred, 180),
tf.square(rot_pred - 180),
tf.zeros_like(rot_pred))
rot_penalty = tf.reduce_mean(rot_penalty)
wd = []
for la in self.observation_models.values():
wd += la.losses
for la in self.observation_noise_models.values():
wd += la.losses
wd = tf.add_n(wd)
# start by training only the localization for two epochs
total_train = \
tf.cond(tf.less(step, self.epoch_size*2),
lambda: 10 * (pix_mse + initial_pix_mse) + seg_loss,
lambda: (10 * tf.add_n(mses) +
10 * (pix_mse + initial_pix_mse) +
100 * tf.reduce_mean(normal_ang) +
100 * tf.reduce_mean(contact_loss) +
1e-4 * tf.reduce_mean(likelihood) +
1e-3 * like_loss +
rot_penalty + 0.01 * seg_loss + 0.01 * wd))
total_train = \
tf.cond(tf.less(step, self.epoch_size*5),
lambda: total_train,
lambda: (10 * tf.add_n(mses) +
10 * (pix_mse + initial_pix_mse) +
100 * tf.reduce_mean(normal_ang) +
100 * tf.reduce_mean(contact_loss) +
0.1 * (tf.reduce_mean(likelihood) + like_loss) +
rot_penalty + 0.001 * seg_loss + wd))
total_val = 10 * tf.add_n(mses) + 10 * tf.reduce_mean(normal_ang) + \
100 * tf.reduce_mean(contact_loss) + \
tf.reduce_mean(likelihood) + like_loss + 100
total = tf.cond(training, lambda: total_train, lambda: total_val)
# add summaries
tf.summary.scalar('loss/total', total)
tf.summary.scalar('loss/wd', wd)
tf.summary.scalar('loss/likelihood_const_diag',
tf.reduce_mean(likelihood_const_diag))
tf.summary.scalar('loss/likelihood_const_tri',
tf.reduce_mean(likelihood_const_tri))
tf.summary.scalar('loss/likelihood_het_diag',
tf.reduce_mean(likelihood_het_diag))
tf.summary.scalar('loss/likelihood_het_tri',
tf.reduce_mean(likelihood_het_tri))
for i, name in enumerate(self.z_names):
tf.summary.scalar('label/' + name, label[0, i])
for i, name in enumerate(self.z_names):
tf.summary.scalar('observation_loss/' + name,
tf.reduce_mean(dist_obs[i]))
for i, name in enumerate(self.z_names):
tf.summary.scalar('noise_loss/diag_' + name,
tf.reduce_mean(corr_diag[i]))
tf.summary.scalar('noise_loss/full_' + name,
tf.reduce_mean(corr_full[i]))
tf.summary.scalar('noise_loss/corr_diag', tf.reduce_mean(corr_r_diag))
tf.summary.scalar('noise_loss/corr_full', tf.reduce_mean(corr_r_full))
tf.summary.scalar('observation_loss/normal_ang',
tf.reduce_mean(normal_ang))
tf.summary.scalar('observation_loss/mean_vis',
tf.reduce_mean(vis))
tf.summary.scalar('observation_loss/dist_pix',
tf.reduce_mean(pix_dist))
tf.summary.scalar('observation_loss/dist_3d',
tf.reduce_mean(dist_3d))
tf.summary.scalar('observation_loss/contact_cross',
tf.reduce_mean(ce))
tf.summary.scalar('observation_loss/rot_penalty', rot_penalty)
tf.summary.scalar('loss/like_good', good_loss)
tf.summary.scalar('loss/like_bad', bad_loss)
tf.summary.scalar('loss/like_loss', like_loss)
tf.summary.scalar('loss/segmentation', seg_loss)
tf.summary.image('loss/seg_label', seg)
tf.summary.image('loss/seg_pred', seg_pred)
tf.summary.image('loss/initial_seg_label', initial_seg)
        tf.summary.image('loss/initial_seg_pred', initial_seg_pred)
return total, [likelihood_const_diag, likelihood_const_tri,
likelihood_het_diag, likelihood_het_tri,
mse, like_loss, tf.reduce_mean(normal_ang),
tf.reduce_mean(ce), tf.reshape(vis, [-1, 1]),
diag_r_het_diag, diag_r_het_tri, wd] + dist_obs, \
['likelihood_const_diag', 'likelihood_const_tri',
'likelihood_het_diag', 'likelihood_het_tri', 'mse', 'like',
'normal_ang', 'contact_cross', 'vis', 'r_het_diag',
'r_het_tri', 'wd'] + self.z_names
def get_process_loss(self, prediction, labels, step, training):
"""
Compute the loss for the process functions - defined in the context
Args:
prediction: list of predicted tensors
label: list of label tensors
step: training step
training: boolean tensor, indicates if we compute a loss for
training or testing
Returns:
loss: the total loss for training the process model
metrics: additional metrics we might want to log for evaluation
metric-names: the names for those metrics
"""
state, Q_const_diag, Q_const_tri, Q_het_diag, Q_het_tri, \
state_ana, Q_const_diag_ana, Q_const_tri_ana, Q_het_diag_ana, \
Q_het_tri_ana = prediction
label, start = labels
diff = label - state
diff = self.correct_state(diff)
likelihood_const_diag = self._likelihood(diff, Q_const_diag,
reduce_mean=False)
likelihood_const_tri = self._likelihood(diff, Q_const_tri,
reduce_mean=False)
likelihood_het_diag = self._likelihood(diff, Q_het_diag,
reduce_mean=False)
likelihood_het_tri = self._likelihood(diff, Q_het_tri,
reduce_mean=False)
likelihood = (likelihood_const_diag + likelihood_const_tri +
likelihood_het_diag + likelihood_het_tri) / 4.
diff_ana = label - state_ana
diff_ana = self.correct_state(diff_ana)
likelihood_const_diag_ana = self._likelihood(diff_ana,
Q_const_diag_ana,
reduce_mean=False)
likelihood_const_tri_ana = self._likelihood(diff_ana, Q_const_tri_ana,
reduce_mean=False)
likelihood_het_diag_ana = self._likelihood(diff_ana, Q_het_diag_ana,
reduce_mean=False)
likelihood_het_tri_ana = self._likelihood(diff_ana, Q_het_tri_ana,
reduce_mean=False)
likelihood_ana = \
(likelihood_const_diag_ana + likelihood_const_tri_ana +
likelihood_het_diag_ana + likelihood_het_tri_ana) / 4.
# compute the errors of the predicted states from the learned model
mses = []
dists = []
for i in range(self.dim_x):
mse, dist = self._mse(diff[:, i:i+1], reduce_mean=False)
# undo the overall scaling for dist and mse
mses += [tf.reduce_mean(mse*self.scale**2)]
dists += [tf.reduce_mean(dist*self.scale)]
mse = tf.add_n(mses)
# compute the errors of the predicted states from the analytical model
dists_ana = []
for i in range(self.dim_x):
_, dist = self._mse(diff_ana[:, i:i+1], reduce_mean=False)
dists_ana += [tf.reduce_mean(dist*self.scale)]
wd = []
for la in self.process_models.values():
wd += la.losses
for la in self.process_noise_models.values():
wd += la.losses
wd = tf.add_n(wd)
total_loss = \
tf.cond(tf.less(step, self.epoch_size*5),
lambda: (1000 * tf.reduce_mean(mse) +
1e-5 * tf.reduce_mean(likelihood) +
1e-5 * tf.reduce_mean(likelihood_ana)),
lambda: (tf.reduce_mean(likelihood) +
tf.reduce_mean(likelihood_ana) +
1000 * tf.reduce_mean(mse)))
total = \
tf.cond(training,
lambda: total_loss + wd,
lambda: (tf.reduce_mean(likelihood) + 100 +
tf.reduce_mean(likelihood_ana) +
10 * tf.reduce_mean(mse)))
# add summaries
tf.summary.scalar('loss/total', total)
tf.summary.scalar('loss/wd', wd)
tf.summary.scalar('loss/likelihood_const_diag',
tf.reduce_mean(likelihood_const_diag))
tf.summary.scalar('loss/likelihood_const_tri',
tf.reduce_mean(likelihood_const_tri))
tf.summary.scalar('loss/likelihood_het_diag',
tf.reduce_mean(likelihood_het_diag))
tf.summary.scalar('loss/likelihood_het_tri',
tf.reduce_mean(likelihood_het_tri))
tf.summary.scalar('loss/likelihood_const_diag_ana',
tf.reduce_mean(likelihood_const_diag_ana))
tf.summary.scalar('loss/likelihood_const_tri_ana',
tf.reduce_mean(likelihood_const_tri_ana))
tf.summary.scalar('loss/likelihood_het_diag_ana',
tf.reduce_mean(likelihood_het_diag_ana))
tf.summary.scalar('loss/likelihood_het_tri_ana',
tf.reduce_mean(likelihood_het_tri_ana))
tf.summary.scalar('loss/tracking', tf.reduce_mean(mse))
for i, name in enumerate(self.x_names):
tf.summary.scalar('tracking_loss/' + name,
tf.reduce_mean(dists[i]))
tf.summary.scalar('tracking_loss/' + name + '_ana',
tf.reduce_mean(dists_ana[i]))
for i in range(min(self.batch_size, 1)):
tf.summary.scalar('label/x_' + str(i), label[i, 0])
tf.summary.scalar('label/y_' + str(i), label[i, 1])
tf.summary.scalar('label/theta_' + str(i), label[i, 2])
tf.summary.scalar('label/l_' + str(i), label[i, 3])
tf.summary.scalar('label/mu_' + str(i), label[i, 4])
tf.summary.scalar('label/rx_' + str(i), label[i, 5])
tf.summary.scalar('label/ry_' + str(i), label[i, 6])
tf.summary.scalar('label/nx_' + str(i), label[i, 7])
tf.summary.scalar('label/ny_' + str(i), label[i, 8])
tf.summary.scalar('label/s_' + str(i), label[i, 9])
tf.summary.scalar('start/x_' + str(i), start[i, 0])
tf.summary.scalar('start/y_' + str(i), start[i, 1])
tf.summary.scalar('start/theta_' + str(i), start[i, 2])
tf.summary.scalar('start/l_' + str(i), start[i, 3])
tf.summary.scalar('start/mu_' + str(i), start[i, 4])
tf.summary.scalar('start/rx_' + str(i), start[i, 5])
tf.summary.scalar('start/ry_' + str(i), start[i, 6])
tf.summary.scalar('start/nx_' + str(i), start[i, 7])
tf.summary.scalar('start/ny_' + str(i), start[i, 8])
tf.summary.scalar('start/s_' + str(i), start[i, 9])
tf.summary.scalar('pred/x_ana_' + str(i), state_ana[i, 0])
tf.summary.scalar('pred/y_ana_' + str(i), state_ana[i, 1])
tf.summary.scalar('pred/theta_ana_' + str(i), state_ana[i, 2])
tf.summary.scalar('pred/l_ana_' + str(i), state_ana[i, 3])
tf.summary.scalar('pred/mu_ana_' + str(i), state_ana[i, 4])
tf.summary.scalar('pred/rx_ana_' + str(i), state_ana[i, 5])
tf.summary.scalar('pred/ry_ana_' + str(i), state_ana[i, 6])
tf.summary.scalar('pred/nx_ana_' + str(i), state_ana[i, 7])
tf.summary.scalar('pred/ny_ana_' + str(i), state_ana[i, 8])
tf.summary.scalar('pred/s_ana_' + str(i), state_ana[i, 9])
tf.summary.scalar('pred/x_' + str(i), state[i, 0])
tf.summary.scalar('pred/y_' + str(i), state[i, 1])
tf.summary.scalar('pred/theta_' + str(i), state[i, 2])
tf.summary.scalar('pred/l_' + str(i), state[i, 3])
tf.summary.scalar('pred/mu_' + str(i), state[i, 4])
tf.summary.scalar('pred/rx_' + str(i), state[i, 5])
tf.summary.scalar('pred/ry_' + str(i), state[i, 6])
tf.summary.scalar('pred/nx_' + str(i), state[i, 7])
tf.summary.scalar('pred/ny_' + str(i), state[i, 8])
tf.summary.scalar('pred/s_' + str(i), state[i, 9])
return total, \
[likelihood_const_diag, likelihood_const_tri,
likelihood_het_diag, likelihood_het_tri,
likelihood_const_diag_ana, likelihood_const_tri_ana,
likelihood_het_diag_ana, likelihood_het_tri_ana, wd] + dists + \
dists_ana, \
['likelihood_const_diag', 'likelihood_const_tri',
'likelihood_het_diag', 'likelihood_het_tri',
'likelihood_const_diag_ana', 'likelihood_const_tri_ana',
'likelihood_het_diag_ana', 'likelihood_het_tri_ana', 'wd'] + \
self.x_names + list(map(lambda x: x + '_ana', self.x_names))
def normal_loss(self, pred, label, name=""):
# normalize both
        pred_norm = tf.norm(pred, axis=-1, keepdims=True)
        label_norm = tf.norm(label, axis=-1, keepdims=True)
pred = tf.nn.l2_normalize(pred, -1)
label = tf.nn.l2_normalize(label, -1)
# calculate the angles between them
if len(pred.get_shape().as_list()) == 3:
prod = tf.matmul(tf.reshape(pred, [self.batch_size, -1, 1, 2]),
tf.reshape(label, [self.batch_size, -1, 2, 1]))
prod = tf.clip_by_value(prod, -0.999999999, 0.999999999)
prod = tf.acos(tf.reshape(prod, [self.batch_size, -1, 1]))
else:
prod = tf.matmul(tf.reshape(pred, [self.batch_size, 1, 2]),
tf.reshape(label, [self.batch_size, 2, 1]))
prod = tf.clip_by_value(prod, -0.999999999, 0.999999999)
prod = tf.acos(tf.reshape(prod, [self.batch_size, 1]))
# mask out invalid values and non-contact cases
greater = tf.logical_and(tf.greater(pred_norm, 1e-6),
tf.greater(label_norm, 1e-6))
ang_mask = tf.logical_and(greater, tf.math.is_finite(prod))
ang = tf.where(ang_mask, tf.abs(prod), tf.zeros_like(prod))
# correct values over 180 deg.
ang = tf.where(tf.greater(tf.abs(ang), np.pi),
2*np.pi - tf.abs(ang), tf.abs(ang))*180./np.pi
return ang
def contact_loss(self, pred, label, name=""):
# calculate the error
label = tf.reshape(label, [self.batch_size, -1, 1])
pred = tf.reshape(pred, [self.batch_size, -1, 1])
# limit pred to [0..1]
pred = tf.clip_by_value(pred, 0, 1.)
        # binary cross-entropy on the contact indicator; note that loss and
        # ce below are computed identically (no extra down-weighting of
        # in-contact cases is applied here)
loss = (1 - label) * -tf.math.log(tf.maximum(1 - pred, 1e-7)) + \
label * -tf.math.log(tf.maximum(pred, 1e-7))
ce = (1 - label) * -tf.math.log(tf.maximum(1 - pred, 1e-7)) + \
label * -tf.math.log(tf.maximum(pred, 1e-7))
return loss, ce
###########################################################################
# keeping the state correct
###########################################################################
def correct_state(self, state, diff=True):
"""
Correct the state to make sure theta is in the right interval
Args:
state: The current state
Returns:
state: The corrected state
"""
shape = state.get_shape().as_list()
if len(shape) > 2:
state = tf.reshape(state, [-1, self.dim_x])
sc = self.scale
if diff:
state = \
tf.concat([state[:, :2],
self._adapt_orientation(state[:, 2:3], self.ob, sc),
state[:, 3:]], axis=-1)
else:
state = \
tf.concat([state[:, :2],
self._adapt_orientation(state[:, 2:3], self.ob, sc),
self._adapt_fr(state[:, 3:4]),
self._adapt_m(state[:, 4:5]),
state[:, 5:7],
self._adapt_n(state[:, 7:9], state[:, 5:7],
state[:, 0:2]),
self._adapt_s(state[:, 9:])], axis=-1)
if len(shape) > 2:
state = tf.reshape(state, shape[:-1] + [self.dim_x])
return state
def correct_observation_diff(self, diff):
"""
Correct a difference in observations to account for angle intervals
Args:
state: The difference
Returns:
state: The corrected difference
"""
shape = diff.get_shape().as_list()
if len(shape) > 2:
diff = tf.reshape(diff, [-1, self.dim_z])
sc = 1 * self.scale
diff = tf.concat([diff[:, :2],
self._adapt_orientation(diff[:, 2:3], self.ob, sc),
diff[:, 3:]], axis=-1)
if len(shape) > 2:
diff = tf.reshape(diff, shape[:-1] + [self.dim_z])
return diff
def weighted_state_mean_with_angles(self, points, weights):
ps = tf.concat([points[:, :, :2],
tf.sin(points[:, :, 2:3]*self.scale*np.pi/180.0),
tf.cos(points[:, :, 2:3]*self.scale*np.pi/180.0),
points[:, :, 3:]], axis=-1)
mult = tf.multiply(ps, weights)
mean = tf.reduce_sum(mult, axis=1)
ang1 = tf.math.atan2(mean[:, 2:3], mean[:, 3:4])*180.0/np.pi
out = tf.concat([mean[:, :2], ang1/self.scale, mean[:, 4:]], axis=-1)
return out
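    # Both weighted means embed the angle into (sin, cos) before averaging
    # and recover it with atan2, i.e. a circular mean. Worked example:
    # naively averaging headings of +170 deg and -170 deg gives 0 deg,
    # while the circular mean correctly yields +/-180 deg.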
def weighted_observation_mean_with_angles(self, points, weights, axis=1):
ps = tf.concat([points[:, :, :2],
tf.sin(points[:, :, 2:3]*self.scale*np.pi/180.0),
tf.cos(points[:, :, 2:3]*self.scale*np.pi/180.0),
points[:, :, 3:]], axis=-1)
mult = tf.multiply(ps, weights)
mean = tf.reduce_sum(mult, axis=axis)
ang = tf.math.atan2(mean[:, 2:3], mean[:, 3:4])*180.0/np.pi
out = tf.concat([mean[:, :2], ang/self.scale, mean[:, 4:]], axis=-1)
return out
def _adapt_fr(self, fr):
# prevent l from getting too small or too big
fr = tf.clip_by_value(fr, 0.1/self.scale, 5e3/self.scale)
return fr
def _adapt_m(self, m):
# prevent m from getting negative or too large
m = tf.clip_by_value(m, 0.1/self.scale, 90./self.scale)
return m
def _adapt_s(self, s):
# keep the contact indicator between 0 and 1
s = tf.clip_by_value(s, 0., 1.)
return s
def _adapt_n(self, n, r, o):
# normalize -- not good at all!
# n_norm = tf.linalg.norm(n, axis=-1, keepdims=True)
# n = tf.where(tf.greater(tf.squeeze(n_norm), 1e-6), n/n_norm, n)
# # make sure the normal points towards the object
# dir_center = o[:, :2] - r[:, :2]
# dir_center_norm = tf.linalg.norm(dir_center, axis=-1, keepdims=True)
# dir_center = tf.where(tf.greater(tf.squeeze(dir_center_norm), 0.),
# dir_center/dir_center_norm, dir_center)
# prod = tf.matmul(tf.reshape(dir_center, [bs, 1, 2]),
# tf.reshape(n, [bs, 2, 1]))
# ang = tf.acos(tf.reshape(prod, [bs]))
# # correct values over 180 deg.
# ang = tf.where(tf.greater(tf.abs(ang), np.pi),
# 2*np.pi - tf.abs(ang), tf.abs(ang))*180./np.pi
# # if the angle is greater than 90 degree, we need to flip the
# # normal
# n = tf.where(tf.greater(ang, np.pi/2.), n, -1 * n)
return n
def _adapt_orientation(self, rot, ob, sc):
rot = rot * sc
# in most cases, the maximum rotation range is 180deg, but some have
# more or fewer symmetries
# we first apply a modulo operation to make sure that no value is
# larger than the maximum rotation range. Then we have to deal with the
# periodicity of the interval
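        # worked example: for 'rect1' (90 deg symmetry) a measured rotation
        # of 80 deg becomes mod(80, 90) = 80 > 45, so 80 - 90 = -10 deg,
        # the equivalent angle closest to zero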
rot_max = tf.ones_like(rot) * 180
ob = tf.squeeze(ob)
ob = tf.strings.regex_replace(ob, "\000", "")
ob = tf.strings.regex_replace(ob, "\00", "")
if len(ob.get_shape()) < 1:
rot_max = \
tf.case({tf.equal(ob, 'ellip1'): lambda: tf.zeros_like(rot),
tf.equal(ob, 'rect1'): lambda: tf.ones_like(rot)*90.,
tf.equal(ob, 'tri1'): lambda: tf.ones_like(rot)*360.,
tf.equal(ob, 'tri2'): lambda: tf.ones_like(rot)*360.,
tf.equal(ob, 'tri3'): lambda: tf.ones_like(rot)*360.,
tf.equal(ob, 'hex'): lambda: tf.ones_like(rot)*60.},
default=lambda: rot_max, exclusive=True)
rot_new = \
tf.cond(tf.equal(ob, 'ellip1'), lambda: tf.zeros_like(rot),
lambda: tf.math.mod(tf.abs(rot), rot_max)*tf.sign(rot))
            # now make sure that the measured rotation is the smallest
            # possible value in the interval [-rot_max/2, rot_max/2]
rot_add = tf.where(tf.greater(rot_new, rot_max/2.),
rot_new - rot_max, rot_new)
rot_add = tf.where(tf.less(rot_add, -rot_max/2.),
rot_add + rot_max, rot_add)
else:
if ob.get_shape()[0].value < rot.get_shape()[0].value:
mult = rot.get_shape()[0].value // ob.get_shape()[0].value
ob = tf.reshape(ob, [-1, 1])
ob = tf.reshape(tf.tile(ob, [1, mult]), [-1])
rot_max = tf.where(tf.equal(ob, 'ellip1'), tf.zeros_like(rot),
rot_max)
rot_max = tf.where(tf.equal(ob, 'rect1'), tf.ones_like(rot)*90,
rot_max)
rot_max = tf.where(tf.equal(ob, 'tri1'), tf.ones_like(rot)*360,
rot_max)
rot_max = tf.where(tf.equal(ob, 'tri2'), tf.ones_like(rot)*360,
rot_max)
rot_max = tf.where(tf.equal(ob, 'tri3'), tf.ones_like(rot)*360,
rot_max)
rot_max = tf.where(tf.equal(ob, 'hex'), tf.ones_like(rot)*60,
rot_max)
rot_new = tf.where(tf.equal(ob, 'ellip1'), tf.zeros_like(rot),
tf.math.mod(tf.abs(rot), rot_max)*tf.sign(rot))
            # now make sure that the measured rotation is the smallest
            # possible value in the interval [-rot_max/2, rot_max/2]
rot_add = tf.where(tf.greater(rot_new, rot_max/2.),
rot_new - rot_max, rot_new)
rot_add = tf.where(tf.less(rot_add, -rot_max/2.),
rot_add + rot_max, rot_add)
rot_add /= sc
return rot_add
###########################################################################
# data loading
###########################################################################
def tf_record_map(self, path, name, dataset, data_mode, train_mode,
num_threads=5):
"""
Defines how to read in the data from a tf record
"""
keys = ['pos', 'object', 'contact_point', 'normal', 'contact',
'tip', 'friction', 'coord', 'image', 'material', 'pix_tip',
'pix_pos', 'segmentation']
record_meta = tfr.RecordMeta.load(path, name + '_' + data_mode + '_')
if train_mode == 'filter':
dataset = dataset.map(
lambda x: self._parse_function(x, keys, record_meta,
data_mode),
num_parallel_calls=num_threads)
elif train_mode == 'pretrain_obs':
dataset = dataset.map(
lambda x: self._parse_function_obs(x, keys, record_meta,
data_mode),
num_parallel_calls=num_threads)
elif train_mode == 'pretrain_process':
dataset = dataset.map(
lambda x: self._parse_function_process(x, keys, record_meta,
data_mode),
num_parallel_calls=num_threads)
else:
self.log.error('unknown training mode: ' + train_mode)
dataset = \
dataset.flat_map(lambda x, y:
tf.data.Dataset.from_tensor_slices((x, y)))
return dataset
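    # Usage sketch (hypothetical paths): this map is meant to be applied to
    # a TFRecord dataset, e.g.
    #   ds = tf.data.TFRecordDataset(files)
    #   ds = context.tf_record_map(record_dir, name, ds, 'train', 'filter')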
def _parse_example(self, example_proto, keys, record_meta):
features = {}
for key in keys:
record_meta.add_tf_feature(key, features)
parsed_features = tf.io.parse_single_example(example_proto,
features)
for key in keys:
features[key] = record_meta.reshape_and_cast(key,
parsed_features)
return features
def _parse_function_obs(self, example_proto, keys, record_meta, data_mode):
features = self._parse_example(example_proto, keys, record_meta)
pose = features['pos']
ori = self._adapt_orientation(pose[:, 3:]*(180.0/np.pi),
features['object'], 1)
pose = tf.concat([pose[:, 0:1]*1000/self.scale,
pose[:, 1:2]*1000/self.scale,
ori/self.scale], axis=1)
n = tf.squeeze(features['normal'])/self.scale
con = tf.cast(features['contact'], tf.float32)
con = tf.reshape(con, [-1, 1])/self.scale
tips = features['tip']
cp = features['contact_point'][:, :2]*1000
con_norm = tf.linalg.norm(cp, axis=-1)
cp = tf.where(tf.less(con_norm, 1e-6),
tips[:, :2]*1000, cp)/self.scale
pix_tip = features['pix_tip']
im = features['image']
coord = features['coord']
mask = features['segmentation']
mask = tf.cast(tf.where(tf.greater(mask, 2.5), tf.ones_like(mask),
tf.zeros_like(mask)), tf.float32)
vis = tf.reduce_sum(mask, axis=[1, 2, 3])
seq_len = im.get_shape()[0].value
im = tf.concat([im, coord], axis=-1)
pix = features['pix_pos'][:, :2]
ob = tf.reshape(features['object'], [1])
mat = tf.reshape(features['material'], [1])
        # sanity check for reprojection between pixels and 3d
# # load a plane image for reprojecting
# path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# path = os.path.join(path, 'resources', 'plane_image.npy')
# print('loading plane image from: ', path)
# plane_depth = tf.convert_to_tensor(np.load(path))[none, :, :, none]
# pix_pos = features['pix_pos'][1:2]
# pos_3d = features['pos'][1:2, :3]
# projected1 = utils._to_3d(pix_pos, im[1:2, :, :, -1:])
# projected2 = utils._to_3d(pix_pos, plane_depth)
# pix_pro = utils._to_2d(pos_3d)
# cp = tf.print(cp, [pix_pos, pix_pro],
# summarize=1000, message='pix, pix_pro\n')
# cp = tf.print(cp, [pos_3d, projected1, projected2],
# summarize=1000, message='3d, pro_d, pro_plane \n')
# we use several steps of the sequence
if data_mode == 'train':
start_inds = np.random.randint(2, seq_len-2, 5)
self.train_multiplier = len(start_inds)
else:
# use every eighth data point
start_inds = np.arange(2, seq_len-2, 8)
num = len(start_inds)
# prepare the lists of output tensors
viss = []
ims = []
start_ims = []
start_ts = []
tes = []
labels = []
good_zs = []
bad_zs = []
pixs = []
pixts = []
pixte = []
start_pixs = []
segs = []
start_segs = []
for si in start_inds:
start_ts += [tips[1]]
start_ims += [im[1]]
start_pixs += [pix[1]]
start_segs += [mask[1]]
viss += [vis[si]]
segs += [mask[si]]
ims += [im[si]]
pixs += [pix[si]]
pixts += [pix_tip[si]]
pixte += [pix_tip[si+1]]
tes += [tips[si]]
relative_rot = \
self._adapt_orientation(pose[si, 2:3] - pose[1, 2:3], ob,
self.scale)
label = tf.concat([pose[si, :2], relative_rot, cp[si], n[si],
con[si]], axis=0)
labels += [label]
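            # build training targets for the learned likelihood model:
            # small perturbations of the label ('good') should receive a
            # high likelihood, large offsets ('bad') a low one (see
            # good_loss / bad_loss in get_observation_loss)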
good_noise = np.random.normal(loc=0, scale=1e-1, size=(24, 8))
good_noise[0, :] = 0
bad_noise = np.random.normal(loc=10, scale=5, size=(24, 8))
bad_noise[12:] = np.random.normal(loc=-10, scale=5,
size=(12, 8))
# downscale noise for normal and contact
good_noise[:, 5:] /= 10
bad_noise[:, 5:] /= 10
# upscale for pos and or
bad_noise[:, :2] *= 10
bad_noise[:, 2:3] *= 2
good_noise[:, :2] *= 10
good_noise[:, 2:3] *= 2
# adapt to scaling
bad_noise /= self.scale
good_noise /= self.scale
bad_zs += [tf.tile(label[None, :], [24, 1]) + bad_noise]
good_zs += [tf.tile(label[None, :], [24, 1]) + good_noise]
ims = tf.stack(ims)
start_ims = tf.stack(start_ims)
start_ts = tf.stack(start_ts)
tes = tf.stack(tes)
pixts = tf.stack(pixts)
pixte = tf.stack(pixte)
ob = tf.tile(ob, [num])
mat = tf.tile(mat, [num])
values = [(ims, tes, pixts, pixte), tf.stack(labels),
tf.stack(good_zs),
tf.stack(bad_zs), (start_ims, start_ts), (ob, mat)]
labels = [tf.stack(labels), tf.stack(pixs), tf.stack(start_pixs),
tf.stack(segs), tf.stack(start_segs), tf.stack(viss)]
return tuple(values), tuple(labels)
def _parse_function_process(self, example_proto, keys, record_meta,
data_mode):
features = self._parse_example(example_proto, keys, record_meta)
pose = features['pos']
ori = self._adapt_orientation(pose[:, 3:]*180./np.pi,
features['object'], 1)
pose = tf.concat([pose[:, 0:1]*1000, pose[:, 1:2]*1000, ori],
axis=1)/self.scale
n = tf.squeeze(features['normal'])/self.scale
con = tf.cast(features['contact'], tf.float32)
con = tf.reshape(con, [-1, 1])/self.scale
tips = features['tip']
cp = features['contact_point'][:, :2]
con_norm = tf.linalg.norm(cp, axis=-1)
cp = tf.where(tf.less(con_norm, 1e-6),
tips[:, :2], cp)*1000/self.scale
friction = \
tf.square(tf.reshape(features['friction'], [1]) * 1000.)
friction = friction/(100*self.scale)
mu = tf.atan(tf.ones([1], dtype=tf.float32) * 0.25)*180./np.pi
mu = mu/self.scale
ob = tf.reshape(features['object'], [1])
mat = tf.reshape(features['material'], [1])
seq_len = features['pos'].get_shape()[0].value
# calculate the actions - scale them by the same amount as the
# position
t_end = tips[1:, :2]
t_start = tips[:-1, :2]
u = (t_end - t_start) * 1000./self.scale
# we use several steps of the sequence
if data_mode == 'train':
start_inds = np.random.randint(2, seq_len-1, 10)
self.train_multiplier = len(start_inds)
else:
            # use every eighth data point
start_inds = np.arange(2, seq_len-1, 8)
num = len(start_inds)
# prepare the lists of output tensors
start_state = []
us = []
labels = []
for si in start_inds:
p_start = pose[si-1][:2]
s_start = tf.concat([p_start, tf.zeros([1]), friction, mu,
cp[si-1], n[si-1], con[si-1]], axis=0)
start_state += [s_start]
us += [u[si-1]]
relative_rot = pose[si, 2:3] - pose[si-1, 2:3]
relative_rot = \
self._adapt_orientation(relative_rot, ob, self.scale)
label = tf.concat([pose[si, :2], relative_rot, friction, mu,
cp[si], n[si], con[si]], axis=0)
labels += [label]
start_state = tf.stack(start_state)
us = tf.stack(us)
ob = tf.tile(ob, [num])
mat = tf.tile(mat, [num])
values = [start_state, us, (ob, mat)]
labels = [labels, start_state]
return tuple(values), tuple(labels)
def _parse_function(self, example_proto, keys, record_meta, data_mode):
features = self._parse_example(example_proto, keys, record_meta)
pose = features['pos']
ori = self._adapt_orientation(pose[:, 3:]*180./np.pi,
features['object'], 1)
pose = tf.concat([pose[:, 0:1]*1000, pose[:, 1:2]*1000, ori],
axis=1)/self.scale
n = tf.squeeze(features['normal'])/self.scale
con = tf.cast(features['contact'], tf.float32)
con = tf.reshape(con, [-1, 1])/self.scale
tips = features['tip']
cp = features['contact_point'][:, :2]
con_norm = tf.linalg.norm(cp, axis=-1)
cp = tf.where(tf.less(con_norm, 1e-6),
tips[:, :2], cp)*1000/self.scale
friction = \
tf.square(tf.reshape(features['friction'], [1]) * 1000.)
friction = friction/(100*self.scale)
mu = tf.atan(tf.ones([1], dtype=tf.float32) * 0.25)*180./np.pi
mu = mu/self.scale
# calculate the actions - scale them by the same amount as the
# position
t_end = tips[1:, :2]
t_start = tips[:-1, :2]
u = (t_end - t_start) * 1000./self.scale
im = features['image']
coord = features['coord']
mask = features['segmentation']
mask = tf.cast(tf.where(tf.greater(mask, 2.5), tf.ones_like(mask),
tf.zeros_like(mask)), tf.float32)
vis = tf.reduce_sum(mask, axis=[1, 2, 3])
im = tf.concat([im, coord], axis=-1)
pix_tip = features['pix_tip']
ob = tf.reshape(features['object'], [1])
mat = tf.reshape(features['material'], [1])
seq_len = features['pos'].get_shape()[0].value
# we use several steps of the sequence
if data_mode == 'train':
num = 1
start_inds = np.random.randint(1, seq_len-self.sl-2, num)
elif data_mode == 'val':
num = 1
            # evenly spaced sub-sequence starts over the validation
            # sequence; only the first num are used
start_inds = np.arange(1, seq_len-self.sl-2, (self.sl+1)//2)
start_inds = start_inds[:num]
else:
if self.sl > seq_len//2:
start_inds = [1]
else:
start_inds = np.arange(1, seq_len-self.sl-2, 20)
num = len(start_inds)
self.test_multiplier = num
# prepare the lists of output tensors
ims = []
start_ims = []
start_ts = []
start_state = []
us = []
tes = []
pixts = []
pixte = []
labels = []
mv_trs = []
mv_rots = []
viss = []
for si in start_inds:
p_start = pose[si][:2]
s_start = tf.concat([p_start, tf.zeros([1]), friction, mu,
cp[si], n[si], con[si]], axis=0)
start_state += [s_start]
start_ts += [tips[si]]
start_ims += [im[si]]
start = si + 1
end = si + 1 + self.sl
ims += [im[start:end]]
us += [u[start:end]]
tes += [tips[start:end]]
pixts += [pix_tip[start:end]]
pixte += [pix_tip[start+1:end+1]]
relative_rot = pose[start:end, 2:3] - \
tf.tile(pose[si:si+1, 2:3], [self.sl, 1])
relative_rot = \
self._adapt_orientation(relative_rot, ob, self.scale)
label = tf.concat([pose[start:end, :2], relative_rot,
tf.tile(friction[None, :], [self.sl, 1]),
tf.tile(mu[None, :], [self.sl, 1]),
cp[start:end], n[start:end],
con[start:end]], axis=-1)
labels += [label]
viss += [vis[start:end]]
mv = pose[start:end] - pose[si:end-1]
mv_trs += [tf.reduce_sum(tf.norm(mv[:, :2], axis=-1))]
mvr = self._adapt_orientation(mv[:, 2], ob, self.scale)
mv_rots += [tf.reduce_sum(tf.abs(mvr))]
ims = tf.stack(ims)
start_ims = tf.stack(start_ims)
start_ts = tf.stack(start_ts)
start_state = tf.stack(start_state)
us = tf.stack(us)
tes = tf.stack(tes)
pixts = tf.stack(pixts)
pixte = tf.stack(pixte)
mv_trs = tf.stack(mv_trs)
mv_rots = tf.stack(mv_rots)
viss = tf.stack(viss)
ob = tf.tile(ob, [num])
mat = tf.tile(mat, [num])
values = [(ims, tes, pixts, pixte), us, (start_ims, start_ts),
start_state, (ob, mat)]
labels = [labels, mv_trs, mv_rots, viss]
return tuple(values), tuple(labels)
######################################
# Evaluation
######################################
def save_log(self, log_dict, out_dir, step, num, mode):
if mode == 'filter':
keys = ['noise_num', 'likelihood', 'likelihood_std', 'dist_tr',
'dist_tr_std', 'dist_rot', 'dist_rot_std', 'corr_r_vis',
'corr_r_cont', 'corr_q_cont',
'm_tr', 'm_tr_std', 'deg_rot', 'deg_rot_std', 'dist',
'dist_std', 'dist_obs', 'dist_obs_std']
keys += self.x_names + list(map(lambda x: x + '_std',
self.x_names))
keys_corr = ['noise_num']
keys_corr += list(map(lambda x: 'cq_cont_' + x, self.x_names))
keys_corr += list(map(lambda x: 'cr_cont_' + x, self.z_names))
keys_corr += list(map(lambda x: 'cr_vis_' + x, self.z_names))
log_file = open(os.path.join(out_dir, str(step) + '_res.csv'), 'a')
log = csv.DictWriter(log_file, keys)
if num == 0:
log.writeheader()
log_file_corr = open(os.path.join(out_dir,
str(step) + '_corr.csv'), 'a')
log_corr = csv.DictWriter(log_file_corr, keys_corr)
if num == 0:
log_corr.writeheader()
row = {}
for k, v in log_dict.items():
                if k in keys and type(v[0]) not in [str, bool, np.str_,
                                                    np.bool_]:
row[k] = np.mean(v)
row[k + '_std'] = np.std(v)
# corr_r cannot be properly evaluated per-example when batch size
# is 1, so we have to evaluate it here before outputting it
row_corr = {}
r_pred = log_dict['r_pred'].reshape(-1, self.dim_z).T
vis = log_dict['vis'].reshape(-1, 1).T
cont = log_dict['cont'].reshape(-1, 1).T
corr_vis = []
corr_cont = []
for i, n in enumerate(self.z_names):
r_c = np.corrcoef(r_pred[i:i+1], cont)[0, 1]
r_v = np.corrcoef(r_pred[i:i+1], vis)[0, 1]
corr_vis += [r_v]
corr_cont += [r_c]
row_corr['cr_cont_' + n] = r_c
row_corr['cr_vis_' + n] = r_v
row['corr_r_vis'] = np.mean(corr_vis)
row['corr_r_cont'] = np.mean(corr_cont)
q_pred = log_dict['q_pred'].reshape(-1, self.dim_x).T
corr_cont = []
for i, n in enumerate(self.x_names):
q_c = np.corrcoef(q_pred[i:i+1], cont)[0, 1]
corr_cont += [q_c]
row_corr['cq_cont_' + n] = q_c
row['corr_q_cont'] = np.mean(corr_cont)
row['noise_num'] = num
log.writerow(row)
log_file.close()
row_corr['noise_num'] = num
log_corr.writerow(row_corr)
log_file_corr.close()
else:
row = {}
for k, v in log_dict.items():
                if type(v[0]) not in [str, bool, np.str_, np.bool_]:
row[k] = np.mean(v)
row[k + '_std'] = np.std(v)
if mode == 'pretrain_obs':
# corr_r cannot be properly evaluated per-example when batch
# size is 1, so we have to evaluate it here
r_het_diag = log_dict['r_het_diag'].reshape(-1, self.dim_z).T
r_het_tri = log_dict['r_het_tri'].reshape(-1, self.dim_z).T
vis = log_dict['vis'].reshape(-1, 1).T
corr_diags = []
corr_fulls = []
for i in range(self.dim_z):
corr_diags += [np.corrcoef(r_het_diag[i:i+1], vis)[0, 1]]
corr_fulls += [np.corrcoef(r_het_tri[i:i+1], vis)[0, 1]]
row['corr_r_het_diag'] = np.mean(corr_diags)
row['corr_r_het_tri'] = np.mean(corr_fulls)
for i, n in enumerate(self.z_names):
row['corr_' + n + '_diag'] = corr_diags[i]
row['corr_' + n + '_full'] = corr_fulls[i]
log_file = open(os.path.join(out_dir, str(step) + '_res.csv'),
'w')
log = csv.DictWriter(log_file, sorted(row.keys()))
log.writeheader()
log.writerow(row)
log_file.close()
return
def _eigsorted(self, cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:, order]
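    # _eigsorted returns eigenvalues in descending order with matching
    # eigenvectors; this is the usual ingredient for drawing covariance
    # error ellipses (cf. the matplotlib Ellipse import above)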
def plot_tracking(self, seq_pred, cov_pred, z, seq, q_pred, r_pred, vis,
out_dir, num, diffs, likes, actions, ob, init,
full_out=False):
pos_pred = np.squeeze(seq_pred[:, :2])
or_pred = np.squeeze(seq_pred[:, 2])
l_pred = np.squeeze(seq_pred[:, 3])
mu_pred = np.squeeze(seq_pred[:, 4])
cp_pred = np.squeeze(seq_pred[:, 5:7])
n_pred = np.squeeze(seq_pred[:, 7:9])
s_pred = np.squeeze(seq_pred[:, 9])
vis = vis / np.max(vis)
if z is not None:
pos_obs = np.squeeze(z[:, :2])
or_obs = np.squeeze(z[:, 2])
r_obs = np.squeeze(z[:, 3:5])
n_obs = np.squeeze(z[:, 5:7])
s_obs = np.squeeze(z[:, 7])
if cov_pred is not None:
cov_pred = cov_pred.reshape(self.sl, self.dim_x, self.dim_x)
cx = np.sqrt(np.squeeze(cov_pred[:, 0, 0]))
cy = np.sqrt(np.squeeze(cov_pred[:, 1, 1]))
ct = np.sqrt(np.squeeze(cov_pred[:, 2, 2]))
cl = np.sqrt(np.squeeze(cov_pred[:, 3, 3]))
cmu = np.sqrt(np.squeeze(cov_pred[:, 4, 4]))
crx = np.sqrt(np.squeeze(cov_pred[:, 5, 5]))
cry = np.sqrt(np.squeeze(cov_pred[:, 6, 6]))
cnx = np.sqrt(np.squeeze(cov_pred[:, 7, 7]))
cny = np.sqrt(np.squeeze(cov_pred[:, 8, 8]))
cs = np.sqrt(np.squeeze(cov_pred[:, 9, 9]))
q_pred = q_pred.reshape(self.sl, self.dim_x, self.dim_x)
r_pred = r_pred.reshape(self.sl, self.dim_z, self.dim_z)
qx = np.sqrt(np.squeeze(q_pred[:, 0, 0]))
qy = np.sqrt(np.squeeze(q_pred[:, 1, 1]))
qt = np.sqrt(np.squeeze(q_pred[:, 2, 2]))
ql = np.sqrt(np.squeeze(q_pred[:, 3, 3]))
qmu = np.sqrt(np.squeeze(q_pred[:, 4, 4]))
qrx = np.sqrt(np.squeeze(q_pred[:, 5, 5]))
qry = np.sqrt(np.squeeze(q_pred[:, 6, 6]))
qnx = np.sqrt(np.squeeze(q_pred[:, 7, 7]))
qny = np.sqrt(np.squeeze(q_pred[:, 8, 8]))
qs = np.sqrt(np.squeeze(q_pred[:, 9, 9]))
rx = np.sqrt(np.squeeze(r_pred[:, 0, 0]))
ry = np.sqrt(np.squeeze(r_pred[:, 1, 1]))
rt = np.sqrt(np.squeeze(r_pred[:, 2, 2]))
rrx = np.sqrt(np.squeeze(r_pred[:, 3, 3]))
rry = np.sqrt(np.squeeze(r_pred[:, 4, 4]))
rnx = np.sqrt(np.squeeze(r_pred[:, 5, 5]))
rny = np.sqrt(np.squeeze(r_pred[:, 6, 6]))
rs = np.sqrt(np.squeeze(r_pred[:, 7, 7]))
fig, ax = plt.subplots(2, 3, figsize=[20, 15])
ts = np.arange(pos_pred.shape[0])
ax[0, 0].plot(ts, pos_pred[:, 0], '-r', label='x predicted')
ax[0, 0].plot(ts, seq[:, 0], '--g', label='x true')
ax[0, 0].plot(ts, pos_obs[:, 0], 'kx', label='x observed')
ax[0, 0].plot(ts, pos_pred[:, 1], '-m', label='y predicted')
ax[0, 0].plot(ts, seq[:, 1], '--c', label='y true')
ax[0, 0].plot(ts, pos_obs[:, 1], 'ko', label='y observed')
ax[0, 0].set_title('position')
ax[0, 0].legend()
ax[0, 1].plot(ts, or_pred, '-r', label='predicted')
ax[0, 1].plot(ts, seq[:, 2], '--g', label='true')
ax[0, 1].plot(ts, or_obs, 'kx', label='observed')
ax[0, 1].set_title('heading')
ax[0, 1].legend()
ax[0, 2].plot(ts, cp_pred[:, 0], '-r', label='x predicted')
ax[0, 2].plot(ts, seq[:, 5], '--g', label='x true')
ax[0, 2].plot(ts, r_obs[:, 0], 'kx', label='x observed')
ax[0, 2].plot(ts, cp_pred[:, 1], '-m', label='y predicted')
ax[0, 2].plot(ts, seq[:, 6], '--c', label='y true')
ax[0, 2].plot(ts, r_obs[:, 1], 'ko', label='y observed')
ax[0, 2].set_title('contact point')
ax[0, 2].legend()
ax[1, 2].plot(ts, n_pred[:, 0], '-r', label='x predicted')
ax[1, 2].plot(ts, seq[:, 7], '--g', label='x true')
ax[1, 2].plot(ts, n_obs[:, 0], 'kx', label='x observed')
ax[1, 2].plot(ts, n_pred[:, 1], '-m', label='y predicted')
ax[1, 2].plot(ts, seq[:, 8], '--c', label='y true')
ax[1, 2].plot(ts, n_obs[:, 1], 'ko', label='y observed')
ax[1, 2].set_title('normal')
ax[1, 2].legend()
ax[1, 0].plot(ts, mu_pred, '-r', label='mu predicted')
ax[1, 0].plot(ts, seq[:, 4], '--g', label='mu true')
ax[1, 0].plot(ts, l_pred, '-m', label='l predicted')
ax[1, 0].plot(ts, seq[:, 3], '--c', label='l true')
ax[1, 0].set_title('friction')
ax[1, 0].legend()
ax[1, 1].plot(ts, s_pred, '-r', label='predicted')
ax[1, 1].plot(ts, seq[:, 9], '--g', label='true')
ax[1, 1].plot(ts, s_obs, 'kx', label='observed')
ax[1, 1].plot(ts, vis, '-b', label='visibility')
ax[1, 1].set_title('contact')
ax[1, 1].legend()
if cov_pred is not None:
ax[0, 0].fill_between(ts, pos_pred[:, 0] - cx,
pos_pred[:, 0] + cx, color="lightblue")
ax[0, 0].fill_between(ts, pos_pred[:, 1] - cy,
pos_pred[:, 1] + cy, color="lightblue")
ax[0, 1].fill_between(ts, (or_pred - ct), (or_pred + ct),
color="lightblue")
ax[0, 2].fill_between(ts, cp_pred[:, 0] - crx,
cp_pred[:, 0] + crx, color="lightblue")
ax[0, 2].fill_between(ts, cp_pred[:, 1] - cry,
cp_pred[:, 1] + cry, color="lightblue")
ax[1, 0].fill_between(ts, (l_pred - cl),
(l_pred + cl), color="lightblue")
ax[1, 0].fill_between(ts, mu_pred - cmu,
mu_pred + cmu, color="lightblue")
ax[1, 1].fill_between(ts, (s_pred - cs), (s_pred + cs),
color="lightblue")
ax[1, 2].fill_between(ts, n_pred[:, 0] - cnx,
n_pred[:, 0] + cnx, color="lightblue")
ax[1, 2].fill_between(ts, n_pred[:, 1] - cny,
n_pred[:, 1] + cny, color="lightblue")
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.85,
wspace=0.1, hspace=0.3)
fig.savefig(os.path.join(out_dir, str(num) + "_tracking"),
bbox_inches="tight")
# plot the noise estimates
fig, ax = plt.subplots(2, 3, figsize=[20, 15])
ts = np.arange(pos_pred.shape[0])
sc = np.max([np.max(qx), np.max(qy), np.max(rx), np.max(ry)])
sc = max(1., sc)
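        # sc rescales the binary visibility/contact indicators so they stay
        # legible against the magnitude of the noise curves in each panel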
ax[0, 0].plot(ts, qx, '-r', label='qx')
ax[0, 0].plot(ts, rx, '--g', label='rx')
ax[0, 0].plot(ts, qy, '-m', label='qy')
ax[0, 0].plot(ts, ry, '--c', label='ry')
ax[0, 0].plot(ts, vis*sc, '-b', label='visibility')
ax[0, 0].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[0, 0].set_title('position')
ax[0, 0].legend()
sc = np.max([np.max(qt), np.max(rt)])
sc = max(1., sc)
ax[0, 1].plot(ts, qt, '-r', label='q')
ax[0, 1].plot(ts, rt, '--g', label='r')
ax[0, 1].plot(ts, vis*sc, '-b', label='visibility')
ax[0, 1].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[0, 1].set_title('heading')
ax[0, 1].legend()
sc = np.max([np.max(qrx), np.max(qry), np.max(rrx), np.max(rry)])
sc = max(1., sc)
ax[0, 2].plot(ts, qrx, '-r', label='qx')
ax[0, 2].plot(ts, rrx, '--g', label='rx')
ax[0, 2].plot(ts, qry, '-m', label='qy')
ax[0, 2].plot(ts, rry, '--c', label='ry')
ax[0, 2].plot(ts, vis*sc, '-b', label='visibility')
ax[0, 2].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[0, 2].set_title('contact point')
ax[0, 2].legend()
sc = np.max([np.max(qnx), np.max(qny), np.max(rnx), np.max(rny)])
sc = max(1., sc)
ax[1, 2].plot(ts, qnx, '-r', label='qx')
ax[1, 2].plot(ts, rnx, '--g', label='rx')
ax[1, 2].plot(ts, qny, '-m', label='qy')
ax[1, 2].plot(ts, rny, '--c', label='ry')
ax[1, 2].plot(ts, vis*sc, '-b', label='visibility')
ax[1, 2].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[1, 2].set_title('normal')
ax[1, 2].legend()
sc = np.max([np.max(qmu), np.max(ql)])
sc = max(1., sc)
ax[1, 0].plot(ts, qmu, '-r', label='qmu')
ax[1, 0].plot(ts, ql, '-m', label='ql')
ax[1, 0].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[1, 0].set_title('friction')
ax[1, 0].legend()
sc = np.max([np.max(qs), np.max(rs)])
sc = max(1., sc)
ax[1, 1].plot(ts, qs, '-r', label='q')
ax[1, 1].plot(ts, rs, '--g', label='r')
ax[1, 1].plot(ts, vis*sc, '-b', label='visibility')
ax[1, 1].plot(ts, seq[:, 9]*sc, '-k', label='contact')
ax[1, 1].set_title('contact')
ax[1, 1].legend()
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.95, top=0.85,
wspace=0.1, hspace=0.3)
fig.savefig(os.path.join(out_dir, str(num) + "_noise"),
bbox_inches="tight")
log_file = open(os.path.join(out_dir, str(num) + '_seq.csv'), 'w')
keys = ['t', 'x', 'y', 'or', 'l', 'mu', 'rx', 'ry', 'nx', 'ny', 's',
'x_p', 'y_p', 'or_p', 'l_p', 'mu_p', 'rx_p', 'ry_p', 'nx_p',
'ny_p', 's_p']
if cov_pred is not None and z is not None:
keys += ['x_c', 'y_c', 'or_c', 'l_c', 'mu_c', 'rx_c', 'ry_c',
'nx_c', 'ny_c', 's_c', 'x_ob', 'y_ob', 'or_ob', 'rx_ob',
'ry_ob', 'nx_ob', 'ny_ob', 's_ob']
log = csv.DictWriter(log_file, keys)
log.writeheader()
for t in ts:
                # include the time index so the 't' column is populated
                row = {'t': t, 'x': seq[t, 0], 'y': seq[t, 1],
                       'or': seq[t, 2],
'l': seq[t, 3], 'mu': seq[t, 4], 'rx': seq[t, 5],
'ry': seq[t, 6], 'nx': seq[t, 7], 'ny': seq[t, 8],
's': seq[t, 9],
'x_p': seq_pred[t, 0], 'y_p': seq_pred[t, 1],
'or_p': seq_pred[t, 2], 'l_p': seq_pred[t, 3],
'mu_p': seq_pred[t, 4], 'rx_p': seq_pred[t, 5],
'ry_p': seq_pred[t, 6], 'nx_p': seq_pred[t, 7],
'ny_p': seq_pred[t, 8], 's_p': seq_pred[t, 9],
'x_c': cx[t], 'y_c': cy[t], 'or_c': ct[t], 'l_c': cl[t],
'mu_c': cmu[t], 'rx_c': crx[t], 'ry_c': cry[t],
'nx_c': cnx[t], 'ny_c': cny[t], 's_c': cs[t],
'x_ob': pos_obs[t, 0], 'y_ob': pos_obs[t, 1],
'or_ob': or_obs[t], 'rx_ob': r_obs[t, 0],
'ry_ob': r_obs[t, 1], 'nx_ob': n_obs[t, 0],
'ny_ob': n_obs[t, 1], 's_ob': s_obs[t]}
log.writerow(row)
else:
log = csv.DictWriter(log_file, keys)
log.writeheader()
for t in ts:
                # include the time index so the 't' column is populated
                row = {'t': t, 'x': seq[t, 0], 'y': seq[t, 1],
                       'or': seq[t, 2],
'l': seq[t, 3], 'mu': seq[t, 4], 'rx': seq[t, 5],
'ry': seq[t, 6], 'nx': seq[t, 7], 'ny': seq[t, 8],
's': seq[t, 9],
'x_p': seq_pred[t, 0], 'y_p': seq_pred[t, 1],
'or_p': seq_pred[t, 2], 'l_p': seq_pred[t, 3],
'mu_p': seq_pred[t, 4], 'rx_p': seq_pred[t, 5],
'ry_p': seq_pred[t, 6], 'nx_p': seq_pred[t, 7],
'ny_p': seq_pred[t, 8], 's_p': seq_pred[t, 9]}
log.writerow(row)
log_file.close()
# save debug output
if full_out:
name = os.path.join(out_dir, str(num))
np.save(name + '_init', init)
np.save(name + '_true', seq)
np.save(name + '_pred', seq_pred)
np.save(name + '_obs', z)
np.save(name + '_c', cov_pred)
np.save(name + '_q', q_pred)
np.save(name + '_r', r_pred)
np.save(name + '_vis', vis)
np.save(name + '_u', actions)
np.save(name + '_ob', ob)
def plot_trajectory(self, particles, weights, seq, cov_pred, seq_pred,
ob, out_dir, num):
if particles is not None:
particles = particles.reshape(self.sl, -1, self.dim_x)
weights = weights.reshape(self.sl, -1)
if cov_pred is not None:
cov_pred = cov_pred.reshape(self.sl, self.dim_x, self.dim_x)
        # get the object shape string (decode bytes and strip null padding)
ob = np.asscalar(ob).decode("utf-8").replace('\0', '')
if 'rect' in ob:
# c-----d
# | |
# a-----b
# get the positions of the corner points
if '1' in ob:
points = [[-0.045, -0.045], [0.045, -0.045],
[0.045, 0.045], [-0.045, 0.045]]
if '2' in ob:
points = [[-0.044955, -0.05629], [0.044955, -0.05629],
[0.044955, 0.05629], [-0.044955, 0.05629]]
if '3' in ob:
points = [[-0.067505, -0.04497], [0.067505, -0.04497],
[0.067505, 0.04497], [-0.067505, 0.04497]]
elif 'tri' in ob:
# b ----- a
# |
# |
# c
# get the positions of the points
if '1' in ob:
points = [[0.045, 0.045], [-0.0809, 0.045], [0.045, -0.08087]]
if '2' in ob:
points = [[0.045, 0.045], [-0.106, 0.045], [0.045, -0.08087]]
if '3' in ob:
points = [[0.045, 0.045], [-0.1315, 0.045], [0.045, -0.08061]]
elif 'ellip' in ob:
if '1' in ob:
a = 0.0525
b = 0.0525
elif '2' in ob:
a = 0.0525
b = 0.065445
elif '3' in ob:
a = 0.0525
b = 0.0785
elif 'hex' in ob:
points = []
for i in range(6):
theta = (np.pi/3)*i
points += [[0.06050*np.cos(theta),
0.06050*np.sin(theta)]]
elif 'butter' in ob:
points = self.butter_points[:]
pos_pred = np.squeeze(seq_pred[:, :2])
minx = min(np.min(seq[:, 0]), np.min(pos_pred[:, 0]))
miny = min(np.min(seq[:, 1]), np.min(pos_pred[:, 1]))
maxx = max(np.max(seq[:, 0]), np.max(pos_pred[:, 0]))
maxy = max(np.max(seq[:, 1]), np.max(pos_pred[:, 1]))
fig, ax = plt.subplots(figsize=[15, 15])
ax.set_aspect('equal')
fig2, ax2 = plt.subplots(figsize=[17, 17])
ax2.set_aspect('equal')
for i in range(self.sl - 1):
if cov_pred is not None:
# plot the confidence ellipse
vals, vecs = self._eigsorted(cov_pred[i, :2, :2])
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
width, height = 4 * np.sqrt(vals)
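                # the eigendecomposition gives the principal axes of the
                # covariance; a full width of 4 * sqrt(eigenvalue) spans
                # +/- 2 standard deviations along each axis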
ellip = Ellipse(xy=pos_pred[i], width=width, height=height,
angle=theta, alpha=0.1)
ax.add_artist(ellip)
if particles is not None:
# sort the particles by weight
p = weights[i].argsort()
par = particles[i][p]
wei = weights[i][p]
                # plot the 20 highest-weighted particles, coloured by weight
                # (ascending sort, so the best ones are drawn on top)
                if i == 0:
                    ax.scatter(par[-20:, 0], par[-20:, 1],
                               c=wei[-20:], cmap='jet', marker='x',
                               alpha=0.5, label='particles')
                else:
                    ax.scatter(par[-20:, 0], par[-20:, 1],
                               c=wei[-20:], cmap='jet', marker='x',
                               alpha=0.5)
# plot a marker for the starting point of the sequence
if i == 0:
ax.plot(seq[i, 0], seq[i, 1], 'cx', markersize=15.,
label='start')
# plot the mean trajectory
ax.plot([pos_pred[i, 0], pos_pred[i+1, 0]],
[pos_pred[i, 1], pos_pred[i+1, 1]], '-r',
label='predicted')
# plot the real trajectory
ax.plot([seq[i, 0], seq[i+1, 0]], [seq[i, 1], seq[i+1, 1]],
'-g', label='true')
ax2.plot(seq[i, 0], seq[i, 1], 'cx', markersize=15.,
label='start')
# plot the mean trajectory
ax2.plot([pos_pred[i, 0], pos_pred[i+1, 0]],
[pos_pred[i, 1], pos_pred[i+1, 1]], '-r',
label='predicted')
# plot the real trajectory
ax2.plot([seq[i, 0], seq[i+1, 0]], [seq[i, 1], seq[i+1, 1]],
'-g', label='true')
else:
# plot the mean trajectory
ax.plot([pos_pred[i, 0], pos_pred[i+1, 0]],
[pos_pred[i, 1], pos_pred[i+1, 1]], '-r')
# plot the real trajectory
ax.plot([seq[i, 0], seq[i+1, 0]],
[seq[i, 1], seq[i+1, 1]], '-g')
# plot the mean trajectory
ax2.plot([pos_pred[i, 0], pos_pred[i+1, 0]],
[pos_pred[i, 1], pos_pred[i+1, 1]], '-r')
# plot the real trajectory
ax2.plot([seq[i, 0], seq[i+1, 0]],
[seq[i, 1], seq[i+1, 1]], '-g')
# plot the mean trajectory
ax.plot(pos_pred[i, 0], pos_pred[i, 1], 'ro')
ax.plot(seq[i, 0], seq[i, 1], 'go')
if i % 5 == 0:
if 'ellip' in ob:
                    # pass the rotation as a keyword argument; newer
                    # matplotlib releases expect it that way
                    ax2.add_artist(Ellipse((pos_pred[i, 0], pos_pred[i, 1]),
                                           2*a*1000, 2*b*1000,
                                           angle=seq_pred[i, 2],
                                           alpha=0.1, facecolor='r',
                                           edgecolor='r'))
                    ax2.add_artist(Ellipse((seq[i, 0], seq[i, 1]),
                                           2*a*1000, 2*b*1000,
                                           angle=seq[i, 2],
                                           alpha=0.1, facecolor='g',
                                           edgecolor='g'))
else:
r_p = np.zeros((2, 2))
r_pred = seq_pred[i, 2]*np.pi/180.
r_p[0, 0] = np.cos(r_pred)
r_p[0, 1] = -np.sin(r_pred)
r_p[1, 0] = np.sin(r_pred)
r_p[1, 1] = np.cos(r_pred)
r_l = np.zeros((2, 2))
r_la = seq[i, 2]*np.pi/180.
r_l[0, 0] = np.cos(r_la)
r_l[0, 1] = -np.sin(r_la)
r_l[1, 0] = np.sin(r_la)
r_l[1, 1] = np.cos(r_la)
points_p = []
points_l = []
for p in points:
# rotate and translate the points according to the
# object's pose
pt = np.array(p).reshape(2, 1) * 1000
points_p += [np.dot(r_p, pt).reshape(2)+pos_pred[i]]
points_l += [np.dot(r_l, pt).reshape(2)+seq[i, :2]]
ax2.add_artist(Polygon(points_p, alpha=0.1, facecolor='r',
edgecolor='r'))
ax2.add_artist(Polygon(points_l, alpha=0.1, facecolor='g',
edgecolor='g'))
ax.legend()
# plot the last step
if cov_pred is not None:
vals, vecs = self._eigsorted(cov_pred[-1, :2, :2])
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
width, height = 2 * 2 * np.sqrt(vals)
ellip = Ellipse(xy=pos_pred[-1], width=width, height=height,
angle=theta, alpha=0.1)
ax.add_artist(ellip)
# plot the mean trajectory
ax.plot(pos_pred[-1, 0], pos_pred[-1, 1], 'ro')
# plot the real trajectory
ax.plot(seq[-1, 0], seq[-1, 1], 'go')
if particles is not None:
p = weights[-1].argsort()
par = particles[-1][p]
wei = weights[-1][p]
            # plot the 20 highest-weighted particles, coloured by weight
            ax.scatter(par[-20:, 0], par[-20:, 1],
                       c=wei[-20:], cmap='jet', marker='x', alpha=0.5)
fig.savefig(os.path.join(out_dir, str(num) + "_tracking_2d"),
bbox_inches="tight")
ax2.set_xlim([minx-100, maxx+100])
ax2.set_ylim([miny-100, maxy+100])
fig2.savefig(os.path.join(out_dir, str(num) + "_tracking_vis"),
bbox_inches="tight")
class SegmentationLayer(BaseLayer):
def __init__(self, batch_size, normalize, summary, trainable):
super(SegmentationLayer, self).__init__()
self.summary = summary
self.batch_size = batch_size
self.normalize = normalize
# load a plane image for reprojecting
path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
path = os.path.join(path, 'resources', 'plane_image.npy')
self.plane_depth = \
tf.convert_to_tensor(np.load(path))[None, :, :, None]
self.plane_depth = tf.tile(self.plane_depth,
[self.batch_size, 1, 1, 1])
# segmenting the image
self.im_c1 = self._conv_layer('segment/conv1', 7, 8,
trainable=trainable)
self.im_c2 = self._conv_layer('segment/conv2', 5, 16,
trainable=trainable)
self.im_c3 = self._conv_layer('segment/conv3', 3, 32,
trainable=trainable)
self.im_d1 = self._deconv_layer('segment/deconv1', 13, 16,
trainable=trainable)
self.im_d2 = self._deconv_layer('segment/deconv2', 3, 8,
trainable=trainable)
self.im_d3 = self._deconv_layer('segment/deconv3', 3, 1,
activation=None, trainable=trainable)
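        # the layers above form an hourglass: three pooled conv blocks encode
        # the input and three deconv blocks (with a skip connection from
        # conv2, see call below) decode it back into a single-channel
        # segmentation mask at half resolution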
if self.normalize == 'layer':
self.im_n1 =\
tf.keras.layers.LayerNormalization(name='segment/norm1',
trainable=trainable)
self.im_n2 =\
tf.keras.layers.LayerNormalization(name='segment/norm2',
trainable=trainable)
self.im_n3 =\
tf.keras.layers.LayerNormalization(name='segment/norm3',
trainable=trainable)
self.im_n4 = \
tf.keras.layers.LayerNormalization(name='segment/norm4',
trainable=trainable)
self.im_n5 = \
tf.keras.layers.LayerNormalization(name='segment/norm5',
trainable=trainable)
elif self.normalize == 'batch':
self.im_n1 =\
tf.keras.layers.BatchNormalization(name='segment/norm1',
trainable=trainable)
self.im_n2 =\
tf.keras.layers.BatchNormalization(name='segment/norm2',
trainable=trainable)
self.im_n3 =\
tf.keras.layers.BatchNormalization(name='segment/norm3',
trainable=trainable)
self.im_n4 = \
tf.keras.layers.BatchNormalization(name='segment/norm4',
trainable=trainable)
self.im_n5 = \
tf.keras.layers.BatchNormalization(name='segment/norm5',
trainable=trainable)
self.updateable = [self.im_n1, self.im_n2, self.im_n3, self.im_n4,
self.im_n5]
def call(self, inputs, training):
# unpack the inputs
images = inputs[:, :, :, 0:3]
coords = inputs[:, :, :, 3:]
height = images.get_shape()[1].value
width = images.get_shape()[2].value
        # disable the topmost name scope so that the summaries don't end up
        # all under one tab in tensorboard
with tf.name_scope(""):
# segment the image
with tf.name_scope('segment'):
conv1 = self.im_c1(inputs)
conv1 = tf.nn.max_pool2d(conv1, 3, 2, padding='SAME')
if self.normalize == 'layer':
conv1 = self.im_n1(conv1)
elif self.normalize == 'batch':
conv1 = self.im_n1(conv1, training)
conv2 = self.im_c2(conv1)
conv2 = tf.nn.max_pool2d(conv2, 3, 2, padding='SAME')
if self.normalize == 'layer':
conv2 = self.im_n2(conv2)
elif self.normalize == 'batch':
conv2 = self.im_n2(conv2, training)
conv3 = self.im_c3(conv2)
conv3 = tf.nn.max_pool2d(conv3, 5, 4, padding='SAME')
if self.normalize == 'layer':
conv3 = self.im_n3(conv3)
elif self.normalize == 'batch':
conv3 = self.im_n3(conv3, training)
deconv1 = self.im_d1(conv3)
deconv1 = tf.image.resize(deconv1, conv2.get_shape()[1:3])
deconv1 = deconv1 + conv2
if self.normalize == 'layer':
deconv1 = self.im_n4(deconv1)
elif self.normalize == 'batch':
deconv1 = self.im_n4(deconv1, training)
deconv2 = self.im_d2(deconv1)
deconv2 = tf.image.resize(deconv2, [height // 2, width // 2])
if self.normalize == 'layer':
deconv2 = self.im_n5(deconv2)
elif self.normalize == 'batch':
deconv2 = self.im_n5(deconv2, training)
mask_out = self.im_d3(deconv2)
mask = tf.image.resize(mask_out, [height, width])
if self.summary:
if self.normalize == 'batch':
tf.summary.histogram('n1_mean', self.im_n1.moving_mean)
tf.summary.histogram('n1_var',
self.im_n1.moving_variance)
tf.summary.image('rgb', images[:, :, :, :3])
tf.summary.image('depth', coords[:, :, :, -1:])
tf.summary.image('conv1_im', conv1[0:1, :, :, 0:1])
tf.summary.histogram('conv1_out', conv1)
tf.summary.image('conv2_im', conv2[0:1, :, :, 0:1])
tf.summary.histogram('conv2_out', conv2)
tf.summary.image('conv3_im', conv3[0:1, :, :, 0:1])
tf.summary.histogram('conv3_out', conv3)
tf.summary.image('deconv1_im', deconv1[0:1, :, :, 0:1])
tf.summary.histogram('deconv1_out', deconv1)
tf.summary.image('deconv2_im', deconv2[0:1, :, :, 0:1])
tf.summary.histogram('deconv2_out', deconv2)
tf.summary.image('mask', mask_out[0:1])
# predict the object position
pos_pix = self._spatial_softmax(mask, 'pos', method='softmax',
summary=self.summary)
pos_pix = tf.reshape(pos_pix, [self.batch_size, 2])
pos = utils._to_3d(pos_pix, self.plane_depth)
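            # pos_pix is a pixel-space estimate from the spatial softmax over
            # the mask; _to_3d back-projects it into 3d coordinates using the
            # pre-recorded depth image of the empty plane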
# extract the glimpses for rotation estimation and parameter
# estimation
coords_rot = tf.concat([pos_pix[:, 1:2] * 2, pos_pix[:, 0:1] * 2],
axis=1)
glimpse_rot = \
tf.image.extract_glimpse(images, size=[72, 72],
offsets=coords_rot,
centered=True, normalized=False)
return [mask_out, pos, glimpse_rot], pos_pix
class SensorLayer(BaseLayer):
def __init__(self, batch_size, normalize, scale, summary, trainable):
super(SensorLayer, self).__init__()
self.summary = summary
self.batch_size = batch_size
self.scale = scale
self.normalize = normalize
# load a plane image for reprojecting
path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
path = os.path.join(path, 'resources', 'plane_image.npy')
self.plane_depth = \
tf.convert_to_tensor(np.load(path))[None, :, :, None]
self.plane_depth = tf.tile(self.plane_depth,
[self.batch_size, 1, 1, 1])
# processing the glimpse
self.g_c1 = self._conv_layer('glimpse/conv1', 3, 8,
trainable=trainable)
self.g_c2 = self._conv_layer('glimpse/conv2', 3, 16,
trainable=trainable)
        self.g_c3 = self._conv_layer('glimpse/conv3', 3, 32,
                                     trainable=trainable)
self.g_fc1 = self._fc_layer('glimpse/r_fc1', 128, trainable=trainable)
self.g_rfc2 = self._fc_layer('glimpse/r_fc2', 64, trainable=trainable)
self.g_r = self._fc_layer('glimpse/r', 2, activation=None,
trainable=trainable)
self.g_nfc2 = self._fc_layer('glimpse/n_fc2', 64, trainable=trainable)
self.g_n = self._fc_layer('glimpse/n', 2, activation=None,
trainable=trainable)
self.g_s = self._fc_layer('glimpse/s', 1, activation=None, bias=-0.1,
trainable=trainable)
# get the rotation
self.r_c1 = self._conv_layer('rot/conv1', 3, 32, trainable=trainable)
self.r_c2 = self._conv_layer('rot/conv2', 3, 64, trainable=trainable)
self.r_fc1 = self._fc_layer('rot/fc1', 128, trainable=trainable)
self.r_fc2 = self._fc_layer('rot/fc2', 64, trainable=trainable)
self.r_rot = self._fc_layer('rot/rot', 1, activation=None,
trainable=trainable)
if self.normalize == 'layer':
self.g_n1 = \
tf.keras.layers.LayerNormalization(name='glimpse/norm1',
trainable=trainable)
self.g_n2 = \
tf.keras.layers.LayerNormalization(name='glimpse/norm2',
trainable=trainable)
self.g_n3 = \
tf.keras.layers.LayerNormalization(name='glimpse/norm3',
trainable=trainable)
self.r_n1 = \
tf.keras.layers.LayerNormalization(name='rot/norm1',
trainable=trainable)
self.r_n2 = \
tf.keras.layers.LayerNormalization(name='rot/norm2',
trainable=trainable)
elif self.normalize == 'batch':
self.g_n1 = \
tf.keras.layers.BatchNormalization(name='glimpse/norm1',
trainable=trainable)
self.g_n2 = \
tf.keras.layers.BatchNormalization(name='glimpse/norm2',
trainable=trainable)
self.g_n3 = \
tf.keras.layers.BatchNormalization(name='glimpse/norm3',
trainable=trainable)
self.r_n1 = \
tf.keras.layers.BatchNormalization(name='rot/norm1',
trainable=trainable)
self.r_n2 = \
tf.keras.layers.BatchNormalization(name='rot/norm2',
trainable=trainable)
self.updateable = [self.g_n1, self.g_n2, self.g_n3, self.r_n1,
self.r_n2]
def call(self, inputs, training):
# unpack the inputs
pc, tip_pos, tip_pix, tip_pix_end, start_glimpse, mask, pos, \
glimpse_rot = inputs
        # split the point cloud into color and coordinate channels
image = pc[:, :, :, 0:3]
coord = pc[:, :, :, 3:]
        # disable the topmost name scope so that the summaries don't end up
        # all under one tab in tensorboard
with tf.name_scope(""):
# predict the orientation
with tf.name_scope('rot'):
# in_data = tf.concat([glimpse_rot, start_glimpse], axis=-1)
in_data = start_glimpse - glimpse_rot
rot_conv1 = self.r_c1(in_data)
if self.normalize == 'layer':
rot_conv1 = self.r_n1(rot_conv1)
elif self.normalize == 'batch':
rot_conv1 = self.r_n1(rot_conv1, training)
rot_conv1 = tf.nn.max_pool2d(rot_conv1, 3, 2, padding='VALID')
rot_conv2 = self.r_c2(rot_conv1)
if self.normalize == 'layer':
rot_conv2 = self.r_n2(rot_conv2)
elif self.normalize == 'batch':
rot_conv2 = self.r_n2(rot_conv2, training)
rot_conv2 = tf.nn.max_pool2d(rot_conv2, 3, 2, padding='VALID')
rot_fc1 = self.r_fc1(tf.reshape(rot_conv2,
[self.batch_size, -1]))
rot_fc2 = self.r_fc2(rot_fc1)
rot = self.r_rot(rot_fc2)
if self.summary:
tf.summary.image('glimpse_rot',
glimpse_rot[0:1, :, :, :3])
tf.summary.image('glimpse_start',
start_glimpse[0:1, :, :, :3])
tf.summary.image('conv1_im', rot_conv1[0:1, :, :, 0:1])
tf.summary.histogram('conv1_out', rot_conv1)
tf.summary.image('conv2_im', rot_conv2[0:1, :, :, 0:1])
tf.summary.histogram('conv2_out', rot_conv2)
tf.summary.histogram('fc1_out', rot_fc1)
tf.summary.histogram('fc2_out', rot_fc2)
tf.summary.histogram('rot_out', rot)
# process the glimpse
with tf.name_scope('glimpse'):
tip_pix_x = tf.slice(tip_pix, [0, 0], [-1, 1]) * 2
tip_pix_y = tf.slice(tip_pix, [0, 1], [-1, 1]) * 2
coords = tf.concat([tip_pix_y, tip_pix_x], axis=1)
glimpse = \
tf.image.extract_glimpse(coord, size=[64, 64],
offsets=coords,
centered=True, normalized=False)
im_glimpse = \
tf.image.extract_glimpse(image, size=[64, 64],
offsets=coords,
centered=True, normalized=False)
                # subtract the tip position so the glimpse coordinates are
                # expressed relative to the pusher tip
                glimpse -= tip_pos[:, None, None, :]
in_g = tf.concat([im_glimpse, glimpse], axis=-1)
g_conv1 = self.g_c1(in_g)
g_conv1 = tf.nn.max_pool2d(g_conv1, 3, 2, padding='VALID')
if self.normalize == 'layer':
g_conv1 = self.g_n1(g_conv1)
elif self.normalize == 'batch':
g_conv1 = self.g_n1(g_conv1, training)
g_conv2 = self.g_c2(g_conv1)
g_conv2 = tf.nn.max_pool2d(g_conv2, 3, 2, padding='VALID')
if self.normalize == 'layer':
g_conv2 = self.g_n2(g_conv2)
elif self.normalize == 'batch':
g_conv2 = self.g_n2(g_conv2, training)
g_conv3 = self.g_c3(g_conv2)
# g_conv3 = tf.nn.max_pool2d(g_conv3, 3, 2, padding='VALID')
if self.normalize == 'layer':
g_conv3 = self.g_n3(g_conv3)
elif self.normalize == 'batch':
g_conv3 = self.g_n3(g_conv3, training)
glimpse_encoding = tf.reshape(g_conv3, [self.batch_size, -1])
# add the action
pix_u = tf.concat([tip_pix_end - tip_pix, tip_pix], axis=1)
glimpse_encoding = tf.concat([glimpse_encoding, pix_u],
axis=-1)
# extract contact point and push velocity from the glimpse
g_fc1 = self.g_fc1(glimpse_encoding)
g_rfc2 = self.g_rfc2(g_fc1)
r_pix = self.g_r(g_rfc2)
                # add the tip's global position to the local estimate and
                # project back to 3d (using the tip's depth if necessary)
r_pix = r_pix + tip_pix
# r = utils._to_3d(r_pix, self.plane_depth)
r = utils._to_3d_d(r_pix, coord[:, :, :, -1:], tip_pos)
g_nfc2 = self.g_nfc2(g_fc1)
n_pix = self.g_n(g_nfc2)
# calculate the pixel end point to get the z-value
# for projecting the predicted normal from pixels to 3d
n_end_pix = tf.stop_gradient(r_pix) + n_pix
# n_end = utils._to_3d(n_end_pix, self.plane_depth)
n_end = utils._to_3d_d(n_end_pix, coord[:, :, :, -1:],
tip_pos)
n = n_end - tf.stop_gradient(r)
# get the contact annotation
s = self.g_s(glimpse_encoding)
s = tf.nn.sigmoid(s)
                # adapt the contact estimate to the observation scale; the
                # sigmoid confines the raw output to [0, 1], so the network
                # cannot absorb the scaling itself
s = s / self.scale
if self.summary:
tf.summary.image('glimpse_z', glimpse[0:1, :, :, -1:])
tf.summary.image('glimpse_rgb', im_glimpse[0:1])
tf.summary.image('conv1_im', g_conv1[0:1, :, :, 0:1])
tf.summary.histogram('conv1_out', g_conv1)
tf.summary.image('conv2_im', g_conv2[0:1, :, :, 0:1])
tf.summary.histogram('conv2_out', g_conv2)
tf.summary.image('conv3_im', g_conv3[0:1, :, :, 0:1])
tf.summary.histogram('g_fc1_out', g_fc1)
tf.summary.histogram('g_rfc2_out', g_rfc2)
tf.summary.histogram('r_pix_out', r_pix)
tf.summary.histogram('g_nfc2_out', g_nfc2)
tf.summary.histogram('n_pix_out', n_pix)
tf.summary.histogram('n_end_pix_out', n_end_pix)
            # assemble the observations: drop the z (up) coordinates, convert
            # positions from meters to millimeters and apply the scaling
n_norm = tf.linalg.norm(n[:, :2], axis=1, keepdims=True)
n = tf.where(tf.greater(tf.squeeze(n_norm), 1e-5),
n[:, :2] / n_norm, n[:, :2])
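            # zero out the normal wherever the (scaled) contact estimate is
            # below 0.5, i.e. where no contact is predicted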
n = tf.where(tf.greater_equal(tf.tile(s, [1, 2]), 0.5), n, 0 * n)
# we only care for the position in the table plane
r = r[:, :2] * 1000. / self.scale
n = n[:, :2] / self.scale
pos = pos[:, :2] * 1000. / self.scale
z = tf.concat([pos, rot, r, n, s], axis=-1)
if self.summary:
tf.summary.scalar('r_x', r[0, 0])
tf.summary.scalar('r_y', r[0, 1])
tf.summary.scalar('n_x', n[0, 0])
tf.summary.scalar('n_y', n[0, 1])
tf.summary.scalar('o_x', pos[0, 0])
tf.summary.scalar('o_y', pos[0, 1])
tf.summary.scalar('t_x', tip_pos[0, 0])
tf.summary.scalar('t_y', tip_pos[0, 1])
tf.summary.scalar('s', s[0, 0])
tf.summary.scalar('rot', rot[0, 0])
return z, [mask, rot_fc2, g_fc1]
class ObservationNoise(BaseLayer):
def __init__(self, batch_size, dim_z, r_diag, scale, hetero, diag,
trainable, summary):
super(ObservationNoise, self).__init__()
self.hetero = hetero
self.diag = diag
self.batch_size = batch_size
self.dim_z = dim_z
self.scale = scale
self.r_diag = r_diag
self.summary = summary
self.trainable = trainable
def build(self, input_shape):
        init_const = np.ones(self.dim_z) * 1e-3 / self.scale**2
init = np.sqrt(np.maximum(np.square(self.r_diag) - init_const, 0))
# the constant bias keeps the predicted covariance away from zero
self.bias_fixed = \
self.add_weight(name='bias_fixed', shape=[self.dim_z],
trainable=False,
initializer=tf.constant_initializer(init_const))
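        # a full covariance matrix is parameterized by the num entries of a
        # lower-triangular matrix L and reconstructed as R = L L^T, which is
        # positive semi-definite by construction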
num = self.dim_z * (self.dim_z + 1) // 2
wd = 1e-3 * self.scale**2
if self.hetero and self.diag:
# for heteroscedastic noise with diagonal covariance matrix
# position
self.het_diag_pos_c1 = self._conv_layer('het_diag_pos_c1', 5, 16,
stride=[2, 2],
trainable=self.trainable)
self.het_diag_pos_c2 = self._conv_layer('het_diag_pos_c2', 3, 32,
stride=[2, 2],
trainable=self.trainable)
self.het_diag_pos_fc1 = self._fc_layer('het_diag_pos_fc1', 64,
trainable=self.trainable)
self.het_diag_pos_fc2 = self._fc_layer('het_diag_pos_fc2', 2,
mean=0, std=1e-3,
activation=None,
trainable=self.trainable)
# rotation, normal, contact point and contact
self.het_diag_rot_fc = self._fc_layer('het_diag_rot_fc', 1,
mean=0, std=1e-3,
activation=None,
trainable=self.trainable)
self.het_diag_fc1 = self._fc_layer('het_diag_fc1', 64, std=1e-4,
trainable=self.trainable)
self.het_diag_fc2 = self._fc_layer('het_diag_fc2', 32, std=1e-3,
trainable=self.trainable)
self.het_diag_fc3 = self._fc_layer('het_diag_fc3', 5, std=1e-2,
activation=None,
trainable=self.trainable)
self.het_diag_init_bias = \
self.add_weight(name='het_diag_init_bias',
shape=[self.dim_z],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and self.diag:
# for constant noise with diagonal covariance matrix
self.const_diag = \
self.add_weight(name='const_diag',
shape=[self.dim_z],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif self.hetero and not self.diag:
# for heteroscedastic noise with full covariance matrix
self.het_full_pos_c1 = self._conv_layer('het_full_pos_c1', 5, 16,
stride=[2, 2],
trainable=self.trainable)
self.het_full_pos_c2 = self._conv_layer('het_full_pos_c2', 3, 32,
stride=[2, 2],
trainable=self.trainable)
self.het_full_pos_fc = self._fc_layer('het_full_pos_fc',
self.dim_z,
trainable=self.trainable)
# rotation, normal, contact point and contact
self.het_full_rot_fc = self._fc_layer('het_full_rot_fc',
self.dim_z,
trainable=self.trainable)
self.het_full_g_fc1 = self._fc_layer('het_full_g_fc1', 64,
std=1e-3,
trainable=self.trainable)
            self.het_full_g_fc2 = self._fc_layer('het_full_g_fc2', 32,
                                                 trainable=self.trainable)
self.het_full_fc1 = self._fc_layer('het_full_fc1', 64, std=1e-3,
trainable=self.trainable)
self.het_full_fc2 = \
self._fc_layer('het_full_fc2', num,
activation=None, trainable=self.trainable)
self.het_full_init_bias = \
self.add_weight(name='het_full_init_bias',
shape=[self.dim_z], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
else:
# for constant noise with full covariance matrix
self.const_full = \
self.add_weight(name='const_tri', shape=[num],
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(0.),
trainable=self.trainable)
self.const_full_init_bias = \
self.add_weight(name='const_full_init_bias',
shape=[self.dim_z],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
def call(self, inputs, training):
mask, rot_encoding, glimpse_encoding, pix = inputs
if self.hetero and self.diag:
het_diag_pos_c1 = self.het_diag_pos_c1(mask)
het_diag_pos_c2 = self.het_diag_pos_c2(het_diag_pos_c1)
het_diag_pos_c2 = tf.reshape(het_diag_pos_c2,
[self.batch_size, -1])
het_diag_pos_fc1 = self.het_diag_pos_fc1(het_diag_pos_c2)
het_diag_pos = self.het_diag_pos_fc2(het_diag_pos_fc1)
# rotation, normal, contact point and contact
het_diag_rot = self.het_diag_rot_fc(rot_encoding)
het_diag_fc1 = self.het_diag_fc1(glimpse_encoding)
het_diag_fc2 = self.het_diag_fc2(het_diag_fc1)
het_diag_rns = self.het_diag_fc3(het_diag_fc2)
diag = tf.concat([het_diag_pos, het_diag_rot, het_diag_rns],
axis=-1)
if self.summary:
tf.summary.image('het_diag_pos_c1_im',
het_diag_pos_c1[0:1, :, :, 0:1])
tf.summary.histogram('het_diag_pos_c1_out', het_diag_pos_c1)
tf.summary.histogram('het_diag_pos_c2_out', het_diag_pos_c2)
tf.summary.histogram('het_diag_pos_fc1_out', het_diag_pos_fc1)
tf.summary.histogram('het_diag_pos_fc2_out', het_diag_pos)
tf.summary.histogram('het_diag_rot_fc_out', het_diag_rot)
tf.summary.histogram('het_diag_rns_fc1_out', het_diag_fc1)
tf.summary.histogram('het_diag_rns_fc2_out', het_diag_fc2)
tf.summary.histogram('het_diag_rns_fc3_out', het_diag_rns)
tf.summary.histogram('het_diag_out', diag)
diag = tf.square(diag + self.het_diag_init_bias)
diag += self.bias_fixed
R = tf.linalg.diag(diag)
elif not self.hetero and self.diag:
diag = self.const_diag
diag = tf.square(diag) + self.bias_fixed
R = tf.linalg.tensor_diag(diag)
R = tf.tile(R[None, :, :], [self.batch_size, 1, 1])
elif self.hetero and not self.diag:
het_full_pos_c1 = self.het_full_pos_c1(mask)
het_full_pos_c2 = self.het_full_pos_c2(het_full_pos_c1)
het_full_pos_c2 = tf.reshape(het_full_pos_c2,
[self.batch_size, -1])
het_full_pos = self.het_full_pos_fc(het_full_pos_c2)
# rotation, normal, contact point and contact
het_full_rot = self.het_full_rot_fc(rot_encoding)
het_full_g1 = self.het_full_g_fc1(glimpse_encoding)
het_full_g2 = self.het_full_g_fc2(het_full_g1)
input_data = tf.concat([het_full_pos, het_full_rot, het_full_g2],
axis=-1)
het_full_fc1 = self.het_full_fc1(input_data)
tri = self.het_full_fc2(het_full_fc1)
if self.summary:
tf.summary.image('het_full_pos_c1_im',
het_full_pos_c1[0:1, :, :, 0:1])
tf.summary.histogram('het_full_pos_c1_out', het_full_pos_c1)
tf.summary.histogram('het_full_pos_c2_out', het_full_pos_c2)
tf.summary.histogram('het_full_pos_fc_out', het_full_pos)
tf.summary.histogram('het_full_rot_fc_out', het_full_rot)
tf.summary.histogram('het_full_g_fc1_out', het_full_g1)
tf.summary.histogram('het_full_g_fc2_out', het_full_g2)
tf.summary.histogram('het_full_fc1_out', het_full_fc1)
tf.summary.histogram('het_tri_out', tri)
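            # rebuild the full covariance from the predicted triangular
            # entries: R = L L^T plus the fixed diagonal bias keeps R
            # symmetric and positive definite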
R = compat.fill_triangular(tri)
R += tf.linalg.diag(self.het_full_init_bias)
R = tf.matmul(R, tf.linalg.matrix_transpose(R))
R = R + tf.linalg.diag(self.bias_fixed)
else:
tri = self.const_full
R = compat.fill_triangular(tri)
R += tf.linalg.diag(self.const_full_init_bias)
R = tf.matmul(R, tf.linalg.matrix_transpose(R))
R = R + tf.linalg.diag(self.bias_fixed)
R = tf.tile(R[None, :, :], [self.batch_size, 1, 1])
return R
class Likelihood(BaseLayer):
def __init__(self, dim_z, trainable, summary):
super(Likelihood, self).__init__()
        self.summary = summary
        self.dim_z = dim_z
        # keep the flag so the layer constructors below can use it
        self.trainable = trainable
self.like_pos_c1 = self._conv_layer('like_pos_c1', 5, 16,
stride=[2, 2],
trainable=self.trainable)
self.like_pos_c2 = self._conv_layer('like_pos_c2', 3, 32,
trainable=self.trainable)
self.like_pos_fc = self._fc_layer('like_pos_fc', 2*self.dim_z,
trainable=self.trainable)
# rotation, normal, contact point and contact
self.like_rot_fc = self._fc_layer('like_rot_fc', self.dim_z,
trainable=self.trainable)
self.like_rns_fc1 = self._fc_layer('like_rns_fc1', 128,
trainable=self.trainable)
        self.like_rns_fc2 = self._fc_layer('like_rns_fc2', 5*self.dim_z,
                                           trainable=self.trainable)
self.fc1 = self._fc_layer('fc1', 128, trainable=trainable)
self.fc2 = self._fc_layer('fc2', 128, trainable=trainable)
self.fc3 = self._fc_layer('fc3', 1, trainable=trainable,
activation=tf.nn.sigmoid)
def call(self, inputs, training):
# unpack the inputs
particles, encoding = inputs
bs = particles.get_shape()[0].value
num_pred = particles.get_shape()[1].value
# diff, encoding = inputs
mask, rot_encoding, glimpse_encoding, pix = encoding
# preprocess the encodings
# mask
pos_c1 = self.like_pos_c1(mask)
pos_c2 = self.like_pos_c2(pos_c1)
pos_c2 = tf.reshape(pos_c2, [bs, -1])
pos_fc = self.like_pos_fc(pos_c2)
# rotation, normal, contact point and contact
rot_fc = self.like_rot_fc(rot_encoding)
rns_fc1 = self.like_rns_fc1(glimpse_encoding)
rns_fc2 = self.like_rns_fc2(rns_fc1)
# concatenate and tile the preprocessed encoding
encoding = tf.concat([pos_fc, rot_fc, rns_fc2], axis=-1)
encoding = tf.tile(encoding[:, None, :], [1, num_pred, 1])
input_data = tf.concat([encoding, particles], axis=-1)
input_data = tf.reshape(input_data, [bs * num_pred, -1])
fc1 = self.fc1(input_data)
if self.summary:
tf.summary.histogram('fc1_out', fc1)
fc2 = self.fc2(fc1)
if self.summary:
tf.summary.histogram('fc2_out', fc2)
like = self.fc3(fc2)
if self.summary:
tf.summary.histogram('pos_c1_out', pos_c1)
tf.summary.histogram('pos_c2_out', pos_c2)
tf.summary.histogram('pos_fc_out', pos_fc)
tf.summary.histogram('rot_fc_out', rot_fc)
tf.summary.histogram('rns_fc1_out', rns_fc1)
tf.summary.histogram('rns_fc2_out', rns_fc2)
tf.summary.histogram('like', like)
return like
class ObservationModel(BaseLayer):
def __init__(self, dim_z, batch_size):
super(ObservationModel, self).__init__()
self.dim_z = dim_z
self.batch_size = batch_size
def call(self, inputs, training):
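        # the measurement model is linear: H selects every state dimension
        # except the friction parameters l and mu (indices 3 and 4), matching
        # the slicing used for z_pred below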
H = tf.concat(
[tf.tile(np.array([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]],
dtype=np.float32), [self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]],
dtype=np.float32), [self.batch_size, 1, 1])],
axis=1)
z_pred = tf.concat([inputs[:, :3], inputs[:, 5:]], axis=1)
return z_pred, H
class ProcessModel(BaseLayer):
def __init__(self, batch_size, dim_x, scale, learned, jacobian,
trainable, summary):
super(ProcessModel, self).__init__()
self.summary = summary
self.batch_size = batch_size
self.dim_x = dim_x
self.learned = learned
self.jacobian = jacobian
self.scale = scale
if learned:
self.fc1 = self._fc_layer('fc1', 256, std=1e-4,
trainable=trainable)
self.fc2 = self._fc_layer('fc2', 128, trainable=trainable)
self.fc3 = self._fc_layer('fc3', 128, trainable=trainable)
self.update = self._fc_layer('fc4', self.dim_x, activation=None,
trainable=trainable)
def call(self, inputs, training):
# unpack the inputs
last_state, actions, ob = inputs
if self.learned:
fc1 = self.fc1(tf.concat([last_state, actions[:, :2]], axis=-1))
fc2 = self.fc2(fc1)
fc3 = self.fc3(fc2)
update = self.update(fc3)
# for the circular object, the orientation is always zero,
# so we have to set the prediction to 0 and adapt the
# jacobian
ob = tf.reshape(ob, [self.batch_size, 1])
bs = last_state.get_shape()[0]
ob = tf.tile(ob, [1, bs // self.batch_size])
ob = tf.reshape(ob, [-1])
            # strip the null-byte padding from the serialized object name
            ob = tf.strings.regex_replace(ob, "\000", "")
rot_pred = update[:, 2:3]
rot_pred = tf.where(tf.equal(ob, 'ellip1'),
tf.zeros_like(rot_pred), rot_pred)
update = tf.concat([update[:, :2], rot_pred, update[:, 3:]],
axis=-1)
new_state = last_state + update
if self.summary:
tf.summary.histogram('fc1_out', fc1)
tf.summary.histogram('fc2_out', fc2)
tf.summary.histogram('fc3_out', fc3)
tf.summary.histogram('update_out', update)
if self.jacobian:
F = self._compute_jacobian(new_state, last_state)
else:
F = None
else:
if self.jacobian:
# with tf.GradientTape() as tape:
# tape.watch(last_state)
# # split the state into parts and undo the scaling
# last_state *= self.scale
# pos = last_state[:, :2]
# ori = last_state[:, 2:3]
# fr = last_state[:, 3:4]
# fr_mu = last_state[:, 4:5]
# cp = last_state[:, 5:7]
# n = last_state[:, 7:9]
# s = last_state[:, 9:]
# # undo the scaling for the actions as well
# actions *= self.scale
# # apply the analytical model to get predicted translation
# # and rotation
# tr_pred, rot_pred, keep_contact = \
# utils.physical_model(pos, cp, n, actions, fr, fr_mu, s)
# pos_pred = pos + tr_pred
# ori_pred = ori + rot_pred * 180.0/np.pi
# fr_pred = fr
# fr_mu_pred = fr_mu
# cp_pred = cp + actions
# keep_contact = tf.cast(keep_contact, tf.float32)
# n_pred = n * keep_contact
# s_pred = s * keep_contact
# # piece together the new state and apply scaling again
# new_state = \
# tf.concat([pos_pred, ori_pred, fr_pred,
# fr_mu_pred, cp_pred, n_pred, s_pred],
# axis=1) / self.scale
# # block vectorization to avoid excessive memory usage for
# # long sequences
# F = tape.batch_jacobian(new_state, last_state,
# experimental_use_pfor=False)
# split the state into parts and undo the scaling
last_state *= self.scale
pos = last_state[:, :2]
ori = last_state[:, 2:3]
fr = last_state[:, 3:4]
fr_mu = last_state[:, 4:5]
cp = last_state[:, 5:7]
n = last_state[:, 7:9]
s = last_state[:, 9:]
# undo the scaling for the actions as well
actions *= self.scale
# apply the analytical model to get predicted translation and
# rotation
tr_pred, rot_pred, keep_contact, dx, dy, dom = \
utils.physical_model_derivative(pos, cp, n, actions, fr,
fr_mu, s)
# for the circular object, the orientation is always zero,
# so we have to set the prediction to 0 and adapt the
# jacobian
ob = tf.squeeze(ob)
                # strip the null-byte padding from the serialized object name
                ob = tf.strings.regex_replace(ob, "\000", "")
rot_pred = tf.where(tf.equal(ob, 'ellip1'),
tf.zeros_like(rot_pred), rot_pred)
dom = tf.where(tf.equal(ob, 'ellip1'),
tf.zeros_like(dom), dom)
pos_pred = pos + tr_pred
ori_pred = ori + rot_pred * 180.0 / np.pi
fr_pred = fr
fr_mu_pred = fr_mu
cp_pred = cp + actions
keep_contact = tf.cast(keep_contact, tf.float32)
n_pred = n * keep_contact
s_pred = s * keep_contact
# piece together the new state and apply scaling again
new_state = \
tf.concat([pos_pred, ori_pred, fr_pred,
fr_mu_pred, cp_pred, n_pred, s_pred],
axis=1) / self.scale
# piece together the jacobian (I found this to work better than
# getting the whole jacobian from tensorflow)
dom *= 180.0 / np.pi
dnx = tf.concat([tf.zeros([self.batch_size, 7]),
tf.cast(keep_contact, tf.float32),
tf.zeros([self.batch_size, 2])],
axis=-1)
dny = tf.concat([tf.zeros([self.batch_size, 8]),
tf.cast(keep_contact, tf.float32),
tf.zeros([self.batch_size, 1])],
axis=-1)
ds = tf.concat([tf.zeros([self.batch_size, 9]),
tf.cast(keep_contact, tf.float32)],
axis=-1)
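                # the rows for the normal and the contact flag carry the
                # keep_contact indicator, since n_pred = n * keep_contact and
                # s_pred = s * keep_contact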
F = tf.concat(
[dx + np.array([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
dy + np.array([[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
dom + np.array([[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
tf.tile(np.array([[[0, 0, 0, 1, 0, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
[self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 1, 0, 0, 0, 0, 0.]]],
dtype=np.float32),
[self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]],
dtype=np.float32),
[self.batch_size, 1, 1]),
tf.tile(np.array([[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]],
dtype=np.float32),
[self.batch_size, 1, 1]),
tf.reshape(dnx, [-1, 1, self.dim_x]),
tf.reshape(dny, [-1, 1, self.dim_x]),
tf.reshape(ds, [-1, 1, self.dim_x])], axis=1)
else:
# split the state into parts and undo the scaling
last_state *= self.scale
pos = last_state[:, :2]
ori = last_state[:, 2:3]
fr = last_state[:, 3:4]
fr_mu = last_state[:, 4:5]
cp = last_state[:, 5:7]
n = last_state[:, 7:9]
s = last_state[:, 9:]
# undo the scaling for the actions as well
actions *= self.scale
# apply the analytical model to get predicted translation and
# rotation
tr_pred, rot_pred, keep_contact = \
utils.physical_model(pos, cp, n, actions, fr, fr_mu, s)
pos_pred = pos + tr_pred
ori_pred = ori + rot_pred * 180.0 / np.pi
fr_pred = fr
fr_mu_pred = fr_mu
cp_pred = cp + actions
keep_contact = tf.cast(keep_contact, tf.float32)
n_pred = n * keep_contact
s_pred = s * keep_contact
# piece together the new state and apply scaling again
new_state = \
tf.concat([pos_pred, ori_pred, fr_pred,
fr_mu_pred, cp_pred, n_pred, s_pred],
axis=1) / self.scale
F = None
if self.jacobian:
F = tf.stop_gradient(F)
return new_state, F
class ProcessNoise(BaseLayer):
def __init__(self, batch_size, dim_x, q_diag, scale, hetero, diag, learned,
trainable, summary):
super(ProcessNoise, self).__init__()
self.hetero = hetero
self.diag = diag
self.learned = learned
self.trainable = trainable
self.dim_x = dim_x
self.q_diag = q_diag
self.scale = scale
self.batch_size = batch_size
self.summary = summary
def build(self, input_shape):
init_const = np.ones(self.dim_x) * 1e-5 / self.scale**2
        # clamp at zero so a small q_diag cannot yield NaNs under the sqrt
        init = np.sqrt(np.maximum(np.square(self.q_diag) - init_const, 0))
# the constant bias keeps the predicted covariance away from zero
self.bias_fixed = \
self.add_weight(name='bias_fixed', shape=[self.dim_x],
trainable=False,
initializer=tf.constant_initializer(init_const))
num = self.dim_x * (self.dim_x + 1) // 2
wd = 1e-3 * self.scale**2
if self.hetero and self.diag and self.learned:
# for heteroscedastic noise with diagonal covariance matrix
self.het_diag_lrn_fc1 = self._fc_layer('het_diag_lrn_fc1', 128,
trainable=self.trainable)
self.het_diag_lrn_fc2 = self._fc_layer('het_diag_lrn_fc2', 64,
trainable=self.trainable)
self.het_diag_lrn_fc3 = \
self._fc_layer('het_diag_lrn_fc3', self.dim_x, mean=0,
std=1e-3, activation=None,
trainable=self.trainable)
self.het_diag_lrn_init_bias = \
self.add_weight(name='het_diag_lrn_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and self.diag and self.learned:
# for constant noise with diagonal covariance matrix
self.const_diag_lrn = \
self.add_weight(name='const_diag_lrn', shape=[self.dim_x],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif self.hetero and not self.diag and self.learned:
# for heteroscedastic noise with full covariance matrix
self.het_full_lrn_fc1 = self._fc_layer('het_full_lrn_fc1', 128,
trainable=self.trainable)
self.het_full_lrn_fc2 = self._fc_layer('het_full_lrn_fc2', 64,
trainable=self.trainable)
self.het_full_lrn_fc3 = \
self._fc_layer('het_full_lrn_fc3', num, mean=0, std=1e-3,
activation=None, trainable=self.trainable)
self.het_full_lrn_init_bias = \
self.add_weight(name='het_full_lrn_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and not self.diag and self.learned:
# for constant noise with full covariance matrix
self.const_full_lrn = \
self.add_weight(name='const_tri_lrn', shape=[num],
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(0.),
trainable=self.trainable)
self.const_full_lrn_init_bias = \
self.add_weight(name='const_full_lrn_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif self.hetero and self.diag and not self.learned:
# for heteroscedastic noise with diagonal covariance matrix
self.het_diag_ana_fc1 = self._fc_layer('het_diag_ana_fc1', 128,
std=1e-3,
trainable=self.trainable)
self.het_diag_ana_fc2 = self._fc_layer('het_diag_ana_fc2', 64,
trainable=self.trainable)
self.het_diag_ana_fc3 = \
self._fc_layer('het_diag_ana_fc3', self.dim_x, mean=0,
std=1e-3, activation=None,
trainable=self.trainable)
self.het_diag_ana_init_bias = \
self.add_weight(name='het_diag_ana_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and self.diag and not self.learned:
# for constant noise with diagonal covariance matrix
self.const_diag_ana = \
self.add_weight(name='const_diag_ana', shape=[self.dim_x],
trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif self.hetero and not self.diag and not self.learned:
# for heteroscedastic noise with full covariance matrix
self.het_full_ana_fc1 = self._fc_layer('het_full_ana_fc1', 128,
std=1e-3,
trainable=self.trainable)
self.het_full_ana_fc2 = self._fc_layer('het_full_ana_fc2', 64,
trainable=self.trainable)
self.het_full_ana_fc3 = \
self._fc_layer('het_full_ana_fc3', num, mean=0, std=1e-3,
activation=None, trainable=self.trainable)
self.het_full_ana_init_bias = \
self.add_weight(name='het_full_ana_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
elif not self.hetero and not self.diag and not self.learned:
# for constant noise with full covariance matrix
self.const_full_ana = \
self.add_weight(name='const_tri_ana', shape=[num],
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(0.),
trainable=self.trainable)
self.const_full_ana_init_bias = \
self.add_weight(name='const_full_ana_init_bias',
shape=[self.dim_x], trainable=self.trainable,
regularizer=tf.keras.regularizers.l2(l=wd),
initializer=tf.constant_initializer(init))
def call(self, inputs, training):
old_state, actions = inputs
# exclude l from the inputs for stability
input_data = tf.concat([old_state[:, :3], old_state[:, 4:], actions],
axis=-1)
# input_data = tf.concat([old_state, actions], axis=-1)
if self.learned:
if self.hetero and self.diag:
fc1 = self.het_diag_lrn_fc1(input_data)
fc2 = self.het_diag_lrn_fc2(fc1)
diag = self.het_diag_lrn_fc3(fc2)
if self.summary:
tf.summary.histogram('het_diag_lrn_fc1_out', fc1)
tf.summary.histogram('het_diag_lrn_fc2_out', fc2)
tf.summary.histogram('het_diag_lrn_fc3_out', diag)
diag = tf.square(diag + self.het_diag_lrn_init_bias)
diag += self.bias_fixed
Q = tf.linalg.diag(diag)
elif not self.hetero and self.diag:
diag = self.const_diag_lrn
diag = tf.square(diag) + self.bias_fixed
Q = tf.linalg.tensor_diag(diag)
Q = tf.tile(Q[None, :, :], [self.batch_size, 1, 1])
elif self.hetero and not self.diag:
fc1 = self.het_full_lrn_fc1(input_data)
fc2 = self.het_full_lrn_fc2(fc1)
tri = self.het_full_lrn_fc3(fc2)
if self.summary:
tf.summary.histogram('het_full_lrn_fc1_out', fc1)
tf.summary.histogram('het_full_lrn_fc2_out', fc2)
tf.summary.histogram('het_full_lrn_out', tri)
Q = compat.fill_triangular(tri)
Q += tf.linalg.diag(self.het_full_lrn_init_bias)
Q = tf.matmul(Q, tf.linalg.matrix_transpose(Q))
Q = Q + tf.linalg.diag(self.bias_fixed)
else:
tri = self.const_full_lrn
Q = compat.fill_triangular(tri)
Q += tf.linalg.diag(self.const_full_lrn_init_bias)
Q = tf.matmul(Q, tf.linalg.matrix_transpose(Q))
Q = Q + tf.linalg.diag(self.bias_fixed)
Q = tf.tile(Q[None, :, :], [self.batch_size, 1, 1])
else:
if self.hetero and self.diag:
fc1 = self.het_diag_ana_fc1(input_data)
fc2 = self.het_diag_ana_fc2(fc1)
diag = self.het_diag_ana_fc3(fc2)
if self.summary:
tf.summary.histogram('het_diag_ana_fc1_out', fc1)
tf.summary.histogram('het_diag_ana_fc2_out', fc2)
tf.summary.histogram('het_diag_ana_fc3_out', diag)
diag = tf.square(diag + self.het_diag_ana_init_bias)
diag += self.bias_fixed
Q = tf.linalg.diag(diag)
elif not self.hetero and self.diag:
diag = self.const_diag_ana
diag = tf.square(diag) + self.bias_fixed
Q = tf.linalg.tensor_diag(diag)
Q = tf.tile(Q[None, :, :], [self.batch_size, 1, 1])
elif self.hetero and not self.diag:
fc1 = self.het_full_ana_fc1(input_data)
fc2 = self.het_full_ana_fc2(fc1)
tri = self.het_full_ana_fc3(fc2)
if self.summary:
tf.summary.histogram('het_full_ana_fc1_out', fc1)
tf.summary.histogram('het_full_ana_fc2_out', fc2)
tf.summary.histogram('het_full_ana_out', tri)
Q = compat.fill_triangular(tri)
Q += tf.linalg.diag(self.het_full_ana_init_bias)
Q = tf.matmul(Q, tf.linalg.matrix_transpose(Q))
Q = Q + tf.linalg.diag(self.bias_fixed)
else:
tri = self.const_full_ana
Q = compat.fill_triangular(tri)
Q += tf.linalg.diag(self.const_full_ana_init_bias)
Q = tf.matmul(Q, tf.linalg.matrix_transpose(Q))
Q = Q + tf.linalg.diag(self.bias_fixed)
Q = tf.tile(Q[None, :, :], [self.batch_size, 1, 1])
return Q
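
# A minimal usage sketch for the noise models above, kept in comments so the
# module's import-time behavior is unchanged. Shapes and hyper-parameters are
# hypothetical; in the original pipeline these layers are constructed and
# called by the surrounding filter context:
#
#   q_diag = np.ones(10, dtype=np.float32) * 1e-2
#   noise = ProcessNoise(batch_size=8, dim_x=10, q_diag=q_diag, scale=100.,
#                        hetero=True, diag=True, learned=True,
#                        trainable=True, summary=False)
#   state = tf.zeros([8, 10])                    # [batch_size, dim_x]
#   actions = tf.zeros([8, 2])                   # [batch_size, action dims]
#   Q = noise([state, actions], training=True)   # -> [8, 10, 10]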
|
[
"tensorflow.compat.v1.zeros",
"numpy.arctan2",
"numpy.ones",
"tensorflow.compat.v1.summary.histogram",
"matplotlib.patches.Polygon",
"numpy.arange",
"differentiable_filters.utils.push_utils.physical_model",
"tensorflow.compat.v1.name_scope",
"os.path.dirname",
"tensorflow.compat.v1.constant_initializer",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.multiply",
"differentiable_filters.utils.push_utils._to_3d_d",
"numpy.max",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.norm",
"matplotlib.pyplot.subplots",
"numpy.save",
"tensorflow.compat.v1.linalg.matrix_transpose",
"tensorflow.compat.v1.stop_gradient",
"numpy.min",
"numpy.squeeze",
"tensorflow.compat.v1.summary.scalar",
"tensorflow.compat.v1.nn.sigmoid",
"numpy.linalg.eigh",
"tensorflow.compat.v1.abs",
"numpy.array",
"tensorflow.compat.v1.math.atan2",
"tensorflow.compat.v1.data.Dataset.from_tensor_slices",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.keras.layers.BatchNormalization",
"tensorflow.compat.v1.sign",
"tensorflow.compat.v1.linalg.norm",
"numpy.asscalar",
"tensorflow.compat.v1.nn.max_pool2d",
"tensorflow.compat.v1.strings.regex_replace",
"differentiable_filters.utils.push_utils._to_3d",
"differentiable_filters.utils.push_utils.physical_model_derivative",
"numpy.square",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.greater",
"tensorflow.compat.v1.sin",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.keras.layers.LayerNormalization",
"differentiable_filters.utils.tensorflow_compatability.fill_triangular",
"tensorflow.compat.v1.equal",
"pickle.load",
"numpy.mean",
"numpy.sin",
"os.path.join",
"tensorflow.compat.v1.slice",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.add_n",
"tensorflow.compat.v1.cast",
"numpy.corrcoef",
"tensorflow.compat.v1.cos",
"numpy.dot",
"tensorflow.compat.v1.linalg.diag_part",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.math.log",
"differentiable_filters.contexts.paper_base_context.PaperBaseContext.__init__",
"numpy.zeros",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.image.extract_glimpse",
"tensorflow.compat.v1.sqrt",
"numpy.load",
"tensorflow.compat.v1.nn.l2_normalize",
"tensorflow.compat.v1.io.parse_single_example",
"tensorflow.compat.v1.linalg.tensor_diag",
"numpy.random.randint",
"numpy.random.normal",
"tensorflow.compat.v1.image.resize",
"tensorflow.compat.v1.cond",
"csv.DictWriter",
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.ones",
"numpy.std",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.linalg.diag",
"tensorflow.compat.v1.summary.image",
"numpy.cos",
"matplotlib.patches.Ellipse",
"tensorflow.compat.v1.math.is_finite",
"differentiable_filters.utils.recordio.RecordMeta.load",
"tensorflow.compat.v1.less",
"tensorflow.compat.v1.keras.regularizers.l2",
"numpy.sqrt"
] |
# [extract_api index: machine-generated serialization of API call sites taken
#  from a separate `differentiable_filters` TensorFlow source file, unrelated
#  to the script above. Each record has the apparent form
#    ((call_start, call_end), 'fully.qualified.api', 'local_call_expr',
#     (positional_args, keyword_args), 'argument_text',
#     (args_start, args_end), uses_import_alias, 'import statement')
#  and covers calls into tensorflow.compat.v1, numpy, matplotlib.pyplot,
#  pickle, csv, os, and differentiable_filters utilities. The index continues
#  past this excerpt.]
(76706, 76723), True, 'import numpy as np\n'), ((76750, 76777), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 2, 2]'], {}), '(q_pred[:, 2, 2])\n', (76760, 76777), True, 'import numpy as np\n'), ((76804, 76831), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 3, 3]'], {}), '(q_pred[:, 3, 3])\n', (76814, 76831), True, 'import numpy as np\n'), ((76859, 76886), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 4, 4]'], {}), '(q_pred[:, 4, 4])\n', (76869, 76886), True, 'import numpy as np\n'), ((76914, 76941), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 5, 5]'], {}), '(q_pred[:, 5, 5])\n', (76924, 76941), True, 'import numpy as np\n'), ((76969, 76996), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 6, 6]'], {}), '(q_pred[:, 6, 6])\n', (76979, 76996), True, 'import numpy as np\n'), ((77024, 77051), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 7, 7]'], {}), '(q_pred[:, 7, 7])\n', (77034, 77051), True, 'import numpy as np\n'), ((77079, 77106), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 8, 8]'], {}), '(q_pred[:, 8, 8])\n', (77089, 77106), True, 'import numpy as np\n'), ((77133, 77160), 'numpy.squeeze', 'np.squeeze', (['q_pred[:, 9, 9]'], {}), '(q_pred[:, 9, 9])\n', (77143, 77160), True, 'import numpy as np\n'), ((77187, 77214), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 0, 0]'], {}), '(r_pred[:, 0, 0])\n', (77197, 77214), True, 'import numpy as np\n'), ((77241, 77268), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 1, 1]'], {}), '(r_pred[:, 1, 1])\n', (77251, 77268), True, 'import numpy as np\n'), ((77295, 77322), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 2, 2]'], {}), '(r_pred[:, 2, 2])\n', (77305, 77322), True, 'import numpy as np\n'), ((77350, 77377), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 3, 3]'], {}), '(r_pred[:, 3, 3])\n', (77360, 77377), True, 'import numpy as np\n'), ((77405, 77432), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 4, 4]'], {}), '(r_pred[:, 4, 4])\n', (77415, 77432), True, 'import numpy as np\n'), ((77460, 77487), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 5, 5]'], {}), '(r_pred[:, 5, 5])\n', (77470, 77487), True, 'import numpy as np\n'), ((77515, 77542), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 6, 6]'], {}), '(r_pred[:, 6, 6])\n', (77525, 77542), True, 'import numpy as np\n'), ((77569, 77596), 'numpy.squeeze', 'np.squeeze', (['r_pred[:, 7, 7]'], {}), '(r_pred[:, 7, 7])\n', (77579, 77596), True, 'import numpy as np\n'), ((81605, 81615), 'numpy.max', 'np.max', (['qx'], {}), '(qx)\n', (81611, 81615), True, 'import numpy as np\n'), ((81617, 81627), 'numpy.max', 'np.max', (['qy'], {}), '(qy)\n', (81623, 81627), True, 'import numpy as np\n'), ((81629, 81639), 'numpy.max', 'np.max', (['rx'], {}), '(rx)\n', (81635, 81639), True, 'import numpy as np\n'), ((81641, 81651), 'numpy.max', 'np.max', (['ry'], {}), '(ry)\n', (81647, 81651), True, 'import numpy as np\n'), ((82083, 82093), 'numpy.max', 'np.max', (['qt'], {}), '(qt)\n', (82089, 82093), True, 'import numpy as np\n'), ((82095, 82105), 'numpy.max', 'np.max', (['rt'], {}), '(rt)\n', (82101, 82105), True, 'import numpy as np\n'), ((82437, 82448), 'numpy.max', 'np.max', (['qrx'], {}), '(qrx)\n', (82443, 82448), True, 'import numpy as np\n'), ((82450, 82461), 'numpy.max', 'np.max', (['qry'], {}), '(qry)\n', (82456, 82461), True, 'import numpy as np\n'), ((82463, 82474), 'numpy.max', 'np.max', (['rrx'], {}), '(rrx)\n', (82469, 82474), True, 'import numpy as np\n'), ((82476, 82487), 'numpy.max', 'np.max', (['rry'], {}), '(rry)\n', (82482, 82487), True, 'import numpy as np\n'), ((82928, 82939), 'numpy.max', 'np.max', (['qnx'], {}), '(qnx)\n', 
(82934, 82939), True, 'import numpy as np\n'), ((82941, 82952), 'numpy.max', 'np.max', (['qny'], {}), '(qny)\n', (82947, 82952), True, 'import numpy as np\n'), ((82954, 82965), 'numpy.max', 'np.max', (['rnx'], {}), '(rnx)\n', (82960, 82965), True, 'import numpy as np\n'), ((82967, 82978), 'numpy.max', 'np.max', (['rny'], {}), '(rny)\n', (82973, 82978), True, 'import numpy as np\n'), ((83412, 83423), 'numpy.max', 'np.max', (['qmu'], {}), '(qmu)\n', (83418, 83423), True, 'import numpy as np\n'), ((83425, 83435), 'numpy.max', 'np.max', (['ql'], {}), '(ql)\n', (83431, 83435), True, 'import numpy as np\n'), ((83711, 83721), 'numpy.max', 'np.max', (['qs'], {}), '(qs)\n', (83717, 83721), True, 'import numpy as np\n'), ((83723, 83733), 'numpy.max', 'np.max', (['rs'], {}), '(rs)\n', (83729, 83733), True, 'import numpy as np\n'), ((90288, 90363), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'pos_pred[i]', 'width': 'width', 'height': 'height', 'angle': 'theta', 'alpha': '(0.1)'}), '(xy=pos_pred[i], width=width, height=height, angle=theta, alpha=0.1)\n', (90295, 90363), False, 'from matplotlib.patches import Ellipse\n'), ((95091, 95120), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 0][::-1]'], {}), '(*vecs[:, 0][::-1])\n', (95101, 95120), True, 'import numpy as np\n'), ((95158, 95171), 'numpy.sqrt', 'np.sqrt', (['vals'], {}), '(vals)\n', (95165, 95171), True, 'import numpy as np\n'), ((96493, 96518), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (96508, 96518), False, 'import os\n'), ((96650, 96663), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (96657, 96663), True, 'import numpy as np\n'), ((98568, 98645), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm1"""', 'trainable': 'trainable'}), "(name='segment/norm1', trainable=trainable)\n", (98602, 98645), True, 'import tensorflow.compat.v1 as tf\n'), ((98739, 98816), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm2"""', 'trainable': 'trainable'}), "(name='segment/norm2', trainable=trainable)\n", (98773, 98816), True, 'import tensorflow.compat.v1 as tf\n'), ((98910, 98987), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm3"""', 'trainable': 'trainable'}), "(name='segment/norm3', trainable=trainable)\n", (98944, 98987), True, 'import tensorflow.compat.v1 as tf\n'), ((99082, 99159), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm4"""', 'trainable': 'trainable'}), "(name='segment/norm4', trainable=trainable)\n", (99116, 99159), True, 'import tensorflow.compat.v1 as tf\n'), ((99254, 99331), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""segment/norm5"""', 'trainable': 'trainable'}), "(name='segment/norm5', trainable=trainable)\n", (99288, 99331), True, 'import tensorflow.compat.v1 as tf\n'), ((99937, 99961), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""segment"""'], {}), "('segment')\n", (99950, 99961), True, 'import tensorflow.compat.v1 as tf\n'), ((100030, 100075), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['conv1', '(3)', '(2)'], {'padding': '"""SAME"""'}), "(conv1, 3, 2, padding='SAME')\n", (100046, 100075), True, 'import tensorflow.compat.v1 as tf\n'), ((100338, 100383), 'tensorflow.compat.v1.nn.max_pool2d', 
'tf.nn.max_pool2d', (['conv2', '(3)', '(2)'], {'padding': '"""SAME"""'}), "(conv2, 3, 2, padding='SAME')\n", (100354, 100383), True, 'import tensorflow.compat.v1 as tf\n'), ((100646, 100691), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['conv3', '(5)', '(4)'], {'padding': '"""SAME"""'}), "(conv3, 5, 4, padding='SAME')\n", (100662, 100691), True, 'import tensorflow.compat.v1 as tf\n'), ((101325, 101376), 'tensorflow.compat.v1.image.resize', 'tf.image.resize', (['deconv2', '[height // 2, width // 2]'], {}), '(deconv2, [height // 2, width // 2])\n', (101340, 101376), True, 'import tensorflow.compat.v1 as tf\n'), ((101651, 101693), 'tensorflow.compat.v1.image.resize', 'tf.image.resize', (['mask_out', '[height, width]'], {}), '(mask_out, [height, width])\n', (101666, 101693), True, 'import tensorflow.compat.v1 as tf\n'), ((104041, 104066), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (104056, 104066), False, 'import os\n'), ((104198, 104211), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (104205, 104211), True, 'import numpy as np\n'), ((106773, 106850), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""glimpse/norm1"""', 'trainable': 'trainable'}), "(name='glimpse/norm1', trainable=trainable)\n", (106807, 106850), True, 'import tensorflow.compat.v1 as tf\n'), ((106944, 107021), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""glimpse/norm2"""', 'trainable': 'trainable'}), "(name='glimpse/norm2', trainable=trainable)\n", (106978, 107021), True, 'import tensorflow.compat.v1 as tf\n'), ((107115, 107192), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""glimpse/norm3"""', 'trainable': 'trainable'}), "(name='glimpse/norm3', trainable=trainable)\n", (107149, 107192), True, 'import tensorflow.compat.v1 as tf\n'), ((107286, 107359), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""rot/norm1"""', 'trainable': 'trainable'}), "(name='rot/norm1', trainable=trainable)\n", (107320, 107359), True, 'import tensorflow.compat.v1 as tf\n'), ((107453, 107526), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'name': '"""rot/norm2"""', 'trainable': 'trainable'}), "(name='rot/norm2', trainable=trainable)\n", (107487, 107526), True, 'import tensorflow.compat.v1 as tf\n'), ((108166, 108186), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""rot"""'], {}), "('rot')\n", (108179, 108186), True, 'import tensorflow.compat.v1 as tf\n'), ((108604, 108654), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['rot_conv1', '(3)', '(2)'], {'padding': '"""VALID"""'}), "(rot_conv1, 3, 2, padding='VALID')\n", (108620, 108654), True, 'import tensorflow.compat.v1 as tf\n'), ((108943, 108993), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['rot_conv2', '(3)', '(2)'], {'padding': '"""VALID"""'}), "(rot_conv2, 3, 2, padding='VALID')\n", (108959, 108993), True, 'import tensorflow.compat.v1 as tf\n'), ((109997, 110021), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""glimpse"""'], {}), "('glimpse')\n", (110010, 110021), True, 'import tensorflow.compat.v1 as tf\n'), ((110182, 110223), 'tensorflow.compat.v1.concat', 'tf.concat', (['[tip_pix_y, tip_pix_x]'], {'axis': '(1)'}), '([tip_pix_y, tip_pix_x], axis=1)\n', (110191, 110223), True, 'import 
tensorflow.compat.v1 as tf\n'), ((110272, 110372), 'tensorflow.compat.v1.image.extract_glimpse', 'tf.image.extract_glimpse', (['coord'], {'size': '[64, 64]', 'offsets': 'coords', 'centered': '(True)', 'normalized': '(False)'}), '(coord, size=[64, 64], offsets=coords, centered=\n True, normalized=False)\n', (110296, 110372), True, 'import tensorflow.compat.v1 as tf\n'), ((110509, 110609), 'tensorflow.compat.v1.image.extract_glimpse', 'tf.image.extract_glimpse', (['image'], {'size': '[64, 64]', 'offsets': 'coords', 'centered': '(True)', 'normalized': '(False)'}), '(image, size=[64, 64], offsets=coords, centered=\n True, normalized=False)\n', (110533, 110609), True, 'import tensorflow.compat.v1 as tf\n'), ((110842, 110883), 'tensorflow.compat.v1.concat', 'tf.concat', (['[im_glimpse, glimpse]'], {'axis': '(-1)'}), '([im_glimpse, glimpse], axis=-1)\n', (110851, 110883), True, 'import tensorflow.compat.v1 as tf\n'), ((110953, 111001), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['g_conv1', '(3)', '(2)'], {'padding': '"""VALID"""'}), "(g_conv1, 3, 2, padding='VALID')\n", (110969, 111001), True, 'import tensorflow.compat.v1 as tf\n'), ((111275, 111323), 'tensorflow.compat.v1.nn.max_pool2d', 'tf.nn.max_pool2d', (['g_conv2', '(3)', '(2)'], {'padding': '"""VALID"""'}), "(g_conv2, 3, 2, padding='VALID')\n", (111291, 111323), True, 'import tensorflow.compat.v1 as tf\n'), ((111885, 111927), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['g_conv3', '[self.batch_size, -1]'], {}), '(g_conv3, [self.batch_size, -1])\n', (111895, 111927), True, 'import tensorflow.compat.v1 as tf\n'), ((111986, 112037), 'tensorflow.compat.v1.concat', 'tf.concat', (['[tip_pix_end - tip_pix, tip_pix]'], {'axis': '(1)'}), '([tip_pix_end - tip_pix, tip_pix], axis=1)\n', (111995, 112037), True, 'import tensorflow.compat.v1 as tf\n'), ((112073, 112118), 'tensorflow.compat.v1.concat', 'tf.concat', (['[glimpse_encoding, pix_u]'], {'axis': '(-1)'}), '([glimpse_encoding, pix_u], axis=-1)\n', (112082, 112118), True, 'import tensorflow.compat.v1 as tf\n'), ((112644, 112695), 'differentiable_filters.utils.push_utils._to_3d_d', 'utils._to_3d_d', (['r_pix', 'coord[:, :, :, -1:]', 'tip_pos'], {}), '(r_pix, coord[:, :, :, -1:], tip_pos)\n', (112658, 112695), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((113073, 113128), 'differentiable_filters.utils.push_utils._to_3d_d', 'utils._to_3d_d', (['n_end_pix', 'coord[:, :, :, -1:]', 'tip_pos'], {}), '(n_end_pix, coord[:, :, :, -1:], tip_pos)\n', (113087, 113128), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((113329, 113345), 'tensorflow.compat.v1.nn.sigmoid', 'tf.nn.sigmoid', (['s'], {}), '(s)\n', (113342, 113345), True, 'import tensorflow.compat.v1 as tf\n'), ((116105, 116124), 'numpy.ones', 'np.ones', (['self.dim_z'], {}), '(self.dim_z)\n', (116112, 116124), True, 'import numpy as np\n'), ((116478, 116513), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init_const'], {}), '(init_const)\n', (116501, 116513), True, 'import tensorflow.compat.v1 as tf\n'), ((123028, 123099), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""het_diag_pos_c1_im"""', 'het_diag_pos_c1[0:1, :, :, 0:1]'], {}), "('het_diag_pos_c1_im', het_diag_pos_c1[0:1, :, :, 0:1])\n", (123044, 123099), True, 'import tensorflow.compat.v1 as tf\n'), ((123149, 123209), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_pos_c1_out"""', 'het_diag_pos_c1'], {}), "('het_diag_pos_c1_out', 
het_diag_pos_c1)\n", (123169, 123209), True, 'import tensorflow.compat.v1 as tf\n'), ((123226, 123286), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_pos_c2_out"""', 'het_diag_pos_c2'], {}), "('het_diag_pos_c2_out', het_diag_pos_c2)\n", (123246, 123286), True, 'import tensorflow.compat.v1 as tf\n'), ((123303, 123365), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_pos_fc1_out"""', 'het_diag_pos_fc1'], {}), "('het_diag_pos_fc1_out', het_diag_pos_fc1)\n", (123323, 123365), True, 'import tensorflow.compat.v1 as tf\n'), ((123382, 123440), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_pos_fc2_out"""', 'het_diag_pos'], {}), "('het_diag_pos_fc2_out', het_diag_pos)\n", (123402, 123440), True, 'import tensorflow.compat.v1 as tf\n'), ((123457, 123514), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_rot_fc_out"""', 'het_diag_rot'], {}), "('het_diag_rot_fc_out', het_diag_rot)\n", (123477, 123514), True, 'import tensorflow.compat.v1 as tf\n'), ((123531, 123589), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_rns_fc1_out"""', 'het_diag_fc1'], {}), "('het_diag_rns_fc1_out', het_diag_fc1)\n", (123551, 123589), True, 'import tensorflow.compat.v1 as tf\n'), ((123606, 123664), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_rns_fc2_out"""', 'het_diag_fc2'], {}), "('het_diag_rns_fc2_out', het_diag_fc2)\n", (123626, 123664), True, 'import tensorflow.compat.v1 as tf\n'), ((123681, 123739), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_rns_fc3_out"""', 'het_diag_rns'], {}), "('het_diag_rns_fc3_out', het_diag_rns)\n", (123701, 123739), True, 'import tensorflow.compat.v1 as tf\n'), ((123756, 123798), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_out"""', 'diag'], {}), "('het_diag_out', diag)\n", (123776, 123798), True, 'import tensorflow.compat.v1 as tf\n'), ((124081, 124108), 'tensorflow.compat.v1.linalg.tensor_diag', 'tf.linalg.tensor_diag', (['diag'], {}), '(diag)\n', (124102, 124108), True, 'import tensorflow.compat.v1 as tf\n'), ((124125, 124172), 'tensorflow.compat.v1.tile', 'tf.tile', (['R[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(R[None, :, :], [self.batch_size, 1, 1])\n', (124132, 124172), True, 'import tensorflow.compat.v1 as tf\n'), ((131917, 131965), 'tensorflow.compat.v1.concat', 'tf.concat', (['[last_state, actions[:, :2]]'], {'axis': '(-1)'}), '([last_state, actions[:, :2]], axis=-1)\n', (131926, 131965), True, 'import tensorflow.compat.v1 as tf\n'), ((132608, 132630), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (132616, 132630), True, 'import tensorflow.compat.v1 as tf\n'), ((132664, 132687), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot_pred'], {}), '(rot_pred)\n', (132677, 132687), True, 'import tensorflow.compat.v1 as tf\n'), ((132902, 132938), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc1_out"""', 'fc1'], {}), "('fc1_out', fc1)\n", (132922, 132938), True, 'import tensorflow.compat.v1 as tf\n'), ((132955, 132991), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc2_out"""', 'fc2'], {}), "('fc2_out', fc2)\n", (132975, 132991), True, 'import tensorflow.compat.v1 as tf\n'), ((133008, 133044), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc3_out"""', 'fc3'], {}), "('fc3_out', 
fc3)\n", (133028, 133044), True, 'import tensorflow.compat.v1 as tf\n'), ((133061, 133103), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""update_out"""', 'update'], {}), "('update_out', update)\n", (133081, 133103), True, 'import tensorflow.compat.v1 as tf\n'), ((135830, 135896), 'differentiable_filters.utils.push_utils.physical_model_derivative', 'utils.physical_model_derivative', (['pos', 'cp', 'n', 'actions', 'fr', 'fr_mu', 's'], {}), '(pos, cp, n, actions, fr, fr_mu, s)\n', (135861, 135896), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((136143, 136157), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['ob'], {}), '(ob)\n', (136153, 136157), True, 'import tensorflow.compat.v1 as tf\n'), ((136179, 136219), 'tensorflow.compat.v1.strings.regex_replace', 'tf.strings.regex_replace', (['ob', "'\\x00'", '""""""'], {}), "(ob, '\\x00', '')\n", (136203, 136219), True, 'import tensorflow.compat.v1 as tf\n'), ((136241, 136281), 'tensorflow.compat.v1.strings.regex_replace', 'tf.strings.regex_replace', (['ob', "'\\x00'", '""""""'], {}), "(ob, '\\x00', '')\n", (136265, 136281), True, 'import tensorflow.compat.v1 as tf\n'), ((136758, 136791), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (136765, 136791), True, 'import tensorflow.compat.v1 as tf\n'), ((140015, 140070), 'differentiable_filters.utils.push_utils.physical_model', 'utils.physical_model', (['pos', 'cp', 'n', 'actions', 'fr', 'fr_mu', 's'], {}), '(pos, cp, n, actions, fr, fr_mu, s)\n', (140035, 140070), True, 'from differentiable_filters.utils import push_utils as utils\n'), ((140305, 140338), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (140312, 140338), True, 'import tensorflow.compat.v1 as tf\n'), ((141345, 141364), 'numpy.ones', 'np.ones', (['self.dim_x'], {}), '(self.dim_x)\n', (141352, 141364), True, 'import numpy as np\n'), ((141411, 141433), 'numpy.square', 'np.square', (['self.q_diag'], {}), '(self.q_diag)\n', (141420, 141433), True, 'import numpy as np\n'), ((141702, 141737), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init_const'], {}), '(init_const)\n', (141725, 141737), True, 'import tensorflow.compat.v1 as tf\n'), ((149100, 149145), 'tensorflow.compat.v1.square', 'tf.square', (['(diag + self.het_diag_lrn_init_bias)'], {}), '(diag + self.het_diag_lrn_init_bias)\n', (149109, 149145), True, 'import tensorflow.compat.v1 as tf\n'), ((149206, 149226), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['diag'], {}), '(diag)\n', (149220, 149226), True, 'import tensorflow.compat.v1 as tf\n'), ((151007, 151052), 'tensorflow.compat.v1.square', 'tf.square', (['(diag + self.het_diag_ana_init_bias)'], {}), '(diag + self.het_diag_ana_init_bias)\n', (151016, 151052), True, 'import tensorflow.compat.v1 as tf\n'), ((151113, 151133), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['diag'], {}), '(diag)\n', (151127, 151133), True, 'import tensorflow.compat.v1 as tf\n'), ((2600, 2613), 'numpy.array', 'np.array', (['cov'], {}), '(cov)\n', (2608, 2613), True, 'import numpy as np\n'), ((2673, 2692), 'numpy.ones', 'np.ones', (['self.dim_x'], {}), '(self.dim_x)\n', (2680, 2692), True, 'import numpy as np\n'), ((2959, 2972), 'numpy.array', 'np.array', (['cov'], {}), '(cov)\n', (2967, 2972), True, 'import numpy as np\n'), ((3032, 3051), 'numpy.ones', 'np.ones', (['self.dim_z'], {}), '(self.dim_z)\n', (3039, 3051), True, 'import numpy 
as np\n'), ((18657, 18681), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['likelihood'], {}), '(likelihood)\n', (18669, 18681), True, 'import tensorflow.compat.v1 as tf\n'), ((18682, 18705), 'tensorflow.compat.v1.math.log', 'tf.math.log', (['self.scale'], {}), '(self.scale)\n', (18693, 18705), True, 'import tensorflow.compat.v1 as tf\n'), ((20436, 20460), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['vis', '[-1, 1]'], {}), '(vis, [-1, 1])\n', (20446, 20460), True, 'import tensorflow.compat.v1 as tf\n'), ((28598, 28622), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['vis', '[-1, 1]'], {}), '(vis, [-1, 1])\n', (28608, 28622), True, 'import tensorflow.compat.v1 as tf\n'), ((28825, 28849), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['vis', '[-1, 1]'], {}), '(vis, [-1, 1])\n', (28835, 28849), True, 'import tensorflow.compat.v1 as tf\n'), ((29589, 29629), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['(dist * scale_dist * cont)'], {}), '(dist * scale_dist * cont)\n', (29603, 29629), True, 'import tensorflow.compat.v1 as tf\n'), ((29652, 29689), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['(mse * scale_mse * cont)'], {}), '(mse * scale_mse * cont)\n', (29665, 29689), True, 'import tensorflow.compat.v1 as tf\n'), ((29734, 29767), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['(dist * scale_dist)'], {}), '(dist * scale_dist)\n', (29748, 29767), True, 'import tensorflow.compat.v1 as tf\n'), ((29792, 29822), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['(mse * scale_mse)'], {}), '(mse * scale_mse)\n', (29805, 29822), True, 'import tensorflow.compat.v1 as tf\n'), ((31501, 31529), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['like_good', '(1e-06)'], {}), '(like_good, 1e-06)\n', (31511, 31529), True, 'import tensorflow.compat.v1 as tf\n'), ((31592, 31625), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['(1.0 - like_bad)', '(1e-06)'], {}), '(1.0 - like_bad, 1e-06)\n', (31602, 31625), True, 'import tensorflow.compat.v1 as tf\n'), ((33509, 33535), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (33523, 33535), True, 'import tensorflow.compat.v1 as tf\n'), ((36153, 36179), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (36167, 36179), True, 'import tensorflow.compat.v1 as tf\n'), ((36204, 36222), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['ce'], {}), '(ce)\n', (36218, 36222), True, 'import tensorflow.compat.v1 as tf\n'), ((36224, 36248), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['vis', '[-1, 1]'], {}), '(vis, [-1, 1])\n', (36234, 36248), True, 'import tensorflow.compat.v1 as tf\n'), ((46867, 46878), 'tensorflow.compat.v1.abs', 'tf.abs', (['ang'], {}), '(ang)\n', (46873, 46878), True, 'import tensorflow.compat.v1 as tf\n'), ((52975, 52997), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (52983, 52997), True, 'import tensorflow.compat.v1 as tf\n'), ((53052, 53073), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""rect1"""'], {}), "(ob, 'rect1')\n", (53060, 53073), True, 'import tensorflow.compat.v1 as tf\n'), ((53131, 53151), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""tri1"""'], {}), "(ob, 'tri1')\n", (53139, 53151), True, 'import tensorflow.compat.v1 as tf\n'), ((53210, 53230), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""tri2"""'], {}), "(ob, 'tri2')\n", (53218, 53230), True, 'import tensorflow.compat.v1 as tf\n'), ((53289, 53309), 'tensorflow.compat.v1.equal', 
'tf.equal', (['ob', '"""tri3"""'], {}), "(ob, 'tri3')\n", (53297, 53309), True, 'import tensorflow.compat.v1 as tf\n'), ((53368, 53387), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""hex"""'], {}), "(ob, 'hex')\n", (53376, 53387), True, 'import tensorflow.compat.v1 as tf\n'), ((53567, 53585), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot'], {}), '(rot)\n', (53580, 53585), True, 'import tensorflow.compat.v1 as tf\n'), ((54285, 54307), 'tensorflow.compat.v1.tile', 'tf.tile', (['ob', '[1, mult]'], {}), '(ob, [1, mult])\n', (54292, 54307), True, 'import tensorflow.compat.v1 as tf\n'), ((54484, 54501), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54496, 54501), True, 'import tensorflow.compat.v1 as tf\n'), ((54599, 54616), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54611, 54616), True, 'import tensorflow.compat.v1 as tf\n'), ((54715, 54732), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54727, 54732), True, 'import tensorflow.compat.v1 as tf\n'), ((54831, 54848), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54843, 54848), True, 'import tensorflow.compat.v1 as tf\n'), ((54946, 54963), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (54958, 54963), True, 'import tensorflow.compat.v1 as tf\n'), ((55149, 55161), 'tensorflow.compat.v1.sign', 'tf.sign', (['rot'], {}), '(rot)\n', (55156, 55161), True, 'import tensorflow.compat.v1 as tf\n'), ((62153, 62185), 'tensorflow.compat.v1.tile', 'tf.tile', (['label[None, :]', '[24, 1]'], {}), '(label[None, :], [24, 1])\n', (62160, 62185), True, 'import tensorflow.compat.v1 as tf\n'), ((62223, 62255), 'tensorflow.compat.v1.tile', 'tf.tile', (['label[None, :]', '[24, 1]'], {}), '(label[None, :], [24, 1])\n', (62230, 62255), True, 'import tensorflow.compat.v1 as tf\n'), ((63659, 63683), 'tensorflow.compat.v1.less', 'tf.less', (['con_norm', '(1e-06)'], {}), '(con_norm, 1e-06)\n', (63666, 63683), True, 'import tensorflow.compat.v1 as tf\n'), ((64877, 64890), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (64885, 64890), True, 'import tensorflow.compat.v1 as tf\n'), ((66359, 66383), 'tensorflow.compat.v1.less', 'tf.less', (['con_norm', '(1e-06)'], {}), '(con_norm, 1e-06)\n', (66366, 66383), True, 'import tensorflow.compat.v1 as tf\n'), ((67950, 67989), 'numpy.arange', 'np.arange', (['(1)', '(seq_len - self.sl - 2)', '(20)'], {}), '(1, seq_len - self.sl - 2, 20)\n', (67959, 67989), True, 'import numpy as np\n'), ((68451, 68464), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[1]'], {}), '([1])\n', (68459, 68464), True, 'import tensorflow.compat.v1 as tf\n'), ((69215, 69255), 'tensorflow.compat.v1.tile', 'tf.tile', (['friction[None, :]', '[self.sl, 1]'], {}), '(friction[None, :], [self.sl, 1])\n', (69222, 69255), True, 'import tensorflow.compat.v1 as tf\n'), ((69288, 69322), 'tensorflow.compat.v1.tile', 'tf.tile', (['mu[None, :]', '[self.sl, 1]'], {}), '(mu[None, :], [self.sl, 1])\n', (69295, 69322), True, 'import tensorflow.compat.v1 as tf\n'), ((69596, 69623), 'tensorflow.compat.v1.norm', 'tf.norm', (['mv[:, :2]'], {'axis': '(-1)'}), '(mv[:, :2], axis=-1)\n', (69603, 69623), True, 'import tensorflow.compat.v1 as tf\n'), ((69732, 69743), 'tensorflow.compat.v1.abs', 'tf.abs', (['mvr'], {}), '(mvr)\n', (69738, 69743), True, 'import tensorflow.compat.v1 as tf\n'), ((71983, 71993), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (71990, 71993), True, 'import numpy as np\n'), 
((72032, 72041), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (72038, 72041), True, 'import numpy as np\n'), ((72513, 72547), 'numpy.corrcoef', 'np.corrcoef', (['r_pred[i:i + 1]', 'cont'], {}), '(r_pred[i:i + 1], cont)\n', (72524, 72547), True, 'import numpy as np\n'), ((72574, 72607), 'numpy.corrcoef', 'np.corrcoef', (['r_pred[i:i + 1]', 'vis'], {}), '(r_pred[i:i + 1], vis)\n', (72585, 72607), True, 'import numpy as np\n'), ((73041, 73075), 'numpy.corrcoef', 'np.corrcoef', (['q_pred[i:i + 1]', 'cont'], {}), '(q_pred[i:i + 1], cont)\n', (73052, 73075), True, 'import numpy as np\n'), ((73597, 73607), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (73604, 73607), True, 'import numpy as np\n'), ((73646, 73655), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (73652, 73655), True, 'import numpy as np\n'), ((90183, 90212), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 0][::-1]'], {}), '(*vecs[:, 0][::-1])\n', (90193, 90212), True, 'import numpy as np\n'), ((90250, 90263), 'numpy.sqrt', 'np.sqrt', (['vals'], {}), '(vals)\n', (90257, 90263), True, 'import numpy as np\n'), ((93669, 93685), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (93677, 93685), True, 'import numpy as np\n'), ((93773, 93787), 'numpy.cos', 'np.cos', (['r_pred'], {}), '(r_pred)\n', (93779, 93787), True, 'import numpy as np\n'), ((93868, 93882), 'numpy.sin', 'np.sin', (['r_pred'], {}), '(r_pred)\n', (93874, 93882), True, 'import numpy as np\n'), ((93915, 93929), 'numpy.cos', 'np.cos', (['r_pred'], {}), '(r_pred)\n', (93921, 93929), True, 'import numpy as np\n'), ((93956, 93972), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (93964, 93972), True, 'import numpy as np\n'), ((94053, 94065), 'numpy.cos', 'np.cos', (['r_la'], {}), '(r_la)\n', (94059, 94065), True, 'import numpy as np\n'), ((94144, 94156), 'numpy.sin', 'np.sin', (['r_la'], {}), '(r_la)\n', (94150, 94156), True, 'import numpy as np\n'), ((94189, 94201), 'numpy.cos', 'np.cos', (['r_la'], {}), '(r_la)\n', (94195, 94201), True, 'import numpy as np\n'), ((102005, 102049), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""rgb"""', 'images[:, :, :, :3]'], {}), "('rgb', images[:, :, :, :3])\n", (102021, 102049), True, 'import tensorflow.compat.v1 as tf\n'), ((102070, 102117), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""depth"""', 'coords[:, :, :, -1:]'], {}), "('depth', coords[:, :, :, -1:])\n", (102086, 102117), True, 'import tensorflow.compat.v1 as tf\n'), ((102138, 102189), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv1_im"""', 'conv1[0:1, :, :, 0:1]'], {}), "('conv1_im', conv1[0:1, :, :, 0:1])\n", (102154, 102189), True, 'import tensorflow.compat.v1 as tf\n'), ((102210, 102250), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv1_out"""', 'conv1'], {}), "('conv1_out', conv1)\n", (102230, 102250), True, 'import tensorflow.compat.v1 as tf\n'), ((102271, 102322), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv2_im"""', 'conv2[0:1, :, :, 0:1]'], {}), "('conv2_im', conv2[0:1, :, :, 0:1])\n", (102287, 102322), True, 'import tensorflow.compat.v1 as tf\n'), ((102343, 102383), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv2_out"""', 'conv2'], {}), "('conv2_out', conv2)\n", (102363, 102383), True, 'import tensorflow.compat.v1 as tf\n'), ((102404, 102455), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv3_im"""', 'conv3[0:1, :, :, 0:1]'], {}), "('conv3_im', conv3[0:1, :, :, 0:1])\n", (102420, 102455), True, 
'import tensorflow.compat.v1 as tf\n'), ((102476, 102516), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv3_out"""', 'conv3'], {}), "('conv3_out', conv3)\n", (102496, 102516), True, 'import tensorflow.compat.v1 as tf\n'), ((102537, 102592), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""deconv1_im"""', 'deconv1[0:1, :, :, 0:1]'], {}), "('deconv1_im', deconv1[0:1, :, :, 0:1])\n", (102553, 102592), True, 'import tensorflow.compat.v1 as tf\n'), ((102613, 102657), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""deconv1_out"""', 'deconv1'], {}), "('deconv1_out', deconv1)\n", (102633, 102657), True, 'import tensorflow.compat.v1 as tf\n'), ((102678, 102733), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""deconv2_im"""', 'deconv2[0:1, :, :, 0:1]'], {}), "('deconv2_im', deconv2[0:1, :, :, 0:1])\n", (102694, 102733), True, 'import tensorflow.compat.v1 as tf\n'), ((102754, 102798), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""deconv2_out"""', 'deconv2'], {}), "('deconv2_out', deconv2)\n", (102774, 102798), True, 'import tensorflow.compat.v1 as tf\n'), ((102819, 102858), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""mask"""', 'mask_out[0:1]'], {}), "('mask', mask_out[0:1])\n", (102835, 102858), True, 'import tensorflow.compat.v1 as tf\n'), ((109031, 109075), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['rot_conv2', '[self.batch_size, -1]'], {}), '(rot_conv2, [self.batch_size, -1])\n', (109041, 109075), True, 'import tensorflow.compat.v1 as tf\n'), ((109266, 109325), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""glimpse_rot"""', 'glimpse_rot[0:1, :, :, :3]'], {}), "('glimpse_rot', glimpse_rot[0:1, :, :, :3])\n", (109282, 109325), True, 'import tensorflow.compat.v1 as tf\n'), ((109383, 109446), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""glimpse_start"""', 'start_glimpse[0:1, :, :, :3]'], {}), "('glimpse_start', start_glimpse[0:1, :, :, :3])\n", (109399, 109446), True, 'import tensorflow.compat.v1 as tf\n'), ((109504, 109559), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv1_im"""', 'rot_conv1[0:1, :, :, 0:1]'], {}), "('conv1_im', rot_conv1[0:1, :, :, 0:1])\n", (109520, 109559), True, 'import tensorflow.compat.v1 as tf\n'), ((109580, 109624), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv1_out"""', 'rot_conv1'], {}), "('conv1_out', rot_conv1)\n", (109600, 109624), True, 'import tensorflow.compat.v1 as tf\n'), ((109645, 109700), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv2_im"""', 'rot_conv2[0:1, :, :, 0:1]'], {}), "('conv2_im', rot_conv2[0:1, :, :, 0:1])\n", (109661, 109700), True, 'import tensorflow.compat.v1 as tf\n'), ((109721, 109765), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv2_out"""', 'rot_conv2'], {}), "('conv2_out', rot_conv2)\n", (109741, 109765), True, 'import tensorflow.compat.v1 as tf\n'), ((109786, 109826), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc1_out"""', 'rot_fc1'], {}), "('fc1_out', rot_fc1)\n", (109806, 109826), True, 'import tensorflow.compat.v1 as tf\n'), ((109847, 109887), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""fc2_out"""', 'rot_fc2'], {}), "('fc2_out', rot_fc2)\n", (109867, 109887), True, 'import tensorflow.compat.v1 as tf\n'), ((109908, 109944), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""rot_out"""', 'rot'], 
{}), "('rot_out', rot)\n", (109928, 109944), True, 'import tensorflow.compat.v1 as tf\n'), ((110051, 110085), 'tensorflow.compat.v1.slice', 'tf.slice', (['tip_pix', '[0, 0]', '[-1, 1]'], {}), '(tip_pix, [0, 0], [-1, 1])\n', (110059, 110085), True, 'import tensorflow.compat.v1 as tf\n'), ((110118, 110152), 'tensorflow.compat.v1.slice', 'tf.slice', (['tip_pix', '[0, 1]', '[-1, 1]'], {}), '(tip_pix, [0, 1], [-1, 1])\n', (110126, 110152), True, 'import tensorflow.compat.v1 as tf\n'), ((112949, 112972), 'tensorflow.compat.v1.stop_gradient', 'tf.stop_gradient', (['r_pix'], {}), '(r_pix)\n', (112965, 112972), True, 'import tensorflow.compat.v1 as tf\n'), ((113196, 113215), 'tensorflow.compat.v1.stop_gradient', 'tf.stop_gradient', (['r'], {}), '(r)\n', (113212, 113215), True, 'import tensorflow.compat.v1 as tf\n'), ((113583, 113637), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""glimpse_z"""', 'glimpse[0:1, :, :, -1:]'], {}), "('glimpse_z', glimpse[0:1, :, :, -1:])\n", (113599, 113637), True, 'import tensorflow.compat.v1 as tf\n'), ((113658, 113706), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""glimpse_rgb"""', 'im_glimpse[0:1]'], {}), "('glimpse_rgb', im_glimpse[0:1])\n", (113674, 113706), True, 'import tensorflow.compat.v1 as tf\n'), ((113727, 113780), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv1_im"""', 'g_conv1[0:1, :, :, 0:1]'], {}), "('conv1_im', g_conv1[0:1, :, :, 0:1])\n", (113743, 113780), True, 'import tensorflow.compat.v1 as tf\n'), ((113801, 113843), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv1_out"""', 'g_conv1'], {}), "('conv1_out', g_conv1)\n", (113821, 113843), True, 'import tensorflow.compat.v1 as tf\n'), ((113864, 113917), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv2_im"""', 'g_conv2[0:1, :, :, 0:1]'], {}), "('conv2_im', g_conv2[0:1, :, :, 0:1])\n", (113880, 113917), True, 'import tensorflow.compat.v1 as tf\n'), ((113938, 113980), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""conv2_out"""', 'g_conv2'], {}), "('conv2_out', g_conv2)\n", (113958, 113980), True, 'import tensorflow.compat.v1 as tf\n'), ((114001, 114054), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""conv3_im"""', 'g_conv3[0:1, :, :, 0:1]'], {}), "('conv3_im', g_conv3[0:1, :, :, 0:1])\n", (114017, 114054), True, 'import tensorflow.compat.v1 as tf\n'), ((114075, 114115), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""g_fc1_out"""', 'g_fc1'], {}), "('g_fc1_out', g_fc1)\n", (114095, 114115), True, 'import tensorflow.compat.v1 as tf\n'), ((114136, 114178), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""g_rfc2_out"""', 'g_rfc2'], {}), "('g_rfc2_out', g_rfc2)\n", (114156, 114178), True, 'import tensorflow.compat.v1 as tf\n'), ((114199, 114239), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""r_pix_out"""', 'r_pix'], {}), "('r_pix_out', r_pix)\n", (114219, 114239), True, 'import tensorflow.compat.v1 as tf\n'), ((114260, 114302), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""g_nfc2_out"""', 'g_nfc2'], {}), "('g_nfc2_out', g_nfc2)\n", (114280, 114302), True, 'import tensorflow.compat.v1 as tf\n'), ((114323, 114363), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""n_pix_out"""', 'n_pix'], {}), "('n_pix_out', n_pix)\n", (114343, 114363), True, 'import tensorflow.compat.v1 as tf\n'), ((114384, 114432), 'tensorflow.compat.v1.summary.histogram', 
'tf.summary.histogram', (['"""n_end_pix_out"""', 'n_end_pix'], {}), "('n_end_pix_out', n_end_pix)\n", (114404, 114432), True, 'import tensorflow.compat.v1 as tf\n'), ((114657, 114675), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['n_norm'], {}), '(n_norm)\n', (114667, 114675), True, 'import tensorflow.compat.v1 as tf\n'), ((114780, 114798), 'tensorflow.compat.v1.tile', 'tf.tile', (['s', '[1, 2]'], {}), '(s, [1, 2])\n', (114787, 114798), True, 'import tensorflow.compat.v1 as tf\n'), ((116183, 116205), 'numpy.square', 'np.square', (['self.r_diag'], {}), '(self.r_diag)\n', (116192, 116205), True, 'import numpy as np\n'), ((118726, 118756), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (118750, 118756), True, 'import tensorflow.compat.v1 as tf\n'), ((118802, 118831), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (118825, 118831), True, 'import tensorflow.compat.v1 as tf\n'), ((124031, 124046), 'tensorflow.compat.v1.square', 'tf.square', (['diag'], {}), '(diag)\n', (124040, 124046), True, 'import tensorflow.compat.v1 as tf\n'), ((124372, 124422), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['het_full_pos_c2', '[self.batch_size, -1]'], {}), '(het_full_pos_c2, [self.batch_size, -1])\n', (124382, 124422), True, 'import tensorflow.compat.v1 as tf\n'), ((124798, 124859), 'tensorflow.compat.v1.concat', 'tf.concat', (['[het_full_pos, het_full_rot, het_full_g2]'], {'axis': '(-1)'}), '([het_full_pos, het_full_rot, het_full_g2], axis=-1)\n', (124807, 124859), True, 'import tensorflow.compat.v1 as tf\n'), ((125743, 125770), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (125765, 125770), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((125788, 125827), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.het_full_init_bias'], {}), '(self.het_full_init_bias)\n', (125802, 125827), True, 'import tensorflow.compat.v1 as tf\n'), ((126004, 126031), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (126026, 126031), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((126049, 126090), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.const_full_init_bias'], {}), '(self.const_full_init_bias)\n', (126063, 126090), True, 'import tensorflow.compat.v1 as tf\n'), ((126219, 126266), 'tensorflow.compat.v1.tile', 'tf.tile', (['R[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(R[None, :, :], [self.batch_size, 1, 1])\n', (126226, 126266), True, 'import tensorflow.compat.v1 as tf\n'), ((129744, 129806), 'numpy.array', 'np.array', (['[[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]], dtype=np.float32)\n', (129752, 129806), True, 'import numpy as np\n'), ((129885, 129947), 'numpy.array', 'np.array', (['[[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]]], dtype=np.float32)\n', (129893, 129947), True, 'import numpy as np\n'), ((130026, 130088), 'numpy.array', 'np.array', (['[[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]]], dtype=np.float32)\n', (130034, 130088), True, 'import numpy as np\n'), ((130167, 130229), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 
0, 0, 0, 1, 0, 0, 0, 0]]], dtype=np.float32)\n', (130175, 130229), True, 'import numpy as np\n'), ((130308, 130370), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]], dtype=np.float32)\n', (130316, 130370), True, 'import numpy as np\n'), ((130449, 130511), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]]], dtype=np.float32)\n', (130457, 130511), True, 'import numpy as np\n'), ((130590, 130652), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]], dtype=np.float32)\n', (130598, 130652), True, 'import numpy as np\n'), ((130731, 130793), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]]], dtype=np.float32)\n', (130739, 130793), True, 'import numpy as np\n'), ((136318, 136340), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (136326, 136340), True, 'import tensorflow.compat.v1 as tf\n'), ((136378, 136401), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot_pred'], {}), '(rot_pred)\n', (136391, 136401), True, 'import tensorflow.compat.v1 as tf\n'), ((136444, 136466), 'tensorflow.compat.v1.equal', 'tf.equal', (['ob', '"""ellip1"""'], {}), "(ob, 'ellip1')\n", (136452, 136466), True, 'import tensorflow.compat.v1 as tf\n'), ((136499, 136517), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['dom'], {}), '(dom)\n', (136512, 136517), True, 'import tensorflow.compat.v1 as tf\n'), ((136998, 137088), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pos_pred, ori_pred, fr_pred, fr_mu_pred, cp_pred, n_pred, s_pred]'], {'axis': '(1)'}), '([pos_pred, ori_pred, fr_pred, fr_mu_pred, cp_pred, n_pred, s_pred\n ], axis=1)\n', (137007, 137088), True, 'import tensorflow.compat.v1 as tf\n'), ((140545, 140635), 'tensorflow.compat.v1.concat', 'tf.concat', (['[pos_pred, ori_pred, fr_pred, fr_mu_pred, cp_pred, n_pred, s_pred]'], {'axis': '(1)'}), '([pos_pred, ori_pred, fr_pred, fr_mu_pred, cp_pred, n_pred, s_pred\n ], axis=1)\n', (140554, 140635), True, 'import tensorflow.compat.v1 as tf\n'), ((142708, 142738), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (142732, 142738), True, 'import tensorflow.compat.v1 as tf\n'), ((142784, 142813), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (142807, 142813), True, 'import tensorflow.compat.v1 as tf\n'), ((148886, 148935), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_lrn_fc1_out"""', 'fc1'], {}), "('het_diag_lrn_fc1_out', fc1)\n", (148906, 148935), True, 'import tensorflow.compat.v1 as tf\n'), ((148956, 149005), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_lrn_fc2_out"""', 'fc2'], {}), "('het_diag_lrn_fc2_out', fc2)\n", (148976, 149005), True, 'import tensorflow.compat.v1 as tf\n'), ((149026, 149076), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_lrn_fc3_out"""', 'diag'], {}), "('het_diag_lrn_fc3_out', diag)\n", (149046, 149076), True, 'import tensorflow.compat.v1 as tf\n'), ((149395, 149422), 'tensorflow.compat.v1.linalg.tensor_diag', 'tf.linalg.tensor_diag', (['diag'], {}), '(diag)\n', (149416, 149422), True, 'import tensorflow.compat.v1 as tf\n'), ((149443, 149490), 
'tensorflow.compat.v1.tile', 'tf.tile', (['Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(Q[None, :, :], [self.batch_size, 1, 1])\n', (149450, 149490), True, 'import tensorflow.compat.v1 as tf\n'), ((150793, 150842), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_ana_fc1_out"""', 'fc1'], {}), "('het_diag_ana_fc1_out', fc1)\n", (150813, 150842), True, 'import tensorflow.compat.v1 as tf\n'), ((150863, 150912), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_ana_fc2_out"""', 'fc2'], {}), "('het_diag_ana_fc2_out', fc2)\n", (150883, 150912), True, 'import tensorflow.compat.v1 as tf\n'), ((150933, 150983), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_diag_ana_fc3_out"""', 'diag'], {}), "('het_diag_ana_fc3_out', diag)\n", (150953, 150983), True, 'import tensorflow.compat.v1 as tf\n'), ((151302, 151329), 'tensorflow.compat.v1.linalg.tensor_diag', 'tf.linalg.tensor_diag', (['diag'], {}), '(diag)\n', (151323, 151329), True, 'import tensorflow.compat.v1 as tf\n'), ((151350, 151397), 'tensorflow.compat.v1.tile', 'tf.tile', (['Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(Q[None, :, :], [self.batch_size, 1, 1])\n', (151357, 151397), True, 'import tensorflow.compat.v1 as tf\n'), ((40167, 40197), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_ana'], {}), '(likelihood_ana)\n', (40181, 40197), True, 'import tensorflow.compat.v1 as tf\n'), ((40229, 40255), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (40243, 40255), True, 'import tensorflow.compat.v1 as tf\n'), ((40287, 40317), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_ana'], {}), '(likelihood_ana)\n', (40301, 40317), True, 'import tensorflow.compat.v1 as tf\n'), ((40356, 40375), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mse'], {}), '(mse)\n', (40370, 40375), True, 'import tensorflow.compat.v1 as tf\n'), ((40565, 40595), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood_ana'], {}), '(likelihood_ana)\n', (40579, 40595), True, 'import tensorflow.compat.v1 as tf\n'), ((40632, 40651), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mse'], {}), '(mse)\n', (40646, 40651), True, 'import tensorflow.compat.v1 as tf\n'), ((46800, 46811), 'tensorflow.compat.v1.abs', 'tf.abs', (['ang'], {}), '(ang)\n', (46806, 46811), True, 'import tensorflow.compat.v1 as tf\n'), ((46854, 46865), 'tensorflow.compat.v1.abs', 'tf.abs', (['ang'], {}), '(ang)\n', (46860, 46865), True, 'import tensorflow.compat.v1 as tf\n'), ((47337, 47364), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['(1 - pred)', '(1e-07)'], {}), '(1 - pred, 1e-07)\n', (47347, 47364), True, 'import tensorflow.compat.v1 as tf\n'), ((47402, 47425), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['pred', '(1e-07)'], {}), '(pred, 1e-07)\n', (47412, 47425), True, 'import tensorflow.compat.v1 as tf\n'), ((47466, 47493), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['(1 - pred)', '(1e-07)'], {}), '(1 - pred, 1e-07)\n', (47476, 47493), True, 'import tensorflow.compat.v1 as tf\n'), ((47531, 47554), 'tensorflow.compat.v1.maximum', 'tf.maximum', (['pred', '(1e-07)'], {}), '(pred, 1e-07)\n', (47541, 47554), True, 'import tensorflow.compat.v1 as tf\n'), ((53007, 53025), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['rot'], {}), '(rot)\n', (53020, 53025), True, 'import tensorflow.compat.v1 as tf\n'), ((53653, 53665), 'tensorflow.compat.v1.sign', 'tf.sign', (['rot'], {}), '(rot)\n', 
(53660, 53665), True, 'import tensorflow.compat.v1 as tf\n'), ((55127, 55138), 'tensorflow.compat.v1.abs', 'tf.abs', (['rot'], {}), '(rot)\n', (55133, 55138), True, 'import tensorflow.compat.v1 as tf\n'), ((63895, 63925), 'tensorflow.compat.v1.ones', 'tf.ones', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (63902, 63925), True, 'import tensorflow.compat.v1 as tf\n'), ((66595, 66625), 'tensorflow.compat.v1.ones', 'tf.ones', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (66602, 66625), True, 'import tensorflow.compat.v1 as tf\n'), ((87782, 87797), 'numpy.asscalar', 'np.asscalar', (['ob'], {}), '(ob)\n', (87793, 87797), True, 'import numpy as np\n'), ((93103, 93233), 'matplotlib.patches.Ellipse', 'Ellipse', (['(pos_pred[i, 0], pos_pred[i, 1])', '(2 * a * 1000)', '(2 * b * 1000)', 'seq_pred[i, 2]'], {'alpha': '(0.1)', 'facecolor': '"""r"""', 'edgecolor': '"""r"""'}), "((pos_pred[i, 0], pos_pred[i, 1]), 2 * a * 1000, 2 * b * 1000,\n seq_pred[i, 2], alpha=0.1, facecolor='r', edgecolor='r')\n", (93110, 93233), False, 'from matplotlib.patches import Ellipse\n'), ((93387, 93502), 'matplotlib.patches.Ellipse', 'Ellipse', (['(seq[i, 0], seq[i, 1])', '(2 * a * 1000)', '(2 * b * 1000)', 'seq[i, 2]'], {'alpha': '(0.1)', 'facecolor': '"""g"""', 'edgecolor': '"""g"""'}), "((seq[i, 0], seq[i, 1]), 2 * a * 1000, 2 * b * 1000, seq[i, 2],\n alpha=0.1, facecolor='g', edgecolor='g')\n", (93394, 93502), False, 'from matplotlib.patches import Ellipse\n'), ((93821, 93835), 'numpy.sin', 'np.sin', (['r_pred'], {}), '(r_pred)\n', (93827, 93835), True, 'import numpy as np\n'), ((94099, 94111), 'numpy.sin', 'np.sin', (['r_la'], {}), '(r_la)\n', (94105, 94111), True, 'import numpy as np\n'), ((94672, 94730), 'matplotlib.patches.Polygon', 'Polygon', (['points_p'], {'alpha': '(0.1)', 'facecolor': '"""r"""', 'edgecolor': '"""r"""'}), "(points_p, alpha=0.1, facecolor='r', edgecolor='r')\n", (94679, 94730), False, 'from matplotlib.patches import Polygon\n'), ((94810, 94868), 'matplotlib.patches.Polygon', 'Polygon', (['points_l'], {'alpha': '(0.1)', 'facecolor': '"""g"""', 'edgecolor': '"""g"""'}), "(points_l, alpha=0.1, facecolor='g', edgecolor='g')\n", (94817, 94868), False, 'from matplotlib.patches import Polygon\n'), ((101801, 101856), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""n1_mean"""', 'self.im_n1.moving_mean'], {}), "('n1_mean', self.im_n1.moving_mean)\n", (101821, 101856), True, 'import tensorflow.compat.v1 as tf\n'), ((101881, 101939), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""n1_var"""', 'self.im_n1.moving_variance'], {}), "('n1_var', self.im_n1.moving_variance)\n", (101901, 101939), True, 'import tensorflow.compat.v1 as tf\n'), ((119179, 119209), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (119203, 119209), True, 'import tensorflow.compat.v1 as tf\n'), ((119255, 119284), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (119278, 119284), True, 'import tensorflow.compat.v1 as tf\n'), ((125047, 125118), 'tensorflow.compat.v1.summary.image', 'tf.summary.image', (['"""het_full_pos_c1_im"""', 'het_full_pos_c1[0:1, :, :, 0:1]'], {}), "('het_full_pos_c1_im', het_full_pos_c1[0:1, :, :, 0:1])\n", (125063, 125118), True, 'import tensorflow.compat.v1 as tf\n'), ((125168, 125228), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_pos_c1_out"""', 'het_full_pos_c1'], {}), 
"('het_full_pos_c1_out', het_full_pos_c1)\n", (125188, 125228), True, 'import tensorflow.compat.v1 as tf\n'), ((125245, 125305), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_pos_c2_out"""', 'het_full_pos_c2'], {}), "('het_full_pos_c2_out', het_full_pos_c2)\n", (125265, 125305), True, 'import tensorflow.compat.v1 as tf\n'), ((125322, 125379), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_pos_fc_out"""', 'het_full_pos'], {}), "('het_full_pos_fc_out', het_full_pos)\n", (125342, 125379), True, 'import tensorflow.compat.v1 as tf\n'), ((125396, 125453), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_rot_fc_out"""', 'het_full_rot'], {}), "('het_full_rot_fc_out', het_full_rot)\n", (125416, 125453), True, 'import tensorflow.compat.v1 as tf\n'), ((125470, 125525), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_g_fc1_out"""', 'het_full_g1'], {}), "('het_full_g_fc1_out', het_full_g1)\n", (125490, 125525), True, 'import tensorflow.compat.v1 as tf\n'), ((125542, 125597), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_g_fc2_out"""', 'het_full_g2'], {}), "('het_full_g_fc2_out', het_full_g2)\n", (125562, 125597), True, 'import tensorflow.compat.v1 as tf\n'), ((125614, 125668), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_fc1_out"""', 'het_full_fc1'], {}), "('het_full_fc1_out', het_full_fc1)\n", (125634, 125668), True, 'import tensorflow.compat.v1 as tf\n'), ((125685, 125725), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_tri_out"""', 'tri'], {}), "('het_tri_out', tri)\n", (125705, 125725), True, 'import tensorflow.compat.v1 as tf\n'), ((125857, 125886), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['R'], {}), '(R)\n', (125883, 125886), True, 'import tensorflow.compat.v1 as tf\n'), ((125908, 125939), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (125922, 125939), True, 'import tensorflow.compat.v1 as tf\n'), ((126120, 126149), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['R'], {}), '(R)\n', (126146, 126149), True, 'import tensorflow.compat.v1 as tf\n'), ((126171, 126202), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (126185, 126202), True, 'import tensorflow.compat.v1 as tf\n'), ((137371, 137401), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 7]'], {}), '([self.batch_size, 7])\n', (137379, 137401), True, 'import tensorflow.compat.v1 as tf\n'), ((137436, 137469), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (137443, 137469), True, 'import tensorflow.compat.v1 as tf\n'), ((137504, 137534), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 2]'], {}), '([self.batch_size, 2])\n', (137512, 137534), True, 'import tensorflow.compat.v1 as tf\n'), ((137611, 137641), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 8]'], {}), '([self.batch_size, 8])\n', (137619, 137641), True, 'import tensorflow.compat.v1 as tf\n'), ((137676, 137709), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (137683, 137709), True, 'import tensorflow.compat.v1 as tf\n'), ((137744, 137774), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 1]'], 
{}), '([self.batch_size, 1])\n', (137752, 137774), True, 'import tensorflow.compat.v1 as tf\n'), ((137850, 137880), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['[self.batch_size, 9]'], {}), '([self.batch_size, 9])\n', (137858, 137880), True, 'import tensorflow.compat.v1 as tf\n'), ((137914, 137947), 'tensorflow.compat.v1.cast', 'tf.cast', (['keep_contact', 'tf.float32'], {}), '(keep_contact, tf.float32)\n', (137921, 137947), True, 'import tensorflow.compat.v1 as tf\n'), ((139169, 139205), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['dnx', '[-1, 1, self.dim_x]'], {}), '(dnx, [-1, 1, self.dim_x])\n', (139179, 139205), True, 'import tensorflow.compat.v1 as tf\n'), ((139228, 139264), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['dny', '[-1, 1, self.dim_x]'], {}), '(dny, [-1, 1, self.dim_x])\n', (139238, 139264), True, 'import tensorflow.compat.v1 as tf\n'), ((139287, 139322), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['ds', '[-1, 1, self.dim_x]'], {}), '(ds, [-1, 1, self.dim_x])\n', (139297, 139322), True, 'import tensorflow.compat.v1 as tf\n'), ((143154, 143184), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (143178, 143184), True, 'import tensorflow.compat.v1 as tf\n'), ((143230, 143259), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (143253, 143259), True, 'import tensorflow.compat.v1 as tf\n'), ((149341, 149356), 'tensorflow.compat.v1.square', 'tf.square', (['diag'], {}), '(diag)\n', (149350, 149356), True, 'import tensorflow.compat.v1 as tf\n'), ((149953, 149980), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (149975, 149980), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((150002, 150045), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.het_full_lrn_init_bias'], {}), '(self.het_full_lrn_init_bias)\n', (150016, 150045), True, 'import tensorflow.compat.v1 as tf\n'), ((150246, 150273), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (150268, 150273), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((150295, 150340), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.const_full_lrn_init_bias'], {}), '(self.const_full_lrn_init_bias)\n', (150309, 150340), True, 'import tensorflow.compat.v1 as tf\n'), ((150481, 150528), 'tensorflow.compat.v1.tile', 'tf.tile', (['Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(Q[None, :, :], [self.batch_size, 1, 1])\n', (150488, 150528), True, 'import tensorflow.compat.v1 as tf\n'), ((151248, 151263), 'tensorflow.compat.v1.square', 'tf.square', (['diag'], {}), '(diag)\n', (151257, 151263), True, 'import tensorflow.compat.v1 as tf\n'), ((151860, 151887), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (151882, 151887), True, 'from differentiable_filters.utils import tensorflow_compatability as compat\n'), ((151909, 151952), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.het_full_ana_init_bias'], {}), '(self.het_full_ana_init_bias)\n', (151923, 151952), True, 'import tensorflow.compat.v1 as tf\n'), ((152153, 152180), 'differentiable_filters.utils.tensorflow_compatability.fill_triangular', 'compat.fill_triangular', (['tri'], {}), '(tri)\n', (152175, 152180), True, 'from 
differentiable_filters.utils import tensorflow_compatability as compat\n'), ((152202, 152247), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.const_full_ana_init_bias'], {}), '(self.const_full_ana_init_bias)\n', (152216, 152247), True, 'import tensorflow.compat.v1 as tf\n'), ((152388, 152435), 'tensorflow.compat.v1.tile', 'tf.tile', (['Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(Q[None, :, :], [self.batch_size, 1, 1])\n', (152395, 152435), True, 'import tensorflow.compat.v1 as tf\n'), ((22938, 22964), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (22952, 22964), True, 'import tensorflow.compat.v1 as tf\n'), ((33464, 33492), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['contact_loss'], {}), '(contact_loss)\n', (33478, 33492), True, 'import tensorflow.compat.v1 as tf\n'), ((40044, 40063), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['mse'], {}), '(mse)\n', (40058, 40063), True, 'import tensorflow.compat.v1 as tf\n'), ((40102, 40128), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (40116, 40128), True, 'import tensorflow.compat.v1 as tf\n'), ((40501, 40527), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (40515, 40527), True, 'import tensorflow.compat.v1 as tf\n'), ((53083, 53100), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53095, 53100), True, 'import tensorflow.compat.v1 as tf\n'), ((53161, 53178), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53173, 53178), True, 'import tensorflow.compat.v1 as tf\n'), ((53240, 53257), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53252, 53257), True, 'import tensorflow.compat.v1 as tf\n'), ((53319, 53336), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53331, 53336), True, 'import tensorflow.compat.v1 as tf\n'), ((53397, 53414), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['rot'], {}), '(rot)\n', (53409, 53414), True, 'import tensorflow.compat.v1 as tf\n'), ((53631, 53642), 'tensorflow.compat.v1.abs', 'tf.abs', (['rot'], {}), '(rot)\n', (53637, 53642), True, 'import tensorflow.compat.v1 as tf\n'), ((74185, 74222), 'numpy.corrcoef', 'np.corrcoef', (['r_het_diag[i:i + 1]', 'vis'], {}), '(r_het_diag[i:i + 1], vis)\n', (74196, 74222), True, 'import numpy as np\n'), ((74263, 74299), 'numpy.corrcoef', 'np.corrcoef', (['r_het_tri[i:i + 1]', 'vis'], {}), '(r_het_tri[i:i + 1], vis)\n', (74274, 74299), True, 'import numpy as np\n'), ((121201, 121231), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (121225, 121231), True, 'import tensorflow.compat.v1 as tf\n'), ((121277, 121306), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (121300, 121306), True, 'import tensorflow.compat.v1 as tf\n'), ((121522, 121552), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (121546, 121552), True, 'import tensorflow.compat.v1 as tf\n'), ((121598, 121626), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (121621, 121626), True, 'import tensorflow.compat.v1 as tf\n'), ((121942, 121972), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (121966, 121972), True, 'import tensorflow.compat.v1 as tf\n'), 
((122018, 122047), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (122041, 122047), True, 'import tensorflow.compat.v1 as tf\n'), ((138048, 138112), 'numpy.array', 'np.array', (['[[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138056, 138112), True, 'import numpy as np\n'), ((138174, 138238), 'numpy.array', 'np.array', (['[[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0, 1, 0, 0, 0, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138182, 138238), True, 'import numpy as np\n'), ((138301, 138365), 'numpy.array', 'np.array', (['[[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138309, 138365), True, 'import numpy as np\n'), ((138431, 138495), 'numpy.array', 'np.array', (['[[[0, 0, 0, 1, 0, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 1, 0, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138439, 138495), True, 'import numpy as np\n'), ((138618, 138682), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 1, 0, 0, 0, 0, 0.0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 1, 0, 0, 0, 0, 0.0]]], dtype=np.float32)\n', (138626, 138682), True, 'import numpy as np\n'), ((138805, 138867), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 1, 0, 0, 0, 0]]], dtype=np.float32)\n', (138813, 138867), True, 'import numpy as np\n'), ((138991, 139053), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]]'], {'dtype': 'np.float32'}), '([[[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]]], dtype=np.float32)\n', (138999, 139053), True, 'import numpy as np\n'), ((144110, 144140), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (144134, 144140), True, 'import tensorflow.compat.v1 as tf\n'), ((144186, 144215), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (144209, 144215), True, 'import tensorflow.compat.v1 as tf\n'), ((149746, 149795), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_lrn_fc1_out"""', 'fc1'], {}), "('het_full_lrn_fc1_out', fc1)\n", (149766, 149795), True, 'import tensorflow.compat.v1 as tf\n'), ((149816, 149865), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_lrn_fc2_out"""', 'fc2'], {}), "('het_full_lrn_fc2_out', fc2)\n", (149836, 149865), True, 'import tensorflow.compat.v1 as tf\n'), ((149886, 149931), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_lrn_out"""', 'tri'], {}), "('het_full_lrn_out', tri)\n", (149906, 149931), True, 'import tensorflow.compat.v1 as tf\n'), ((150079, 150108), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['Q'], {}), '(Q)\n', (150105, 150108), True, 'import tensorflow.compat.v1 as tf\n'), ((150134, 150165), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (150148, 150165), True, 'import tensorflow.compat.v1 as tf\n'), ((150374, 150403), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['Q'], {}), '(Q)\n', (150400, 150403), True, 'import tensorflow.compat.v1 as tf\n'), ((150429, 150460), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (150443, 150460), True, 'import tensorflow.compat.v1 as tf\n'), 
((151653, 151702), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_ana_fc1_out"""', 'fc1'], {}), "('het_full_ana_fc1_out', fc1)\n", (151673, 151702), True, 'import tensorflow.compat.v1 as tf\n'), ((151723, 151772), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_ana_fc2_out"""', 'fc2'], {}), "('het_full_ana_fc2_out', fc2)\n", (151743, 151772), True, 'import tensorflow.compat.v1 as tf\n'), ((151793, 151838), 'tensorflow.compat.v1.summary.histogram', 'tf.summary.histogram', (['"""het_full_ana_out"""', 'tri'], {}), "('het_full_ana_out', tri)\n", (151813, 151838), True, 'import tensorflow.compat.v1 as tf\n'), ((151986, 152015), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['Q'], {}), '(Q)\n', (152012, 152015), True, 'import tensorflow.compat.v1 as tf\n'), ((152041, 152072), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (152055, 152072), True, 'import tensorflow.compat.v1 as tf\n'), ((152281, 152310), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['Q'], {}), '(Q)\n', (152307, 152310), True, 'import tensorflow.compat.v1 as tf\n'), ((152336, 152367), 'tensorflow.compat.v1.linalg.diag', 'tf.linalg.diag', (['self.bias_fixed'], {}), '(self.bias_fixed)\n', (152350, 152367), True, 'import tensorflow.compat.v1 as tf\n'), ((23079, 23105), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (23093, 23105), True, 'import tensorflow.compat.v1 as tf\n'), ((33393, 33407), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['mses'], {}), '(mses)\n', (33401, 33407), True, 'import tensorflow.compat.v1 as tf\n'), ((33415, 33441), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (33429, 33441), True, 'import tensorflow.compat.v1 as tf\n'), ((144490, 144520), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (144514, 144520), True, 'import tensorflow.compat.v1 as tf\n'), ((144566, 144594), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (144589, 144594), True, 'import tensorflow.compat.v1 as tf\n'), ((144886, 144916), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (144910, 144916), True, 'import tensorflow.compat.v1 as tf\n'), ((144962, 144991), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (144985, 144991), True, 'import tensorflow.compat.v1 as tf\n'), ((23220, 23246), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (23234, 23246), True, 'import tensorflow.compat.v1 as tf\n'), ((23327, 23361), 'tensorflow.compat.v1.less', 'tf.less', (['step', '(self.epoch_size * 3)'], {}), '(step, self.epoch_size * 3)\n', (23334, 23361), True, 'import tensorflow.compat.v1 as tf\n'), ((94451, 94462), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (94459, 94462), True, 'import numpy as np\n'), ((145945, 145975), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (145969, 145975), True, 'import tensorflow.compat.v1 as tf\n'), ((146021, 146050), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (146044, 146050), True, 'import tensorflow.compat.v1 as tf\n'), ((23461, 23487), 
'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (23475, 23487), True, 'import tensorflow.compat.v1 as tf\n'), ((32708, 32734), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (32722, 32734), True, 'import tensorflow.compat.v1 as tf\n'), ((33191, 33219), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['contact_loss'], {}), '(contact_loss)\n', (33205, 33219), True, 'import tensorflow.compat.v1 as tf\n'), ((33258, 33284), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (33272, 33284), True, 'import tensorflow.compat.v1 as tf\n'), ((89363, 89376), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (89369, 89376), True, 'import numpy as np\n'), ((89414, 89427), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (89420, 89427), True, 'import numpy as np\n'), ((94521, 94536), 'numpy.dot', 'np.dot', (['r_p', 'pt'], {}), '(r_p, pt)\n', (94527, 94536), True, 'import numpy as np\n'), ((94598, 94613), 'numpy.dot', 'np.dot', (['r_l', 'pt'], {}), '(r_l, pt)\n', (94604, 94613), True, 'import numpy as np\n'), ((146395, 146425), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (146419, 146425), True, 'import tensorflow.compat.v1 as tf\n'), ((146471, 146500), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (146494, 146500), True, 'import tensorflow.compat.v1 as tf\n'), ((32641, 32669), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['contact_loss'], {}), '(contact_loss)\n', (32655, 32669), True, 'import tensorflow.compat.v1 as tf\n'), ((33127, 33153), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (33141, 33153), True, 'import tensorflow.compat.v1 as tf\n'), ((147416, 147446), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (147440, 147446), True, 'import tensorflow.compat.v1 as tf\n'), ((147492, 147521), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (147515, 147521), True, 'import tensorflow.compat.v1 as tf\n'), ((32577, 32603), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['normal_ang'], {}), '(normal_ang)\n', (32591, 32603), True, 'import tensorflow.compat.v1 as tf\n'), ((33011, 33025), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['mses'], {}), '(mses)\n', (33019, 33025), True, 'import tensorflow.compat.v1 as tf\n'), ((147800, 147830), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (147824, 147830), True, 'import tensorflow.compat.v1 as tf\n'), ((147876, 147904), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (147899, 147904), True, 'import tensorflow.compat.v1 as tf\n'), ((148196, 148226), 'tensorflow.compat.v1.keras.regularizers.l2', 'tf.keras.regularizers.l2', ([], {'l': 'wd'}), '(l=wd)\n', (148220, 148226), True, 'import tensorflow.compat.v1 as tf\n'), ((148272, 148301), 'tensorflow.compat.v1.constant_initializer', 'tf.constant_initializer', (['init'], {}), '(init)\n', (148295, 148301), True, 'import tensorflow.compat.v1 as tf\n'), ((32461, 32475), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['mses'], {}), '(mses)\n', (32469, 32475), True, 'import tensorflow.compat.v1 as tf\n')]
|
import numpy as np
from scipy.optimize import leastsq
import matplotlib
matplotlib.use('TkAgg')
import pylab as plt
from process_data import *
guess_mean = np.mean(y1)/2
guess_std = 3*np.std(y1)/(2**0.5)
guess_stretch = 0.3
# model: amplitude * sin(x / stretch) + mean offset
data_first_guess = guess_std*np.sin(guess_stretch**-1 * x1) + guess_mean
optimize_func = lambda x: x[0]*np.sin(x[1]**-1 * x1) + x[2] - y1
est_std, est_stretch, est_mean = leastsq(optimize_func, [guess_std, guess_stretch, guess_mean])[0]
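# Optional diagnostic (an addition, not part of the original script): report
# the fitted parameters and the RMS residual of the fit.
residuals = optimize_func([est_std, est_stretch, est_mean])
print('fit: amp=%.3f, stretch=%.3f, mean=%.3f, RMS residual=%.3f'
      % (est_std, est_stretch, est_mean, np.sqrt(np.mean(residuals ** 2))))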
fig = plt.figure(1, figsize=(9, 5), dpi=150)
fig.suptitle('\\textbf{Torque Felt by Driven Gear vs. Difference in Displacements}', fontweight='bold')
fig.subplots_adjust(left=0.11, top=0.9, right=0.98, bottom=0.1)
plt.plot(x1, y1, '.', label='Processed Data Points', c='black')
plt.plot(x1, est_std*np.sin(est_stretch**-1 * x1) + est_mean, '--', label='Fitted Sine Wave', c='black')
plt.ylabel('\\textbf{Torque Felt by\\\\Driven Gear (Nm)}')
plt.xlabel('\\textbf{Difference in Displacements (rad)}')
plt.xlim(0, np.pi/2)
plt.legend(numpoints=1)
plt.show()
|
[
"pylab.show",
"numpy.std",
"pylab.ylabel",
"scipy.optimize.leastsq",
"matplotlib.use",
"pylab.figure",
"numpy.mean",
"pylab.xlabel",
"numpy.sin",
"pylab.xlim",
"pylab.legend",
"pylab.plot"
] |
[((72, 95), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (86, 95), False, 'import matplotlib\n'), ((533, 571), 'pylab.figure', 'plt.figure', (['(1)'], {'figsize': '(9, 5)', 'dpi': '(150)'}), '(1, figsize=(9, 5), dpi=150)\n', (543, 571), True, 'import pylab as plt\n'), ((741, 804), 'pylab.plot', 'plt.plot', (['x1', 'y1', '"""."""'], {'label': '"""Processed Data Points"""', 'c': '"""black"""'}), "(x1, y1, '.', label='Processed Data Points', c='black')\n", (749, 804), True, 'import pylab as plt\n'), ((910, 968), 'pylab.ylabel', 'plt.ylabel', (['"""\\\\textbf{Torque Felt by\\\\\\\\Driven Gear (Nm)}"""'], {}), "('\\\\textbf{Torque Felt by\\\\\\\\Driven Gear (Nm)}')\n", (920, 968), True, 'import pylab as plt\n'), ((969, 1026), 'pylab.xlabel', 'plt.xlabel', (['"""\\\\textbf{Difference in Displacements (rad)}"""'], {}), "('\\\\textbf{Difference in Displacements (rad)}')\n", (979, 1026), True, 'import pylab as plt\n'), ((1027, 1049), 'pylab.xlim', 'plt.xlim', (['(0)', '(np.pi / 2)'], {}), '(0, np.pi / 2)\n', (1035, 1049), True, 'import pylab as plt\n'), ((1049, 1072), 'pylab.legend', 'plt.legend', ([], {'numpoints': '(1)'}), '(numpoints=1)\n', (1059, 1072), True, 'import pylab as plt\n'), ((1073, 1083), 'pylab.show', 'plt.show', ([], {}), '()\n', (1081, 1083), True, 'import pylab as plt\n'), ((190, 201), 'numpy.mean', 'np.mean', (['y1'], {}), '(y1)\n', (197, 201), True, 'import numpy as np\n'), ((460, 522), 'scipy.optimize.leastsq', 'leastsq', (['optimize_func', '[guess_std, guess_stretch, guess_mean]'], {}), '(optimize_func, [guess_std, guess_stretch, guess_mean])\n', (467, 522), False, 'from scipy.optimize import leastsq\n'), ((218, 228), 'numpy.std', 'np.std', (['y1'], {}), '(y1)\n', (224, 228), True, 'import numpy as np\n'), ((826, 867), 'numpy.sin', 'np.sin', (['(est_stretch ** -1 * x1 + est_mean)'], {}), '(est_stretch ** -1 * x1 + est_mean)\n', (832, 867), True, 'import numpy as np\n'), ((312, 344), 'numpy.sin', 'np.sin', (['(guess_stretch ** -1 * x1)'], {}), '(guess_stretch ** -1 * x1)\n', (318, 344), True, 'import numpy as np\n'), ((398, 421), 'numpy.sin', 'np.sin', (['(x[1] ** -1 * x1)'], {}), '(x[1] ** -1 * x1)\n', (404, 421), True, 'import numpy as np\n')]
|
"""basic array functions"""
import multiprocessing
import warnings
import numpy as np
try:
import numexpr
numexpr.set_num_threads(multiprocessing.cpu_count())
numexpr.set_vml_num_threads(multiprocessing.cpu_count())
except ImportError:
warnings.warn('numexpr not detected, use `sudo pip install numexpr`')
numexpr = None
def astype(array, dtype):
"""cast array to dtype
Parameters
----------
- array: array
- dtype: dtype to cast to
"""
if numexpr is None:
return array.astype(dtype)
result = np.zeros(array.shape, dtype=dtype)
return numexpr.evaluate('array', out=result, casting='unsafe')
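# Illustrative usage (an addition, not in the original module):
#   astype(np.ones(3), np.int32)  ->  array([1, 1, 1], dtype=int32)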
def concatenate(arrays, axis, dtype=None, out=None):
"""concatenate arrays along axis
Parameters
----------
- arrays: iterable of arrays
- axis: int axis to concatenate
- dtype: dtype of result
- out: array in which to store result
"""
# compute sizes
ndim = arrays[0].ndim
other_axes = [other for other in range(arrays[0].ndim) if other != axis]
other_lengths = [arrays[0].shape[other_axis] for other_axis in other_axes]
axis_lengths = [array.shape[axis] for array in arrays]
axis_length = np.sum(axis_lengths)
    # build as a tuple so it compares correctly with out.shape below
    result_shape = tuple(other_lengths[:axis] + [axis_length] + other_lengths[axis:])
# ensure sizes and dtypes are proper
for a, array in enumerate(arrays):
if len(array.shape) != ndim:
            raise Exception('array ' + str(a) + ' has wrong dimensions')
for ol, length in enumerate(other_lengths):
if array.shape[other_axes[ol]] != length:
                raise Exception('bad length on axis ' + str(other_axes[ol]) + ' of array ' + str(a))
if out is not None:
if out.shape != result_shape:
raise Exception('out does not have shape ' + str(result_shape))
if dtype is not None and out.dtype != dtype:
raise Exception('out does not have dtype ' + str(dtype))
# initialize output
if out is None:
out = np.zeros(result_shape, dtype=dtype)
# fall back to numpy if numexpr not available
if numexpr is None:
out[:] = np.concatenate(arrays, axis=axis)
return out
# populate output
start = 0
slices = [slice(None) for d in range(ndim)]
for array in arrays:
end = start + array.shape[axis]
slices[axis] = slice(start, end)
        numexpr.evaluate('array', out=out[tuple(slices)])
start = end
return out
def isnan(X):
"""evaluate whether elements of X are infinite
Parameters
----------
- X: array to evaluate nan values of
"""
if numexpr is not None:
return numexpr.evaluate('X != X')
else:
return np.isnan(X)
def nan_to_num(X):
"""convert infinite values in array to 0
Parameters
----------
- X: array whose infinite values to convert
"""
if numexpr is not None:
X = copy(X)
X[isnan(X)] = 0
return X
else:
        return np.nan_to_num(X)  # note: the numpy fallback also maps +/-inf to large finite values
def nan_to_num_inplace(X):
"""convert infinite values in array to 0 inplace
Parameters
----------
- X: array whose infinite values to convert
"""
if numexpr is not None:
X[isnan(X)] = 0
return X
else:
X[np.isnan(X)] = 0
return X
def copy(X):
"""return a copy of X
Parameters
----------
- X: array to copy
"""
if numexpr is not None:
return numexpr.evaluate('X')
else:
return np.copy(X)
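if __name__ == '__main__':
    # minimal sanity checks (illustrative additions, not part of the module API)
    a = np.arange(6, dtype=np.float64).reshape(2, 3)
    b = np.ones((2, 3))
    stacked = concatenate([a, b], axis=0)
    assert stacked.shape == (4, 3)
    assert astype(stacked, np.int64).dtype == np.int64
    assert nan_to_num(np.array([1.0, np.nan]))[1] == 0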
|
[
"numpy.sum",
"numpy.nan_to_num",
"numpy.copy",
"numpy.zeros",
"numexpr.evaluate",
"numpy.isnan",
"warnings.warn",
"numpy.concatenate",
"multiprocessing.cpu_count"
] |
[((559, 593), 'numpy.zeros', 'np.zeros', (['array.shape'], {'dtype': 'dtype'}), '(array.shape, dtype=dtype)\n', (567, 593), True, 'import numpy as np\n'), ((605, 660), 'numexpr.evaluate', 'numexpr.evaluate', (['"""array"""'], {'out': 'result', 'casting': '"""unsafe"""'}), "('array', out=result, casting='unsafe')\n", (621, 660), False, 'import numexpr\n'), ((1211, 1231), 'numpy.sum', 'np.sum', (['axis_lengths'], {}), '(axis_lengths)\n', (1217, 1231), True, 'import numpy as np\n'), ((141, 168), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (166, 168), False, 'import multiprocessing\n'), ((202, 229), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (227, 229), False, 'import multiprocessing\n'), ((255, 324), 'warnings.warn', 'warnings.warn', (['"""numexpr not detected, use `sudo pip install numexpr`"""'], {}), "('numexpr not detected, use `sudo pip install numexpr`')\n", (268, 324), False, 'import warnings\n'), ((2004, 2039), 'numpy.zeros', 'np.zeros', (['result_shape'], {'dtype': 'dtype'}), '(result_shape, dtype=dtype)\n', (2012, 2039), True, 'import numpy as np\n'), ((2132, 2165), 'numpy.concatenate', 'np.concatenate', (['arrays'], {'axis': 'axis'}), '(arrays, axis=axis)\n', (2146, 2165), True, 'import numpy as np\n'), ((2384, 2426), 'numexpr.evaluate', 'numexpr.evaluate', (['"""array"""'], {'out': 'out[slices]'}), "('array', out=out[slices])\n", (2400, 2426), False, 'import numexpr\n'), ((2653, 2679), 'numexpr.evaluate', 'numexpr.evaluate', (['"""X != X"""'], {}), "('X != X')\n", (2669, 2679), False, 'import numexpr\n'), ((2705, 2716), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (2713, 2716), True, 'import numpy as np\n'), ((2984, 3000), 'numpy.nan_to_num', 'np.nan_to_num', (['X'], {}), '(X)\n', (2997, 3000), True, 'import numpy as np\n'), ((3439, 3460), 'numexpr.evaluate', 'numexpr.evaluate', (['"""X"""'], {}), "('X')\n", (3455, 3460), False, 'import numexpr\n'), ((3486, 3496), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (3493, 3496), True, 'import numpy as np\n'), ((3259, 3270), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (3267, 3270), True, 'import numpy as np\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert UV crops to full UV maps."""
import os
import sys
import json
from PIL import Image
import numpy as np
def place_crop(crop, image, center_x, center_y):
"""Place the crop in the image at the specified location."""
im_height, im_width = image.shape[:2]
crop_height, crop_width = crop.shape[:2]
left = center_x - crop_width // 2
right = left + crop_width
top = center_y - crop_height // 2
bottom = top + crop_height
adjusted_crop = crop # remove regions of crop that go beyond image bounds
if left < 0:
adjusted_crop = adjusted_crop[:, -left:]
if right > im_width:
adjusted_crop = adjusted_crop[:, :(im_width - right)]
if top < 0:
adjusted_crop = adjusted_crop[-top:]
if bottom > im_height:
adjusted_crop = adjusted_crop[:(im_height - bottom)]
    # binary mask of pixels covered by the crop (any nonzero channel)
    crop_mask = (adjusted_crop > 0).any(-1, keepdims=True).astype(crop.dtype)
image[max(0, top):min(im_height, bottom), max(0, left):min(im_width, right)] *= (1 - crop_mask)
image[max(0, top):min(im_height, bottom), max(0, left):min(im_width, right)] += adjusted_crop
return image
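# Worked example (illustrative, not in the original file): a 2x2 crop placed
# with center (0, 0) in a 4x4 image gives left = top = -1, so the
# out-of-bounds row and column are trimmed and only the crop's bottom-right
# 1x1 block is composited into image[0:1, 0:1].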
def crop2full(keypoints_path, metadata_path, uvdir, outdir):
"""Create each frame's layer UVs from predicted UV crops"""
with open(keypoints_path) as f:
kp_data = json.load(f)
# Get all people ids
people_ids = set()
for frame in kp_data:
for skeleton in kp_data[frame]:
people_ids.add(skeleton['idx'])
people_ids = sorted(list(people_ids))
with open(metadata_path) as f:
metadata = json.load(f)
orig_size = np.array(metadata['alphapose_input_size'][::-1])
out_size = np.array(metadata['size_LR'][::-1])
if 'people_layers' in metadata:
people_layers = metadata['people_layers']
else:
people_layers = [[pid] for pid in people_ids]
# Create output directories.
for layer_i in range(1, 1 + len(people_layers)):
os.makedirs(os.path.join(outdir, f'{layer_i:02d}'), exist_ok=True)
print(f'Writing UVs to {outdir}')
for frame in sorted(kp_data):
for layer_i, layer in enumerate(people_layers, 1):
out_path = os.path.join(outdir, f'{layer_i:02d}', frame)
sys.stdout.flush()
sys.stdout.write('processing frame %s\r' % out_path)
uv_map = np.zeros([out_size[0], out_size[1], 4])
for person_id in layer:
matches = [p for p in kp_data[frame] if p['idx'] == person_id]
if len(matches) == 0: # person doesn't appear in this frame
continue
skeleton = matches[0]
kps = np.array(skeleton['keypoints']).reshape(17, 3)
# Get kps bounding box.
left = kps[:, 0].min()
right = kps[:, 0].max()
top = kps[:, 1].min()
bottom = kps[:, 1].max()
height = bottom - top
width = right - left
orig_crop_size = max(height, width)
orig_center_x = (left + right) // 2
orig_center_y = (top + bottom) // 2
# read predicted uv map
uv_crop_path = os.path.join(uvdir, f'{person_id:02d}_{os.path.basename(out_path)[:-4]}_output_uv.png')
if os.path.exists(uv_crop_path):
uv_crop = np.array(Image.open(uv_crop_path))
else:
uv_crop = np.zeros([256, 256, 3])
# add person ID channel
person_mask = (uv_crop[..., 0:1] > 0).astype('uint8')
person_ids = (255 - person_id) * person_mask
uv_crop = np.concatenate([uv_crop, person_ids], -1)
# scale crop to desired output size
# 256 is the crop size, 192 is the inner crop size
out_crop_size = orig_crop_size * 256./192 * out_size / orig_size
                out_crop_size = out_crop_size.astype(int)
uv_crop = uv_crop.astype(np.uint8)
uv_crop = np.array(Image.fromarray(uv_crop).resize((out_crop_size[1], out_crop_size[0]), resample=Image.NEAREST))
# scale center coordinate accordingly
                out_center_x = (orig_center_x * out_size[1] / orig_size[1]).astype(int)
                out_center_y = (orig_center_y * out_size[0] / orig_size[0]).astype(int)
# Place UV crop in full UV map and save.
uv_map = place_crop(uv_crop, uv_map, out_center_x, out_center_y)
uv_map = Image.fromarray(uv_map.astype('uint8'))
uv_map.save(out_path)
if __name__ == "__main__":
import argparse
arguments = argparse.ArgumentParser()
arguments.add_argument('--dataroot', type=str)
opt = arguments.parse_args()
keypoints_path = os.path.join(opt.dataroot, 'keypoints.json')
metadata_path = os.path.join(opt.dataroot, 'metadata.json')
uvdir = os.path.join(opt.dataroot, 'kp2uv/test_latest/images')
outdir = os.path.join(opt.dataroot, 'iuv')
crop2full(keypoints_path, metadata_path, uvdir, outdir)
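# Illustrative invocation (the script name is an assumption):
#   python crop2full.py --dataroot /path/to/sequence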
|
[
"sys.stdout.write",
"json.load",
"argparse.ArgumentParser",
"os.path.basename",
"numpy.zeros",
"os.path.exists",
"PIL.Image.open",
"sys.stdout.flush",
"numpy.array",
"PIL.Image.fromarray",
"os.path.join",
"numpy.concatenate"
] |
[((2186, 2234), 'numpy.array', 'np.array', (["metadata['alphapose_input_size'][::-1]"], {}), "(metadata['alphapose_input_size'][::-1])\n", (2194, 2234), True, 'import numpy as np\n'), ((2250, 2285), 'numpy.array', 'np.array', (["metadata['size_LR'][::-1]"], {}), "(metadata['size_LR'][::-1])\n", (2258, 2285), True, 'import numpy as np\n'), ((5283, 5308), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5306, 5308), False, 'import argparse\n'), ((5415, 5459), 'os.path.join', 'os.path.join', (['opt.dataroot', '"""keypoints.json"""'], {}), "(opt.dataroot, 'keypoints.json')\n", (5427, 5459), False, 'import os\n'), ((5480, 5523), 'os.path.join', 'os.path.join', (['opt.dataroot', '"""metadata.json"""'], {}), "(opt.dataroot, 'metadata.json')\n", (5492, 5523), False, 'import os\n'), ((5536, 5590), 'os.path.join', 'os.path.join', (['opt.dataroot', '"""kp2uv/test_latest/images"""'], {}), "(opt.dataroot, 'kp2uv/test_latest/images')\n", (5548, 5590), False, 'import os\n'), ((5604, 5637), 'os.path.join', 'os.path.join', (['opt.dataroot', '"""iuv"""'], {}), "(opt.dataroot, 'iuv')\n", (5616, 5637), False, 'import os\n'), ((1887, 1899), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1896, 1899), False, 'import json\n'), ((2156, 2168), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2165, 2168), False, 'import json\n'), ((2544, 2582), 'os.path.join', 'os.path.join', (['outdir', 'f"""{layer_i:02d}"""'], {}), "(outdir, f'{layer_i:02d}')\n", (2556, 2582), False, 'import os\n'), ((2754, 2799), 'os.path.join', 'os.path.join', (['outdir', 'f"""{layer_i:02d}"""', 'frame'], {}), "(outdir, f'{layer_i:02d}', frame)\n", (2766, 2799), False, 'import os\n'), ((2812, 2830), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2828, 2830), False, 'import sys\n'), ((2843, 2895), 'sys.stdout.write', 'sys.stdout.write', (["('processing frame %s\\r' % out_path)"], {}), "('processing frame %s\\r' % out_path)\n", (2859, 2895), False, 'import sys\n'), ((2917, 2956), 'numpy.zeros', 'np.zeros', (['[out_size[0], out_size[1], 4]'], {}), '([out_size[0], out_size[1], 4])\n', (2925, 2956), True, 'import numpy as np\n'), ((3893, 3921), 'os.path.exists', 'os.path.exists', (['uv_crop_path'], {}), '(uv_crop_path)\n', (3907, 3921), False, 'import os\n'), ((4262, 4303), 'numpy.concatenate', 'np.concatenate', (['[uv_crop, person_ids]', '(-1)'], {}), '([uv_crop, person_ids], -1)\n', (4276, 4303), True, 'import numpy as np\n'), ((4040, 4063), 'numpy.zeros', 'np.zeros', (['[256, 256, 3]'], {}), '([256, 256, 3])\n', (4048, 4063), True, 'import numpy as np\n'), ((3238, 3269), 'numpy.array', 'np.array', (["skeleton['keypoints']"], {}), "(skeleton['keypoints'])\n", (3246, 3269), True, 'import numpy as np\n'), ((3962, 3986), 'PIL.Image.open', 'Image.open', (['uv_crop_path'], {}), '(uv_crop_path)\n', (3972, 3986), False, 'from PIL import Image\n'), ((4652, 4676), 'PIL.Image.fromarray', 'Image.fromarray', (['uv_crop'], {}), '(uv_crop)\n', (4667, 4676), False, 'from PIL import Image\n'), ((3825, 3851), 'os.path.basename', 'os.path.basename', (['out_path'], {}), '(out_path)\n', (3841, 3851), False, 'import os\n')]
|
import collections
import copy
import datetime
import gc
import time
# import torch
import numpy as np
from util.logconf import logging
log = logging.getLogger(__name__)
# log.setLevel(logging.WARN)
# log.setLevel(logging.INFO)
log.setLevel(logging.DEBUG)
IrcTuple = collections.namedtuple('IrcTuple', ['index', 'row', 'col'])
XyzTuple = collections.namedtuple('XyzTuple', ['x', 'y', 'z'])
def irc2xyz(coord_irc, origin_xyz, vxSize_xyz, direction_a):
cri_a = np.array(coord_irc)[::-1]
origin_a = np.array(origin_xyz)
vxSize_a = np.array(vxSize_xyz)
coords_xyz = (direction_a @ (cri_a * vxSize_a)) + origin_a
# coords_xyz = (direction_a @ (idx * vxSize_a)) + origin_a
return XyzTuple(*coords_xyz)
def xyz2irc(coord_xyz, origin_xyz, vxSize_xyz, direction_a):
origin_a = np.array(origin_xyz)
vxSize_a = np.array(vxSize_xyz)
coord_a = np.array(coord_xyz)
cri_a = ((coord_a - origin_a) @ np.linalg.inv(direction_a)) / vxSize_a
cri_a = np.round(cri_a)
return IrcTuple(int(cri_a[2]), int(cri_a[1]), int(cri_a[0]))
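# Illustrative round trip (an addition, not in the original module): with an
# identity direction matrix, unit voxel size, and a zero origin,
#   irc2xyz((1, 2, 3), (0, 0, 0), (1, 1, 1), np.eye(3))   ->  XyzTuple(x=3.0, y=2.0, z=1.0)
#   xyz2irc((3.0, 2.0, 1.0), (0, 0, 0), (1, 1, 1), np.eye(3))  ->  IrcTuple(index=1, row=2, col=3)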
def importstr(module_str, from_=None):
"""
>>> importstr('os')
<module 'os' from '.../os.pyc'>
>>> importstr('math', 'fabs')
<built-in function fabs>
"""
if from_ is None and ':' in module_str:
        module_str, from_ = module_str.rsplit(':', 1)
module = __import__(module_str)
for sub_str in module_str.split('.')[1:]:
module = getattr(module, sub_str)
if from_:
try:
return getattr(module, from_)
        except AttributeError:
raise ImportError('{}.{}'.format(module_str, from_))
return module
# class dotdict(dict):
# '''dict where key can be access as attribute d.key -> d[key]'''
# @classmethod
# def deep(cls, dic_obj):
# '''Initialize from dict with deep conversion'''
# return cls(dic_obj).deepConvert()
#
# def __getattr__(self, attr):
# if attr in self:
# return self[attr]
# log.error(sorted(self.keys()))
# raise AttributeError(attr)
# #return self.get(attr, None)
# __setattr__= dict.__setitem__
# __delattr__= dict.__delitem__
#
#
# def __copy__(self):
# return dotdict(self)
#
# def __deepcopy__(self, memo):
# new_dict = dotdict()
# for k, v in self.items():
# new_dict[k] = copy.deepcopy(v, memo)
# return new_dict
#
# # pylint: disable=multiple-statements
# def __getstate__(self): return self.__dict__
# def __setstate__(self, d): self.__dict__.update(d)
#
# def deepConvert(self):
# '''Convert all dicts at all tree levels into dotdict'''
# for k, v in self.items():
# if type(v) is dict: # pylint: disable=unidiomatic-typecheck
# self[k] = dotdict(v)
# self[k].deepConvert()
# try: # try enumerable types
# for m, x in enumerate(v):
# if type(x) is dict: # pylint: disable=unidiomatic-typecheck
# x = dotdict(x)
# x.deepConvert()
# v[m] = x#
# except TypeError:
# pass
# return self
#
# def copy(self):
# # override dict.copy()
# return dotdict(self)
def prhist(ary, prefix_str=None, **kwargs):
if prefix_str is None:
prefix_str = ''
else:
prefix_str += ' '
count_ary, bins_ary = np.histogram(ary, **kwargs)
for i in range(count_ary.shape[0]):
print("{}{:-8.2f}".format(prefix_str, bins_ary[i]), "{:-10}".format(count_ary[i]))
print("{}{:-8.2f}".format(prefix_str, bins_ary[-1]))
# def dumpCuda():
# # small_count = 0
# total_bytes = 0
# size2count_dict = collections.defaultdict(int)
# size2bytes_dict = {}
# for obj in gc.get_objects():
# if isinstance(obj, torch.cuda._CudaBase):
# nbytes = 4
# for n in obj.size():
# nbytes *= n
#
# size2count_dict[tuple([obj.get_device()] + list(obj.size()))] += 1
# size2bytes_dict[tuple([obj.get_device()] + list(obj.size()))] = nbytes
#
# total_bytes += nbytes
#
# # print(small_count, "tensors equal to or less than than 16 bytes")
# for size, count in sorted(size2count_dict.items(), key=lambda sc: (size2bytes_dict[sc[0]] * sc[1], sc[1], sc[0])):
# print('{:4}x'.format(count), '{:10,}'.format(size2bytes_dict[size]), size)
# print('{:10,}'.format(total_bytes), "total bytes")
def enumerateWithEstimate(
iter,
desc_str,
start_ndx=0,
print_ndx=4,
backoff=None,
iter_len=None,
):
"""
In terms of behavior, `enumerateWithEstimate` is almost identical
to the standard `enumerate` (the differences are things like how
our function returns a generator, while `enumerate` returns a
specialized `<enumerate object at 0x...>`).
However, the side effects (logging, specifically) are what make the
function interesting.
:param iter: `iter` is the iterable that will be passed into
`enumerate`. Required.
:param desc_str: This is a human-readable string that describes
what the loop is doing. The value is arbitrary, but should be
kept reasonably short. Things like `"epoch 4 training"` or
`"deleting temp files"` or similar would all make sense.
:param start_ndx: This parameter defines how many iterations of the
loop should be skipped before timing actually starts. Skipping
a few iterations can be useful if there are startup costs like
caching that are only paid early on, resulting in a skewed
average when those early iterations dominate the average time
per iteration.
NOTE: Using `start_ndx` to skip some iterations makes the time
spent performing those iterations not be included in the
displayed duration. Please account for this if you use the
displayed duration for anything formal.
This parameter defaults to `0`.
    :param print_ndx: determines which loop iteration the timing
        logging will start on. The intent is that we don't start
        logging until we've given the loop a few iterations to let the
        average time-per-iteration stabilize a bit. We
require that `print_ndx` not be less than `start_ndx` times
`backoff`, since `start_ndx` greater than `0` implies that the
early N iterations are unstable from a timing perspective.
`print_ndx` defaults to `4`.
    :param backoff: This is used to control how many iterations to skip before
logging again. Frequent logging is less interesting later on,
so by default we double the gap between logging messages each
time after the first.
        `backoff` defaults to `2` and is doubled while `backoff ** 7` is
        still smaller than `iter_len`, so longer loops get a wider gap.
:param iter_len: Since we need to know the number of items to
estimate when the loop will finish, that can be provided by
passing in a value for `iter_len`. If a value isn't provided,
then it will be set by using the value of `len(iter)`.
:return:
"""
if iter_len is None:
iter_len = len(iter)
if backoff is None:
backoff = 2
while backoff ** 7 < iter_len:
backoff *= 2
assert backoff >= 2
while print_ndx < start_ndx * backoff:
print_ndx *= backoff
log.warning("{} ----/{}, starting".format(
desc_str,
iter_len,
))
start_ts = time.time()
for (current_ndx, item) in enumerate(iter):
yield (current_ndx, item)
if current_ndx == print_ndx:
# ... <1>
duration_sec = ((time.time() - start_ts)
/ (current_ndx - start_ndx + 1)
* (iter_len-start_ndx)
)
done_dt = datetime.datetime.fromtimestamp(start_ts + duration_sec)
done_td = datetime.timedelta(seconds=duration_sec)
log.info("{} {:-4}/{}, done at {}, {}".format(
desc_str,
current_ndx,
iter_len,
str(done_dt).rsplit('.', 1)[0],
str(done_td).rsplit('.', 1)[0],
))
print_ndx *= backoff
if current_ndx + 1 == start_ndx:
start_ts = time.time()
log.warning("{} ----/{}, done at {}".format(
desc_str,
iter_len,
str(datetime.datetime.now()).rsplit('.', 1)[0],
))
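# Typical usage (an illustrative sketch; `train_dl` is an assumed DataLoader):
#   for batch_ndx, batch_tup in enumerateWithEstimate(train_dl, "E1 training"):
#       ...  # train on batch_tup; progress and ETA are logged automatically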
#
# try:
# import matplotlib
# matplotlib.use('agg', warn=False)
#
# import matplotlib.pyplot as plt
# # matplotlib color maps
# cdict = {'red': ((0.0, 1.0, 1.0),
# # (0.5, 1.0, 1.0),
# (1.0, 1.0, 1.0)),
#
# 'green': ((0.0, 0.0, 0.0),
# (0.5, 0.0, 0.0),
# (1.0, 0.5, 0.5)),
#
# 'blue': ((0.0, 0.0, 0.0),
# # (0.5, 0.5, 0.5),
# # (0.75, 0.0, 0.0),
# (1.0, 0.0, 0.0)),
#
# 'alpha': ((0.0, 0.0, 0.0),
# (0.75, 0.5, 0.5),
# (1.0, 0.5, 0.5))}
#
# plt.register_cmap(name='mask', data=cdict)
#
# cdict = {'red': ((0.0, 0.0, 0.0),
# (0.25, 1.0, 1.0),
# (1.0, 1.0, 1.0)),
#
# 'green': ((0.0, 1.0, 1.0),
# (0.25, 1.0, 1.0),
# (0.5, 0.0, 0.0),
# (1.0, 0.0, 0.0)),
#
# 'blue': ((0.0, 0.0, 0.0),
# # (0.5, 0.5, 0.5),
# # (0.75, 0.0, 0.0),
# (1.0, 0.0, 0.0)),
#
# 'alpha': ((0.0, 0.15, 0.15),
# (0.5, 0.3, 0.3),
# (0.8, 0.0, 0.0),
# (1.0, 0.0, 0.0))}
#
# plt.register_cmap(name='maskinvert', data=cdict)
# except ImportError:
# pass
|
[
"datetime.datetime.now",
"time.time",
"util.logconf.logging.getLogger",
"numpy.histogram",
"numpy.array",
"collections.namedtuple",
"numpy.linalg.inv",
"datetime.datetime.fromtimestamp",
"datetime.timedelta",
"numpy.round"
] |
[((144, 171), 'util.logconf.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (161, 171), False, 'from util.logconf import logging\n'), ((270, 329), 'collections.namedtuple', 'collections.namedtuple', (['"""IrcTuple"""', "['index', 'row', 'col']"], {}), "('IrcTuple', ['index', 'row', 'col'])\n", (292, 329), False, 'import collections\n'), ((341, 392), 'collections.namedtuple', 'collections.namedtuple', (['"""XyzTuple"""', "['x', 'y', 'z']"], {}), "('XyzTuple', ['x', 'y', 'z'])\n", (363, 392), False, 'import collections\n'), ((508, 528), 'numpy.array', 'np.array', (['origin_xyz'], {}), '(origin_xyz)\n', (516, 528), True, 'import numpy as np\n'), ((544, 564), 'numpy.array', 'np.array', (['vxSize_xyz'], {}), '(vxSize_xyz)\n', (552, 564), True, 'import numpy as np\n'), ((801, 821), 'numpy.array', 'np.array', (['origin_xyz'], {}), '(origin_xyz)\n', (809, 821), True, 'import numpy as np\n'), ((837, 857), 'numpy.array', 'np.array', (['vxSize_xyz'], {}), '(vxSize_xyz)\n', (845, 857), True, 'import numpy as np\n'), ((872, 891), 'numpy.array', 'np.array', (['coord_xyz'], {}), '(coord_xyz)\n', (880, 891), True, 'import numpy as np\n'), ((979, 994), 'numpy.round', 'np.round', (['cri_a'], {}), '(cri_a)\n', (987, 994), True, 'import numpy as np\n'), ((3456, 3483), 'numpy.histogram', 'np.histogram', (['ary'], {}), '(ary, **kwargs)\n', (3468, 3483), True, 'import numpy as np\n'), ((7606, 7617), 'time.time', 'time.time', ([], {}), '()\n', (7615, 7617), False, 'import time\n'), ((467, 486), 'numpy.array', 'np.array', (['coord_irc'], {}), '(coord_irc)\n', (475, 486), True, 'import numpy as np\n'), ((928, 954), 'numpy.linalg.inv', 'np.linalg.inv', (['direction_a'], {}), '(direction_a)\n', (941, 954), True, 'import numpy as np\n'), ((7976, 8032), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(start_ts + duration_sec)'], {}), '(start_ts + duration_sec)\n', (8007, 8032), False, 'import datetime\n'), ((8055, 8095), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'duration_sec'}), '(seconds=duration_sec)\n', (8073, 8095), False, 'import datetime\n'), ((8447, 8458), 'time.time', 'time.time', ([], {}), '()\n', (8456, 8458), False, 'import time\n'), ((7788, 7799), 'time.time', 'time.time', ([], {}), '()\n', (7797, 7799), False, 'import time\n'), ((8557, 8580), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8578, 8580), False, 'import datetime\n')]
|
import os
import sys
import argparse
import datetime
import time
import os.path as osp
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
import datasets
import models
from utils import AverageMeter, Logger
from center_loss import CenterLoss
parser = argparse.ArgumentParser("Center Loss Example")
# dataset
parser.add_argument('-d', '--dataset', type=str, default='mnist', choices=['mnist'])
parser.add_argument('-j', '--workers', default=4, type=int,
help="number of data loading workers (default: 4)")
# optimization
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--lr-model', type=float, default=0.001, help="learning rate for model")
parser.add_argument('--lr-cent', type=float, default=0.5, help="learning rate for center loss")
parser.add_argument('--weight-cent', type=float, default=1, help="weight for center loss")
parser.add_argument('--max-epoch', type=int, default=100)
parser.add_argument('--stepsize', type=int, default=20)
parser.add_argument('--gamma', type=float, default=0.5, help="learning rate decay")
# model
parser.add_argument('--model', type=str, default='cnn')
# misc
parser.add_argument('--eval-freq', type=int, default=10)
parser.add_argument('--print-freq', type=int, default=50)
parser.add_argument('--gpu', type=str, default='0')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use-cpu', action='store_true')
parser.add_argument('--save-dir', type=str, default='log')
parser.add_argument('--plot', action='store_true', help="whether to plot features for every epoch")
args = parser.parse_args()
def main():
torch.manual_seed(args.seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
sys.stdout = Logger(osp.join(args.save_dir, 'log_' + args.dataset + '.txt'))
if use_gpu:
print("Currently using GPU: {}".format(args.gpu))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU")
print("Creating dataset: {}".format(args.dataset))
dataset = datasets.create(
name=args.dataset, batch_size=args.batch_size, use_gpu=use_gpu,
num_workers=args.workers,
)
trainloader, testloader = dataset.trainloader, dataset.testloader
print("Creating model: {}".format(args.model))
model = models.create(name=args.model, num_classes=dataset.num_classes)
if use_gpu:
model = nn.DataParallel(model).cuda()
criterion_xent = nn.CrossEntropyLoss()
criterion_cent = CenterLoss(num_classes=dataset.num_classes, feat_dim=2, use_gpu=use_gpu)
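    # feat_dim=2 keeps the learned embedding two-dimensional so that
    # plot_features() below can scatter it directly on a plane.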
optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=5e-04, momentum=0.9)
optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(), lr=args.lr_cent)
if args.stepsize > 0:
scheduler = lr_scheduler.StepLR(optimizer_model, step_size=args.stepsize, gamma=args.gamma)
start_time = time.time()
for epoch in range(args.max_epoch):
print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
train(model, criterion_xent, criterion_cent,
optimizer_model, optimizer_centloss,
trainloader, use_gpu, dataset.num_classes, epoch)
if args.stepsize > 0: scheduler.step()
if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
print("==> Test")
acc, err = test(model, testloader, use_gpu, dataset.num_classes, epoch)
print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
def train(model, criterion_xent, criterion_cent,
optimizer_model, optimizer_centloss,
trainloader, use_gpu, num_classes, epoch):
model.train()
xent_losses = AverageMeter()
cent_losses = AverageMeter()
losses = AverageMeter()
if args.plot:
all_features, all_labels = [], []
for batch_idx, (data, labels) in enumerate(trainloader):
if use_gpu:
data, labels = data.cuda(), labels.cuda()
features, outputs = model(data)
loss_xent = criterion_xent(outputs, labels)
loss_cent = criterion_cent(features, labels)
loss_cent *= args.weight_cent
loss = loss_xent + loss_cent
optimizer_model.zero_grad()
optimizer_centloss.zero_grad()
loss.backward()
optimizer_model.step()
        # undo the weight_cent scaling so that weight_cent does not affect the update of the centers
for param in criterion_cent.parameters():
param.grad.data *= (1. / args.weight_cent)
optimizer_centloss.step()
losses.update(loss.item(), labels.size(0))
xent_losses.update(loss_xent.item(), labels.size(0))
cent_losses.update(loss_cent.item(), labels.size(0))
if args.plot:
if use_gpu:
all_features.append(features.data.cpu().numpy())
all_labels.append(labels.data.cpu().numpy())
else:
all_features.append(features.data.numpy())
all_labels.append(labels.data.numpy())
if (batch_idx+1) % args.print_freq == 0:
print("Batch {}/{}\t Loss {:.6f} ({:.6f}) XentLoss {:.6f} ({:.6f}) CenterLoss {:.6f} ({:.6f})" \
.format(batch_idx+1, len(trainloader), losses.val, losses.avg, xent_losses.val, xent_losses.avg, cent_losses.val, cent_losses.avg))
if args.plot:
all_features = np.concatenate(all_features, 0)
all_labels = np.concatenate(all_labels, 0)
plot_features(all_features, all_labels, num_classes, epoch, prefix='train')
def test(model, testloader, use_gpu, num_classes, epoch):
model.eval()
correct, total = 0, 0
if args.plot:
all_features, all_labels = [], []
with torch.no_grad():
for data, labels in testloader:
if use_gpu:
data, labels = data.cuda(), labels.cuda()
features, outputs = model(data)
predictions = outputs.data.max(1)[1]
total += labels.size(0)
correct += (predictions == labels.data).sum()
if args.plot:
if use_gpu:
all_features.append(features.data.cpu().numpy())
all_labels.append(labels.data.cpu().numpy())
else:
all_features.append(features.data.numpy())
all_labels.append(labels.data.numpy())
if args.plot:
all_features = np.concatenate(all_features, 0)
all_labels = np.concatenate(all_labels, 0)
plot_features(all_features, all_labels, num_classes, epoch, prefix='test')
acc = correct * 100. / total
err = 100. - acc
return acc, err
def plot_features(features, labels, num_classes, epoch, prefix):
"""Plot features on 2D plane.
Args:
features: (num_instances, num_features).
labels: (num_instances).
"""
colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
for label_idx in range(num_classes):
plt.scatter(
features[labels==label_idx, 0],
features[labels==label_idx, 1],
c=colors[label_idx],
s=1,
)
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc='upper right')
dirname = osp.join(args.save_dir, prefix)
if not osp.exists(dirname):
os.mkdir(dirname)
save_name = osp.join(dirname, 'epoch_' + str(epoch+1) + '.png')
plt.savefig(save_name, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
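# Illustrative invocation (the file name is an assumption):
#   python main.py --dataset mnist --plot
# With --plot, per-epoch feature scatters are saved under <save-dir>/train
# and <save-dir>/test as epoch_<n>.png.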
|
[
"os.mkdir",
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"torch.no_grad",
"os.path.join",
"models.create",
"utils.AverageMeter",
"matplotlib.pyplot.close",
"os.path.exists",
"datetime.timedelta",
"datasets.create",
"torch.manual_seed",
"matplotlib.pyplot.legend",
"center_loss.CenterLoss",
"matplotlib.use",
"torch.cuda.is_available",
"numpy.concatenate",
"matplotlib.pyplot.scatter",
"torch.nn.CrossEntropyLoss",
"time.time",
"torch.cuda.manual_seed_all",
"torch.nn.DataParallel",
"matplotlib.pyplot.savefig"
] |
[((105, 126), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (119, 126), False, 'import matplotlib\n'), ((408, 454), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Center Loss Example"""'], {}), "('Center Loss Example')\n", (431, 454), False, 'import argparse\n'), ((1781, 1809), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1798, 1809), False, 'import torch\n'), ((1874, 1899), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1897, 1899), False, 'import torch\n'), ((2288, 2398), 'datasets.create', 'datasets.create', ([], {'name': 'args.dataset', 'batch_size': 'args.batch_size', 'use_gpu': 'use_gpu', 'num_workers': 'args.workers'}), '(name=args.dataset, batch_size=args.batch_size, use_gpu=\n use_gpu, num_workers=args.workers)\n', (2303, 2398), False, 'import datasets\n'), ((2552, 2615), 'models.create', 'models.create', ([], {'name': 'args.model', 'num_classes': 'dataset.num_classes'}), '(name=args.model, num_classes=dataset.num_classes)\n', (2565, 2615), False, 'import models\n'), ((2701, 2722), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2720, 2722), True, 'import torch.nn as nn\n'), ((2744, 2816), 'center_loss.CenterLoss', 'CenterLoss', ([], {'num_classes': 'dataset.num_classes', 'feat_dim': '(2)', 'use_gpu': 'use_gpu'}), '(num_classes=dataset.num_classes, feat_dim=2, use_gpu=use_gpu)\n', (2754, 2816), False, 'from center_loss import CenterLoss\n'), ((3159, 3170), 'time.time', 'time.time', ([], {}), '()\n', (3168, 3170), False, 'import time\n'), ((4140, 4154), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4152, 4154), False, 'from utils import AverageMeter, Logger\n'), ((4173, 4187), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4185, 4187), False, 'from utils import AverageMeter, Logger\n'), ((4201, 4215), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4213, 4215), False, 'from utils import AverageMeter, Logger\n'), ((7606, 7692), 'matplotlib.pyplot.legend', 'plt.legend', (["['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']"], {'loc': '"""upper right"""'}), "(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc=\n 'upper right')\n", (7616, 7692), True, 'from matplotlib import pyplot as plt\n'), ((7702, 7733), 'os.path.join', 'osp.join', (['args.save_dir', 'prefix'], {}), '(args.save_dir, prefix)\n', (7710, 7733), True, 'import os.path as osp\n'), ((7864, 7907), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_name'], {'bbox_inches': '"""tight"""'}), "(save_name, bbox_inches='tight')\n", (7875, 7907), True, 'from matplotlib import pyplot as plt\n'), ((7912, 7923), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7921, 7923), True, 'from matplotlib import pyplot as plt\n'), ((1962, 2017), 'os.path.join', 'osp.join', (['args.save_dir', "('log_' + args.dataset + '.txt')"], {}), "(args.save_dir, 'log_' + args.dataset + '.txt')\n", (1970, 2017), True, 'import os.path as osp\n'), ((2133, 2170), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (2159, 2170), False, 'import torch\n'), ((3061, 3140), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer_model'], {'step_size': 'args.stepsize', 'gamma': 'args.gamma'}), '(optimizer_model, step_size=args.stepsize, gamma=args.gamma)\n', (3080, 3140), False, 'from torch.optim import lr_scheduler\n'), ((3847, 3882), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'elapsed'}), '(seconds=elapsed)\n', (3865, 
3882), False, 'import datetime\n'), ((5823, 5854), 'numpy.concatenate', 'np.concatenate', (['all_features', '(0)'], {}), '(all_features, 0)\n', (5837, 5854), True, 'import numpy as np\n'), ((5876, 5905), 'numpy.concatenate', 'np.concatenate', (['all_labels', '(0)'], {}), '(all_labels, 0)\n', (5890, 5905), True, 'import numpy as np\n'), ((6162, 6177), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6175, 6177), False, 'import torch\n'), ((6875, 6906), 'numpy.concatenate', 'np.concatenate', (['all_features', '(0)'], {}), '(all_features, 0)\n', (6889, 6906), True, 'import numpy as np\n'), ((6928, 6957), 'numpy.concatenate', 'np.concatenate', (['all_labels', '(0)'], {}), '(all_labels, 0)\n', (6942, 6957), True, 'import numpy as np\n'), ((7441, 7550), 'matplotlib.pyplot.scatter', 'plt.scatter', (['features[labels == label_idx, 0]', 'features[labels == label_idx, 1]'], {'c': 'colors[label_idx]', 's': '(1)'}), '(features[labels == label_idx, 0], features[labels == label_idx,\n 1], c=colors[label_idx], s=1)\n', (7452, 7550), True, 'from matplotlib import pyplot as plt\n'), ((7745, 7764), 'os.path.exists', 'osp.exists', (['dirname'], {}), '(dirname)\n', (7755, 7764), True, 'import os.path as osp\n'), ((7774, 7791), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (7782, 7791), False, 'import os\n'), ((3803, 3814), 'time.time', 'time.time', ([], {}), '()\n', (3812, 3814), False, 'import time\n'), ((2649, 2671), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (2664, 2671), True, 'import torch.nn as nn\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#read in ipsilateral breast labelmap/volume
#mask this patient's breast
#generate histogram of intensity
#DIR (deformable image registration) to new patient's breast
#expand/dilate region (might need to be manual)
#mask new patient's breast
#generate histogram of intensity
# In[2]:
#import modules
import SimpleITK as sitk
from platipy.imaging.visualisation.tools import ImageVisualiser
from platipy.imaging.utils.tools import get_com
import matplotlib.pyplot as plt
import numpy as np
get_ipython().run_line_magic('matplotlib', 'notebook')
# In[3]:
R_breast=sitk.ReadImage("/home/alicja/Downloads/Segmentation.nii.gz")
# In[4]:
WES_010_4_B50T=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz")
WES_010_4_B800T=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz")
# In[5]:
masked_R_breast = sitk.Mask(WES_010_4_B50T, R_breast)
# In[10]:
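#inspect the intensity histogram of the masked breast to choose a threshold manually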
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(200,900,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[7]:
#Use these values to do thresholding
def estimate_tumour_vol(img_mri, lowerthreshold=300, upperthreshold=3000, hole_size=1):
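    #1) binary threshold, 2) keep the largest connected component, 3) close small holes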
label_threshold = sitk.BinaryThreshold(img_mri, lowerThreshold=lowerthreshold, upperThreshold=upperthreshold)
label_threshold_cc = sitk.RelabelComponent(sitk.ConnectedComponent(label_threshold))
label_threshold_cc_x = (label_threshold_cc==1)
label_threshold_cc_x_f = sitk.BinaryMorphologicalClosing(label_threshold_cc_x, (hole_size,hole_size,hole_size))
    return label_threshold_cc_x_f
# In[12]:
image_mri=WES_010_4_B50T
arr_mri = sitk.GetArrayFromImage(image_mri)
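#zero out one lateral half of the volume so thresholding is restricted to the ipsilateral breast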
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=720, upperthreshold=3000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_4_B50T_hist.nii.gz") #works well
# In[35]:
np.max(sitk.GetArrayViewFromImage(label_threshold_cc_x_f)) #max label value; use the array view so numpy sees the voxels
# In[60]:
masked_R_breast_B800T = sitk.Mask(WES_010_4_B800T, R_breast)
values = sitk.GetArrayViewFromImage(masked_R_breast_B800T).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(1,600,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[61]:
image_mri=WES_010_4_B800T
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=300, upperthreshold=3000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_4_B800T_hist.nii.gz") #works super well
# In[15]:
WES_010_4_T2w=sitk.ReadImage("/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz")
# In[63]:
WES_010_4_T2w=sitk.Resample(WES_010_4_T2w, WES_010_4_B50T)
masked_R_breast_T2w = sitk.Mask(WES_010_4_T2w, R_breast)
values = sitk.GetArrayViewFromImage(masked_R_breast_T2w).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(1,300,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[64]:
image_mri=WES_010_4_T2w
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=170, upperthreshold=3000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_4_T2w_hist.nii.gz") #works well too
# In[13]:
WES_010_4_MPE=sitk.ReadImage("MPE_sub_WES_010_4.nii.gz")
# In[17]:
WES_010_4_MPE=sitk.Resample(WES_010_4_MPE, WES_010_4_B50T)
masked_R_breast_MPE = sitk.Mask(WES_010_4_MPE, R_breast)
values = sitk.GetArrayViewFromImage(masked_R_breast_MPE).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(150,450,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[20]:
image_mri=WES_010_4_MPE
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=230, upperthreshold=3000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_010_4_MPE_hist.nii.gz") #good
# In[ ]:
# In[65]:
from platipy.imaging.visualisation.tools import ImageVisualiser
from platipy.imaging.registration.registration import (
initial_registration,
fast_symmetric_forces_demons_registration,
transform_propagation,
apply_field
)
# In[66]:
#DIR to Patient 8
WES_008_4_B50T=sitk.ReadImage("/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz")
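#rigid initialisation followed by deformable (demons) registration; the resulting
#transforms are then used to propagate the breast contour onto patient 8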
image_to_0_rigid, tfm_to_0_rigid = initial_registration(
WES_008_4_B50T,
WES_010_4_B50T,
options={
'shrink_factors': [8,4],
'smooth_sigmas': [0,0],
'sampling_rate': 0.5,
'final_interp': 2,
'metric': 'mean_squares',
'optimiser': 'gradient_descent_line_search',
'number_of_iterations': 25},
reg_method='Rigid')
image_to_0_dir, tfm_to_0_dir = fast_symmetric_forces_demons_registration(
WES_008_4_B50T,
image_to_0_rigid,
resolution_staging=[4,2],
iteration_staging=[10,10]
)
R_breast_to_0_rigid = transform_propagation(
WES_008_4_B50T,
R_breast,
tfm_to_0_rigid,
structure=True
)
R_breast_to_0_dir = apply_field(
R_breast_to_0_rigid,
tfm_to_0_dir,
structure=True
)
# In[67]:
vis = ImageVisualiser(WES_008_4_B50T, axis='z', cut=get_com(R_breast_to_0_dir), window=[-250, 500])
vis.add_contour(R_breast_to_0_dir, name='BREAST', color='g')
fig = vis.show()
# In[78]:
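#dilate the propagated contour slightly to compensate for residual registration error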
breast_contour_dilate=sitk.BinaryDilate(R_breast_to_0_dir, (2,2,2))
# In[79]:
vis = ImageVisualiser(WES_008_4_B50T, axis='z', cut=get_com(R_breast_to_0_dir), window=[-250, 500])
vis.add_contour(breast_contour_dilate, name='BREAST', color='g')
fig = vis.show()
# In[80]:
masked_R_breast = sitk.Mask(WES_008_4_B50T, breast_contour_dilate)
# In[92]:
values = sitk.GetArrayViewFromImage(masked_R_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(200,3000,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[93]:
image_mri=WES_008_4_B50T
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=1400, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_008_4_B50T_hist.nii.gz") #good but seems to contain
#fibroglandular tissue as well
# In[95]:
WES_008_4_B800T=sitk.ReadImage("/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz")
WES_008_4_T2w=sitk.ReadImage("/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz")
# In[96]:
masked_R_breast_B800T = sitk.Mask(WES_008_4_B800T, breast_contour_dilate)
values = sitk.GetArrayViewFromImage(masked_R_breast_B800T).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(1,600,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[99]:
image_mri=WES_008_4_B800T
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=480, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_008_4_B800T_hist.nii.gz") #good
# In[104]:
WES_008_4_T2w=sitk.Resample(WES_008_4_T2w,WES_008_4_B800T)
masked_R_breast_T2w = sitk.Mask(WES_008_4_T2w, breast_contour_dilate)
values = sitk.GetArrayViewFromImage(masked_R_breast_T2w).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(1,250,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[105]:
image_mri=WES_008_4_T2w
arr_mri = sitk.GetArrayFromImage(image_mri)
arr_mri[:,:,arr_mri.shape[2]//2:] = 0
image_mri_masked=sitk.GetImageFromArray(arr_mri)
image_mri_masked.CopyInformation(image_mri)
label_threshold_cc_x_f=estimate_tumour_vol(image_mri_masked, lowerthreshold=197, upperthreshold=5000, hole_size=1)
sitk.WriteImage(label_threshold_cc_x_f,"test_label_threshold_008_4_T2w_hist.nii.gz") #okay but picks up
#fibroglandular tissue
# In[106]:
L_breast=sitk.ReadImage("contralateral_segmentation.nii.gz")
# In[107]:
L_breast_to_0_rigid = transform_propagation(
WES_008_4_B50T,
L_breast,
tfm_to_0_rigid,
structure=True
)
L_breast_to_0_dir = apply_field(
L_breast_to_0_rigid,
tfm_to_0_dir,
structure=True
)
# In[110]:
L_breast_contour_dilate=sitk.BinaryDilate(L_breast_to_0_dir, (4,4,4))
vis = ImageVisualiser(WES_008_4_B50T, axis='z', cut=get_com(L_breast_to_0_dir), window=[-250, 500])
vis.add_contour(L_breast_contour_dilate, name='BREAST', color='g')
fig = vis.show()
# In[111]:
masked_L_breast = sitk.Mask(WES_008_4_B50T, L_breast_contour_dilate)
values = sitk.GetArrayViewFromImage(masked_L_breast).flatten()
fig, ax = plt.subplots(1,1)
ax.hist(values, bins=np.linspace(200,3000,50), histtype='stepfilled', lw=2)
#ax.set_yscale('log')
ax.grid()
ax.set_axisbelow(True)
ax.set_xlabel('Intensity')
ax.set_ylabel('Frequency')
fig.show()
# In[ ]:
|
[
"SimpleITK.BinaryThreshold",
"SimpleITK.Resample",
"platipy.imaging.registration.registration.fast_symmetric_forces_demons_registration",
"SimpleITK.ConnectedComponent",
"SimpleITK.GetArrayViewFromImage",
"SimpleITK.ReadImage",
"SimpleITK.GetArrayFromImage",
"numpy.max",
"SimpleITK.BinaryMorphologicalClosing",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"platipy.imaging.utils.tools.get_com",
"SimpleITK.Mask",
"SimpleITK.WriteImage",
"platipy.imaging.registration.registration.initial_registration",
"platipy.imaging.registration.registration.apply_field",
"SimpleITK.BinaryDilate",
"SimpleITK.GetImageFromArray",
"platipy.imaging.registration.registration.transform_propagation"
] |
[((588, 648), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/home/alicja/Downloads/Segmentation.nii.gz"""'], {}), "('/home/alicja/Downloads/Segmentation.nii.gz')\n", (602, 648), True, 'import SimpleITK as sitk\n'), ((677, 850), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz"""'], {}), "(\n '/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz'\n )\n", (691, 850), True, 'import SimpleITK as sitk\n'), ((857, 1031), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz"""'], {}), "(\n '/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz'\n )\n", (871, 1031), True, 'import SimpleITK as sitk\n'), ((1053, 1088), 'SimpleITK.Mask', 'sitk.Mask', (['WES_010_4_B50T', 'R_breast'], {}), '(WES_010_4_B50T, R_breast)\n', (1062, 1088), True, 'import SimpleITK as sitk\n'), ((1177, 1195), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1189, 1195), True, 'import matplotlib.pyplot as plt\n'), ((1982, 2015), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image_mri'], {}), '(image_mri)\n', (2004, 2015), True, 'import SimpleITK as sitk\n'), ((2071, 2102), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['arr_mri'], {}), '(arr_mri)\n', (2093, 2102), True, 'import SimpleITK as sitk\n'), ((2264, 2354), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['label_threshold_cc_x_f', '"""test_label_threshold_010_4_B50T_hist.nii.gz"""'], {}), "(label_threshold_cc_x_f,\n 'test_label_threshold_010_4_B50T_hist.nii.gz')\n", (2279, 2354), True, 'import SimpleITK as sitk\n'), ((2376, 2406), 'numpy.max', 'np.max', (['label_threshold_cc_x_f'], {}), '(label_threshold_cc_x_f)\n', (2382, 2406), True, 'import numpy as np\n'), ((2445, 2481), 'SimpleITK.Mask', 'sitk.Mask', (['WES_010_4_B800T', 'R_breast'], {}), '(WES_010_4_B800T, R_breast)\n', (2454, 2481), True, 'import SimpleITK as sitk\n'), ((2562, 2580), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2574, 2580), True, 'import matplotlib.pyplot as plt\n'), ((2823, 2856), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image_mri'], {}), '(image_mri)\n', (2845, 2856), True, 'import SimpleITK as sitk\n'), ((2912, 2943), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['arr_mri'], {}), '(arr_mri)\n', (2934, 2943), True, 'import SimpleITK as sitk\n'), ((3105, 3196), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['label_threshold_cc_x_f', '"""test_label_threshold_010_4_B800T_hist.nii.gz"""'], {}), "(label_threshold_cc_x_f,\n 'test_label_threshold_010_4_B800T_hist.nii.gz')\n", (3120, 3196), True, 'import SimpleITK as sitk\n'), ((3238, 3378), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz"""'], {}), "(\n '/home/alicja/Documents/WES_010/IMAGES/WES_010_4_20180829_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz'\n )\n", (3252, 3378), True, 'import SimpleITK as sitk\n'), ((3397, 3441), 'SimpleITK.Resample', 'sitk.Resample', (['WES_010_4_T2w', 'WES_010_4_B50T'], {}), '(WES_010_4_T2w, WES_010_4_B50T)\n', 
(3410, 3441), True, 'import SimpleITK as sitk\n'), ((3464, 3498), 'SimpleITK.Mask', 'sitk.Mask', (['WES_010_4_T2w', 'R_breast'], {}), '(WES_010_4_T2w, R_breast)\n', (3473, 3498), True, 'import SimpleITK as sitk\n'), ((3577, 3595), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3589, 3595), True, 'import matplotlib.pyplot as plt\n'), ((3836, 3869), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image_mri'], {}), '(image_mri)\n', (3858, 3869), True, 'import SimpleITK as sitk\n'), ((3925, 3956), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['arr_mri'], {}), '(arr_mri)\n', (3947, 3956), True, 'import SimpleITK as sitk\n'), ((4118, 4207), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['label_threshold_cc_x_f', '"""test_label_threshold_010_4_T2w_hist.nii.gz"""'], {}), "(label_threshold_cc_x_f,\n 'test_label_threshold_010_4_T2w_hist.nii.gz')\n", (4133, 4207), True, 'import SimpleITK as sitk\n'), ((4247, 4289), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""MPE_sub_WES_010_4.nii.gz"""'], {}), "('MPE_sub_WES_010_4.nii.gz')\n", (4261, 4289), True, 'import SimpleITK as sitk\n'), ((4318, 4362), 'SimpleITK.Resample', 'sitk.Resample', (['WES_010_4_MPE', 'WES_010_4_B50T'], {}), '(WES_010_4_MPE, WES_010_4_B50T)\n', (4331, 4362), True, 'import SimpleITK as sitk\n'), ((4385, 4419), 'SimpleITK.Mask', 'sitk.Mask', (['WES_010_4_MPE', 'R_breast'], {}), '(WES_010_4_MPE, R_breast)\n', (4394, 4419), True, 'import SimpleITK as sitk\n'), ((4498, 4516), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (4510, 4516), True, 'import matplotlib.pyplot as plt\n'), ((4759, 4792), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image_mri'], {}), '(image_mri)\n', (4781, 4792), True, 'import SimpleITK as sitk\n'), ((4848, 4879), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['arr_mri'], {}), '(arr_mri)\n', (4870, 4879), True, 'import SimpleITK as sitk\n'), ((5041, 5130), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['label_threshold_cc_x_f', '"""test_label_threshold_010_4_MPE_hist.nii.gz"""'], {}), "(label_threshold_cc_x_f,\n 'test_label_threshold_010_4_MPE_hist.nii.gz')\n", (5056, 5130), True, 'import SimpleITK as sitk\n'), ((5446, 5619), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz"""'], {}), "(\n '/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B50T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz'\n )\n", (5460, 5619), True, 'import SimpleITK as sitk\n'), ((5645, 5936), 'platipy.imaging.registration.registration.initial_registration', 'initial_registration', (['WES_008_4_B50T', 'WES_010_4_B50T'], {'options': "{'shrink_factors': [8, 4], 'smooth_sigmas': [0, 0], 'sampling_rate': 0.5,\n 'final_interp': 2, 'metric': 'mean_squares', 'optimiser':\n 'gradient_descent_line_search', 'number_of_iterations': 25}", 'reg_method': '"""Rigid"""'}), "(WES_008_4_B50T, WES_010_4_B50T, options={\n 'shrink_factors': [8, 4], 'smooth_sigmas': [0, 0], 'sampling_rate': 0.5,\n 'final_interp': 2, 'metric': 'mean_squares', 'optimiser':\n 'gradient_descent_line_search', 'number_of_iterations': 25}, reg_method\n ='Rigid')\n", (5665, 5936), False, 'from platipy.imaging.registration.registration import initial_registration, fast_symmetric_forces_demons_registration, transform_propagation, apply_field\n'), ((6023, 6157), 
'platipy.imaging.registration.registration.fast_symmetric_forces_demons_registration', 'fast_symmetric_forces_demons_registration', (['WES_008_4_B50T', 'image_to_0_rigid'], {'resolution_staging': '[4, 2]', 'iteration_staging': '[10, 10]'}), '(WES_008_4_B50T, image_to_0_rigid,\n resolution_staging=[4, 2], iteration_staging=[10, 10])\n', (6064, 6157), False, 'from platipy.imaging.registration.registration import initial_registration, fast_symmetric_forces_demons_registration, transform_propagation, apply_field\n'), ((6193, 6272), 'platipy.imaging.registration.registration.transform_propagation', 'transform_propagation', (['WES_008_4_B50T', 'R_breast', 'tfm_to_0_rigid'], {'structure': '(True)'}), '(WES_008_4_B50T, R_breast, tfm_to_0_rigid, structure=True)\n', (6214, 6272), False, 'from platipy.imaging.registration.registration import initial_registration, fast_symmetric_forces_demons_registration, transform_propagation, apply_field\n'), ((6312, 6374), 'platipy.imaging.registration.registration.apply_field', 'apply_field', (['R_breast_to_0_rigid', 'tfm_to_0_dir'], {'structure': '(True)'}), '(R_breast_to_0_rigid, tfm_to_0_dir, structure=True)\n', (6323, 6374), False, 'from platipy.imaging.registration.registration import initial_registration, fast_symmetric_forces_demons_registration, transform_propagation, apply_field\n'), ((6617, 6664), 'SimpleITK.BinaryDilate', 'sitk.BinaryDilate', (['R_breast_to_0_dir', '(2, 2, 2)'], {}), '(R_breast_to_0_dir, (2, 2, 2))\n', (6634, 6664), True, 'import SimpleITK as sitk\n'), ((6891, 6939), 'SimpleITK.Mask', 'sitk.Mask', (['WES_008_4_B50T', 'breast_contour_dilate'], {}), '(WES_008_4_B50T, breast_contour_dilate)\n', (6900, 6939), True, 'import SimpleITK as sitk\n'), ((7028, 7046), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (7040, 7046), True, 'import matplotlib.pyplot as plt\n'), ((7291, 7324), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image_mri'], {}), '(image_mri)\n', (7313, 7324), True, 'import SimpleITK as sitk\n'), ((7380, 7411), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['arr_mri'], {}), '(arr_mri)\n', (7402, 7411), True, 'import SimpleITK as sitk\n'), ((7574, 7664), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['label_threshold_cc_x_f', '"""test_label_threshold_008_4_B50T_hist.nii.gz"""'], {}), "(label_threshold_cc_x_f,\n 'test_label_threshold_008_4_B50T_hist.nii.gz')\n", (7589, 7664), True, 'import SimpleITK as sitk\n'), ((7748, 7922), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz"""'], {}), "(\n '/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_EP2D_DIFF_TRA_SPAIR_ZOOMIT_EZ_B800T_EP2D_DIFF_TRA_SPAIR_ZOOMIT_TRACEW_DFC_MIX_5.nii.gz'\n )\n", (7762, 7922), True, 'import SimpleITK as sitk\n'), ((7927, 8067), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz"""'], {}), "(\n '/home/alicja/Documents/WES_008/IMAGES/WES_008_4_20180619_MR_T2_TSE_TRA_SPAIR_TSE2D1_11_T2_TSE_TRA_SPAIR_3.nii.gz'\n )\n", (7941, 8067), True, 'import SimpleITK as sitk\n'), ((8096, 8145), 'SimpleITK.Mask', 'sitk.Mask', (['WES_008_4_B800T', 'breast_contour_dilate'], {}), '(WES_008_4_B800T, breast_contour_dilate)\n', (8105, 8145), True, 'import SimpleITK as sitk\n'), ((8226, 8244), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 
1)\n', (8238, 8244), True, 'import matplotlib.pyplot as plt\n'), ((8487, 8520), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image_mri'], {}), '(image_mri)\n', (8509, 8520), True, 'import SimpleITK as sitk\n'), ((8576, 8607), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['arr_mri'], {}), '(arr_mri)\n', (8598, 8607), True, 'import SimpleITK as sitk\n'), ((8769, 8860), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['label_threshold_cc_x_f', '"""test_label_threshold_008_4_B800T_hist.nii.gz"""'], {}), "(label_threshold_cc_x_f,\n 'test_label_threshold_008_4_B800T_hist.nii.gz')\n", (8784, 8860), True, 'import SimpleITK as sitk\n'), ((8891, 8936), 'SimpleITK.Resample', 'sitk.Resample', (['WES_008_4_T2w', 'WES_008_4_B800T'], {}), '(WES_008_4_T2w, WES_008_4_B800T)\n', (8904, 8936), True, 'import SimpleITK as sitk\n'), ((8959, 9006), 'SimpleITK.Mask', 'sitk.Mask', (['WES_008_4_T2w', 'breast_contour_dilate'], {}), '(WES_008_4_T2w, breast_contour_dilate)\n', (8968, 9006), True, 'import SimpleITK as sitk\n'), ((9085, 9103), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (9097, 9103), True, 'import matplotlib.pyplot as plt\n'), ((9345, 9378), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image_mri'], {}), '(image_mri)\n', (9367, 9378), True, 'import SimpleITK as sitk\n'), ((9434, 9465), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['arr_mri'], {}), '(arr_mri)\n', (9456, 9465), True, 'import SimpleITK as sitk\n'), ((9627, 9716), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['label_threshold_cc_x_f', '"""test_label_threshold_008_4_T2w_hist.nii.gz"""'], {}), "(label_threshold_cc_x_f,\n 'test_label_threshold_008_4_T2w_hist.nii.gz')\n", (9642, 9716), True, 'import SimpleITK as sitk\n'), ((9778, 9829), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""contralateral_segmentation.nii.gz"""'], {}), "('contralateral_segmentation.nii.gz')\n", (9792, 9829), True, 'import SimpleITK as sitk\n'), ((9867, 9946), 'platipy.imaging.registration.registration.transform_propagation', 'transform_propagation', (['WES_008_4_B50T', 'L_breast', 'tfm_to_0_rigid'], {'structure': '(True)'}), '(WES_008_4_B50T, L_breast, tfm_to_0_rigid, structure=True)\n', (9888, 9946), False, 'from platipy.imaging.registration.registration import initial_registration, fast_symmetric_forces_demons_registration, transform_propagation, apply_field\n'), ((9986, 10048), 'platipy.imaging.registration.registration.apply_field', 'apply_field', (['L_breast_to_0_rigid', 'tfm_to_0_dir'], {'structure': '(True)'}), '(L_breast_to_0_rigid, tfm_to_0_dir, structure=True)\n', (9997, 10048), False, 'from platipy.imaging.registration.registration import initial_registration, fast_symmetric_forces_demons_registration, transform_propagation, apply_field\n'), ((10102, 10149), 'SimpleITK.BinaryDilate', 'sitk.BinaryDilate', (['L_breast_to_0_dir', '(4, 4, 4)'], {}), '(L_breast_to_0_dir, (4, 4, 4))\n', (10119, 10149), True, 'import SimpleITK as sitk\n'), ((10365, 10415), 'SimpleITK.Mask', 'sitk.Mask', (['WES_008_4_B50T', 'L_breast_contour_dilate'], {}), '(WES_008_4_B50T, L_breast_contour_dilate)\n', (10374, 10415), True, 'import SimpleITK as sitk\n'), ((10491, 10509), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (10503, 10509), True, 'import matplotlib.pyplot as plt\n'), ((1550, 1646), 'SimpleITK.BinaryThreshold', 'sitk.BinaryThreshold', (['img_mri'], {'lowerThreshold': 'lowerthreshold', 'upperThreshold': 'upperthreshold'}), '(img_mri, 
lowerThreshold=lowerthreshold, upperThreshold\n =upperthreshold)\n', (1570, 1646), True, 'import SimpleITK as sitk\n'), ((1811, 1903), 'SimpleITK.BinaryMorphologicalClosing', 'sitk.BinaryMorphologicalClosing', (['label_threshold_cc_x', '(hole_size, hole_size, hole_size)'], {}), '(label_threshold_cc_x, (hole_size, hole_size,\n hole_size))\n', (1842, 1903), True, 'import SimpleITK as sitk\n'), ((1112, 1155), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['masked_R_breast'], {}), '(masked_R_breast)\n', (1138, 1155), True, 'import SimpleITK as sitk\n'), ((1216, 1241), 'numpy.linspace', 'np.linspace', (['(200)', '(900)', '(50)'], {}), '(200, 900, 50)\n', (1227, 1241), True, 'import numpy as np\n'), ((1689, 1729), 'SimpleITK.ConnectedComponent', 'sitk.ConnectedComponent', (['label_threshold'], {}), '(label_threshold)\n', (1712, 1729), True, 'import SimpleITK as sitk\n'), ((2491, 2540), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['masked_R_breast_B800T'], {}), '(masked_R_breast_B800T)\n', (2517, 2540), True, 'import SimpleITK as sitk\n'), ((2601, 2624), 'numpy.linspace', 'np.linspace', (['(1)', '(600)', '(50)'], {}), '(1, 600, 50)\n', (2612, 2624), True, 'import numpy as np\n'), ((3508, 3555), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['masked_R_breast_T2w'], {}), '(masked_R_breast_T2w)\n', (3534, 3555), True, 'import SimpleITK as sitk\n'), ((3616, 3639), 'numpy.linspace', 'np.linspace', (['(1)', '(300)', '(50)'], {}), '(1, 300, 50)\n', (3627, 3639), True, 'import numpy as np\n'), ((4429, 4476), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['masked_R_breast_MPE'], {}), '(masked_R_breast_MPE)\n', (4455, 4476), True, 'import SimpleITK as sitk\n'), ((4537, 4562), 'numpy.linspace', 'np.linspace', (['(150)', '(450)', '(50)'], {}), '(150, 450, 50)\n', (4548, 4562), True, 'import numpy as np\n'), ((6455, 6481), 'platipy.imaging.utils.tools.get_com', 'get_com', (['R_breast_to_0_dir'], {}), '(R_breast_to_0_dir)\n', (6462, 6481), False, 'from platipy.imaging.utils.tools import get_com\n'), ((6729, 6755), 'platipy.imaging.utils.tools.get_com', 'get_com', (['R_breast_to_0_dir'], {}), '(R_breast_to_0_dir)\n', (6736, 6755), False, 'from platipy.imaging.utils.tools import get_com\n'), ((6963, 7006), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['masked_R_breast'], {}), '(masked_R_breast)\n', (6989, 7006), True, 'import SimpleITK as sitk\n'), ((7067, 7093), 'numpy.linspace', 'np.linspace', (['(200)', '(3000)', '(50)'], {}), '(200, 3000, 50)\n', (7078, 7093), True, 'import numpy as np\n'), ((8155, 8204), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['masked_R_breast_B800T'], {}), '(masked_R_breast_B800T)\n', (8181, 8204), True, 'import SimpleITK as sitk\n'), ((8265, 8288), 'numpy.linspace', 'np.linspace', (['(1)', '(600)', '(50)'], {}), '(1, 600, 50)\n', (8276, 8288), True, 'import numpy as np\n'), ((9016, 9063), 'SimpleITK.GetArrayViewFromImage', 'sitk.GetArrayViewFromImage', (['masked_R_breast_T2w'], {}), '(masked_R_breast_T2w)\n', (9042, 9063), True, 'import SimpleITK as sitk\n'), ((9124, 9147), 'numpy.linspace', 'np.linspace', (['(1)', '(250)', '(50)'], {}), '(1, 250, 50)\n', (9135, 9147), True, 'import numpy as np\n'), ((10200, 10226), 'platipy.imaging.utils.tools.get_com', 'get_com', (['L_breast_to_0_dir'], {}), '(L_breast_to_0_dir)\n', (10207, 10226), False, 'from platipy.imaging.utils.tools import get_com\n'), ((10426, 10469), 'SimpleITK.GetArrayViewFromImage', 
'sitk.GetArrayViewFromImage', (['masked_L_breast'], {}), '(masked_L_breast)\n', (10452, 10469), True, 'import SimpleITK as sitk\n'), ((10530, 10556), 'numpy.linspace', 'np.linspace', (['(200)', '(3000)', '(50)'], {}), '(200, 3000, 50)\n', (10541, 10556), True, 'import numpy as np\n')]
|
import numpy as np
def hole_filling(img, kernel=3):
    """Fill zero-valued pixels with the maximum of their kernel x kernel neighbourhood."""
    N, M = img.shape
    r = (kernel - 1) // 2
    for i in range(N):
        for j in range(M):
            if img[i, j] == 0:
                # slice ends are exclusive, so add 1 to include the lower/right neighbours
                neighbour = img[max(i - r, 0):min(i + r + 1, N), max(j - r, 0):min(j + r + 1, M)]
                if neighbour.size == 0:
                    continue
                img[i, j] = np.amax(neighbour)
    return img
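# Quick sanity check on a hypothetical 3x3 patch: the central zero should be
# replaced by the neighbourhood maximum (9).
if __name__ == "__main__":
    print(hole_filling(np.array([[1, 2, 3], [4, 0, 6], [7, 8, 9]])))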
|
[
"numpy.amax"
] |
[((429, 447), 'numpy.amax', 'np.amax', (['neighbour'], {}), '(neighbour)\n', (436, 447), True, 'import numpy as np\n')]
|
'''
adapted from Harry
'''
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from pyPCGA import PCGA
# import mf
import math
import datetime as dt
import os
import sys
from poro import Model
#print(np.__version__)
# domain parameters
nx = 128
ny = 128
N = np.array([nx, ny])
m = np.prod(N)
x = np.linspace(0., 1., N[0])
y = np.linspace(0., 1., N[1])
xmin = np.array([x[0], y[0]])
xmax = np.array([x[-1], y[-1]])
# forward problem parameters
pts_fem = np.loadtxt('dof_perm_dg0.csv', delimiter=',')
ptx = np.linspace(0,1,nx)
pty = np.linspace(0,1,ny)
logk_idx = np.loadtxt('logk_idx.txt').astype(int)
forward_params = {'ptx': ptx, 'pty': pty, 'pts_fem': pts_fem, 'logk_idx': logk_idx}
# Load files for s_true and obs
s_true = np.loadtxt('s_true.txt').reshape(-1,1)
obs = np.loadtxt('obs.txt').reshape(-1,1) # generated noisy obs from poro.py
# covariance kernel and scale parameters
prior_std = 2.0
prior_cov_scale = np.array([0.1, 0.1])
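# exponential covariance kernel; pyPCGA evaluates it on separation distances scaled by prior_cov_scale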
def kernel(r): return (prior_std ** 2) * np.exp(-r)
XX, YY = np.meshgrid(x, y)
pts = None # for uniform grids, explicit point coordinates for s are not needed
# prepare interface to run as a function
def forward_model(s, parallelization, ncores=None):
model = Model(forward_params)
if parallelization:
simul_obs = model.run(s, parallelization, ncores)
else:
simul_obs = model.run(s, parallelization)
return simul_obs
params = {'R': (50.0) ** 2, 'n_pc': 96,
'maxiter': 10, 'restol': 0.1,
'matvec': 'FFT', 'xmin': xmin, 'xmax': xmax, 'N': N,
'prior_std': prior_std, 'prior_cov_scale': prior_cov_scale,
'kernel': kernel, 'post_cov': 'diag',
'precond': False, 'LM': True, # 'LM_smin' : -30.0, 'LM_smax' : 5.0, # 'alphamax_LM' : 1.E+5,
'parallel': True, 'linesearch': True, #'precision': 1.e-4,
'forward_model_verbose': True, 'verbose': True,
'iter_save': True}
#s_init = np.mean(s_true) * np.ones((m, 1))
s_init = -20. * np.ones((m, 1))
# initialize
prob = PCGA(forward_model, s_init, pts, params, s_true, obs)
# prob = PCGA(forward_model, s_init, pts, params, s_true, obs, X = X) #if you want to add your own drift X
# run inversion
s_hat, simul_obs, post_diagv, iter_best = prob.Run()
|
[
"numpy.meshgrid",
"poro.Model",
"pyPCGA.PCGA",
"numpy.ones",
"matplotlib.use",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"numpy.exp",
"numpy.prod"
] |
[((49, 70), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (63, 70), False, 'import matplotlib\n'), ((323, 341), 'numpy.array', 'np.array', (['[nx, ny]'], {}), '([nx, ny])\n', (331, 341), True, 'import numpy as np\n'), ((347, 357), 'numpy.prod', 'np.prod', (['N'], {}), '(N)\n', (354, 357), True, 'import numpy as np\n'), ((365, 392), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'N[0]'], {}), '(0.0, 1.0, N[0])\n', (376, 392), True, 'import numpy as np\n'), ((396, 423), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'N[1]'], {}), '(0.0, 1.0, N[1])\n', (407, 423), True, 'import numpy as np\n'), ((432, 454), 'numpy.array', 'np.array', (['[x[0], y[0]]'], {}), '([x[0], y[0]])\n', (440, 454), True, 'import numpy as np\n'), ((463, 487), 'numpy.array', 'np.array', (['[x[-1], y[-1]]'], {}), '([x[-1], y[-1]])\n', (471, 487), True, 'import numpy as np\n'), ((531, 576), 'numpy.loadtxt', 'np.loadtxt', (['"""dof_perm_dg0.csv"""'], {'delimiter': '""","""'}), "('dof_perm_dg0.csv', delimiter=',')\n", (541, 576), True, 'import numpy as np\n'), ((584, 605), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'nx'], {}), '(0, 1, nx)\n', (595, 605), True, 'import numpy as np\n'), ((611, 632), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ny'], {}), '(0, 1, ny)\n', (622, 632), True, 'import numpy as np\n'), ((1014, 1034), 'numpy.array', 'np.array', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (1022, 1034), True, 'import numpy as np\n'), ((1102, 1119), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1113, 1119), True, 'import numpy as np\n'), ((2115, 2168), 'pyPCGA.PCGA', 'PCGA', (['forward_model', 's_init', 'pts', 'params', 's_true', 'obs'], {}), '(forward_model, s_init, pts, params, s_true, obs)\n', (2119, 2168), False, 'from pyPCGA import PCGA\n'), ((1288, 1309), 'poro.Model', 'Model', (['forward_params'], {}), '(forward_params)\n', (1293, 1309), False, 'from poro import Model\n'), ((2077, 2092), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (2084, 2092), True, 'import numpy as np\n'), ((643, 669), 'numpy.loadtxt', 'np.loadtxt', (['"""logk_idx.txt"""'], {}), "('logk_idx.txt')\n", (653, 669), True, 'import numpy as np\n'), ((815, 839), 'numpy.loadtxt', 'np.loadtxt', (['"""s_true.txt"""'], {}), "('s_true.txt')\n", (825, 839), True, 'import numpy as np\n'), ((861, 882), 'numpy.loadtxt', 'np.loadtxt', (['"""obs.txt"""'], {}), "('obs.txt')\n", (871, 882), True, 'import numpy as np\n'), ((1079, 1089), 'numpy.exp', 'np.exp', (['(-r)'], {}), '(-r)\n', (1085, 1089), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import tensorflow as tf
tf.config.run_functions_eagerly(True)
import numpy as np
from graph2tensor.model.layers import GCNConv
from graph2tensor.model.models import MessagePassing
from unittest import TestCase, main
conv_layers = [
GCNConv(units=32, name="layer1"),
GCNConv(units=32, name="layer2"),
GCNConv(units=8, name="layer3"),
]
mp = MessagePassing([conv_layers, conv_layers, conv_layers], name="sage", concat_hidden=False)
src = {'feat': tf.constant(np.random.random(size=(4, 16)), dtype=tf.float32)}
hop1 = {'feat': tf.constant(np.random.random(size=(10, 16)), dtype=tf.float32)}
hop2 = {'feat': tf.constant(np.random.random(size=(55, 16)), dtype=tf.float32)}
hop3 = {'feat': tf.constant(np.random.random(size=(110, 16)), dtype=tf.float32)}
edge = {}
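# each hop is (target node attrs, edge attrs, neighbour node attrs, segment ids
# mapping each neighbour back to its target); fan-out here is 4 -> 10 -> 55 -> 110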
hops = (
(src, edge, hop1, tf.repeat(tf.range(4), tf.range(1, 5))),
(hop1, edge, hop2, tf.repeat(tf.range(10), tf.range(1, 11))),
(hop2, edge, hop3, tf.repeat(tf.range(55), 2))
)
class TestMsgPassing(TestCase):
def test_config(self):
config = mp.get_config()
assert config["name"] == "sage"
assert config["concat_hidden"] is False
assert config["attr_reduce_mode"] == 'concat'
assert config["conv_layers"].__len__() == 3
assert config["conv_layers"][0].__len__() == 3
assert config["conv_layers"][0][1]["class_name"] == "GCNConv"
assert config["conv_layers"][0][1]["config"]["name"] == "layer2"
assert config["conv_layers"][0][1]["config"]["units"] == 32
custom_objects = {
"GCNConv": GCNConv,
}
_ = MessagePassing.from_config(config, custom_objects)
def test_save_load(self):
x = mp((hops, hops, hops)).numpy()
mp.save_weights("/tmp/sage")
mp1 = MessagePassing([conv_layers, conv_layers, conv_layers], name="sage", concat_hidden=False)
mp1.load_weights("/tmp/sage")
x1 = mp1((hops, hops, hops)).numpy()
np.testing.assert_allclose(x, x1, atol=1e-6)
def test_call(self):
assert mp((hops, hops, hops)).numpy().shape == (4, 8)
mp1 = MessagePassing([conv_layers, conv_layers, conv_layers], name="sage", concat_hidden=True)
assert mp1((hops, hops, hops)).numpy().shape == (4, 72)
if __name__ == "__main__":
main()
|
[
"unittest.main",
"tensorflow.config.run_functions_eagerly",
"graph2tensor.model.layers.GCNConv",
"tensorflow.range",
"graph2tensor.model.models.MessagePassing.from_config",
"numpy.random.random",
"numpy.testing.assert_allclose",
"graph2tensor.model.models.MessagePassing"
] |
[((47, 84), 'tensorflow.config.run_functions_eagerly', 'tf.config.run_functions_eagerly', (['(True)'], {}), '(True)\n', (78, 84), True, 'import tensorflow as tf\n'), ((376, 469), 'graph2tensor.model.models.MessagePassing', 'MessagePassing', (['[conv_layers, conv_layers, conv_layers]'], {'name': '"""sage"""', 'concat_hidden': '(False)'}), "([conv_layers, conv_layers, conv_layers], name='sage',\n concat_hidden=False)\n", (390, 469), False, 'from graph2tensor.model.models import MessagePassing\n'), ((260, 292), 'graph2tensor.model.layers.GCNConv', 'GCNConv', ([], {'units': '(32)', 'name': '"""layer1"""'}), "(units=32, name='layer1')\n", (267, 292), False, 'from graph2tensor.model.layers import GCNConv\n'), ((298, 330), 'graph2tensor.model.layers.GCNConv', 'GCNConv', ([], {'units': '(32)', 'name': '"""layer2"""'}), "(units=32, name='layer2')\n", (305, 330), False, 'from graph2tensor.model.layers import GCNConv\n'), ((336, 367), 'graph2tensor.model.layers.GCNConv', 'GCNConv', ([], {'units': '(8)', 'name': '"""layer3"""'}), "(units=8, name='layer3')\n", (343, 367), False, 'from graph2tensor.model.layers import GCNConv\n'), ((2312, 2318), 'unittest.main', 'main', ([], {}), '()\n', (2316, 2318), False, 'from unittest import TestCase, main\n'), ((493, 523), 'numpy.random.random', 'np.random.random', ([], {'size': '(4, 16)'}), '(size=(4, 16))\n', (509, 523), True, 'import numpy as np\n'), ((572, 603), 'numpy.random.random', 'np.random.random', ([], {'size': '(10, 16)'}), '(size=(10, 16))\n', (588, 603), True, 'import numpy as np\n'), ((652, 683), 'numpy.random.random', 'np.random.random', ([], {'size': '(55, 16)'}), '(size=(55, 16))\n', (668, 683), True, 'import numpy as np\n'), ((732, 764), 'numpy.random.random', 'np.random.random', ([], {'size': '(110, 16)'}), '(size=(110, 16))\n', (748, 764), True, 'import numpy as np\n'), ((1622, 1672), 'graph2tensor.model.models.MessagePassing.from_config', 'MessagePassing.from_config', (['config', 'custom_objects'], {}), '(config, custom_objects)\n', (1648, 1672), False, 'from graph2tensor.model.models import MessagePassing\n'), ((1798, 1891), 'graph2tensor.model.models.MessagePassing', 'MessagePassing', (['[conv_layers, conv_layers, conv_layers]'], {'name': '"""sage"""', 'concat_hidden': '(False)'}), "([conv_layers, conv_layers, conv_layers], name='sage',\n concat_hidden=False)\n", (1812, 1891), False, 'from graph2tensor.model.models import MessagePassing\n'), ((1979, 2024), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['x', 'x1'], {'atol': '(1e-06)'}), '(x, x1, atol=1e-06)\n', (2005, 2024), True, 'import numpy as np\n'), ((2126, 2218), 'graph2tensor.model.models.MessagePassing', 'MessagePassing', (['[conv_layers, conv_layers, conv_layers]'], {'name': '"""sage"""', 'concat_hidden': '(True)'}), "([conv_layers, conv_layers, conv_layers], name='sage',\n concat_hidden=True)\n", (2140, 2218), False, 'from graph2tensor.model.models import MessagePassing\n'), ((836, 847), 'tensorflow.range', 'tf.range', (['(4)'], {}), '(4)\n', (844, 847), True, 'import tensorflow as tf\n'), ((849, 863), 'tensorflow.range', 'tf.range', (['(1)', '(5)'], {}), '(1, 5)\n', (857, 863), True, 'import tensorflow as tf\n'), ((900, 912), 'tensorflow.range', 'tf.range', (['(10)'], {}), '(10)\n', (908, 912), True, 'import tensorflow as tf\n'), ((914, 929), 'tensorflow.range', 'tf.range', (['(1)', '(11)'], {}), '(1, 11)\n', (922, 929), True, 'import tensorflow as tf\n'), ((966, 978), 'tensorflow.range', 'tf.range', (['(55)'], {}), '(55)\n', (974, 978), True, 'import tensorflow 
as tf\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
High level functions for signal characterization from 1D signals
Code licensed under both GPL and BSD licenses
Authors: <NAME> <<EMAIL>>
<NAME> <<EMAIL>>
"""
from scipy.signal import periodogram, welch
import pandas as pd
import numpy as np
# note: 'hann' is used instead of the legacy 'hanning' alias, which newer scipy rejects
def psd(s, fs, nperseg=256, method='welch', window='hann', nfft=None, tlims=None):
"""
Estimates power spectral density of 1D signal using Welch's or periodogram methods.
Note: this is a wrapper function that uses functions from scipy.signal module
Parameters
----------
s: 1D array
Input signal to process
    fs: float
        Sampling frequency of the audio signal
    window: str, optional
        Window function name passed to scipy, default is 'hann'
    nperseg: int, optional
        Length of each segment for the 'welch' method, default is 256
nfft: int, optional
Length of FFT for periodogram method. If None, length of signal will be used.
Length of FFT for welch method if zero padding is desired. If None, length of nperseg will be used.
method: {'welch', 'periodogram'}
Method used to estimate the power spectral density of the signal
tlims: tuple of ints or floats
Temporal limits to compute the power spectral density in seconds (s)
Returns
-------
    psd_s: pandas Series
Estimate of power spectral density
f_idx: pandas Series
Index of sample frequencies
Example
-------
s, fs = sound.load('spinetail.wav')
    psd_s, f_idx = psd(s, fs, nperseg=512)
"""
if tlims is not None:
# trim audio signal
try:
s = s[int(tlims[0]*fs): int(tlims[1]*fs)]
        except IndexError:
            raise Exception('length of tlims tuple should be 2')
if method=='welch':
        # the 5th positional argument of welch is noverlap, so nfft must be passed by keyword
        f_idx, psd_s = welch(s, fs, window, nperseg, nfft=nfft)
elif method=='periodogram':
f_idx, psd_s = periodogram(s, fs, window, nfft, scaling='spectrum')
else:
raise Exception("Invalid method. Method should be 'welch' or 'periodogram' ")
index_names = ['psd_' + str(idx).zfill(3) for idx in range(1,len(psd_s)+1)]
psd_s = pd.Series(psd_s, index=index_names)
f_idx = pd.Series(f_idx, index=index_names)
return psd_s, f_idx
def rms(s):
"""
Computes the root-mean-square (RMS) of a signal
Parameters
----------
s : ndarray
1D audio signal
Returns
-------
rms: float
Root mean square of signal
"""
return np.sqrt(np.mean(s**2))
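if __name__ == "__main__":
    # Minimal usage sketch. The 'spinetail.wav' file from the docstring is not
    # bundled here, so a synthetic 1 kHz test tone is used instead (assumption).
    fs = 44100
    t = np.arange(fs) / fs
    s_test = np.sin(2 * np.pi * 1000.0 * t)
    psd_s, f_idx = psd(s_test, fs, nperseg=512)
    print(rms(s_test))  # ~0.707 for a unit-amplitude sine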
|
[
"scipy.signal.periodogram",
"numpy.mean",
"pandas.Series",
"scipy.signal.welch"
] |
[((2212, 2247), 'pandas.Series', 'pd.Series', (['psd_s'], {'index': 'index_names'}), '(psd_s, index=index_names)\n', (2221, 2247), True, 'import pandas as pd\n'), ((2260, 2295), 'pandas.Series', 'pd.Series', (['f_idx'], {'index': 'index_names'}), '(f_idx, index=index_names)\n', (2269, 2295), True, 'import pandas as pd\n'), ((1856, 1891), 'scipy.signal.welch', 'welch', (['s', 'fs', 'window', 'nperseg', 'nfft'], {}), '(s, fs, window, nperseg, nfft)\n', (1861, 1891), False, 'from scipy.signal import periodogram, welch\n'), ((2567, 2582), 'numpy.mean', 'np.mean', (['(s ** 2)'], {}), '(s ** 2)\n', (2574, 2582), True, 'import numpy as np\n'), ((1952, 2004), 'scipy.signal.periodogram', 'periodogram', (['s', 'fs', 'window', 'nfft'], {'scaling': '"""spectrum"""'}), "(s, fs, window, nfft, scaling='spectrum')\n", (1963, 2004), False, 'from scipy.signal import periodogram, welch\n')]
|
import os
from PIL import Image
import numpy as np
path='faces/faces_4/an2i'
trainx=[]
trainy=[]
for filename in os.listdir(path):
pixel=[]
im=Image.open(path+'/'+filename)
for i in range(im.size[0]):
row=[]
for j in range(im.size[1]):
row.append(im.getpixel((i,j)))
pixel.append(row)
trainx.append(pixel)
director=filename.split('_')[1]
if director=='left':
trainy.append([1,0,0,0])
elif director=='right':
trainy.append([0,1,0,0])
elif director=='straight':
trainy.append([0,0,1,0])
elif director=='up':
trainy.append([0,0,0,1])
trainx=np.array(trainx)
trainy=np.array(trainy)
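#flatten each image (assumed 32x30 pixels) to a column vector, centre the pixel
#values on zero and scale to roughly [-0.5, 0.5); one column per example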
trainx=np.transpose(trainx.reshape((-1,32*30))-128)/256.0
trainy=np.transpose(trainy)
|
[
"numpy.transpose",
"numpy.array",
"os.listdir",
"PIL.Image.open"
] |
[((114, 130), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (124, 130), False, 'import os\n'), ((565, 581), 'numpy.array', 'np.array', (['trainx'], {}), '(trainx)\n', (573, 581), True, 'import numpy as np\n'), ((590, 606), 'numpy.array', 'np.array', (['trainy'], {}), '(trainy)\n', (598, 606), True, 'import numpy as np\n'), ((673, 693), 'numpy.transpose', 'np.transpose', (['trainy'], {}), '(trainy)\n', (685, 693), True, 'import numpy as np\n'), ((146, 179), 'PIL.Image.open', 'Image.open', (["(path + '/' + filename)"], {}), "(path + '/' + filename)\n", (156, 179), False, 'from PIL import Image\n')]
|
import glob
import os
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import matplotlib.image as mpimg
import pandas as pd
import cv2
class FacialKeypointsDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, csv_file, root_dir, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.key_pts_frame = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.key_pts_frame)
def __getitem__(self, idx):
image_name = os.path.join(self.root_dir,
self.key_pts_frame.iloc[idx, 0])
image = mpimg.imread(image_name)
# if image has an alpha color channel, get rid of it
if(image.shape[2] == 4):
image = image[:,:,0:3]
        key_pts = self.key_pts_frame.iloc[idx, 1:].to_numpy()  # .as_matrix() was removed in pandas 1.0
key_pts = key_pts.astype('float').reshape(-1, 2)
sample = {'image': image, 'keypoints': key_pts}
if self.transform:
sample = self.transform(sample)
return sample
# transforms
from torchvision import transforms, utils
class GrayScale(object):
def __call__(self, sample):
image, keypoints = sample["image"], sample["keypoints"]
image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY).reshape(image.shape[0], image.shape[1], 1)
return {"image": image_copy, "keypoints": keypoints}
class Normalize(object):
"""Convert a color image to grayscale and normalize the color range to [0,1]."""
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
image_copy = np.copy(image)
key_pts_copy = np.copy(key_pts)
# convert image to grayscale
#image_copy = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# scale color range from [0, 255] to [0, 1]
image_copy= image_copy/255.0
# scale keypoints to be centered around 0 with a range of [-1, 1]
# mean = 100, sqrt = 50, so, pts should be (pts - 100)/50
key_pts_copy = (key_pts_copy - 100)/50.0
return {'image': image_copy, 'keypoints': key_pts_copy}
class Rescale(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
h, w = image.shape[:2]
        #keep the aspect ratio
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = cv2.resize(image, (new_w, new_h))
# scale the pts, too
key_pts = key_pts * [new_w / w, new_h / h]
return {'image': img, 'keypoints': key_pts}
class RandomCrop(object):
"""Crop randomly the image in a sample.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h,
left: left + new_w]
key_pts = key_pts - [left, top]
return {'image': image, 'keypoints': key_pts}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image, key_pts = sample['image'], sample['keypoints']
# if image has no grayscale color channel, add one
if(len(image.shape) == 2):
# add that third color dim
image = image.reshape(image.shape[0], image.shape[1], 1)
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
image = image.transpose((2, 0, 1))
return {'image': torch.from_numpy(image),
'keypoints': torch.from_numpy(key_pts)}
class RandomRotation(object):
"""Rotate randomly an image in a sample
Args:
min_rotation_angle (int)
max_rotation_angle (int)
"""
def __init__(self, range_angle=45, range_scale=0.1):
self.range_angle = range_angle
self.range_scale = range_scale
def __call__(self, sample):
image, keypoints = sample['image'], sample['keypoints']
random_angle = -self.range_angle + np.random.random()*2*self.range_angle
random_scale = 1-self.range_scale + np.random.random()*2*self.range_scale
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
rotation = cv2.getRotationMatrix2D((cX, cY), random_angle, random_scale)
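        # append a ones column so the 2x3 affine matrix can act on the
        # keypoints in homogeneous coordinates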
keypoints_copy = np.hstack([keypoints, np.ones((keypoints.shape[0], 1))])
#print("keypoints_shape : ", keypoints_copy.T)
keypoints_copy = np.matmul(rotation, keypoints_copy.T).T
        rotated_image = cv2.warpAffine(image, rotation, (w, h))  # warpAffine expects dsize as (width, height)
return {'image': rotated_image,
'keypoints': keypoints_copy}
class RandomHorizontalFlip(object):
"""Rotate randomly an image in a sample
Args:
"""
def __init__(self):
self.flip_indices = [(21, 22), (20, 23), (19, 24), (18, 25), (17, 26), #eye brow
(36, 45), (37, 44), (38, 43), (39, 42), (41, 46), (40, 47), # eyes
(0, 16), (1, 15), (2, 14), (3, 13), (4, 12), (5, 11), (6, 10), (7, 9), #chin
                             (48, 54), (49, 53), (50, 52), (58, 56), (59, 55), (60, 64), (61, 63), (67, 65), #mouth (fixed duplicated indices)
(31, 35), (32, 34) # nose
]
def __call__(self, sample):
flip = np.random.random() > 0.5
image, keypoints = sample['image'], sample['keypoints']
        keypoints_copy = keypoints.copy()  # slicing returns a view; copy to avoid mutating the input
(h, w) = image.shape[:2]
if flip:
# change the coordinates of the keypoints
            keypoints_copy[:,0] = (w - 1) - keypoints_copy[:,0]  # mirror x about the image centre
            #and swap left/right landmark positions in the keypoints array as well
for i, j in self.flip_indices:
temp = [keypoints_copy[i,0],keypoints_copy[i,1]]
keypoints_copy[i,0] = keypoints_copy[j,0]
keypoints_copy[i,1] = keypoints_copy[j,1]
keypoints_copy[j,0] = temp[0]
keypoints_copy[j,1] = temp[1]
#flip the image
            image = image[:, ::-1, :]  # -1:0:-1 would drop the first column
return {'image': image,
'keypoints': keypoints_copy}
|
[
"matplotlib.image.imread",
"numpy.copy",
"pandas.read_csv",
"cv2.cvtColor",
"numpy.ones",
"cv2.warpAffine",
"numpy.random.randint",
"numpy.random.random",
"numpy.matmul",
"os.path.join",
"cv2.getRotationMatrix2D",
"cv2.resize",
"torch.from_numpy"
] |
[((608, 629), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (619, 629), True, 'import pandas as pd\n'), ((815, 875), 'os.path.join', 'os.path.join', (['self.root_dir', 'self.key_pts_frame.iloc[idx, 0]'], {}), '(self.root_dir, self.key_pts_frame.iloc[idx, 0])\n', (827, 875), False, 'import os\n'), ((933, 957), 'matplotlib.image.imread', 'mpimg.imread', (['image_name'], {}), '(image_name)\n', (945, 957), True, 'import matplotlib.image as mpimg\n'), ((2062, 2076), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (2069, 2076), True, 'import numpy as np\n'), ((2100, 2116), 'numpy.copy', 'np.copy', (['key_pts'], {}), '(key_pts)\n', (2107, 2116), True, 'import numpy as np\n'), ((3533, 3566), 'cv2.resize', 'cv2.resize', (['image', '(new_w, new_h)'], {}), '(image, (new_w, new_h))\n', (3543, 3566), False, 'import cv2\n'), ((4368, 4399), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - new_h)'], {}), '(0, h - new_h)\n', (4385, 4399), True, 'import numpy as np\n'), ((4415, 4446), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - new_w)'], {}), '(0, w - new_w)\n', (4432, 4446), True, 'import numpy as np\n'), ((5962, 6023), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cX, cY)', 'random_angle', 'random_scale'], {}), '((cX, cY), random_angle, random_scale)\n', (5985, 6023), False, 'import cv2\n'), ((6253, 6292), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rotation', '(h, w)'], {}), '(image, rotation, (h, w))\n', (6267, 6292), False, 'import cv2\n'), ((5198, 5221), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (5214, 5221), False, 'import torch\n'), ((5252, 5277), 'torch.from_numpy', 'torch.from_numpy', (['key_pts'], {}), '(key_pts)\n', (5268, 5277), False, 'import torch\n'), ((6187, 6224), 'numpy.matmul', 'np.matmul', (['rotation', 'keypoints_copy.T'], {}), '(rotation, keypoints_copy.T)\n', (6196, 6224), True, 'import numpy as np\n'), ((7105, 7123), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7121, 7123), True, 'import numpy as np\n'), ((1633, 1672), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (1645, 1672), False, 'import cv2\n'), ((6072, 6104), 'numpy.ones', 'np.ones', (['(keypoints.shape[0], 1)'], {}), '((keypoints.shape[0], 1))\n', (6079, 6104), True, 'import numpy as np\n'), ((5752, 5770), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5768, 5770), True, 'import numpy as np\n'), ((5834, 5852), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5850, 5852), True, 'import numpy as np\n')]
|
# class Event(object):
# _observers = []
#
# def __init__(self, webscraper, item):
# self.webscraper = webscraper
# self.item = item
#
# def __repr__(self):
# return self.__class__.__name__
#
# @classmethod
# def register(cls, observer):
# if observer not in cls._observers:
# cls._observers.append(observer)
#
# @classmethod
# def unregister(cls, observer):
# if observer in cls._observers:
# cls._observers.remove(observer)
#
# @classmethod
# def notify(cls, subject, item):
# event = cls(subject, item)
# # print(cls,'-', subject,'-', event)
# print(cls._observers, end="\n")
# for observer in cls._observers:
# # print(observer, end="\n")
# observer(event)
#
# ##### 2. Ignore method repr ###
# class MagnetRequestEvent(Event):
# def repr(self):
# pass
#
# # class MagnetNotifierEvent(Event):
# # def repr(self):
# # pass
#
# def log_add_magnet(event):
# print('{0} magnet has been added: {1}'.format(event.webscraper, event.item))
#
# class Announcer():
# def __call__(self, event):
# print('Announcer Magnet Has Been Added {0}'.format(event.item))
#
# MagnetRequestEvent.register(log_add_magnet)
# MagnetRequestEvent.register(Announcer())
# MagnetRequestEvent.notify('MejorTorrentScraper', 'magnet:aferqwejklrq12')
#
# # def log(event):
# # print('{} was written'.format(event.subject))
# #
# # class AnotherObserver():
# # def __call__(self, event):
# # print('Yeah {} told me !'.format(event))
# #
# # WriteEvent.register(log)
# # WriteEvent.register(AnotherObserver())
# # WriteEvent.notify('a given file', '')
# #
# # class AnotherEvent(Event):
# # def repr(self):
# # pass
# # # AnotherEvent = Event(subject = 'test2')
# # AnotherEvent.register(log)
# # AnotherEvent.register(AnotherObserver())
# # AnotherEvent.notify('second file', '')
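# Editor's note on the commented-out observer pattern above: `_observers = []`
# is defined once on the Event base class, so cls._observers.append(...) from
# any subclass mutates that single shared list; every Event subclass would
# notify the same observers unless the subclass shadows _observers with its
# own list.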
import numpy as np
# Demo: split an 80-character progress bar across the scrapers, advancing it
# in per-magnet increments.
webscraper_list = [1, 2, 3, 4, 5, 6, 7]
c = 80 / len(webscraper_list)   # bar width allotted to each scraper
magnet_counter = 30             # magnets expected per scraper
f = c / magnet_counter          # bar increment per magnet
for i in np.arange(0, c, f):
    print(int(i))
def _calculate_progress_bar(webscraper_list, counter):
    """Return (base, chunk): bar width per scraper and per counted item."""
    base = 80 / len(webscraper_list)
    chunk = base / len(counter)
    return base, chunk

def _update_progress_bar(analyzed, chunk):
    """Return the bar length reached after `analyzed` items."""
    return int(analyzed * chunk)
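
# Editor's sketch (names below are hypothetical): driving both helpers to
# render a text progress bar for one scraper's share of the work.
if __name__ == '__main__':
    scrapers = ['mejortorrent', 'eztv', 'rarbg']
    magnets = list(range(30))
    _, chunk = _calculate_progress_bar(scrapers, magnets)
    for analyzed in range(len(magnets) + 1):
        print('#' * _update_progress_bar(analyzed, chunk))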
|
[
"numpy.arange"
] |
[((2095, 2113), 'numpy.arange', 'np.arange', (['(0)', 'c', 'f'], {}), '(0, c, f)\n', (2104, 2113), True, 'import numpy as np\n')]
|
from copy import copy
import time
import os
import numpy as np
import numpy.linalg as linalg
import gym
from gym import spaces
from gym.utils import seeding
from roboball2d.physics import B2World
from roboball2d.robot import DefaultRobotConfig
from roboball2d.robot import DefaultRobotState
from roboball2d.ball import BallConfig
from roboball2d.ball_gun import DefaultBallGun
from roboball2d.utils import Box
class Tennis2DEnv(gym.GoalEnv):
"""2D Toy Robotic Tennis Environment.
    Task: a 2D robot with three degrees of freedom has to return a tennis ball
    to a given goal landing point by hitting it appropriately. A sparse reward
    is given only if the ball lands within the goal region."""
metadata = {"render.mode": ["human"]}
# This version of the environment uses a sparse reward
dense_reward = False
# goal properties
_goal_min = 3.0
_goal_max = 6.0
_goal_diameter = 0.8
_goal_color = (0.2, 0.7, 0.0)
# variables for reward design
_hit_reward = 2.0
# maximum episode length in seconds
_max_episode_length_sec = 5.0
# divide by this attribute to normalize angles
_angle_normalization = 0.5*np.pi
# maximum angular velocity
_max_angular_vel = 8.0
# safety factor (for joint limits because solver
# can't ensure that they are always satisfied)
_safety_factor = 1.3
# simulation steps per second
_steps_per_sec = 100
_arrow_width = 0.02
_arrow_head_size = 0.06
_arrow_scaling = 0.3
def __init__(self, slow_motion_factor = 2.0):
super().__init__()
self._subgoals = []
self._timed_subgoals = []
self._tolerances = None
self._subgoal_colors = []
# maximum episode length in steps
self.max_episode_length = int(self._max_episode_length_sec*self._steps_per_sec)
self.seed()
self.verbose = 0
self._slow_motion_factor = slow_motion_factor
self._renderer = None
self._callbacks = []
#####################################
# Physics simulation using Roboball2D
#####################################
# robot and ball configuration
self._robot_config = DefaultRobotConfig()
self._robot_config.linear_damping = 0.1
self._robot_config.angular_damping = 4.4
self._ball_configs = [BallConfig()]
self._ball_configs[0].color = (0.3, 0.3, 0.3)
self._ball_configs[0].line_color = (0.8, 0.8, 0.8)
# safety factors for joint angles to avoid giving observations out of interval
self._joint_factor = []
for index in range(3):
if index in [0, 1]:
factor = self._robot_config.rod_joint_limit*self._safety_factor
else:
factor = self._robot_config.racket_joint_limit*self._safety_factor
self._joint_factor.append(factor)
self._visible_area_width = 6.0
self._visual_height = 0.05
# physics simulation
self._world = B2World(
robot_configs = self._robot_config,
ball_configs = self._ball_configs,
visible_area_width = self._visible_area_width,
steps_per_sec = self._steps_per_sec
)
# ball gun : specifies the reset of
# the ball (by shooting a new one)
self._ball_guns = [DefaultBallGun(self._ball_configs[0])]
# robot init : specifies the reinit of the robot
# (e.g. angles of the rods and rackets, etc)
self._robot_init_state = DefaultRobotState(
robot_config = self._robot_config,
#generalized_coordinates = [0., -0.5*np.pi, 0.],
generalized_coordinates = [0.25*np.pi, -0.5*np.pi, 0.],
generalized_velocities = [0., 0., 0.])
###################
# Observation space
###################
obs_space_dict = {}
bounded_space = spaces.Box(low = -1.0, high = 1.0, shape= (1,), dtype = np.float32)
unbounded_space = spaces.Box(low = -np.inf, high = np.inf, shape= (1,), dtype = np.float32)
unit_interval = spaces.Box(low = 0.0, high = 1.0, shape= (1,), dtype = np.float32)
for index in [0, 1, 2]:
obs_space_dict["joint_" + str(index) + "_angle"] = bounded_space
for index in [0, 1, 2]:
obs_space_dict["joint_" + str(index) + "_angular_vel"] = bounded_space
obs_space_dict["ball_pos_x"] = unbounded_space
obs_space_dict["ball_pos_y"] = unbounded_space
obs_space_dict["ball_vel_x"] = unbounded_space
obs_space_dict["ball_vel_y"] = unbounded_space
obs_space_dict["ball_anguler_vel"] = unbounded_space
obs_space_dict["ball_bounced_at_least_once"] = unit_interval
obs_space_dict["ball_bouncing_second_time"] = unit_interval
obs_space_dict["ball_bounced_at_least_twice"] = unit_interval
if self.dense_reward == True:
# in case of dense reward have to include (first component of) desired goal into observation space
# (second and third component are always one and therefore not useful as observation)
obs_space_dict["desired_landing_pos_x"] = bounded_space
# partial observation space (without goal)
self._preliminary_obs_space = spaces.Dict(obs_space_dict)
# Note: Observations are scaled versions of corresponding quantities
# in physics simulation.
# in sparse reward case, also have to specifiy desired and achieved
# goal spaces
if self.dense_reward == False:
# goal space has components
# 1. ball position x
# 2. bool indicating whether ball bounced at least once
# 3. bool indicating whether ball is bouncing for the second time
# in this time step
# 4. bool indicating whether ball bounced at least twice
desired_goal_space = spaces.Box(
low = np.array([-np.inf, 0., 0., 0.]),
high = np.array([np.inf, 1., 1., 1.]),
dtype = np.float32)
achieved_goal_space = desired_goal_space
# observation space consists of dictionary of subspaces
# corresponding to observation, desired goal and achieved
# goal spaces
self.observation_space = spaces.Dict({
"observation": self._preliminary_obs_space,
"desired_goal": desired_goal_space,
"achieved_goal": achieved_goal_space
})
# in dense reward case, observation space is simply preliminary
# observation space
else:
self.observation_space = self._preliminary_obs_space
###################
# Action space
###################
# action space consists of torques applied to the three joints
# Note: Actions are scaled versions of torques in physics simulation.
act_space_dict = {}
for index in range(3):
act_space_dict["joint_" + str(index) + "_torque"] = bounded_space
self.action_space = spaces.Dict(act_space_dict)
# reset to make sure environment is not used without resetting it first
self.reset()
def step(self, action):
####################
# Physics simulation
####################
action_keys = sorted(action.keys())
torques = [action[key][0] for key in action_keys]
# perform one step of physics simulation, receive new world state
self._world_state = self._world.step(torques, relative_torques = True)
# clip angular velocities to make sure they are in a bounded interval
for joint in self._world_state.robots[0].joints:
joint.angular_velocity = np.clip(joint.angular_velocity, -self._max_angular_vel,
self._max_angular_vel)
####################
# Reward calculation
####################
reward = 0
info = {}
# check whether the ball is bouncing off the floor in this time step
self._ball_bouncing_second_time = False
if self._world_state.ball_hits_floor:
self._n_ball_bounces += 1
if self._n_ball_bounces == 2:
self._ball_bouncing_second_time = True
# set achieved goal
achieved_goal = self._get_achieved_goal()
# dense reward case
if self.dense_reward == True:
# reward for hitting ball with racket
if self._world_state.balls_hits_racket[0]:
self._n_hits_ball_racket += 1
if self._n_hits_ball_racket == 1:
reward += self._hit_reward
# reward for bouncing off ground in goal area
goal_reward = self.compute_reward(achieved_goal, self._desired_goal, info)
reward += goal_reward
            if goal_reward != 0.:
                # the dense goal reward is paid once, when the ball bounces a
                # second time after the hit, so the episode can end here
                self.done = True
# sparse reward case
else:
goal_reward = self.compute_reward(achieved_goal, self._desired_goal, info)
reward += goal_reward
            if goal_reward == 0.:
                # a sparse reward of zero signals success, so end the episode
                self.done = True
# end episode after some time
if self._world_state.t >= self._max_episode_length_sec:
self.done = True
return self.get_observation(), reward, self.done, info
def _get_achieved_goal(self):
return [(self._world_state.balls[0].position[0] - self._goal_min)/(self._goal_max - self._goal_min),
int(self._n_ball_bounces >= 1),
int(self._ball_bouncing_second_time),
int(self._n_ball_bounces >= 2)]
def update_subgoals(self, subgoals, tolerances = None):
self._subgoals = subgoals
self._tolerances = tolerances
def update_timed_subgoals(self, timed_subgoals, tolerances = None):
self._subgoals = [tsg.goal for tsg in timed_subgoals if tsg is not None]
self._timed_subgoals = timed_subgoals
self._tolerances = tolerances
def reset(self):
self.t = 0
# check for consistency with GoalEnv
if self.dense_reward == False:
super().reset()
self.done = False
# reset physics simulation
self._world_state = self._world.reset(self._robot_init_state, self._ball_guns)
# reset variables necessary for computation of reward
self._n_ball_bounces = 0
self._ball_bouncing_second_time = False
self._n_hits_ball_racket = 0
        # sample goal position; the last three components require that the ball
        # has bounced at least once, is bouncing for the second time in this
        # step, and has bounced at least twice
self._desired_goal = np.array([self.np_random.uniform(0., 1.), 1., 1., 1.])
return self.get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def render(self, mode = "human", close = False):
# have to import renderer here to avoid problems when running training without display
# server
from roboball2d.rendering import PygletRenderer
from roboball2d.rendering import RenderingConfig
import roboball2d.rendering.pyglet_utils as pyglet_utils
import pyglet.gl as gl
from ..utils.graphics_utils import get_default_subgoal_colors
# render callback method which draws arrow for velocity of racket
def render_racket_vel_callback(ws):
scaled_vector = [self._arrow_scaling*x for x in ws.robot.racket.linear_velocity]
pyglet_utils.draw_vector(
initial_point = ws.robot.racket.position,
vector = scaled_vector,
width = self._arrow_width,
arrow_head_size = self._arrow_head_size,
color = (0.8, 0.8, 0.8))
# callback function for rendering of subgoals
def render_subgoal_callback(ws):
z = -0.01
for sg, color in zip(self._subgoals, self._subgoal_colors):
# robot
generalized_coordinates = [sg[f"joint_{i}_angle"]*self._joint_factor[i] for i in range(3)]
generalized_velocities = [sg[f"joint_{i}_angular_vel"]*self._max_angular_vel for i in range(3)]
robot_state = DefaultRobotState(
robot_config = self._robot_config,
generalized_coordinates = generalized_coordinates,
generalized_velocities = generalized_velocities)
robot_state.render(
color = color,
z_coordinate = z)
# racket velocity
scaled_vector = [self._arrow_scaling*x for x in robot_state.racket.linear_velocity]
gl.glPushMatrix()
gl.glTranslatef(0., 0., z)
pyglet_utils.draw_vector(
initial_point = robot_state.racket.position,
vector = scaled_vector,
width = self._arrow_width,
arrow_head_size = self._arrow_head_size,
color = color)
gl.glPopMatrix()
z += -0.01
def render_time_bars_callback(ws):
y_pos = 2.5
for tsg, color in zip(self._timed_subgoals, self._subgoal_colors):
if tsg is not None:
width = tsg.delta_t_ach*0.01
pyglet_utils.draw_box((0.16 + 0.5*width, y_pos), width, 0.1, 0., color)
y_pos -= 0.1
########################
# Renderer from Tennis2D
########################
if self._renderer is None:
self._subgoal_colors = get_default_subgoal_colors()
self._callbacks.append(render_racket_vel_callback)
self._callbacks.append(render_subgoal_callback)
self._callbacks.append(render_time_bars_callback)
renderer_config = RenderingConfig(self._visible_area_width,
self._visual_height)
renderer_config.window.width = 1920
renderer_config.window.height = 960
renderer_config.background_color = (1.0, 1.0, 1.0, 1.0)
renderer_config.ground_color = (0.702, 0.612, 0.51)
self._renderer = PygletRenderer(renderer_config,
self._robot_config,
self._ball_configs,
self._callbacks)
# render based on the information provided by
# the physics simulation and the desired goal
goals = [(
self._desired_goal[0]*(self._goal_max - self._goal_min) \
+ self._goal_min - 0.5*self._goal_diameter,
self._desired_goal[0]*(self._goal_max - self._goal_min) \
+ self._goal_min + 0.5*self._goal_diameter,
self._goal_color
)]
self._renderer.render(
world_state = self._world_state,
goals = goals,
time_step = self._slow_motion_factor*self._world_state.applied_time_step)
def compute_reward(self, achieved_goal, desired_goal, info):
if self.dense_reward == False:
if np.all(achieved_goal[1:] == desired_goal[1:]):
if abs((desired_goal[0] - achieved_goal[0])*(self._goal_max - self._goal_min)) <= 0.5*self._goal_diameter:
return 0.
return -1.
else:
if np.all(achieved_goal[1:] == desired_goal[1:]):
return (-min(abs((desired_goal[0] - achieved_goal[0])*(self._goal_max - self._goal_min)),
self._goal_max + self._goal_diameter - self._robot_config.position) +
self._goal_max + self._goal_diameter - self._robot_config.position)
return 0.
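
    # Worked example (editor's addition, illustrative numbers): with
    # _goal_min=3.0, _goal_max=6.0 and _goal_diameter=0.8, a desired goal
    # x-component of 0.5 encodes a landing point at 4.5 m. If the bounce
    # flags match, an achieved x-component of 0.4 (4.2 m) misses by 0.3 m,
    # within the 0.4 m goal radius, so the sparse reward is 0.; an achieved
    # value of 0.2 (3.6 m) misses by 0.9 m and yields -1.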
# part of observation depending only on env state and not on goal
def _get_env_observation(self):
ws = self._world_state
env_observation = {}
for index, joint in enumerate(ws.robots[0].joints):
env_observation["joint_" + str(index) + "_angle"] = np.clip([joint.angle/self._joint_factor[index]], -1., 1.)
env_observation["joint_" + str(index) + "_angular_vel"] = [joint.angular_velocity/self._max_angular_vel]
ball = ws.balls[0]
env_observation["ball_pos_x"] = [ball.position[0]/self._ball_guns[0].initial_pos_x]
env_observation["ball_pos_y"] = [ball.position[1]/self._ball_guns[0].initial_pos_x]
env_observation["ball_vel_x"] = [ball.linear_velocity[0]/self._ball_guns[0].speed_mean]
env_observation["ball_vel_y"] = [ball.linear_velocity[1]/self._ball_guns[0].speed_mean]
env_observation["ball_anguler_vel"] = [ball.angular_velocity/self._ball_guns[0].spin_std]
env_observation["ball_bounced_at_least_once"] = [int(self._n_ball_bounces >= 1)]
env_observation["ball_bouncing_second_time"] = [int(self._ball_bouncing_second_time)]
env_observation["ball_bounced_at_least_twice"] = [int(self._n_ball_bounces >= 2)]
for key, value in env_observation.items():
env_observation[key] = np.array(value)
return env_observation
def get_observation(self):
observation = self._get_env_observation()
if self.dense_reward == False:
result = {
"observation": observation,
"achieved_goal": np.array(self._get_achieved_goal()),
"desired_goal" : np.array(self._desired_goal)
}
return result
else:
observation["desired_landing_pos_x"] = self._desired_goal[0]
return observation
def map_to_achieved_goal(self, partial_obs):
pos_x = partial_obs["ball_pos_x"][0]*self._ball_guns[0].initial_pos_x
achieved_goal = [
[(pos_x - self._goal_min)/(self._goal_max - self._goal_min)],
partial_obs["ball_bounced_at_least_once"],
partial_obs["ball_bouncing_second_time"],
partial_obs["ball_bounced_at_least_twice"]]
return np.concatenate(achieved_goal)
def _get_robot_conf_and_vel(self, robot_state):
pass
class Tennis2DDenseRewardEnv(Tennis2DEnv):
"""Dense reward version of the 2D robotic toy tennis environment.
In contrast to the sparse reward version, the dense reward version of the
environment gives a constant reward to the agent when it hits the ball
and another one when the ball bounces on the ground for the second time after being hit
    by the racket. The latter reward is proportional to the negative distance
to the goal landing point."""
dense_reward = True
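
# Editor's sketch (not part of the original module): a minimal random-policy
# rollout of the sparse-reward environment, assuming roboball2d is installed.
if __name__ == "__main__":
    env = Tennis2DEnv()
    obs = env.reset()
    total_reward = 0.0
    while not env.done:
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        total_reward += reward
    print("episode return:", total_reward)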
|
[
"roboball2d.rendering.pyglet_utils.draw_vector",
"numpy.clip",
"roboball2d.rendering.pyglet_utils.draw_box",
"roboball2d.physics.B2World",
"gym.utils.seeding.np_random",
"roboball2d.robot.DefaultRobotState",
"roboball2d.ball.BallConfig",
"pyglet.gl.glTranslatef",
"roboball2d.ball_gun.DefaultBallGun",
"roboball2d.rendering.RenderingConfig",
"pyglet.gl.glPushMatrix",
"numpy.all",
"pyglet.gl.glPopMatrix",
"numpy.concatenate",
"gym.spaces.Dict",
"roboball2d.robot.DefaultRobotConfig",
"roboball2d.rendering.PygletRenderer",
"numpy.array",
"gym.spaces.Box"
] |
[((2207, 2227), 'roboball2d.robot.DefaultRobotConfig', 'DefaultRobotConfig', ([], {}), '()\n', (2225, 2227), False, 'from roboball2d.robot import DefaultRobotConfig\n'), ((3020, 3183), 'roboball2d.physics.B2World', 'B2World', ([], {'robot_configs': 'self._robot_config', 'ball_configs': 'self._ball_configs', 'visible_area_width': 'self._visible_area_width', 'steps_per_sec': 'self._steps_per_sec'}), '(robot_configs=self._robot_config, ball_configs=self._ball_configs,\n visible_area_width=self._visible_area_width, steps_per_sec=self.\n _steps_per_sec)\n', (3027, 3183), False, 'from roboball2d.physics import B2World\n'), ((3563, 3717), 'roboball2d.robot.DefaultRobotState', 'DefaultRobotState', ([], {'robot_config': 'self._robot_config', 'generalized_coordinates': '[0.25 * np.pi, -0.5 * np.pi, 0.0]', 'generalized_velocities': '[0.0, 0.0, 0.0]'}), '(robot_config=self._robot_config, generalized_coordinates=\n [0.25 * np.pi, -0.5 * np.pi, 0.0], generalized_velocities=[0.0, 0.0, 0.0])\n', (3580, 3717), False, 'from roboball2d.robot import DefaultRobotState\n'), ((3964, 4024), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-1.0)', 'high': '(1.0)', 'shape': '(1,)', 'dtype': 'np.float32'}), '(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)\n', (3974, 4024), False, 'from gym import spaces\n'), ((4058, 4124), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(1,)', 'dtype': 'np.float32'}), '(low=-np.inf, high=np.inf, shape=(1,), dtype=np.float32)\n', (4068, 4124), False, 'from gym import spaces\n'), ((4156, 4215), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0.0)', 'high': '(1.0)', 'shape': '(1,)', 'dtype': 'np.float32'}), '(low=0.0, high=1.0, shape=(1,), dtype=np.float32)\n', (4166, 4215), False, 'from gym import spaces\n'), ((5341, 5368), 'gym.spaces.Dict', 'spaces.Dict', (['obs_space_dict'], {}), '(obs_space_dict)\n', (5352, 5368), False, 'from gym import spaces\n'), ((7174, 7201), 'gym.spaces.Dict', 'spaces.Dict', (['act_space_dict'], {}), '(act_space_dict)\n', (7185, 7201), False, 'from gym import spaces\n'), ((10915, 10938), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (10932, 10938), False, 'from gym.utils import seeding\n'), ((18353, 18382), 'numpy.concatenate', 'np.concatenate', (['achieved_goal'], {}), '(achieved_goal)\n', (18367, 18382), True, 'import numpy as np\n'), ((2355, 2367), 'roboball2d.ball.BallConfig', 'BallConfig', ([], {}), '()\n', (2365, 2367), False, 'from roboball2d.ball import BallConfig\n'), ((3380, 3417), 'roboball2d.ball_gun.DefaultBallGun', 'DefaultBallGun', (['self._ball_configs[0]'], {}), '(self._ball_configs[0])\n', (3394, 3417), False, 'from roboball2d.ball_gun import DefaultBallGun\n'), ((6400, 6535), 'gym.spaces.Dict', 'spaces.Dict', (["{'observation': self._preliminary_obs_space, 'desired_goal':\n desired_goal_space, 'achieved_goal': achieved_goal_space}"], {}), "({'observation': self._preliminary_obs_space, 'desired_goal':\n desired_goal_space, 'achieved_goal': achieved_goal_space})\n", (6411, 6535), False, 'from gym import spaces\n'), ((7851, 7929), 'numpy.clip', 'np.clip', (['joint.angular_velocity', '(-self._max_angular_vel)', 'self._max_angular_vel'], {}), '(joint.angular_velocity, -self._max_angular_vel, self._max_angular_vel)\n', (7858, 7929), True, 'import numpy as np\n'), ((11630, 11813), 'roboball2d.rendering.pyglet_utils.draw_vector', 'pyglet_utils.draw_vector', ([], {'initial_point': 'ws.robot.racket.position', 'vector': 'scaled_vector', 'width': 'self._arrow_width', 'arrow_head_size': 'self._arrow_head_size', 'color': '(0.8, 0.8, 0.8)'}), '(initial_point=ws.robot.racket.position, vector=\n scaled_vector, width=self._arrow_width, arrow_head_size=self.\n _arrow_head_size, color=(0.8, 0.8, 0.8))\n', (11654, 11813), True, 'import roboball2d.rendering.pyglet_utils as pyglet_utils\n'), ((14087, 14149), 'roboball2d.rendering.RenderingConfig', 'RenderingConfig', (['self._visible_area_width', 'self._visual_height'], {}), '(self._visible_area_width, self._visual_height)\n', (14102, 14149), False, 'from roboball2d.rendering import RenderingConfig\n'), ((14454, 14546), 'roboball2d.rendering.PygletRenderer', 'PygletRenderer', (['renderer_config', 'self._robot_config', 'self._ball_configs', 'self._callbacks'], {}), '(renderer_config, self._robot_config, self._ball_configs,\n self._callbacks)\n', (14468, 14546), False, 'from roboball2d.rendering import PygletRenderer\n'), ((15434, 15479), 'numpy.all', 'np.all', (['(achieved_goal[1:] == desired_goal[1:])'], {}), '(achieved_goal[1:] == desired_goal[1:])\n', (15440, 15479), True, 'import numpy as np\n'), ((15686, 15731), 'numpy.all', 'np.all', (['(achieved_goal[1:] == desired_goal[1:])'], {}), '(achieved_goal[1:] == desired_goal[1:])\n', (15692, 15731), True, 'import numpy as np\n'), ((16341, 16402), 'numpy.clip', 'np.clip', (['[joint.angle / self._joint_factor[index]]', '(-1.0)', '(1.0)'], {}), '([joint.angle / self._joint_factor[index]], -1.0, 1.0)\n', (16348, 16402), True, 'import numpy as np\n'), ((17378, 17393), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (17386, 17393), True, 'import numpy as np\n'), ((12382, 12533), 'roboball2d.robot.DefaultRobotState', 'DefaultRobotState', ([], {'robot_config': 'self._robot_config', 'generalized_coordinates': 'generalized_coordinates', 'generalized_velocities': 'generalized_velocities'}), '(robot_config=self._robot_config, generalized_coordinates=\n generalized_coordinates, generalized_velocities=generalized_velocities)\n', (12399, 12533), False, 'from roboball2d.robot import DefaultRobotState\n'), ((12876, 12893), 'pyglet.gl.glPushMatrix', 'gl.glPushMatrix', ([], {}), '()\n', (12891, 12893), True, 'import pyglet.gl as gl\n'), ((12910, 12938), 'pyglet.gl.glTranslatef', 'gl.glTranslatef', (['(0.0)', '(0.0)', 'z'], {}), '(0.0, 0.0, z)\n', (12925, 12938), True, 'import pyglet.gl as gl\n'), ((12953, 13129), 'roboball2d.rendering.pyglet_utils.draw_vector', 'pyglet_utils.draw_vector', ([], {'initial_point': 'robot_state.racket.position', 'vector': 'scaled_vector', 'width': 'self._arrow_width', 'arrow_head_size': 'self._arrow_head_size', 'color': 'color'}), '(initial_point=robot_state.racket.position, vector=\n scaled_vector, width=self._arrow_width, arrow_head_size=self.\n _arrow_head_size, color=color)\n', (12977, 13129), True, 'import roboball2d.rendering.pyglet_utils as pyglet_utils\n'), ((13271, 13287), 'pyglet.gl.glPopMatrix', 'gl.glPopMatrix', ([], {}), '()\n', (13285, 13287), True, 'import pyglet.gl as gl\n'), ((17734, 17762), 'numpy.array', 'np.array', (['self._desired_goal'], {}), '(self._desired_goal)\n', (17742, 17762), True, 'import numpy as np\n'), ((6012, 6046), 'numpy.array', 'np.array', (['[-np.inf, 0.0, 0.0, 0.0]'], {}), '([-np.inf, 0.0, 0.0, 0.0])\n', (6020, 6046), True, 'import numpy as np\n'), ((6073, 6106), 'numpy.array', 'np.array', (['[np.inf, 1.0, 1.0, 1.0]'], {}), '([np.inf, 1.0, 1.0, 1.0])\n', (6081, 6106), True, 'import numpy as np\n'), ((13567, 13641), 'roboball2d.rendering.pyglet_utils.draw_box', 'pyglet_utils.draw_box', (['(0.16 + 0.5 * width, y_pos)', 'width', '(0.1)', '(0.0)', 'color'], {}), '((0.16 + 0.5 * width, y_pos), width, 0.1, 0.0, color)\n', (13588, 13641), True, 'import roboball2d.rendering.pyglet_utils as pyglet_utils\n')]
|
from typing import Optional, Union, List, Tuple
import os
import cv2 as cv
import numpy as np
from PySide6.QtWidgets import QLayout, QLabel, QWidget, QGridLayout
from PySide6.QtGui import QImage, QMouseEvent, QCloseEvent, QResizeEvent, QMoveEvent, QPixmap
from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal
from .ui_editor_window import Ui_EditorWindow
from .preview_window import PreviewWindow
from .cluster_image_entry import ClusterImageEntry
from .layer_image_entry import LayerImageEntry
from .layer_data import LayerData
from .utils import load_image, array2d_to_pixmap, fit_to_frame, create_cluster
class ClusterPreviewWindow(QWidget):
"""
Extends QWidget. Floating window next to ClusterEditor showing the current cluster state.
"""
def __init__(self, parent: Optional[QWidget] = None, size: QSize = QSize(600, 600), image: Optional[QImage] = None):
        super(ClusterPreviewWindow, self).__init__(parent)
self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)
self.resize(size)
self.imageLabel = QLabel("Cluster Preview", self)
self.imageLabel.setAlignment(Qt.AlignCenter)
layout = QGridLayout(self)
layout.addWidget(self.imageLabel)
if image is not None:
self.__update_cluster_preview(QPixmap.fromImage(image))
def __update_cluster_preview(self, image: QPixmap) -> None:
self.imageLabel.setPixmap(fit_to_frame(image, QSize(self.width(), self.height())))
def update_cluster_preview(self, image: Union[np.ndarray, str]) -> None:
"""
        Load an image from a file path or an array and update the cluster preview.
        :param image: Either a numpy array or a path string.
"""
if isinstance(image, np.ndarray):
self.__update_cluster_preview(array2d_to_pixmap(image, normalize=True, colormap=cv.COLORMAP_JET))
return
if isinstance(image, str):
self.__update_cluster_preview(QPixmap.fromImage(QImage(image)))
return
raise ValueError("Invalid image type: {}".format(type(image)))
class ClusterEditor(PreviewWindow):
"""
Extends PreviewWindow. The window that allows editing of the clusters.
"""
applied_to_all = Signal(list)
def __init__(self, parent: Optional[QWidget], calling_image_entry: ClusterImageEntry):
super(ClusterEditor, self).__init__(parent)
self.ui = Ui_EditorWindow()
self.ui.setupUi(self)
self.ui.mergeButton.clicked.connect(self.merge)
self.ui.applyButton.clicked.connect(self.apply_to_all)
self.ui.resetButton.clicked.connect(self.reset)
# self.ui.unmergeButton.clicked.connect(self.unmerge)
self.ui.undoButton.clicked.connect(self.undo)
self._source_image_entries: List[LayerImageEntry] = []
self._selected_image_entry: Optional[LayerImageEntry] = None
self.__cluster_image_entry: ClusterImageEntry = calling_image_entry
self.__pending_mergers: List[List[int]] = []
self.__pending_ime: List[LayerImageEntry] = []
self.__old_entries: List[List[LayerImageEntry]] = []
self.__cluster_array: np.ndarray = np.load(self.__cluster_image_entry.array_path)
side_length = self.height() - self.menuBar().height()
        self.__cluster_preview_window = ClusterPreviewWindow(self, QSize(side_length, side_length),
load_image(self.__cluster_image_entry.image_path))
# self.cluster_preview_window.show()
# first = True
for i in range(self.__cluster_image_entry.layer_count()):
layer_data = self.__cluster_image_entry.get_layer_data(i)
array = np.load(layer_data.array_path)
qim: QImage = load_image(layer_data.image_path)
ime = LayerImageEntry(self, qim, array, layer_data.name(), is_merger=layer_data.is_merger,
layer_index=layer_data.layer_index, parent_layers=layer_data.parent_layers)
ime.mouse_pressed.connect(self.image_entry_click_handler)
ime.state_changed.connect(self.change_merge_button_state)
self.add_source_image_entry(ime)
# if first:
# self.set_preview_image(qim, ime)
# first = False
def source_layout(self) -> QLayout:
return self.ui.scrollAreaLayersContents.layout()
def image_preview(self) -> QLabel:
return self.ui.imageLabel
@Slot(LayerImageEntry, QMouseEvent)
def image_entry_click_handler(self, sender: LayerImageEntry, event: QMouseEvent) -> None:
assert type(sender) == LayerImageEntry
self.set_preview_image(array2d_to_pixmap(sender.array, normalize=True).toImage(), sender)
def resizeEvent(self, event: QResizeEvent) -> None:
if self._selected_image_entry is None:
return
self.draw_preview_image(array2d_to_pixmap(self._selected_image_entry.array, normalize=True).toImage())
def moveEvent(self, event: QMoveEvent) -> None:
position = event.pos()
self.__cluster_preview_window.move(position - QPoint(self.__cluster_preview_window.width(), 0))
if self.__cluster_preview_window.isHidden():
self.__cluster_preview_window.show()
def closeEvent(self, event: QCloseEvent) -> None:
self.__cluster_preview_window.close()
self.__cluster_preview_window = None
def __pending_add(self, mergers_idx: List[int], ime: LayerImageEntry, old_entries: List[LayerImageEntry]) -> None:
"""
Add the result of a merge to the pending list, and store the merged layers to be able to undo the merge.
:param mergers_idx: Indices of the layers to merge.
:type mergers_idx: list of int
:param LayerImageEntry ime: The newly merged image entry.
:param old_entries: A list of the layers used to generate the merged layer.
:type old_entries: list of LayerImageEntry
"""
if not self.ui.undoButton.isEnabled():
self.ui.undoButton.setEnabled(True)
self.__pending_mergers.append(mergers_idx)
self.__pending_ime.append(ime)
self.__old_entries.append(old_entries)
def __pending_clear(self) -> None:
"""
Deletes the mergers that haven't been applied yet.
"""
self.ui.undoButton.setEnabled(False)
self.__pending_mergers.clear()
self.__pending_ime.clear()
self.__old_entries.clear()
def __pending_count(self) -> int:
n = len(self.__pending_mergers)
assert n == len(self.__pending_ime) == len(self.__old_entries)
return n
def __pending_pop(self) -> Tuple[List[int], LayerImageEntry, List[LayerImageEntry]]:
"""
Gives a tuple of the last merged indices, image entry of the merged layers and the list of image entries
used to generate the merger.
:rtype: (list of int, LayerImageEntry, list of LayerImageEntry)
"""
if self.__pending_count() == 1:
self.ui.undoButton.setEnabled(False)
return self.__pending_mergers.pop(), self.__pending_ime.pop(), self.__old_entries.pop()
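
    # Editor's note: __pending_mergers, __pending_ime and __old_entries act as
    # a single undo stack; __pending_add pushes a merge onto all three in
    # lockstep and __pending_pop pops them together, so undo() restores
    # exactly the layers consumed by the most recent merge.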
@Slot(int)
def change_merge_button_state(self, state: int) -> None:
if not state == Qt.CheckState.Checked:
self.ui.mergeButton.setEnabled(True)
return
if len(self.get_selected_entries()) == len(self._source_image_entries):
self.ui.mergeButton.setEnabled(False)
@Slot()
def merge(self) -> None:
"""
Merge the selected layers only in the current view. Update the cluster preview with the newly merged cluster.
"""
if len(self.get_selected_entries()) < 2:
return
checked_indices: List[int] = []
old_ime: List[LayerImageEntry] = []
merger: Optional[np.ndarray] = None
parent_layers: List[int] = []
for index, ime in enumerate(self._source_image_entries):
if not ime.isChecked():
continue
if ime.layer_data.is_merger:
assert ime.layer_data.parent_layers is not None
parent_layers.extend(ime.layer_data.parent_layers)
else:
assert ime.layer_data.layer_index is not None
parent_layers.append(ime.layer_data.layer_index)
checked_indices.append(index)
old_ime.append(ime)
merger = self._source_image_entries[index].array if merger is None else merger | self._source_image_entries[
index].array
ime.setChecked(False)
ime.close()
for i in sorted(checked_indices, reverse=True):
self._source_image_entries.pop(i)
self.__cluster_array = create_cluster([ime.array for ime in self._source_image_entries])
self.__cluster_preview_window.update_cluster_preview(self.__cluster_array)
qim: QImage = array2d_to_pixmap(merger, normalize=True).toImage()
merged_ime = LayerImageEntry(self, qim, merger, f"m {LayerData.indices2str(parent_layers)}",
is_merger=True, parent_layers=parent_layers)
merged_ime.mouse_pressed.connect(self.image_entry_click_handler)
merged_ime.state_changed.connect(self.change_merge_button_state)
self.__pending_add(checked_indices, merged_ime, old_ime)
self.set_preview_image(qim, merged_ime)
self.add_source_image_entry(merged_ime)
self.change_all_entries_check_state(False)
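
    # Editor's illustration (hypothetical arrays): the merge above is an
    # element-wise OR of binary layer masks, e.g.
    #   np.array([[0, 1], [0, 0]]) | np.array([[0, 0], [1, 0]])
    #   -> array([[0, 1], [1, 0]])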
@Slot()
def apply_to_all(self) -> None:
"""
Send a merge signal for each pending merge.
"""
for merger in self.__pending_mergers:
self.applied_to_all.emit(merger)
self.__pending_clear()
@Slot()
def reset(self) -> None:
"""
Removes all uncommitted changes done in the editor.
"""
if len(self.__pending_mergers) == 0:
return
self.__pending_clear()
for ime in self._source_image_entries:
ime.close()
self._source_image_entries.clear()
self.__cluster_array = np.load(self.__cluster_image_entry.array_path)
self.__cluster_preview_window.update_cluster_preview(self.__cluster_array)
for i in range(self.__cluster_image_entry.layer_count()):
layer_data = self.__cluster_image_entry.get_layer_data(i)
array = np.load(layer_data.array_path)
qim: QImage = load_image(layer_data.image_path)
ime = LayerImageEntry(self, qim, array, layer_data.name(), layer_data.is_merger, layer_data.layer_index,
layer_data.parent_layers)
ime.mouse_pressed.connect(self.image_entry_click_handler)
ime.state_changed.connect(self.change_merge_button_state)
self.add_source_image_entry(ime)
if i == 0:
self.set_preview_image(load_image(layer_data.image_path), ime)
self.change_all_entries_check_state(False)
@Slot()
def unmerge(self) -> None:
"""
Unmerge the selected layers in the editor. Global behavior not implemented.
"""
        # iterate over a copy because entries are removed inside the loop
        for ime in self._source_image_entries.copy():
            if not ime.isChecked() or not ime.layer_data.is_merger:
                continue
            self._source_image_entries.remove(ime)
assert ime.layer_data.parent_layers is not None
for parent_layer_index in ime.layer_data.parent_layers.copy():
directory = os.path.dirname(self.__cluster_image_entry.image_path)
path_no_ext = os.path.join(directory,
f"{self.__cluster_image_entry.basename}_layer_{parent_layer_index}")
image_path = f"{path_no_ext}.png"
array_path = f"{path_no_ext}.npy"
parent_ime = LayerImageEntry(self, load_image(image_path), np.load(array_path), str(parent_layer_index),
layer_index=parent_layer_index)
parent_ime.mouse_pressed.connect(self.image_entry_click_handler)
parent_ime.state_changed.connect(self.change_merge_button_state)
self.add_source_image_entry(parent_ime)
ime.close()
@Slot()
def undo(self) -> None:
"""
Go one step back.
"""
if self.__pending_count() == 0:
return
indices, pending_ime, old_ime = self.__pending_pop()
self._source_image_entries.pop()
pending_ime.close()
for index, ime in zip(indices, old_ime):
ime.setVisible(True)
self.add_source_image_entry(ime, index)
self.image_preview().setText("Layer")
self.__cluster_preview_window.update_cluster_preview(self.__cluster_image_entry.image_path)
self.change_all_entries_check_state(False)
|
[
"numpy.load",
"os.path.dirname",
"PySide6.QtWidgets.QLabel",
"PySide6.QtGui.QImage",
"PySide6.QtCore.Signal",
"PySide6.QtCore.QSize",
"PySide6.QtGui.QPixmap.fromImage",
"PySide6.QtCore.Slot",
"PySide6.QtWidgets.QGridLayout",
"os.path.join"
] |
[((2253, 2265), 'PySide6.QtCore.Signal', 'Signal', (['list'], {}), '(list)\n', (2259, 2265), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((4502, 4536), 'PySide6.QtCore.Slot', 'Slot', (['LayerImageEntry', 'QMouseEvent'], {}), '(LayerImageEntry, QMouseEvent)\n', (4506, 4536), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((7208, 7217), 'PySide6.QtCore.Slot', 'Slot', (['int'], {}), '(int)\n', (7212, 7217), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((7530, 7536), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (7534, 7536), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((9575, 9581), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (9579, 9581), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((9822, 9828), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (9826, 9828), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((11085, 11091), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (11089, 11091), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((12370, 12376), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (12374, 12376), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((839, 854), 'PySide6.QtCore.QSize', 'QSize', (['(600)', '(600)'], {}), '(600, 600)\n', (844, 854), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((1065, 1096), 'PySide6.QtWidgets.QLabel', 'QLabel', (['"""Cluster Preview"""', 'self'], {}), "('Cluster Preview', self)\n", (1071, 1096), False, 'from PySide6.QtWidgets import QLayout, QLabel, QWidget, QGridLayout\n'), ((1168, 1185), 'PySide6.QtWidgets.QGridLayout', 'QGridLayout', (['self'], {}), '(self)\n', (1179, 1185), False, 'from PySide6.QtWidgets import QLayout, QLabel, QWidget, QGridLayout\n'), ((3188, 3234), 'numpy.load', 'np.load', (['self.__cluster_image_entry.array_path'], {}), '(self.__cluster_image_entry.array_path)\n', (3195, 3234), True, 'import numpy as np\n'), ((10186, 10232), 'numpy.load', 'np.load', (['self.__cluster_image_entry.array_path'], {}), '(self.__cluster_image_entry.array_path)\n', (10193, 10232), True, 'import numpy as np\n'), ((3365, 3396), 'PySide6.QtCore.QSize', 'QSize', (['side_length', 'side_length'], {}), '(side_length, side_length)\n', (3370, 3396), False, 'from PySide6.QtCore import Slot, QSize, QPoint, Qt, Signal\n'), ((3735, 3765), 'numpy.load', 'np.load', (['layer_data.array_path'], {}), '(layer_data.array_path)\n', (3742, 3765), True, 'import numpy as np\n'), ((10473, 10503), 'numpy.load', 'np.load', (['layer_data.array_path'], {}), '(layer_data.array_path)\n', (10480, 10503), True, 'import numpy as np\n'), ((1301, 1325), 'PySide6.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['image'], {}), '(image)\n', (1318, 1325), False, 'from PySide6.QtGui import QImage, QMouseEvent, QCloseEvent, QResizeEvent, QMoveEvent, QPixmap\n'), ((11603, 11657), 'os.path.dirname', 'os.path.dirname', (['self.__cluster_image_entry.image_path'], {}), '(self.__cluster_image_entry.image_path)\n', (11618, 11657), False, 'import os\n'), ((11688, 11784), 'os.path.join', 'os.path.join', (['directory', 'f"""{self.__cluster_image_entry.basename}_layer_{parent_layer_index}"""'], {}), "(directory,\n f'{self.__cluster_image_entry.basename}_layer_{parent_layer_index}')\n", (11700, 11784), False, 'import os\n'), ((1995, 2008), 'PySide6.QtGui.QImage', 'QImage', (['image'], {}), '(image)\n', (2001, 2008), False, 'from PySide6.QtGui import QImage, QMouseEvent, QCloseEvent, QResizeEvent, QMoveEvent, QPixmap\n'), ((11999, 12018), 'numpy.load', 'np.load', (['array_path'], {}), '(array_path)\n', (12006, 12018), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from tensorboardX import SummaryWriter
from tqdm import tqdm
import argparse
import config
from data_gen import TextMelLoader, TextMelCollate
from taco2models.loss_function import Tacotron2Loss
from taco2models.models import Tacotron2
from taco2models.optimizer import Tacotron2Optimizer
from utils_1 import save_checkpoint, AverageMeter, get_logger, test
import os
def train_net(args):
torch.manual_seed(7)
np.random.seed(7)
checkpoint = args.checkpoint
start_epoch = 0
best_loss = float('inf')
writer = SummaryWriter()
steps_since_improvement = 0
# Initialize / load checkpoint
if checkpoint is None:
print('Training from scratch ...')
# model
model = Tacotron2(config)
print(model)
# model = nn.DataParallel(model)
# optimizer
optimizer = Tacotron2Optimizer(
torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2, betas=(0.9, 0.999), eps=1e-6))
else:
print('Loading model:{}'.format(checkpoint))
load_mode = args.load_type
if load_mode == 'dict':
            checkpoint = torch.load(checkpoint)
            model = checkpoint['model']
            start_epoch = checkpoint['epoch'] + 1
            step = checkpoint['step'] + 1
            steps_since_improvement = checkpoint['steps_since_improvement']
            optimizer = checkpoint['optimizer']
            best_loss = checkpoint['loss']
            if best_loss < 0.4:
                # keep best_loss from being so low that later validation losses
                # can never beat it, which would prevent the best checkpoint
                # from ever being saved again
                best_loss = 0.4
else:
checkpoint = torch.load(checkpoint)
model = Tacotron2(config)
model.load_state_dict(checkpoint)
optimizer = Tacotron2Optimizer(
torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2, betas=(0.9, 0.999), eps=1e-6))
print(model)
logger = get_logger()
    print('learning rate is', optimizer.lr)
model = model.to(config.device)
criterion = Tacotron2Loss()
collate_fn = TextMelCollate(config.n_frames_per_step)
# Custom dataloaders
if args.dataset == 'biaobei':
training_files = args.dataset + '_filelist/' + args.dataset + '_audio_text_train_filelist.txt'
validation_files = args.dataset + '_filelist/' + args.dataset + '_audio_text_valid_filelist.txt'
else:
training_files = args.dataset + '_filelist/' + args.dataset + '_audio_text_train_filelist.json'
validation_files = args.dataset + '_filelist/' + args.dataset + '_audio_text_valid_filelist.json'
train_dataset = TextMelLoader(training_files, config, dataset=args.dataset)
print('batch size is ', args.batch_size)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, collate_fn=collate_fn,
pin_memory=True, shuffle=True, num_workers=args.num_workers)
print(f'loaded dataset from {training_files}')
valid_dataset = TextMelLoader(validation_files, config, dataset=args.dataset)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, collate_fn=collate_fn,
pin_memory=True, shuffle=False, num_workers=args.num_workers)
print(f'loaded dataset from {validation_files}')
# Epochs
for epoch in range(start_epoch, args.epochs):
# One epoch's training
losses = AverageMeter()
for i, batch in enumerate(train_loader):
model.train()
model.zero_grad()
x, y = model.parse_batch(batch)
# Forward prop.
y_pred = model(x)
# loss
loss = criterion(y_pred, y)
# Back prop.
optimizer.zero_grad()
loss.backward()
# Update weights
optimizer.step()
# Keep track of metrics
losses.update(loss.item())
torch.cuda.empty_cache()
writer.add_scalar('model/train_loss', losses.val, optimizer.step_num)
# Print status
if i % args.print_freq == 0:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(epoch, i, len(train_loader), loss=losses))
# validation
if i % config.validation_steps == 0 and i != 0:
valid_losses = AverageMeter()
model.eval()
lr = optimizer.lr
step_num = optimizer.step_num
print('\nLearning rate: {}'.format(lr))
writer.add_scalar('model/learning_rate', lr, step_num)
print('Step num: {}\n'.format(step_num))
with torch.no_grad():
for batch in valid_loader:
model.zero_grad()
x, y = model.parse_batch(batch)
# Forward prop.
y_pred = model(x)
loss = criterion(y_pred, y)
# Keep track of metrics
valid_losses.update(loss.item())
valid_loss = valid_losses.avg
writer.add_scalar('model/valid_loss', valid_loss, step_num)
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Validation Loss {loss:.4f}'.format(epoch, i, len(train_loader), loss=valid_loss))
# Check if there was an improvement
is_best = valid_loss < best_loss
best_loss = min(valid_loss, best_loss)
if not is_best:
steps_since_improvement += config.validation_steps
print("\nSteps since last improvement: %d\n" % (steps_since_improvement,))
else:
steps_since_improvement = 0
# saving checkpoint and update the best checkpoint
save_checkpoint(epoch, step_num, steps_since_improvement, model, optimizer, best_loss, is_best, dataset=args.dataset,trial_type=args.trial_type)
# drawing alignment
img_align = test(model, step_num, valid_loss)
writer.add_image('model/alignment', img_align, step_num, dataformats='HWC')
def parse_args():
parser = argparse.ArgumentParser(description='Tacotron2')
parser.add_argument('--epochs', default=10000, type=int)
parser.add_argument('--max_norm', default=1, type=float, help='Gradient norm threshold to clip')
# trial type
    parser.add_argument('--trial_type', type=str, default='new', help='new vocal dict or old vocal dict')
parser.add_argument('--load_type', type=str, default='dict', help='method to load model')
# dataset
parser.add_argument('--dataset', type=str, default='aixia', help='name of dataset')
# minibatch
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--num-workers', default=4, type=int, help='Number of workers to generate minibatch')
# logging
parser.add_argument('--print_freq', default=10, type=int, help='Frequency of printing training information')
# optimizer
parser.add_argument('--lr', default=1e-3, type=float, help='Init learning rate')
parser.add_argument('--l2', default=1e-6, type=float, help='weight decay (L2)')
parser.add_argument('--checkpoint', type=str, default='biaobei_checkpoints/biaobei.tar', help='checkpoint')
args = parser.parse_args()
return args
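
# Editor's usage sketch (the script name is hypothetical; paths come from the
# argparse defaults above):
#   python train.py --dataset biaobei --batch_size 16 --lr 1e-3 \
#       --checkpoint biaobei_checkpoints/biaobei.tar --load_type dict
# Filelists are resolved as <dataset>_filelist/<dataset>_audio_text_{train,valid}_filelist.txt
# for biaobei and .json for other datasets, as constructed in train_net above.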
def main():
global args
args = parse_args()
train_net(args)
if __name__ == '__main__':
main()
|
[
"tensorboardX.SummaryWriter",
"numpy.random.seed",
"utils_1.get_logger",
"torch.utils.data.DataLoader",
"argparse.ArgumentParser",
"utils_1.AverageMeter",
"torch.manual_seed",
"torch.load",
"taco2models.loss_function.Tacotron2Loss",
"data_gen.TextMelLoader",
"torch.cuda.empty_cache",
"utils_1.save_checkpoint",
"taco2models.models.Tacotron2",
"utils_1.test",
"torch.no_grad",
"data_gen.TextMelCollate"
] |
[((426, 446), 'torch.manual_seed', 'torch.manual_seed', (['(7)'], {}), '(7)\n', (443, 446), False, 'import torch\n'), ((451, 468), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (465, 468), True, 'import numpy as np\n'), ((564, 579), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (577, 579), False, 'from tensorboardX import SummaryWriter\n'), ((2013, 2025), 'utils_1.get_logger', 'get_logger', ([], {}), '()\n', (2023, 2025), False, 'from utils_1 import save_checkpoint, AverageMeter, get_logger, test\n'), ((2124, 2139), 'taco2models.loss_function.Tacotron2Loss', 'Tacotron2Loss', ([], {}), '()\n', (2137, 2139), False, 'from taco2models.loss_function import Tacotron2Loss\n'), ((2158, 2198), 'data_gen.TextMelCollate', 'TextMelCollate', (['config.n_frames_per_step'], {}), '(config.n_frames_per_step)\n', (2172, 2198), False, 'from data_gen import TextMelLoader, TextMelCollate\n'), ((2712, 2771), 'data_gen.TextMelLoader', 'TextMelLoader', (['training_files', 'config'], {'dataset': 'args.dataset'}), '(training_files, config, dataset=args.dataset)\n', (2725, 2771), False, 'from data_gen import TextMelLoader, TextMelCollate\n'), ((2837, 3000), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'collate_fn': 'collate_fn', 'pin_memory': '(True)', 'shuffle': '(True)', 'num_workers': 'args.num_workers'}), '(train_dataset, batch_size=args.batch_size,\n collate_fn=collate_fn, pin_memory=True, shuffle=True, num_workers=args.\n num_workers)\n', (2864, 3000), False, 'import torch\n'), ((3110, 3171), 'data_gen.TextMelLoader', 'TextMelLoader', (['validation_files', 'config'], {'dataset': 'args.dataset'}), '(validation_files, config, dataset=args.dataset)\n', (3123, 3171), False, 'from data_gen import TextMelLoader, TextMelCollate\n'), ((3191, 3355), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': 'args.batch_size', 'collate_fn': 'collate_fn', 'pin_memory': '(True)', 'shuffle': '(False)', 'num_workers': 'args.num_workers'}), '(valid_dataset, batch_size=args.batch_size,\n collate_fn=collate_fn, pin_memory=True, shuffle=False, num_workers=args\n .num_workers)\n', (3218, 3355), False, 'import torch\n'), ((6474, 6522), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tacotron2"""'}), "(description='Tacotron2')\n", (6497, 6522), False, 'import argparse\n'), ((750, 767), 'taco2models.models.Tacotron2', 'Tacotron2', (['config'], {}), '(config)\n', (759, 767), False, 'from taco2models.models import Tacotron2\n'), ((3558, 3572), 'utils_1.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3570, 3572), False, 'from utils_1 import save_checkpoint, AverageMeter, get_logger, test\n'), ((1160, 1182), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (1170, 1182), False, 'import torch\n'), ((1710, 1732), 'torch.load', 'torch.load', (['checkpoint'], {}), '(checkpoint)\n', (1720, 1732), False, 'import torch\n'), ((1753, 1770), 'taco2models.models.Tacotron2', 'Tacotron2', (['config'], {}), '(config)\n', (1762, 1770), False, 'from taco2models.models import Tacotron2\n'), ((4076, 4100), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4098, 4100), False, 'import torch\n'), ((4552, 4566), 'utils_1.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4564, 4566), False, 'from utils_1 import save_checkpoint, AverageMeter, get_logger, test\n'), ((6107, 6256), 'utils_1.save_checkpoint', 'save_checkpoint', (['epoch', 'step_num', 'steps_since_improvement', 'model', 'optimizer', 'best_loss', 'is_best'], {'dataset': 'args.dataset', 'trial_type': 'args.trial_type'}), '(epoch, step_num, steps_since_improvement, model, optimizer,\n best_loss, is_best, dataset=args.dataset, trial_type=args.trial_type)\n', (6122, 6256), False, 'from utils_1 import save_checkpoint, AverageMeter, get_logger, test\n'), ((6316, 6349), 'utils_1.test', 'test', (['model', 'step_num', 'valid_loss'], {}), '(model, step_num, valid_loss)\n', (6320, 6349), False, 'from utils_1 import save_checkpoint, AverageMeter, get_logger, test\n'), ((4881, 4896), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4894, 4896), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 09:28:40 2020
@author: yo
Función auxiliar para calcular el RSE
"""
import numpy as np
def calc_rse(valores,prediccion):
return(sum(valores-prediccion)**2/sum((valores-np.mean(valores))**2))
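
# Editor's sanity check (toy numbers): for valores = [1, 2, 3] and
# prediccion = [1, 2, 4], the squared residuals sum to 1 and the squared
# deviations from the mean sum to 2, so calc_rse returns 0.5.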
|
[
"numpy.mean"
] |
[((226, 242), 'numpy.mean', 'np.mean', (['valores'], {}), '(valores)\n', (233, 242), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import glob
import cv2
import numpy as np
import torch
import architecture as arch
import argparse
import warnings
import time
import sys
try:
import tqdm
except ImportError:
pass
from pathlib import Path
from chunks import DataChunks
model_docs = {
"RRDB_ESRGAN_x4.pth": "Official perceptual upscaling model.",
"RRDB_PSNR_x4.pth": "Official PSNR upscaling model.",
"4x_interp_08.pth": "RRDB_PSNR_x4 interpolated with RRDB_ESRGAN_x4 with 0.8 strength.",
"4x_interp_09.pth": "RRDB_PSNR_x4 interpolated with RRDB_ESRGAN_x4 with 0.9 strength.",
"4x_Box.pth": "General purpose upscaling. Larger dataset than RRDB_ESRGAN_x4.",
"4x_NickelbackFS_72000_G.pth": "General purpose upscaling. Larger dataset than Box.",
"4x_Misc_220000.pth": "Surface upscaling. Works well as general/manga upscaler too.",
"4x_Faces_N_250000.pth": "Face upscaling.",
"4x_face_focus_275k.pth": "Face deblurring and upscaling.",
"4x_Fatality_01_265000_G.pth": "Upscales pixel art.",
"4x_rebout_325k.pth": "Upscales pixel art. Trained on KOF94 Rebout.",
"4x_rebout_interp.pth": "Upscales pixel art. Trained on KOF94 Rebout. Interped.",
"4x_falcoon300.pth": "Manga upscaling. Removes dithering.",
"4x_unholy03.pth": "Manga upscaling. Interpolation of many models.",
"4x_WaifuGAN_v3_30000.pth": "Manga upscaling.",
"4x_Manga109Attempt.pth": "Manga upscaling.",
"4x_ESRGAN_Skyrim_NonTiled_Alpha_NN_128_32_105000.pth": "Upscales greyscale maps. Trained on Skyrim textures.",
"4x_detoon_225k.pth": "Tries to make toon images realistic.",
"4x_detoon_alt.pth": "Tries to make toon images realistic. Softer version.",
"4x_Lady0101_208000.pth": "Upscaled pixel art to painting style. Original version.",
"4x_Lady0101_v3_340000.pth": "Upscaled pixel art to painting style. Moderate blendering, moderate dedithering.",
"4x_Lady0101_v3_blender.pth": "Upscaled pixel art to painting style. Heavy blending, low dedithering.",
"4x_scalenx_90k.pth": "Upscales pixel art in scalenx style.",
"4x_xbrz_90k.pth": "Upscales pixel art in xbr style. No dedithering.",
"4x_xbrdd_90k.pth": "Upscales pixel art in xbr style. Dedithering.",
"1x_JPEG_00-20.pth": "Cleans up JPEG compression. For images with 0-20%% compression ratio.",
"1x_JPEG_20-40.pth": "Cleans up JPEG compression. For images with 20-40%% compression ratio.",
"1x_JPEG_40-60.pth": "Cleans up JPEG compression. For images with 40-60%% compression ratio.",
"1x_JPEG_60-80.pth": "Cleans up JPEG compression. For images with 60-80%% compression ratio.",
"1x_JPEG_80-100.pth": "Cleans up JPEG compression. For images with 80-100%% compression ratio.",
"1x_DeJpeg_Fatality_PlusULTRA_200000_G.pth": "Cleans up JPEG compression. Any compression ratio. Increases contrast and sharpness.",
"1x_BC1_take2_260850.pth": "Cleans up BC1 compression. Restricted version (only works with low-noise images).",
"1x_BC1NoiseAgressiveTake3_400000_G.pth": "Cleans up BC1 compression. Free version (more aggressive than restricted).",
"1x_cinepak_200000.pth": "Cleans up Cinepak, msvideo1 and Roq compression.",
"1x_DeSharpen.pth": "Removes over-sharpening.",
"1x_normals_generator_general_215k.pth": "Attempts to generate a normal map from a texture.",
"1x_Alias_200000_G.pth": "Performs anti-aliasing on the image.",
"1x_SSAntiAlias9x.pth": "Performs anti-aliasing on the image. Newer.",
"1x_DEDITHER_32_512_126900_G.pth": "Tries to remove dithering patterns.",
"1x_BS_Debandizer_34000G.pth": "Tries to remove banding.",
}
aliases = {
"esrgan": "RRDB_ESRGAN_x4.pth",
"psnr": "RRDB_PSNR_x4.pth",
"0.8": "4x_interp_08.pth",
"0.9": "4x_interp_09.pth",
"desharpen": "1x_DeSharpen.pth",
"deblur": "1x_Fatality_DeBlur_275000_G.pth",
"jpeg20": "1x_JPEG_00-20.pth",
"jpeg40": "1x_JPEG_20-40.pth",
"jpeg60": "1x_JPEG_40-60.pth",
"jpeg80": "1x_JPEG_60-80.pth",
"jpeg100": "1x_JPEG_80-100.pth",
"jpegF": "1x_DeJpeg_Fatality_PlusULTRA_200000_G.pth",
"box": "4x_Box.pth",
"nickelback": "4x_NickelbackFS_72000_G.pth",
"misc": "4x_Misc_220000.pth",
"facefocus": "4x_face_focus_275k.pth",
"face": "4x_Faces_N_250000.pth",
"fatality": "4x_Fatality_01_265000_G.pth",
"unholy": "4x_unholy03.pth",
"waifugan": "4x_WaifuGAN_v3_30000.pth",
"manga109": "4x_Manga109Attempt.pth",
"falcoon": "4x_falcoon300.pth",
"rebout": "4x_rebout_325k.pth",
"rebouti": "4x_rebout_interp.pth",
"detoon": "4x_detoon_225k.pth",
"detoon_alt": "4x_detoon_alt.pth",
"bc1r": "1x_BC1_take2_260850.pth",
"bc1f": "1x_BC1NoiseAgressiveTake3_400000_G.pth",
"aa": "1x_Alias_200000_G.pth",
"aa2": "1x_SSAntiAlias9x.pth",
"dedither": "1x_DEDITHER_32_512_126900_G.pth",
"deband": "1x_BS_Debandizer_34000G.pth",
"alpha": "4x_ESRGAN_Skyrim_NonTiled_Alpha_NN_128_32_105000.pth",
"ladyold": "4x_Lady0101_208000.pth",
"ladyblend": "4x_Lady0101_v3_blender.pth",
"lady": "4x_Lady0101_v3_340000.pth",
"scalenx": "4x_scalenx_90k.pth",
"xbrz": "4x_xbrz_90k.pth",
"xbrzdd": "4x_xbrdd_90k.pth",
}
class SmartFormatter(argparse.HelpFormatter):
"""
Custom Help Formatter used to split help text when '\n' was
inserted in it.
"""
def _split_lines(self, text, width):
r = []
for t in text.splitlines():
r.extend(argparse.HelpFormatter._split_lines(self, t, width))
r.append("")
return r
def enum_models(model_dir, aliases=aliases):
models = {model.name: model for model in model_dir.rglob("*.pth")}
for alias, original in aliases.items():
models[alias] = model_dir / original
return models
def get_models_help(models, aliases=aliases, model_docs=model_docs):
lines = []
for model, docs in model_docs.items():
if model not in models.keys():
continue
names = [model]
for alias, original in aliases.items():
if original == model:
names.append(alias)
quoted_names = (f'"{name}"' for name in names)
lines.append(f"{' | '.join(quoted_names)}: {docs}")
return "\n".join(lines)
def parse_args(models, models_help):
parser = argparse.ArgumentParser(
description="Upscale images with ESRGAN", formatter_class=SmartFormatter
)
parser.add_argument("images", nargs="+", type=Path, help="The images to process.")
parser.add_argument(
"-o",
"--out-dir",
type=Path,
required=False,
help="The directory to write output to. Defaults to source directory.",
)
parser.add_argument(
"-s",
"--scale",
type=int,
default=1,
help="The number of times to perform scaling. Defaults to 1.",
)
parser.add_argument(
"-m",
"--model",
choices=models.keys(),
default="0.8",
help=f'The model to use for upscaling. Defaults to "0.8" (RRDB_PSNR_x4 - RRDB_ESRGAN_x4 x 0.8 interp).\n{models_help}',
)
parser.add_argument(
"--cpu",
action="store_true",
help="Use CPU for upscaling, instead of attempting to use CUDA.",
)
parser.add_argument(
"-e",
"--end",
default="_scaled",
help="The suffix to append to scaled images. Defaults to `_scaled`.",
)
parser.add_argument(
"-a",
"--append-model",
action="store_true",
help="Append the model name to the filename, before any custom suffix.",
)
parser.add_argument(
"-x",
"--max-dimension",
help="Split image into chunks of max-dimension.",
type=int,
required=False,
default=0,
)
parser.add_argument(
"-p",
"--padding",
help="Pad image when splitting into quadrants.",
type=int,
required=False,
default=0,
)
parser.add_argument(
"-t",
"--threads",
help="Number of CPU threads to use.",
type=int,
required=False,
)
args = parser.parse_args()
if args.max_dimension != 0 or args.padding != 0:
assert (
args.padding >= 0 and args.max_dimension >= 0
), "padding and max-dimension must be positive"
assert (
args.padding < args.max_dimension
), "padding must be smaller than max-dimension"
if args.threads is not None:
assert args.threads > 0, "threads must be larger than 0"
return args
def main():
start = time.perf_counter_ns()
model_dir = Path(__file__).resolve().parent / "models"
models = enum_models(model_dir)
models_help = get_models_help(models)
args = parse_args(models, models_help)
    model_path = models[args.model]  # enum_models already returns full paths
state_dict = torch.load(model_path)
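    # new-architecture ESRGAN checkpoints use keys like "conv_first.weight";
    # this script only understands the old "model.<k>..." layout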
if "conv_first.weight" in state_dict:
print("Error: Attempted to load a new-format model")
return 1
# Extract model information
scale2 = 0
max_part = 0
in_nc = 3
out_nc = 3
nf = 64
nb = 23
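    # The old-format state dict encodes the architecture in its key names:
    # the trunk conv "model.1.sub.<nb>.weight" (a 5-part key) reveals the RRDB
    # block count, "model.<k>.weight" convs with k > 6 belong to the upsampler
    # (one per 2x stage), and the first conv's shape gives in_nc and nf.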
for part in list(state_dict):
parts = part.split(".")
n_parts = len(parts)
if n_parts == 5 and parts[2] == "sub":
nb = int(parts[3])
elif n_parts == 3:
part_num = int(parts[1])
if part_num > 6 and parts[2] == "weight":
scale2 += 1
if part_num > max_part:
max_part = part_num
out_nc = state_dict[part].shape[0]
upscale = 2 ** scale2
in_nc = state_dict["model.0.weight"].shape[1]
nf = state_dict["model.0.weight"].shape[0]
if args.threads is not None and args.threads > 0:
torch.set_num_threads(args.threads)
torch.set_num_interop_threads(args.threads)
if torch.cuda.is_available() and not args.cpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
model = arch.RRDB_Net(
in_nc,
out_nc,
nf,
nb,
gc=32,
upscale=upscale,
norm_type=None,
act_type="leakyrelu",
mode="CNA",
res_scale=1,
upsample_mode="upconv",
)
model.load_state_dict(state_dict, strict=True)
del state_dict
model.eval()
for k, v in model.named_parameters():
v.requires_grad = False
model = model.to(device)
for i, path in enumerate(
Path(img_path)
for img_glob in args.images
for img_path in glob.glob(str(img_glob))
):
print(i + 1, path.name)
# read image
img = cv2.imread(str(path), cv2.IMREAD_COLOR)
img = img * 1.0 / 255
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for _ in range(args.scale):
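                # cv2 loads HWC BGR; reorder to RGB and transpose to CHW
                # before handing the tensor to the network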
img = torch.from_numpy(
np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))
).float()
img = img.unsqueeze(0)
if args.max_dimension:
data_chunks = DataChunks(
img, args.max_dimension, args.padding, upscale
)
chunks = data_chunks.iter()
if "tqdm" in sys.modules.keys():
chunks_count = data_chunks.hlen * data_chunks.vlen
chunks = tqdm.tqdm(chunks, total=chunks_count, unit=" chunks")
for chunk in chunks:
                        inp = chunk.to(device)
                        output = model(inp)
                        data_chunks.gather(output)
output = data_chunks.concatenate()
else:
                    inp = img.to(device)
                    output = model(inp)
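                # convert the network output back: CHW RGB float in [0, 1]
                # -> HWC BGR scaled to [0, 255] so cv2.imwrite can save it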
img = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
img = np.transpose(img[[2, 1, 0], :, :], (1, 2, 0))
img = (img * 255.0).round()
out_dir = args.out_dir if args.out_dir is not None else path.parent
suffix = f"_{args.model}{args.end}" if args.append_model else args.end
out_path = out_dir / (path.stem + suffix + ".png")
cv2.imwrite(str(out_path), img)
period = time.perf_counter_ns() - start
print("Done in {:,}s".format(period / 1_000_000_000.0))
return 0
if __name__ == "__main__":
sys.exit(main())
|
[
"tqdm.tqdm",
"argparse.ArgumentParser",
"warnings.simplefilter",
"chunks.DataChunks",
"sys.modules.keys",
"torch.load",
"architecture.RRDB_Net",
"numpy.transpose",
"torch.set_num_interop_threads",
"torch.set_num_threads",
"torch.cuda.is_available",
"pathlib.Path",
"warnings.catch_warnings",
"torch.device",
"time.perf_counter_ns",
"argparse.HelpFormatter._split_lines"
] |
[((6290, 6391), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Upscale images with ESRGAN"""', 'formatter_class': 'SmartFormatter'}), "(description='Upscale images with ESRGAN',\n formatter_class=SmartFormatter)\n", (6313, 6391), False, 'import argparse\n'), ((8599, 8621), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (8619, 8621), False, 'import time\n'), ((8868, 8890), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (8878, 8890), False, 'import torch\n'), ((9995, 10146), 'architecture.RRDB_Net', 'arch.RRDB_Net', (['in_nc', 'out_nc', 'nf', 'nb'], {'gc': '(32)', 'upscale': 'upscale', 'norm_type': 'None', 'act_type': '"""leakyrelu"""', 'mode': '"""CNA"""', 'res_scale': '(1)', 'upsample_mode': '"""upconv"""'}), "(in_nc, out_nc, nf, nb, gc=32, upscale=upscale, norm_type=None,\n act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv')\n", (10008, 10146), True, 'import architecture as arch\n'), ((9757, 9792), 'torch.set_num_threads', 'torch.set_num_threads', (['args.threads'], {}), '(args.threads)\n', (9778, 9792), False, 'import torch\n'), ((9801, 9844), 'torch.set_num_interop_threads', 'torch.set_num_interop_threads', (['args.threads'], {}), '(args.threads)\n', (9830, 9844), False, 'import torch\n'), ((9853, 9878), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9876, 9878), False, 'import torch\n'), ((9914, 9934), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (9926, 9934), False, 'import torch\n'), ((9962, 9981), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9974, 9981), False, 'import torch\n'), ((12253, 12275), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (12273, 12275), False, 'import time\n'), ((10468, 10482), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (10472, 10482), False, 'from pathlib import Path\n'), ((10726, 10751), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (10749, 10751), False, 'import warnings\n'), ((10765, 10796), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (10786, 10796), False, 'import warnings\n'), ((5444, 5495), 'argparse.HelpFormatter._split_lines', 'argparse.HelpFormatter._split_lines', (['self', 't', 'width'], {}), '(self, t, width)\n', (5479, 5495), False, 'import argparse\n'), ((11901, 11946), 'numpy.transpose', 'np.transpose', (['img[[2, 1, 0], :, :]', '(1, 2, 0)'], {}), '(img[[2, 1, 0], :, :], (1, 2, 0))\n', (11913, 11946), True, 'import numpy as np\n'), ((8638, 8652), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (8642, 8652), False, 'from pathlib import Path\n'), ((11082, 11140), 'chunks.DataChunks', 'DataChunks', (['img', 'args.max_dimension', 'args.padding', 'upscale'], {}), '(img, args.max_dimension, args.padding, upscale)\n', (11092, 11140), False, 'from chunks import DataChunks\n'), ((11268, 11286), 'sys.modules.keys', 'sys.modules.keys', ([], {}), '()\n', (11284, 11286), False, 'import sys\n'), ((11396, 11449), 'tqdm.tqdm', 'tqdm.tqdm', (['chunks'], {'total': 'chunks_count', 'unit': '""" chunks"""'}), "(chunks, total=chunks_count, unit=' chunks')\n", (11405, 11449), False, 'import tqdm\n'), ((10897, 10942), 'numpy.transpose', 'np.transpose', (['img[:, :, [2, 1, 0]]', '(2, 0, 1)'], {}), '(img[:, :, [2, 1, 0]], (2, 0, 1))\n', (10909, 10942), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import math
# tip geometry
dia = 0.057 # m
r = dia/2
offset = 3 * dia
pen_rate_labels = ['1 m/min','2 m/min','3 m/min','4 m/min','5 m/min']
pen_rates = [1/60, 2/60, 3/60, 4/60, 5/60]  # m/s
rpm = 25 * (2*math.pi) / 60  # 25 rev/min converted to rad/s
X = []
Y = []
Z = []
k=0
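# Each penetration rate traces a helix: the tip circles at constant angular
# velocity while Z advances linearly until 1 m of depth is reached, and each
# curve is shifted along x by k*offset so the helices plot side by side.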
for rate in pen_rates:
t = np.linspace(0, 1/rate, num=300) # secs
theta = t * rpm
X.append( r*np.sin(theta) + k*offset )
Y.append( r*np.cos(theta) )
Z.append( t*rate )
# needed to set 3D aspect ratio
if k == 0:
xs = X[-1].tolist()
ys = Y[-1].tolist()
zs = Z[-1].tolist()
else:
xs += X[-1].tolist()
ys += Y[-1].tolist()
zs += Z[-1].tolist()
k += 1
# ... aspect ratio
xs = np.array(xs)
ys = np.array(ys)
zs = np.array(zs)
max_range = np.array([xs.max()-xs.min(), ys.max()-ys.min(), zs.max()-zs.min()]).max() / 2.0
mid_x = (xs.max()+xs.min()) * 0.5
mid_y = (ys.max()+ys.min()) * 0.5
mid_z = (zs.max()+zs.min()) * 0.5
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# plot each curve
for rate, x, y, z in zip( pen_rate_labels, X, Y, Z ):
ax.plot(x, y, z, label=rate)
leg = plt.legend(loc='best', fancybox=True)
# ...aspect ratio by setting limits
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.invert_zaxis()
ax.set_ylabel('Distance y (m)', multialignment='center')
ax.set_xlabel('Distance x (m)', multialignment='center')
ax.set_zlabel('Depth (m)', multialignment='center')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"numpy.cos"
] |
[((769, 781), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (777, 781), True, 'import numpy as np\n'), ((787, 799), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (795, 799), True, 'import numpy as np\n'), ((805, 817), 'numpy.array', 'np.array', (['zs'], {}), '(zs)\n', (813, 817), True, 'import numpy as np\n'), ((1019, 1031), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1029, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1211), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fancybox': '(True)'}), "(loc='best', fancybox=True)\n", (1184, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1592, 1594), True, 'import matplotlib.pyplot as plt\n'), ((347, 380), 'numpy.linspace', 'np.linspace', (['(0)', '(1 / rate)'], {'num': '(300)'}), '(0, 1 / rate, num=300)\n', (358, 380), True, 'import numpy as np\n'), ((465, 478), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (471, 478), True, 'import numpy as np\n'), ((422, 435), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (428, 435), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import numpy as np
print('loading model and train data...')
# load model from file
net = torch.load('models/torch_rnn.model')
# load data from file
train_loader = torch.load('data/processed/torch_rnn_train.loader')
valid_loader = torch.load('data/processed/torch_rnn_validate.loader')
# loss and optimization functions
lr = 0.001
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# training params
epochs = 8
counter = 0
print_every = 50
clip = 5 # gradient clipping
batch_size = 50
# check for gpu
print('checking if gpu available...')
train_on_gpu = torch.cuda.is_available()
print('training on gpu' if train_on_gpu else 'no gpu available, training on cpu')
# move model to gpu, if available
if train_on_gpu:
net.cuda()
print('training model...')
net.train()
# train for some number of epochs
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size)
# batch loop
for inputs, labels in train_loader:
counter += 1
        if train_on_gpu:
inputs, labels = inputs.cuda(), labels.cuda()
# create new variables for hidden state to prevent
# backpropping through entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
# get the output from the model
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output.squeeze(), labels.float())
loss.backward()
# clip_grad_norm helps prevent the exploding gradient problem in RNNs
nn.utils.clip_grad_norm_(net.parameters(), clip)
optimizer.step()
# loss stats
if counter % print_every == 0:
# get validation loss
val_h = net.init_hidden(batch_size)
val_losses = []
net.eval()
for inputs, labels in valid_loader:
                # detach the validation hidden state from its history
                val_h = tuple([each.data for each in val_h])
if train_on_gpu:
inputs, labels = inputs.cuda(), labels.cuda()
output, val_h = net(inputs, val_h)
val_loss = criterion(output.squeeze(), labels.float())
val_losses.append(val_loss.item())
net.train()
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter),
"Loss: {:.6f}...".format(loss.item()),
"Val Loss: {:.6f}".format(np.mean(val_losses)))
# save trained model
print('saving trained model...')
torch.save(net, 'models/torch_rnn_trained.model')
|
[
"torch.nn.BCELoss",
"torch.load",
"torch.save",
"numpy.mean",
"torch.cuda.is_available"
] |
[((125, 161), 'torch.load', 'torch.load', (['"""models/torch_rnn.model"""'], {}), "('models/torch_rnn.model')\n", (135, 161), False, 'import torch\n'), ((200, 251), 'torch.load', 'torch.load', (['"""data/processed/torch_rnn_train.loader"""'], {}), "('data/processed/torch_rnn_train.loader')\n", (210, 251), False, 'import torch\n'), ((267, 321), 'torch.load', 'torch.load', (['"""data/processed/torch_rnn_validate.loader"""'], {}), "('data/processed/torch_rnn_validate.loader')\n", (277, 321), False, 'import torch\n'), ((381, 393), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (391, 393), True, 'import torch.nn as nn\n'), ((624, 649), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (647, 649), False, 'import torch\n'), ((2677, 2726), 'torch.save', 'torch.save', (['net', '"""models/torch_rnn_trained.model"""'], {}), "(net, 'models/torch_rnn_trained.model')\n", (2687, 2726), False, 'import torch\n'), ((2599, 2618), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (2606, 2618), True, 'import numpy as np\n')]
|
import numpy as np
class RBFMMD2(object):
def __init__(self, sigma_list, num_bit, is_binary):
self.sigma_list = sigma_list
self.num_bit = num_bit
self.is_binary = is_binary
self.basis = np.arange(2**num_bit,dtype='int32')
self.K = mix_rbf_kernel(self.basis, self.basis, self.sigma_list, is_binary)
def __call__(self, px, py):
'''
        Args:
            px (1darray): probability distribution over the computational basis for data set x.
            py (1darray): same as px, but for data set y.
Returns:
float, loss.
'''
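        # squared MMD for explicit distributions: MMD^2 = (px - py)^T K (px - py)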
pxy = px-py
return self.kernel_expect(pxy, pxy)
def kernel_expect(self, px, py):
res = px.dot(self.K.dot(py))
return res
def mix_rbf_kernel(x, y, sigma_list, is_binary):
if is_binary:
dx2 = np.zeros([len(x)]*2, dtype='int64')
num_bit = int(np.round(np.log(len(x))/np.log(2)))
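        # accumulate the Hamming distance between basis states: one count
        # for every bit position where x and y differ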
for i in range(num_bit):
dx2 += (x[:,None]>>i)&1 != (y>>i)&1
else:
dx2 = (x[:, None] - y)**2
return _mix_rbf_kernel_d(dx2, sigma_list)
def _mix_rbf_kernel_d(dx2, sigma_list):
K = 0.0
for sigma in sigma_list:
gamma = 1.0 / (2 * sigma)
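        # note: each sigma here acts as a squared bandwidth, k = exp(-dx2 / (2*sigma))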
K = K + np.exp(-gamma * dx2)
return K
|
[
"numpy.log",
"numpy.arange",
"numpy.exp"
] |
[((223, 261), 'numpy.arange', 'np.arange', (['(2 ** num_bit)'], {'dtype': '"""int32"""'}), "(2 ** num_bit, dtype='int32')\n", (232, 261), True, 'import numpy as np\n'), ((1270, 1290), 'numpy.exp', 'np.exp', (['(-gamma * dx2)'], {}), '(-gamma * dx2)\n', (1276, 1290), True, 'import numpy as np\n'), ((955, 964), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (961, 964), True, 'import numpy as np\n')]
|