prompt (string, lengths 15–655k) | completion (string, lengths 3–32.4k) | api (string, lengths 8–52) |
---|---|---|
#!/usr/bin/env python
# Original written in Python 3.5.2
# Based on various previous iterations of lte.py scripts written by <NAME> and <NAME>
# Approximates an LTE (local thermodynamic equilibrium) mass to determine the amount of 12CO and 13CO present in a given region of the sky
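# A minimal usage sketch (file names, source tag, and directory below are hypothetical, for illustration only):
#   lte(files=['co12_cube.fits', 'co13_cube.fits', 'co12_rms.fits', 'co13_rms.fits', 'co12_mask.fits'],
#       tfloor=8., datainfo='mycloud_run1', tx_method='peak', indir='/path/to/cubes')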
from astropy.io import fits
from astropy import constants as const
from astropy import units as u
import numpy as np
import os
import warnings
import sys
def lte(files = [], tfloor = 8., datainfo = '', tx_method = 'peak', onlywrite = [], indir = ''):
# files are in this order: [incube12, incube13, inrms12, inrms13, inmask12]
# tx_methods accounted for are 'cube' and 'peak'
    # datainfo should describe the source of the data and, optionally, an iteration number
# file paths need to be absolute or defined properly in relation to working directory
# for f in files:
# if os.path.exists(f) == 1:
# print('Found {}...'.format(f))
# continue
# else:
# print('File {} does not exist'.format(f))
# return
# Declarations of input and output files
if (indir != ''):
indir = indir.rstrip('/')
incube12 = indir + '/' + files[0]
incube13 = indir + '/' + files[1]
inrms12 = indir + '/' + files[2]
inrms13 = indir + '/' + files[3]
inmask12 = indir + '/' + files[4]
else:
incube12 = files[0]
incube13 = files[1]
inrms12 = files[2]
inrms13 = files[3]
inmask12 = files[4]
outtex12 = datainfo + '_' + tx_method + '_tex12.fits.gz'
outtau13 = datainfo + '_' + tx_method + '_tau13.fits.gz'
outtau13err = datainfo + '_' + tx_method + '_tau13err.fits.gz'
outtau13pk = datainfo + '_' + tx_method + '_tau13pk.fits.gz'
outn13cube = datainfo + '_' + tx_method + '_n13cube.fits.gz'
outn13cubeerr = datainfo + '_' + tx_method + '_n13cubeerr.fits.gz'
outn13col = datainfo + '_' + tx_method + '_n13col.fits.gz'
outn13colerr = datainfo + '_' + tx_method + '_n13colerr.fits.gz'
outsnr13 = datainfo + '_' + tx_method + '_n13snr.fits.gz'
# Load 12CO cube [units K]
print('\nReading {0}...'.format(incube12))
t12cube, hd3d = fits.getdata(incube12, header = True)
if 'RESTFREQ' in hd3d.keys():
freq12 = hd3d['RESTFREQ'] * u.Hz
elif 'RESTFRQ' in hd3d.keys():
freq12 = hd3d['RESTFRQ'] * u.Hz
print('The 12CO rest frequency is {0:.4f}'.format((freq12).to(u.GHz)))
print('min/max values of 12CO [K] are {0:.2f} and {1:.2f}'.format(
np.nanmin(t12cube), np.nanmax(t12cube)))
# Load 12CO uncertainty [2D plane]
print('\nReading {0}...'.format(inrms12))
# t12err, hd2d = fits.getdata(inrms12, header = True)
# print('min/max values of 12CO uncertainty are {0:.3f} and {1:.3f}'.format(
# np.nanmin(t12err), np.nanmax(t12err)))
hd2d = fits.getheader(inrms12)
if 'datamin' in hd2d and 'datamax' in hd2d:
print('min/max values of 12CO uncertainty are {0:.3f} and {1:.3f}'.format(
hd2d['datamin'], hd2d['datamax']))
if hd2d['naxis'] == 3:
for k in list(hd2d['*3*'].keys()):
hd2d.remove(k)
for frq in list(hd2d['*frq*'].keys()):
hd2d.remove(frq)
# Load 12CO mask [3D cube or 2D plane]
print('\nReading {0}...'.format(inmask12))
mask = fits.getdata(inmask12)
print('Number of mask == 1 values: {0}'.format(np.count_nonzero(
mask[~np.isnan(mask)] > 0)))
print('Number of mask == 0 values: {0}'.format(np.count_nonzero(
mask[~np.isnan(mask)] < 1)))
print('Number of mask == NaN values: {0}'.format(np.count_nonzero(
np.isnan(mask))))
mask3d = (mask == 0)
mask2d = (np.nansum(mask, axis = 0) == 0)
# Calculate Tex for Tex > Tfloor
# Different methods are calculated slightly differently
with np.errstate(invalid = 'ignore', divide = 'ignore'):
if tx_method == 'peak':
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
t12 = np.nanmax(t12cube, axis = 0)
hdtx = hd2d
t12[mask2d] = np.nan
elif tx_method == 'cube':
t12 = t12cube
hdtx = hd3d
t12[mask3d] = np.nan
print('\nCalculating Tex [excitation temperature]...')
tcmb = 2.73 * u.K
t0_12 = (const.h * freq12 / const.k_B).to(u.K)
Jtcmb = t0_12/(np.exp(t0_12/tcmb)-1)
#tex = 11.06 / (np.log(1 + 11.06/(t12 + 0.187)))
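        # The line below inverts the optically thick radiative-transfer solution
        # T_peak = J(Tex) - J(Tcmb), where J(T) = T0/(exp(T0/T) - 1) and T0 = h*nu/k,
        # giving Tex = T0 / ln(1 + T0/(T_peak + J(Tcmb))). The commented line above is
        # the same expression with T0 and J(Tcmb) hard-coded for a particular rest frequency.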
tex = t0_12 / (np.log(1 + t0_12/((t12*u.K) + Jtcmb)))
tex[tex < (tfloor * u.K)] = (tfloor * u.K)
print('min/max values of Tex [K] are {0:.2f} and {1:.2f}'.format(
np.nanmin(tex), np.nanmax(tex)))
if (len(onlywrite) == 0) or ('outtex12' in onlywrite) == True:
hdtx['datamin'] =
| np.nanmin(tex) | numpy.nanmin |
import glob
import logging
import itertools
import os
import pickle
import numpy as np
import pandas as pd
import time
from sklearn.metrics import classification_report, matthews_corrcoef,\
accuracy_score, precision_recall_fscore_support
from .plotter import Plotter
from .config import init_logging
from ..preparation import prepare_data
from ..pipeline import run_pipeline
from ..algorithms import Algorithm
from ..utils import create_deep_dict
class Evaluator:
    def __init__(self, name: str, datasets: list, get_predictors: callable, output_dir: str = None,
create_log_file: bool=True, store: bool=False,
n_train_samples: int=60000, n_test_samples: int=6000, downsample: bool=True):
"""
:param datasets: list of datasets
        :param get_predictors: callable that returns a list of predictors
"""
self.name = name
self.datasets = datasets
self.dataset_names = [str(x) for x in self.datasets]
assert np.unique(self.dataset_names).size == len(self.dataset_names),\
'Some datasets have the same name!'
self.get_predictors = get_predictors
# Create instances to read out name and repr (incl. seed)
temp_predictors = self.get_predictors(1)
self.predictor_names = [repr(x) if isinstance(x, Algorithm) else x.__class__.__name__
for x in temp_predictors]
self.pred_to_algo = dict((repr(p), p.name) for p in temp_predictors)
assert np.unique(self.predictor_names).size == len(self.predictor_names),\
'Some predictors have the same name!'
# Algorithm names may not be unique since they do not contain a seed
self.output_dir = output_dir or 'reports'
if create_log_file:
init_logging(os.path.join(self.output_dir, 'logs'))
self.logger = logging.getLogger(__name__)
self.metrics = None
self.predictions = None
# Temporary results if the pipeline breaks
self._metrics = None
self._predictions = None
self.plotter = Plotter(self.output_dir, f'figures/exp-{name}')
self.store = store
self.downsample = downsample
self.n_train_samples = n_train_samples
self.n_test_samples = n_test_samples
self._temp_pipelines = create_deep_dict(self.dataset_names, self.predictor_names)
def __call__(self):
multiindex = pd.MultiIndex.from_product([self.dataset_names, self.predictor_names],
names=['datasets', 'predictors'])
# metrics = create_deep_dict(self.dataset_names, self.predictor_names)
self._metrics = pd.DataFrame(0, index=['prec', 'rec', 'f1', 'acc', 'mcc'],
columns=multiindex)
self._metrics.sort_index(axis=1, inplace=True)
# Might be required if datasets have different sizes
# predictions = create_deep_dict(self.dataset_names, self.predictor_names)
self._predictions = pd.DataFrame(0, index=range(self.n_test_samples), columns=multiindex)
for ds in self.datasets:
self.logger.info(f"{'-'*10} Prepare dataset {'-'*10}")
data = prepare_data(ds, self.n_train_samples, self.n_test_samples,
downsample=self.downsample)
n_features = len(data[0].columns.levels[1])
predictors = self.get_predictors(n_features)
for predictor, predictor_name in zip(predictors, self.predictor_names):
self._execute_predictor(ds, predictor, predictor_name, data)
self.predictions = self._predictions
self.metrics = self.get_metrics_conclusion()
if self.store:
self.export_results()
self.metrics.to_csv(os.path.join(self.output_dir, f'custom/{self.name}.csv'))
self.plot_histories()
return self.metrics
def _execute_predictor(self, ds, predictor, predictor_name, data):
self.logger.info(f"{'-'*10} {predictor_name} | {ds} {'-'*10}")
pipeline, y_pred = run_pipeline(predictor, data)
time.sleep(1) # TODO: Why did I do this?
y_pred = y_pred.clip(-1, 1)
ev = self.measure_pipeline_run_results(data[3], y_pred)
if len(y_pred) != self.n_test_samples:
            self.logger.warning(
                'Not enough predictions are available. Check the data distribution!')
self._predictions.loc[:len(y_pred)-1, (str(ds), predictor_name)] = y_pred
assert all(self._metrics.index == list(ev.keys()))
self._metrics.loc[:, (str(ds), predictor_name)] = ev.values()
self._temp_pipelines[str(ds)][predictor_name] = pipeline, y_pred
def measure_pipeline_run_results(self, y_true, y_pred):
y_true = np.array(y_true)
y_pred = np.array(y_pred)
prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average='weighted')
acc = accuracy_score(y_true, y_pred)
mcc = matthews_corrcoef(y_true, y_pred)
target_names = np.array(['Down', 'Still', 'Up'])[np.unique(y_true).astype(int) + 1]
self.logger.debug(
f'\n{classification_report(y_true, y_pred, target_names=target_names)}\n\nMCC='
f'{mcc:.5f}, Accuracy={acc:.5f}, Precision={prec:.5f}, Recall={rec:.5f}, F1={f1:.5f}')
return {
'prec': round(prec, 5),
'rec': round(rec, 5),
'f1': round(f1, 5),
'acc': round(acc, 5),
'mcc': round(mcc, 5)
}
# Merge runs with same algorithms but different seeds (represented by mean and std)
def get_metrics_conclusion(self):
metrics = self._metrics
predictors = pd.DataFrame(list(metrics.columns), columns=['l1', 'l2'])
predictors['algo'] = [self.pred_to_algo[p] for _, (_, p) in predictors.iterrows()]
merged_metrics = []
for (ds, algo), matches in predictors.groupby(['l1', 'algo']):
regarding_metrics = metrics[[(ds, x) for x in matches.l2]]
if regarding_metrics.shape[1] == 1:
merged_metrics.append((f'{ds} {algo}', *regarding_metrics.iloc[:, 0]))
else:
means = regarding_metrics.mean(axis=1).round(5)
stds = regarding_metrics.std(axis=1).round(5)
merged_metrics.append(
(f'{ds} {algo}', *[f'{m} +- {s}' for m, s in zip(means, stds)]))
merged_metrics = pd.DataFrame(merged_metrics, columns=['predictor', *metrics.index])
merged_metrics.index = merged_metrics.predictor
merged_metrics = merged_metrics.T.iloc[1:]
return merged_metrics
def get_predictor(self, ds_i, predictor_i):
ds_name = self.dataset_names[ds_i] if isinstance(ds_i, int) else ds_i
pred_name = self.predictor_names[predictor_i] if \
isinstance(predictor_i, int) else predictor_i
return self._temp_pipelines[ds_name][pred_name][0].steps[-1][1]
# ----- Plotting --------------------------------------------------------- #
def plot_histories(self, store=True):
for ds, pred in itertools.product(self.dataset_names, self.predictor_names):
predictor = self.get_predictor(ds, pred)
if hasattr(predictor, 'history') and predictor.history is not None:
self.plotter.plot_history(predictor.history, f'{pred} on {ds}', store=store)
# ----- Utils ------------------------------------------------------------ #
def get_model_summary(self, ds_i, predictor_i):
return self.get_predictor(ds_i, predictor_i).model.summary()
def get_models_input(self, ds_i, predictor_i):
ds = self.datasets[ds_i]
ds_name = self.dataset_names[ds_i]
pred_name = self.predictor_names[predictor_i]
data = prepare_data(ds, self.n_train_samples, self.n_test_samples)
X_train = data[0]
pipeline = self._temp_pipelines[ds_name][pred_name][0]
X_train = pipeline.steps[0][1].transform(X_train)
X_train = pipeline.steps[1][1].transform(X_train)
X_train = pipeline.steps[2][1].transform(X_train)
return X_train
def get_mcc_metric(self, metrics=None):
        metrics = self.metrics if metrics is None else metrics
mcc_flatten = metrics.loc['mcc'].reset_index()
mcc = mcc_flatten.pivot(*mcc_flatten.columns) # index, column, value
return mcc
def export_results(self):
output_dir = os.path.join(self.output_dir, 'evaluators')
os.makedirs(output_dir, exist_ok=True)
timestamp = time.strftime('%Y-%m-%d-%H%M%S')
path = os.path.join(output_dir, f'{self.name}-{timestamp}.pkl')
self.logger.info(f'Store evaluator results at {os.path.abspath(path)}')
save_dict = {
'name': self.name,
'dataset_names': self.dataset_names,
'predictor_names': self.predictor_names,
# 'pred_to_algo': self.pred_to_algo,
'output_dir': self.output_dir,
'_metrics': self._metrics,
'predictions': self.predictions,
'downsample': self.downsample,
'n_train_samples': self.n_train_samples,
'n_test_samples': self.n_test_samples,
}
with open(path, 'wb') as f:
pickle.dump(save_dict, f)
return path
# Import metrics & predictions if this evaluator uses the same datasets and predictors
# If you want to import old evaluators (before 22.01.2019) you need to adapt the code:
# The save_dict will only contain dataset_names, predictor_name, metrics and output_dir
def import_results(self):
output_dir = os.path.join(self.output_dir, 'evaluators')
path_regex = os.path.join(output_dir, f'{self.name}-20*.pkl') # Wildcard should only match dates
        matches = sorted(glob.glob(path_regex))
        if len(matches) == 0:
            self.logger.error('No evaluator with this name was found in the output directory.')
            return
        elif len(matches) > 1:
            self.logger.info(f'Found {len(matches)} possible matches. Selecting the latest one.')
        path = matches[-1]  # timestamps sort lexicographically, so the last match is the latest
        self.logger.info(f'Read evaluator results at {os.path.abspath(path)}')
with open(path, 'rb') as f:
save_dict = pickle.load(f)
assert np.array_equal(save_dict['dataset_names'], self.dataset_names), \
'Datasets do not match'
assert
| np.array_equal(save_dict['predictor_names'], self.predictor_names) | numpy.array_equal |
# Similarity-based Calibration methods
# Also includes Platt scaling and temperature scaling
# for comparison.
#
# <NAME>
import sys
import numpy as np
import scipy
from progressbar import ProgressBar, Bar, ETA
from progressbar import Counter as pCounter
from sklearn.metrics.pairwise import cosine_similarity, rbf_kernel, \
euclidean_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.ensemble import RandomForestClassifier, IsolationForest
from sklearn.preprocessing import StandardScaler
# Find optimal A, B with NLL loss function
# for Platt scaling applied to model scores
def nll_platt_fn(param, *args):
A = param[0]
B = param[1]
scores, labels = args
# use A, B to convert scores to probs
probs = 1 / (1 + np.exp(A * scores + B))
# NLL (per item); make it negative so optimizer
# goes in the right direction
nll = -np.sum(labels * np.log(probs) +
(1 - labels) * np.log(1 - probs)) / len(probs)
return nll
# Scale test probabilities (of class 1) using
# labeled data and their probs from the calibration set.
def platt_scaling(probs, labels, test_probs):
    if probs.ndim > 1 and probs.shape[1] != 2:
        print('Error: Platt only works for binary problems.')
        sys.exit(1)
    if probs.ndim > 1 and probs.shape[1] == 2:
# Convert to single class prob (of class 1)
probs = probs[:, 1]
# Convert labels from 0/1 using Platt's recipe
pos_labels = labels == 1
neg_labels = labels == 0
n_pos = np.sum(pos_labels)
n_neg = np.sum(neg_labels)
platt_labels = np.zeros((len(labels)))
platt_labels[pos_labels] = (n_pos + 1) / (n_pos + 2)
platt_labels[neg_labels] = 1.0 / (n_neg + 2)
res = scipy.optimize.minimize(nll_platt_fn,
#[0.0, np.log((n_neg + 1)/(n_pos + 1))],
[1.0, np.log((n_neg + 1)/(n_pos + 1))],
args=(probs, platt_labels),
method='BFGS', tol=1e-12)
A, B = res.x[0], res.x[1]
test_probs_class1 = 1 / (1 + np.exp(A * test_probs[:, 1] + B))
new_test_probs = np.stack(((1 - test_probs_class1),
test_probs_class1),
axis=1)
return new_test_probs
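# Minimal usage sketch (names below are hypothetical; assumes a fitted binary
# classifier `clf` and a held-out calibration split):
#   cal_probs = clf.predict_proba(X_cal)     # shape (n_cal, 2)
#   test_probs = clf.predict_proba(X_test)   # shape (n_test, 2)
#   calibrated_test_probs = platt_scaling(cal_probs, y_cal, test_probs)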
# To optimize NLL for temperature scaling (Zhang et al., 2020)
# https://github.com/zhang64-llnl/Mix-n-Match-Calibration
def nll_fn(t, *args):
# find optimal temperature with NLL loss function
logit, label = args
# adjust logits by T
logit = logit / t
# convert logits to probabilities
n = np.sum(np.exp(logit), 1)
probs = np.exp(logit) / n[:, None]
# avoid values too close to 0 or 1
eps = 1e-20
probs = np.clip(probs, eps, 1 - eps)
# NLL
nll = -np.sum(label * np.log(probs)) / probs.shape[0]
return nll
# To optimize MSE for temperature scaling (Zhang et al., 2020)
# https://github.com/zhang64-llnl/Mix-n-Match-Calibration
def mse_fn(t, *args):
## find optimal temperature with MSE loss function
logit, label = args
# adjust logits by T
logit = logit / t
# convert logits to probabilities
n = np.sum(np.exp(logit), 1)
probs = np.exp(logit) / n[:, None]
# MSE
mse = np.mean((probs - label) ** 2)
return mse
# Use temperature scaling to modify logits, given labels.
# This is a good entry point if using neural networks.
# If test_logits is given, return calibrated probabilities.
# Based on:
# https://github.com/zhang64-llnl/Mix-n-Match-Calibration
def temp_scaling(logits, labels, n_classes,
test_logits=np.array(()), optim='mse'):
y = np.eye(n_classes)[labels] # one-hot encoding
if optim == 'mse':
opt_fn = mse_fn
elif optim == 'nll':
opt_fn = nll_fn
else:
print('Error: unknown optimization method %s' % optim)
sys.exit(1)
t = scipy.optimize.minimize(opt_fn, 1.0, args=(logits, y),
method='L-BFGS-B', bounds=((0.05, 5.0),),
tol=1e-12)
t = t.x
# If provided, generate calibrated probs for the test set
if len(test_logits) > 0:
test_logits = test_logits / t
new_test_probs = np.exp(test_logits) / \
np.sum(np.exp(test_logits), 1)[:, None]
return t, new_test_probs
return t
# Use temperature scaling to modify probabilities, given labels.
# This is a good entry point if you have probabilities
# but not logits.
# If test_probs is given, return its calibrated version too.
def temp_scaling_probs(probs, labels, n_classes, test_probs=np.array(()),
optim='mse'):
eps = 1e-7
ts_probs = np.clip(probs, eps, 1 - eps)
ts_logits = np.log(ts_probs) - np.log(1 - ts_probs)
# If provided, generate calibrated probs for the test set
if len(test_probs) > 0:
test_probs = np.clip(test_probs, eps, 1 - eps)
test_logits = np.log(test_probs) - np.log(1 - test_probs)
else:
test_logits = np.array(())
return temp_scaling(ts_logits, labels, n_classes,
test_logits, optim=optim)
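# Minimal usage sketch (arrays below are hypothetical): fit a single temperature on a
# calibration split and apply it to the test split in one call.
#   t, calibrated_test_probs = temp_scaling_probs(cal_probs, y_cal, n_classes=3,
#                                                 test_probs=test_probs, optim='nll')
# If raw logits are available instead of probabilities, call temp_scaling() directly.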
# Compute similarity matrix (or prep for it) between X_test and X_cal
# using y_cal if needed (for sim_method='RFprox').
#
# Options:
# - sim_method options:
# 'RFprox': RF proximity
# 'Isoprox': Isolation forest proximity
# 'cosine': cosine sim. (kernel)
# 'rbf': RBF kernel
# 'sim_euclid': Euclidean distance (for sim)
# (for testing purposes:)
# 'all_one': all pairs of items have similarity 1
# '<method>-1NN': set nearest neighbor to have sim. 1, else 0
# - sim: pre-computed similarity matrix (optional, used only by RFprox/Isoprox)
#
# Returns n x m array where n = |X_test| and m = |X_cal|,
# or leaf assignments if sim_method == 'RFprox' or 'Isoprox'.
def calc_sim(X_test, X_cal, y_cal=None, sim_method='sim_euclid', sim=None):
"""
>>> np.random.seed(0)
>>> X_cal = np.random.rand(5, 3)
>>> X_test = np.random.rand(2, 3)
>>> calc_sim(X_test, X_cal)
Euclid. sim: min -1.277106, max -0.341592
array([[-0.86543108, -0.63809564, -0.94847268, -0.88049546, -1.27710576],
[-0.46672837, -0.60361979, -0.34159159, -0.6034734 , -0.93324293]])
>>> calc_sim(X_test, X_cal, sim_method='sim_euclid-1NN')
Euclid. sim: min -1.277106, max -0.341592
array([[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.]])
>>> calc_sim(X_test, X_cal, sim_method='cosine')
Cosine sim: min 0.569869, max 0.996167
array([[0.81061331, 0.87508384, 0.87038602, 0.78490629, 0.56986892],
[0.99616697, 0.99472739, 0.98846092, 0.98122596, 0.89717548]])
>>> calc_sim(X_test, X_cal, sim_method='cosine-1NN')
Cosine sim: min 0.569869, max 0.996167
array([[0., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.]])
>>> calc_sim(X_test, X_cal, sim_method='rbf')
RBF sim: min 0.000000, max 0.016007
array([[7.83897810e-09, 2.95034542e-07, 1.51550553e-07, 5.60989834e-06,
8.96936190e-12],
[1.60068127e-02, 2.61365087e-03, 6.65707955e-04, 2.25654935e-05,
2.14475312e-03]])
>>> calc_sim(X_test, X_cal, sim_method='rbf-1NN')
RBF sim: min 0.000000, max 0.016007
array([[0., 0., 0., 1., 0.],
[1., 0., 0., 0., 0.]])
>>> calc_sim(X_test, X_cal, y_cal=[0, 0, 1, 1, 1], sim_method='RFprox')
RFprox sim: min 0.090000, max 0.680000
array([[0.44, 0.58, 0.58, 0.47, 0.09],
[0.55, 0.39, 0.67, 0.4 , 0.68]])
>>> calc_sim(X_test, X_cal, y_cal=[0, 0, 1, 1, 1], sim_method='RFprox-1NN')
RFprox sim: min 0.090000, max 0.680000
array([[0., 1., 0., 0., 0.],
[0., 0., 0., 0., 1.]])
>>> calc_sim(X_test, X_cal, sim_method='Isoprox')
Isoprox sim: min 0.000000, max 0.470000
array([[0.02, 0.34, 0.27, 0.39, 0. ],
[0.34, 0.11, 0.47, 0.01, 0.24]])
>>> calc_sim(X_test, X_cal, sim_method='Isoprox-1NN')
Isoprox sim: min 0.000000, max 0.470000
array([[0., 0., 0., 1., 0.],
[0., 0., 1., 0., 0.]])
>>> calc_sim(X_test, X_cal, sim_method='all_one')
All pairwise similarities = 1.0
array([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]])
"""
if sim_method.startswith('RFprox'):
if sim is None:
# Train an RF for similarity estimation
prox_clf = RandomForestClassifier(n_estimators=100,
#ccp_alpha=0.001,
random_state=0)
prox_clf.fit(X_cal, y_cal)
# Apply it to the calibration set
cal_leaf_id = prox_clf.apply(X_cal).T # leaf assignments
test_leaf_id = prox_clf.apply(X_test).T # leaf assignments
# Convert to similarity
n_test, n_cal = len(X_test), len(X_cal)
sim = np.zeros((n_test, n_cal))
pbar_args = ['RFprox: ', pCounter(), '/%d' % n_test, Bar('='), ETA()]
progress = ProgressBar(widgets=pbar_args)
for i in progress(range(n_test)):
for j in range(n_cal):
sim[i, j] = np.mean(np.equal(cal_leaf_id[:, j],
test_leaf_id[:, i]))
print('RFprox sim: min %f, max %f' % (np.min(sim), np.max(sim)))
elif sim_method.startswith('Isoprox'):
if sim is None:
# Train an isolation forest for similarity estimation
iso_clf = IsolationForest(n_estimators=100,
random_state=0)
iso_clf.fit(X_cal)
# Apply it to the calibration and test sets
cal_leaf_id, test_leaf_id = [], []
for t in iso_clf.estimators_:
cal_leaf_id += [t.apply(X_cal).T] # leaf assignments
test_leaf_id += [t.apply(X_test).T] # leaf assignments
cal_leaf_id = np.array(cal_leaf_id)
test_leaf_id = np.array(test_leaf_id)
# Convert to similarity
n_test, n_cal = len(X_test), len(X_cal)
sim = np.zeros((n_test, n_cal))
pbar_args = ['Isoprox: ', pCounter(), '/%d' % n_test, Bar('='), ETA()]
progress = ProgressBar(widgets=pbar_args)
for i in progress(range(n_test)):
for j in range(n_cal):
if np.sum(cal_leaf_id[:, j] != -1) > 0:
sim[i, j] = (np.sum(np.equal(cal_leaf_id[:, j],
test_leaf_id[:, i])) /
np.sum(cal_leaf_id[:, j] != -1))
print('Isoprox sim: min %f, max %f' % (np.min(sim), np.max(sim)))
elif sim_method.startswith('cosine'):
# Normalize to range 0,1 instead of -1,1
sim = (cosine_similarity(X_test, X_cal) + 1) / 2.0
print('Cosine sim: min %f, max %f' % (np.min(sim),
| np.max(sim) | numpy.max |
import numpy as np
import tensorflow as tf
from collections import defaultdict
class Greedy_Tracker(object):
def __init__(self, cfg_tracker, cfg_train, tf_ops, tf_placeholders, session):
self.network_type = cfg_tracker.network_type
self.cls_thr = cfg_tracker.nn_gating_thr
self.det_ratio_thr = cfg_tracker.det_ratio
self.N_miss_max = cfg_tracker.N_miss_max
self.img_height = cfg_tracker.IMAGE_HEIGHT
self.img_width = cfg_tracker.IMAGE_WIDTH
self.all_tracks = defaultdict(lambda: defaultdict(defaultdict))
self.track_num = 0
self.model_info = {}
self.model_info['app_hidden_dim'] = cfg_train.APP_HIDDEN_DIM
self.model_info['mot_hidden_dim'] = cfg_train.MOT_HIDDEN_DIM
self.model_info['mot_input_dim'] = cfg_train.MOT_INPUT_DIM
self.result = []
self.cfg_train = cfg_train
self.cfg_tracker = cfg_tracker
self.sess = session
self.tf_ops = tf_ops
self.tf_plh = tf_placeholders
self.neg_mem_indices = self.precompute_neg_mem_indices()
def precompute_neg_mem_indices(self):
# get indices for online negative examples (i.e. other tracks in the scene) for each track
# NOTE: need to be set again when the code is used for tracking more objects
max_track_num = 200
max_det_num = 200
neg_mem_ind = np.zeros((max_track_num, max_det_num, max_track_num-1, 2))
for i in range(100):
for j in range(100):
xy_ind_tmp = np.zeros((max_track_num - 1, 2))
x_ind_tmp = np.arange(max_track_num, dtype=np.int32)
xy_ind_tmp[:, 0] = x_ind_tmp[x_ind_tmp != i]
xy_ind_tmp[:, 1] = j
neg_mem_ind[i, j, :, :] = xy_ind_tmp
return neg_mem_ind
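    # In other words, neg_mem_ind[i, j] holds the (track_index, detection_index) pairs for
    # every track other than i, all paired with detection j; these index the pairwise
    # track-detection memory to gather online negative examples for track i.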
def build_neg_mem_indices(self, track_num, det_num):
if track_num > 1:
neg_mem_inds = self.neg_mem_indices[:track_num, :det_num, :(track_num-1), :]
elif track_num == 1:
neg_mem_inds = None
else:
raise NotImplementedError
return neg_mem_inds
def get_lstm_states(self, h_np, c_np, cur_detbb_num, is_track_state):
h_np = np.reshape(h_np, (cur_detbb_num, cur_detbb_num, -1))
c_np = np.reshape(c_np, (cur_detbb_num, cur_detbb_num, -1))
if is_track_state == True:
h_np = np.transpose(h_np, (1, 0, 2))
c_np = np.transpose(c_np, (1, 0, 2))
# loop can be commented out later to improve processing time
# check lstm states
h_np = np.reshape(h_np , (cur_detbb_num * cur_detbb_num, -1))
for kkk in range(1, cur_detbb_num):
assert(np.array_equal(h_np[kkk*cur_detbb_num:(kkk+1)*cur_detbb_num, :], \
h_np[:cur_detbb_num, :]))
h_np = h_np[:cur_detbb_num, :]
# check lstm states
c_np = np.reshape(c_np , (cur_detbb_num * cur_detbb_num, -1))
for kkk in range(1, cur_detbb_num):
assert(np.array_equal(c_np[kkk*cur_detbb_num:(kkk+1)*cur_detbb_num, :], \
c_np[:cur_detbb_num, :]))
c_np = c_np[:cur_detbb_num, :]
return (h_np, c_np)
def get_lstm_states_new(self, h_np, c_np, cur_detbb_num):
h_np = np.reshape(h_np, (cur_detbb_num, -1))
c_np = np.reshape(c_np, (cur_detbb_num, -1))
h_np = h_np[:cur_detbb_num, :]
c_np = c_np[:cur_detbb_num, :]
return (h_np, c_np)
def get_lstm_states_for_matched_tracks(self, matching, model_dim, h_np, c_np, trk_num, det_num):
inds_sel1 = []
track_i_sel = []
# select lstm states for matched tracks
if len(matching) > 0:
h_np_tmp = np.zeros((len(matching), model_dim))
c_np_tmp = np.zeros((len(matching), 2 * model_dim))
h_np = np.reshape(h_np, (trk_num, det_num, -1))
c_np = np.reshape(c_np, (trk_num, det_num, -1))
for kkk in range(0, len(matching)):
track_i = int(matching[kkk][0, 0])
detbb_i = int(matching[kkk][0, 1])
h_np_tmp[kkk, :] = h_np[track_i, detbb_i, :]
c_np_tmp[kkk, :] = c_np[track_i, detbb_i, :]
inds_sel1.append(detbb_i)
track_i_sel.append(track_i)
h_np = h_np_tmp
c_np = c_np_tmp
else:
h_np = []
c_np = []
return (h_np, c_np, inds_sel1, track_i_sel)
def precompute_app_features(self, imgs, bbs):
cur_detbb_num = np.shape(imgs)[0]
assert(cur_detbb_num == np.shape(bbs)[0])
feed_dict = {
self.tf_plh['detbb_num']: cur_detbb_num,
self.tf_plh['images']:imgs,
self.tf_plh['is_training']: False,
self.tf_plh['num_step_by_user']: 1,
self.tf_plh['valid_app_data']: np.ones((cur_detbb_num, 1, 1), dtype=np.int32),
self.tf_plh['indices_for_mapping']: np.reshape(np.arange(cur_detbb_num * 1, dtype=np.int32), (-1, 1)),
self.tf_plh['image_batch_shape']: np.array([cur_detbb_num * 1, self.cfg_train.APP_LAYER_DIM])
}
app_embed_np = self.sess.run(self.tf_ops['app_embed'], feed_dict=feed_dict)
return app_embed_np
def initialize_tracks(
self,
h,
c,
memory,
bbs,
bbs_norm,
det_ids,
frame,
hidden_dim,
is_dummy,
network
):
h = np.reshape(h, (-1, hidden_dim))
if network == 'app_blstm':
assert(
| np.shape(memory) | numpy.shape |
import argparse
import os
import sys
import time
import numpy as np
import torch
import torch.optim as optim
from model import Model
from xin_feeder_baidu import Feeder
from datetime import datetime
import random
import itertools
CUDA_VISIBLE_DEVICES='0'
os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES
def seed_torch(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
seed_torch()
max_x = 1.
max_y = 1.
history_frames = 6 # 3 second * 2 frame/second
future_frames = 6 # 3 second * 2 frame/second
batch_size_train = 64
batch_size_val = 32
batch_size_test = 1
total_epoch = 1
base_lr = 0.01
lr_decay_epoch = 5
dev = 'cuda:0'
work_dir = '../trained_models/'
log_file = os.path.join(work_dir,'log_test_lyft.txt')
test_result_file = 'prediction_result_lyft.txt'
criterion = torch.nn.SmoothL1Loss()
if not os.path.exists(work_dir):
os.makedirs(work_dir)
def my_print(pra_content):
with open(log_file, 'a') as writer:
print(pra_content)
writer.write(pra_content+'\n')
def display_result(pra_results, pra_pref='Train_epoch'):
all_overall_sum_list, all_overall_num_list = pra_results
overall_sum_time =
| np.sum(all_overall_sum_list**0.5, axis=0) | numpy.sum |
# -*- coding: utf-8 -*-
# -- introduction --
from collections import Counter
import math
from IPython.display import Image
import numpy as np
from numpy.linalg import inv
import scipy.linalg as la
import scipy.stats as st
from fractions import Fraction
matice =
| np.matrix([[1/8, 1/16, 1/32, 1/32], [1/16, 1/8, 1/32, 1/32], [1/16, 1/16, 1/16, 1/16], [1/4, 0, 0, 0]]) | numpy.matrix |
import sys
import math
import faiss
import numpy as np
import torch
import torch.nn as nn
sys.path.append('/cluster/yinan/met/')
from metcode.utils.utils import *
from metcode.classifiers.knn_classifier import *
def train_contrastive_1epoch_virtual(model,criterion,optimizer,train_loader,epoch,vbsizemul):
'''Train model with the contrastive loss for one-epoch.
Supports virtual batch training with gradient accumulation
in order to have larger batches with limited hardware.
'''
# set model to train mode
model.train()
model.backbone.apply(set_batchnorm_eval) #don't change the statistics of the BN layers learned on ImageNet
epoch_loss = 0.0
batch = 1
for i,(pair,targets) in enumerate(train_loader):
targets = targets.cuda()
#forward pass
embeds = model(pair[0].cuda(),pair[1].cuda())
#loss calculation
loss = criterion(embeds[0],embeds[1],targets)/vbsizemul
#backward_pass
loss.backward()
#gradient descent
if (i+1) % vbsizemul == 0 or (i+1) == len(train_loader):
optimizer.step()
optimizer.zero_grad()
# print statistics
progress(loss=loss.data.item(),
epoch=epoch,
batch=batch,
batch_size=vbsizemul*train_loader.batch_size,
dataset_size=len(train_loader.dataset))
batch +=1
epoch_loss += ((loss.item()*pair[0].size(0))/len(train_loader.dataset))*vbsizemul
print("epoch : " + str(epoch) + " finished, train loss = " + str(epoch_loss))
return epoch_loss
def save_checkpoint(state,filename,epoch):
torch.save(state,filename + "_epoch:_" + str(epoch))
def set_batchnorm_eval(m):
'''Credits to <NAME> (https://github.com/filipradenovic/cnnimageretrieval-pytorch)
'''
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
# freeze running mean and std:
# we do training one image at a time
# so the statistics would not be per batch
# hence we choose freezing (ie using imagenet statistics)
m.eval()
# # freeze parameters:
# # in fact no need to freeze scale and bias
# # they can be learned
# # that is why next two lines are commented
# for p in m.parameters():
# p.requires_grad = False
def progress(loss, epoch, batch, batch_size, dataset_size):
batches = math.ceil(float(dataset_size) / batch_size)
count = batch * batch_size
bar_len = 40
filled_len = int(round(bar_len * count / float(dataset_size)))
bar = '=' * filled_len + '-' * (bar_len - filled_len)
status = 'Epoch {}, Batch Loss: {:.8f}'.format(epoch, loss)
_progress_str = "\r \r [{}] ...{}".format(bar, status)
sys.stdout.write(_progress_str)
sys.stdout.flush()
if batch == batches:
print()
def validate(net,train_loader,train_labels,val_loader,val_labels,ret_train_descr = False,train_descr = None):
print("Validation phase")
#descriptor extraction (singlescale for validation)
if train_descr is None:
train_descr = extract_embeddings(net,train_loader,ms = [1],msp = 1.0)
val_descr = extract_embeddings(net,val_loader,ms = [1],msp = 1.0)
train_descr = np.ascontiguousarray(train_descr,dtype=np.float32)
val_descr = np.ascontiguousarray(val_descr,dtype=np.float32)
clf = KNN_Classifier(K = 1,t = 1)
clf.fit(train_descr,train_labels)
val_preds,val_confs = clf.predict(val_descr)
val_gap,val_non_distr_gap,val_acc = evaluate(np.array(val_preds),np.array(val_confs),val_labels)
if ret_train_descr:
return val_gap,val_non_distr_gap,val_acc,train_descr
else:
return val_gap,val_non_distr_gap,val_acc
def mine_negatives(image_paths,root,image_descrs,image_labels):
image_descrs = np.ascontiguousarray(image_descrs,dtype=np.float32)
index = faiss.IndexFlatIP(np.shape(image_descrs)[1])
index.add(image_descrs.astype("float32"))
#find top 25 neighbors
#they might contain images from the same class as the query, not all are negatives yet
similarities,idxs = index.search(image_descrs.astype("float32"),25)
negs_all = []
#clean those neighbors from the candidate positives
#form a list for each sample with its 10 closest negatives
for j,idx in enumerate(idxs):
negs_one = []
i = 0
while len(negs_one)<10:
if image_labels[idx[i]] != image_labels[j]:
negs_one.append(image_paths[idx[i]])
i+=1
#pick one of the 10 closest neighbors
indices = np.arange(10)
| np.random.shuffle(indices) | numpy.random.shuffle |
import numpy as np
import pandas as pd
def load_adult(normalize = True, bucket = 0):
    # data description
types = {
'age': 'int64',
'workclass': 'object', # Never-worked not in data
'fnlwgt': 'int64',
'education': 'object',
'education-num': 'int64',
'marital-status': 'object',
'occupation': 'object',
'relationship': 'object',
'race': 'object',
'sex': 'object',
'capital-gain': 'int64',
'capital-loss': 'int64',
'hours-per-week': 'int64',
'native-country': 'object',
'target': 'object'
}
names = list(types.keys())
# load data
train = pd.read_csv('./adult/adult.data', names=names, na_values="?", dtype=types, index_col=False, skipinitialspace=True)
test = pd.read_csv('./adult/adult.test', names=names, na_values="?", dtype=types, index_col=False, skipinitialspace=True, skiprows=1)
# merge data so that we have info about all potential classes & can process everything at once
train_size = len(train)
tog = train.append(test)
# clean data
tog['target'] = tog['target'].str.rstrip('.')
return process_dta(tog, train_size, normalize, bucket)
def process_dta(tog, train_size, normalize=False, bucket = 0):
# change object (string) columns to category type
for col in tog.columns[
| np.where(tog.dtypes == 'object') | numpy.where |
"""This file implements the gym environment of Rex.
"""
import collections
import math
import time
import gym
import numpy as np
import pybullet
import pybullet_data
from gym import spaces
from gym.utils import seeding
from ..model import rex, motor, mark_constants, rex_constants
from ..model.terrain import Terrain
from ..util import bullet_client
MOTOR_ANGLE_OBSERVATION_INDEX = 0
OBSERVATION_EPS = 0.01
RENDER_HEIGHT = 360
RENDER_WIDTH = 480
SENSOR_NOISE_STDDEV = rex.SENSOR_NOISE_STDDEV
DEFAULT_URDF_VERSION = "default"
NUM_SIMULATION_ITERATION_STEPS = 300
REX_URDF_VERSION_MAP = {
DEFAULT_URDF_VERSION: rex.Rex
}
def convert_to_list(obj):
try:
iter(obj)
return obj
except TypeError:
return [obj]
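# Minimal usage sketch for the environment defined below (constructor arguments are
# illustrative, not exhaustive; see the class docstring for the full list):
#   env = RexGymEnv(render=False, signal_type="ik", terrain_type="plane")
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())
#   env.close()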
class RexGymEnv(gym.Env):
"""The gym environment for Rex.
It simulates the locomotion of Rex, a quadruped robot. The state space
    includes the angles, velocities and torques for all the motors, and the action
space is the desired motor angle for each motor. The reward function is based
on how far Rex walks in 1000 steps and penalizes the energy
expenditure.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 100}
def __init__(self,
debug=False,
urdf_root=pybullet_data.getDataPath(),
urdf_version=None,
distance_weight=1.0,
energy_weight=0.0005,
shake_weight=0.005,
drift_weight=2.0,
distance_limit=float("inf"),
observation_noise_stdev=SENSOR_NOISE_STDDEV,
self_collision_enabled=False,
motor_velocity_limit=np.inf,
pd_control_enabled=False,
leg_model_enabled=True,
accurate_motor_model_enabled=False,
remove_default_joint_damping=False,
motor_kp=2.0,
motor_kd=0.03,
control_latency=0.0,
pd_latency=0.0,
torque_control_enabled=False,
motor_overheat_protection=False,
hard_reset=True,
on_rack=False,
render=True,
num_steps_to_log=1000,
action_repeat=1,
control_time_step=None,
env_randomizer=None,
forward_reward_cap=float("inf"),
reflection=True,
log_path=None,
target_orient=None,
init_orient=None,
target_position=None,
start_position=None,
base_y=0.0,
base_z=0.0,
base_roll=0.0,
base_pitch=0.0,
base_yaw=0.0,
step_length=None,
step_rotation=None,
step_angle=None,
step_period=None,
backwards=None,
signal_type="ik",
terrain_type="plane",
terrain_id=None,
mark='base'):
""" Initialize the rex gym environment.
Args:
urdf_root: The path to the urdf data folder.
urdf_version: [DEFAULT_URDF_VERSION] are allowable
versions. If None, DEFAULT_URDF_VERSION is used.
distance_weight: The weight of the distance term in the reward.
energy_weight: The weight of the energy term in the reward.
shake_weight: The weight of the vertical shakiness term in the reward.
drift_weight: The weight of the sideways drift term in the reward.
distance_limit: The maximum distance to terminate the episode.
observation_noise_stdev: The standard deviation of observation noise.
self_collision_enabled: Whether to enable self collision in the sim.
motor_velocity_limit: The velocity limit of each motor.
pd_control_enabled: Whether to use PD controller for each motor.
leg_model_enabled: Whether to use a leg motor to reparameterize the action
space.
accurate_motor_model_enabled: Whether to use the accurate DC motor model.
remove_default_joint_damping: Whether to remove the default joint damping.
motor_kp: proportional gain for the accurate motor model.
motor_kd: derivative gain for the accurate motor model.
control_latency: It is the delay in the controller between when an
observation is made at some point, and when that reading is reported
back to the Neural Network.
pd_latency: latency of the PD controller loop. PD calculates PWM based on
the motor angle and velocity. The latency measures the time between when
the motor angle and velocity are observed on the microcontroller and
when the true state happens on the motor. It is typically (0.001-
0.002s).
torque_control_enabled: Whether to use the torque control, if set to
False, pose control will be used.
motor_overheat_protection: Whether to shutdown the motor that has exerted
large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time
(OVERHEAT_SHUTDOWN_TIME). See ApplyAction() in rex.py for more
details.
hard_reset: Whether to wipe the simulation and load everything when reset
            is called. If set to false, reset just places Rex back to the start
            position and sets its pose to the initial configuration.
on_rack: Whether to place Rex on rack. This is only used to debug
            the walk gait. In this mode, Rex's base is hung in midair so
that its walk gait is clearer to visualize.
render: Whether to render the simulation.
num_steps_to_log: The max number of control steps in one episode that will
be logged. If the number of steps is more than num_steps_to_log, the
environment will still be running, but only first num_steps_to_log will
be recorded in logging.
action_repeat: The number of simulation steps before actions are applied.
control_time_step: The time step between two successive control signals.
env_randomizer: An instance (or a list) of EnvRandomizer(s). An
EnvRandomizer may randomize the physical property of rex, change
            the terrain during reset(), or add perturbation forces during step().
forward_reward_cap: The maximum value that forward reward is capped at.
Disabled (Inf) by default.
log_path: The path to write out logs. For the details of logging, refer to
rex_logging.proto.
Raises:
ValueError: If the urdf_version is not supported.
"""
self.mark = mark
self.num_motors = mark_constants.MARK_DETAILS['motors_num'][self.mark]
self.motor_velocity_obs_index = MOTOR_ANGLE_OBSERVATION_INDEX + self.num_motors
self.motor_torque_obs_index = self.motor_velocity_obs_index + self.num_motors
self.base_orientation_obs_index = self.motor_torque_obs_index + self.num_motors
# Set up logging.
self._log_path = log_path
# @TODO fix logging
self.logging = None
# PD control needs smaller time step for stability.
if control_time_step is not None:
self.control_time_step = control_time_step
self._action_repeat = action_repeat
self._time_step = control_time_step / action_repeat
else:
# Default values for time step and action repeat
if accurate_motor_model_enabled or pd_control_enabled:
self._time_step = 0.002
self._action_repeat = 5
else:
self._time_step = 0.01
self._action_repeat = 1
self.control_time_step = self._time_step * self._action_repeat
# TODO: Fix the value of self._num_bullet_solver_iterations.
self._num_bullet_solver_iterations = int(NUM_SIMULATION_ITERATION_STEPS / self._action_repeat)
self._urdf_root = urdf_root
self._self_collision_enabled = self_collision_enabled
self._motor_velocity_limit = motor_velocity_limit
self._observation = []
self._true_observation = []
self._objectives = []
self._objective_weights = [distance_weight, energy_weight, drift_weight, shake_weight]
self._env_step_counter = 0
self._num_steps_to_log = num_steps_to_log
self._is_render = render
self._is_debug = debug
self._last_base_position = [0, 0, 0]
self._last_base_orientation = [0, 0, 0, 1]
self._distance_weight = distance_weight
self._energy_weight = energy_weight
self._drift_weight = drift_weight
self._shake_weight = shake_weight
self._distance_limit = distance_limit
self._observation_noise_stdev = observation_noise_stdev
self._action_bound = 1
self._pd_control_enabled = pd_control_enabled
self._leg_model_enabled = leg_model_enabled
self._accurate_motor_model_enabled = accurate_motor_model_enabled
self._remove_default_joint_damping = remove_default_joint_damping
self._motor_kp = motor_kp
self._motor_kd = motor_kd
self._torque_control_enabled = torque_control_enabled
self._motor_overheat_protection = motor_overheat_protection
self._on_rack = on_rack
self._cam_dist = 1.0
self._cam_yaw = 0
self._cam_pitch = -30
self._forward_reward_cap = forward_reward_cap
self._hard_reset = True
self._last_frame_time = 0.0
self._control_latency = control_latency
self._pd_latency = pd_latency
self._urdf_version = urdf_version
self._ground_id = None
self._reflection = reflection
self._env_randomizers = convert_to_list(env_randomizer) if env_randomizer else []
# @TODO fix logging
self._episode_proto = None
if self._is_render:
self._pybullet_client = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
self._pybullet_client = bullet_client.BulletClient()
if self._urdf_version is None:
self._urdf_version = DEFAULT_URDF_VERSION
self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
self._signal_type = signal_type
# gait inputs
self.step_length = step_length
self.step_rotation = step_rotation
self.step_angle = step_angle
self.step_period = step_period
# poses inputs
self._base_x = 0.01
self._base_y = base_y
self._base_z = base_z
self._base_roll = base_roll
self._base_pitch = base_pitch
self._base_yaw = base_yaw
# envs inputs
self._target_orient = target_orient
self._init_orient = init_orient
self._target_position = target_position
self._start_position = start_position
# computation support params
self._random_pos_target = False
self._random_pos_start = False
self._random_orient_target = False
self._random_orient_start = False
self._companion_obj = {}
self._queue = collections.deque(["base_y", "base_z", "roll", "pitch", "yaw"])
self._ranges = {
"base_x": (-0.02, 0.02, 0.01),
"base_y": (-0.007, 0.007, 0),
"base_z": (-0.048, 0.021, 0),
"roll": (-np.pi / 4, np.pi / 4, 0),
"pitch": (-np.pi / 4, np.pi / 4, 0),
"yaw": (-np.pi / 4, np.pi / 4, 0)
}
self.seed()
self._backwards = backwards
self._terrain_type = "plane"
self._terrain_id = terrain_id
self.reset()
self._terrain_type = terrain_type
self.terrain = Terrain(self._terrain_type, self._terrain_id)
        if self._terrain_type != "plane":
self.terrain.generate_terrain(self)
observation_high = (self._get_observation_upper_bound() + OBSERVATION_EPS)
observation_low = (self._get_observation_lower_bound() - OBSERVATION_EPS)
action_dim = self.num_motors
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(observation_low, observation_high)
self.viewer = None
self._hard_reset = hard_reset # This assignment need to be after reset()
self.env_goal_reached = False
def close(self):
# @TODO fix logger
# if self._env_step_counter > 0:
# self.logging.save_episode(self._episode_proto)
self.rex.Terminate()
def add_env_randomizer(self, env_randomizer):
self._env_randomizers.append(env_randomizer)
def reset(self, initial_motor_angles=None, reset_duration=1.0):
self.env_goal_reached = False
self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 0)
# @TODO fix logger
# if self._env_step_counter > 0:
# self.logging.save_episode(self._episode_proto)
# self._episode_proto = rex_logging_pb2.RexEpisode()
# rex_logging.preallocate_episode_proto(self._episode_proto, self._num_steps_to_log)
if self._hard_reset:
self._pybullet_client.resetSimulation()
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=int(self._num_bullet_solver_iterations))
self._pybullet_client.setTimeStep(self._time_step)
self._ground_id = self._pybullet_client.loadURDF("%s/plane.urdf" % self._urdf_root)
if self._reflection:
self._pybullet_client.changeVisualShape(self._ground_id, -1, rgbaColor=[1, 1, 1, 0.8])
self._pybullet_client.configureDebugVisualizer(
self._pybullet_client.COV_ENABLE_PLANAR_REFLECTION, self._ground_id)
self._pybullet_client.setGravity(0, 0, -10)
acc_motor = self._accurate_motor_model_enabled
motor_protect = self._motor_overheat_protection
if self._urdf_version not in REX_URDF_VERSION_MAP:
raise ValueError("%s is not a supported urdf_version." % self._urdf_version)
else:
self.rex = (REX_URDF_VERSION_MAP[self._urdf_version](
pybullet_client=self._pybullet_client,
action_repeat=self._action_repeat,
urdf_root=self._urdf_root,
time_step=self._time_step,
self_collision_enabled=self._self_collision_enabled,
motor_velocity_limit=self._motor_velocity_limit,
pd_control_enabled=self._pd_control_enabled,
accurate_motor_model_enabled=acc_motor,
remove_default_joint_damping=self._remove_default_joint_damping,
motor_kp=self._motor_kp,
motor_kd=self._motor_kd,
control_latency=self._control_latency,
pd_latency=self._pd_latency,
observation_noise_stdev=self._observation_noise_stdev,
torque_control_enabled=self._torque_control_enabled,
motor_overheat_protection=motor_protect,
on_rack=self._on_rack,
terrain_id=self._terrain_id,
mark=self.mark))
self.rex.Reset(reload_urdf=False,
default_motor_angles=initial_motor_angles,
reset_time=reset_duration)
# Loop over all env randomizers.
for env_randomizer in self._env_randomizers:
env_randomizer.randomize_env(self)
        if self._terrain_type != "plane":
self.terrain.update_terrain()
self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
self._env_step_counter = 0
self._last_base_position = [0, 0, 0]
self._last_base_orientation = [0, 0, 0, 1]
self._objectives = []
self._pybullet_client.resetDebugVisualizerCamera(self._cam_dist, self._cam_yaw,
self._cam_pitch, [0, 0, 0])
self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 1)
# time.sleep(100)
return self._get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _transform_action_to_motor_command(self, action):
if len(action) != mark_constants.MARK_DETAILS['motors_num'][self.mark]:
# extend with arm rest pose
action = np.concatenate((action, rex_constants.ARM_POSES["rest"]))
return action
def step(self, action):
"""Step forward the simulation, given the action.
Args:
action: A list of desired motor angles for eight motors.
Returns:
observations: The angles, velocities and torques of all motors.
reward: The reward for the current state-action pair.
done: Whether the episode has ended.
info: A dictionary that stores diagnostic information.
Raises:
ValueError: The action dimension is not the same as the number of motors.
ValueError: The magnitude of actions is out of bounds.
"""
self._last_base_position = self.rex.GetBasePosition()
self._last_base_orientation = self.rex.GetBaseOrientation()
if self._is_render:
# Sleep, otherwise the computation takes less time than real time,
# which will make the visualization like a fast-forward video.
time_spent = time.time() - self._last_frame_time
self._last_frame_time = time.time()
time_to_sleep = self.control_time_step - time_spent
if time_to_sleep > 0:
time.sleep(time_to_sleep)
base_pos = self.rex.GetBasePosition()
# Keep the previous orientation of the camera set by the user.
[yaw, pitch, dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]
self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos)
for env_randomizer in self._env_randomizers:
env_randomizer.randomize_step(self)
action = self._transform_action_to_motor_command(action)
self.rex.Step(action)
reward = self._reward()
done = self._termination()
# @TODO fix logging
# if self._log_path is not None:
# rex_logging.update_episode_proto(self._episode_proto, self.rex, action,
# self._env_step_counter)
self._env_step_counter += 1
if done:
self.rex.Terminate()
return np.array(self._get_observation()), reward, done, {'action': action}
def render(self, mode="rgb_array", close=False):
if mode != "rgb_array":
return np.array([])
base_pos = self.rex.GetBasePosition()
view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(fov=60,
aspect=float(RENDER_WIDTH) / RENDER_HEIGHT,
nearVal=0.1,
farVal=100.0)
(_, _, px, _, _) = self._pybullet_client.getCameraImage(
width=RENDER_WIDTH,
height=RENDER_HEIGHT,
renderer=self._pybullet_client.ER_BULLET_HARDWARE_OPENGL,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix)
rgb_array = np.array(px)
rgb_array = rgb_array[:, :, :3]
return rgb_array
def get_rex_motor_angles(self):
"""Get the rex's motor angles.
Returns:
A numpy array of motor angles.
"""
return np.array(self._observation[MOTOR_ANGLE_OBSERVATION_INDEX:MOTOR_ANGLE_OBSERVATION_INDEX + self.num_motors])
def get_rex_motor_velocities(self):
"""Get the rex's motor velocities.
Returns:
A numpy array of motor velocities.
"""
return np.array(
self._observation[self.motor_velocity_obs_index:self.motor_velocity_obs_index + self.num_motors])
def get_rex_motor_torques(self):
"""Get the rex's motor torques.
Returns:
A numpy array of motor torques.
"""
return np.array(
self._observation[self.motor_torque_obs_index:self.motor_torque_obs_index + self.num_motors])
def get_rex_base_orientation(self):
"""Get the rex's base orientation, represented by a quaternion.
Returns:
A numpy array of rex's orientation.
"""
return np.array(self._observation[self.base_orientation_obs_index:])
def is_fallen(self):
"""Decide whether Rex has fallen.
        Rex is considered fallen if the angle between the base's up direction and
        the world's up direction is too large (their dot product is smaller than
        0.85) or if the base is very low on the ground (height below 0.13 meter).
Returns:
Boolean value that indicates whether rex has fallen.
"""
orientation = self.rex.GetBaseOrientation()
rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)
local_up = rot_mat[6:]
return np.dot(np.asarray([0, 0, 1]),
| np.asarray(local_up) | numpy.asarray |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 9 15:04:39 2018
@author: luoyuhao
"""
import numpy
import numpy as np
import cv2
import time
import imageio
from skimage import transform as trans
imgSize = [112, 96];
coord5point = [[30.2946, 51.6963],
[65.5318, 51.6963],
[48.0252, 71.7366],
[33.5493, 92.3655],
[62.7299, 92.3655]]
face_landmarks = [[259, 137],
[319, 150],
[284, 177],
[253, 206],
[297, 216]]
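# The function below (shown truncated) appears to estimate a similarity transform
# (rotation, uniform scale and translation) mapping points1 onto points2 in the usual
# Procrustes/Umeyama style: both point sets are centred on their means and normalised
# by their standard deviations before the rotation is solved for.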
def transformation_from_points(points1, points2):
points1 = points1.astype(numpy.float64)
points2 = points2.astype(numpy.float64)
c1 = numpy.mean(points1, axis=0)
c2 = numpy.mean(points2, axis=0)
points1 -= c1
points2 -= c2
s1 =
| numpy.std(points1) | numpy.std |
import numpy as np
import pandas as pd
# calculate confidence {A} -> {B}
def confidence(A, B):
"""
Calculate confidence {A} -> {B}.
A and B are arrays that only contain boolean values.
e.g. A=df["antecedent"], B=df["consequent"]
"""
support_A = A.mean()
support_AB =
| np.logical_and(A, B) | numpy.logical_and |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 18 17:02:08 2017
@author: wd
"""
import numpy as np
import tensorflow as tf
import random
import itertools
import PG_conv
import PG_value
import collections
from collections import deque
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import gym
# import dqn
# from dqn_conv import DQN
# import dqn_conv
env = gym.make('ND-v0')
input_size = 784
output_size = env.action_space.n # number of actions
dis = 0.9
REPLAY_MEMORY = 1000
# Make batch of data and present value of reward
def replay_train(mainDQN, targetDQN, train_batch):
# mainDQN: ....
# targetDQN: ....
# train_batch: queue contains training information during batch learning. Refer 'for' statement at below.
x_stack = np.empty(0).reshape(0, 784)
h_stack = np.empty(0).reshape(0, 50)
y_stack = np.empty(0).reshape(0, 1)
# Get stored information from the buffer
for state, action, reward, next_state, done, history_buffer in train_batch:
if done == 1:
Q = reward
else:
Q = reward + dis*np.mean(targetDQN.predict(next_state, history_buffer))
y_stack = np.vstack([y_stack, Q])
x_stack = np.vstack([x_stack,
| np.reshape(state, 784) | numpy.reshape |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test for solve eigenvalues & eigen vectors"""
import pytest
import numpy as np
from mindspore import Tensor
from mindspore.scipy.ops import EighNet
np.random.seed(0)
def match(v, v_, error=0):
if error > 0:
| np.testing.assert_almost_equal(v, v_, decimal=error) | numpy.testing.assert_almost_equal |
from copy import deepcopy
import numpy as np
"""
This module defines the ranges of hyper-params to search upon
"""
# Define the high level configuration structure
_all_config = {}
_all_config['AWA1'] = {}
_all_config['SUN'] = {}
_all_config['CUB'] = {}
_all_config['FLO'] = {}
def get(dataset_name, combiner, metric_name, all_config=_all_config):
assert(dataset_name in all_config)
dataset_cfgs = all_config[dataset_name]
if metric_name in dataset_cfgs:
cfg = dataset_cfgs[metric_name].get(combiner,
dataset_cfgs[metric_name]['default'])
else:
cfg = dataset_cfgs.get(combiner, dataset_cfgs['default'])
# sanity check
assert (len(cfg['anomaly_detector_threshold']) == 1)
return deepcopy(cfg)
def cast_to_lists(hyper_params):
""" Put in a list (with len=1), if given as individual value """
hyper_params_as_lists = {}
for k,v in hyper_params.items():
if not (isinstance(v, list) or isinstance(v, np.ndarray)) :
v = [v,]
hyper_params_as_lists[k] = list(v)
return hyper_params_as_lists
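# e.g. cast_to_lists({'T_cond': 3, 'threshold': [0.1, 0.2]})
#      -> {'T_cond': [3], 'threshold': [0.1, 0.2]}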
def _individual_cfg(hyper_params={}, anomaly_detector_threshold={}):
assert( len(anomaly_detector_threshold) == 1 )
hyper_params_as_lists = cast_to_lists(hyper_params)
threshold_as_list = cast_to_lists(anomaly_detector_threshold)
return dict(hyper_params=hyper_params_as_lists,
anomaly_detector_threshold=threshold_as_list)
# Here we define the default configurations
CUB_default = _individual_cfg(hyper_params=
dict(
T_cond=[0.1, 0.3, 1, 3],
),
anomaly_detector_threshold=
dict(threshold=np.arange(-2.5, 2.5, 0.1))
)
_all_config['CUB']['default'] = deepcopy(CUB_default)
SUN_default = _individual_cfg(hyper_params=
dict(
T_cond=[0.1, 0.3, 1, 3, 10],
),
anomaly_detector_threshold=
dict(threshold=np.arange(-2.5, 20, 0.2))
)
_all_config['SUN']['default'] = deepcopy(SUN_default)
_all_config['SUN']['Confidence Based Gater: T = (3,)'] = {}
_all_config['SUN']['Confidence Based Gater: T = (3,)']['adaptive_smoothing'] = \
_individual_cfg(hyper_params=
dict(
T_cond=[0.1, 0.3, 1, 3, 10],
),
anomaly_detector_threshold=
dict(threshold=
| np.arange(0, 50, 0.2) | numpy.arange |
# -*- coding: utf-8 -*-
"""
docstring goes here.
:copyright: Copyright 2014 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
import neo
import numpy as np
from numpy.testing import assert_array_almost_equal
import quantities as pq
import elephant.conversion as cv
def get_nearest(times, time):
return (np.abs(times-time)).argmin()
class binarize_TestCase(unittest.TestCase):
def setUp(self):
self.test_array_1d = np.array([1.23, 0.3, 0.87, 0.56])
def test_binarize_with_spiketrain_exact(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms',
t_stop=10.0, sampling_rate=100)
times = np.arange(0, 10.+.01, .01)
target = np.zeros_like(times).astype('bool')
for time in self.test_array_1d:
target[get_nearest(times, time)] = True
times = pq.Quantity(times, units='ms')
res, tres = cv.binarize(st, return_times=True)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_spiketrain_exact_set_ends(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms',
t_stop=10.0, sampling_rate=100)
times = np.arange(5., 10.+.01, .01)
target = np.zeros_like(times).astype('bool')
times = pq.Quantity(times, units='ms')
res, tres = cv.binarize(st, return_times=True, t_start=5., t_stop=10.)
assert_array_almost_equal(res, target, decimal=9)
assert_array_almost_equal(tres, times, decimal=9)
def test_binarize_with_spiketrain_round(self):
st = neo.SpikeTrain(self.test_array_1d, units='ms',
t_stop=10.0, sampling_rate=10.0)
times =
|
np.arange(0, 10.+.1, .1)
|
numpy.arange
|
import numpy as np
from abc import ABC, abstractmethod
from multiprocessing import Process, Pipe
try:
import ray
except ImportError:
pass
from tianshou.env import EnvWrapper, CloudpickleWrapper
class BaseVectorEnv(ABC):
def __init__(self, env_fns):
self._env_fns = env_fns
self.env_num = len(env_fns)
def __len__(self):
return self.env_num
@abstractmethod
def reset(self):
pass
@abstractmethod
def step(self, action):
pass
@abstractmethod
def seed(self, seed=None):
pass
@abstractmethod
def render(self, **kwargs):
pass
@abstractmethod
def close(self):
pass
class VectorEnv(BaseVectorEnv):
"""docstring for VectorEnv"""
def __init__(self, env_fns):
super().__init__(env_fns)
self.envs = [_() for _ in env_fns]
def reset(self, id=None):
if id is None:
self._obs = np.stack([e.reset() for e in self.envs])
else:
if np.isscalar(id):
id = [id]
for i in id:
self._obs[i] = self.envs[i].reset()
return self._obs
def step(self, action):
assert len(action) == self.env_num
result = [e.step(a) for e, a in zip(self.envs, action)]
self._obs, self._rew, self._done, self._info = zip(*result)
self._obs = np.stack(self._obs)
self._rew = np.stack(self._rew)
self._done = np.stack(self._done)
self._info = np.stack(self._info)
return self._obs, self._rew, self._done, self._info
def seed(self, seed=None):
if np.isscalar(seed):
seed = [seed + _ for _ in range(self.env_num)]
elif seed is None:
seed = [seed] * self.env_num
result = []
for e, s in zip(self.envs, seed):
if hasattr(e, 'seed'):
result.append(e.seed(s))
return result
def render(self, **kwargs):
result = []
for e in self.envs:
if hasattr(e, 'render'):
result.append(e.render(**kwargs))
return result
def close(self):
for e in self.envs:
e.close()
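# Hedged usage sketch (added for illustration; gym and the 'CartPole-v0' id are
# assumptions, not part of this module):
#   venv = VectorEnv([lambda: gym.make('CartPole-v0') for _ in range(4)])
#   obs = venv.reset()                                  # stacked observations, shape (4, obs_dim)
#   obs, rew, done, info = venv.step(np.zeros(4, dtype=int))
#   venv.close()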
def worker(parent, p, env_fn_wrapper):
parent.close()
env = env_fn_wrapper.data()
try:
while True:
cmd, data = p.recv()
if cmd == 'step':
p.send(env.step(data))
elif cmd == 'reset':
p.send(env.reset())
elif cmd == 'close':
p.close()
break
elif cmd == 'render':
p.send(env.render(**data) if hasattr(env, 'render') else None)
elif cmd == 'seed':
p.send(env.seed(data) if hasattr(env, 'seed') else None)
else:
p.close()
raise NotImplementedError
except KeyboardInterrupt:
p.close()
class SubprocVectorEnv(BaseVectorEnv):
"""docstring for SubProcVectorEnv"""
def __init__(self, env_fns):
super().__init__(env_fns)
self.closed = False
self.parent_remote, self.child_remote = \
zip(*[Pipe() for _ in range(self.env_num)])
self.processes = [
Process(target=worker, args=(
parent, child, CloudpickleWrapper(env_fn)), daemon=True)
for (parent, child, env_fn) in zip(
self.parent_remote, self.child_remote, env_fns)
]
for p in self.processes:
p.start()
for c in self.child_remote:
c.close()
def step(self, action):
assert len(action) == self.env_num
for p, a in zip(self.parent_remote, action):
p.send(['step', a])
result = [p.recv() for p in self.parent_remote]
self._obs, self._rew, self._done, self._info = zip(*result)
self._obs = np.stack(self._obs)
self._rew = np.stack(self._rew)
self._done = np.stack(self._done)
self._info = np.stack(self._info)
return self._obs, self._rew, self._done, self._info
def reset(self, id=None):
if id is None:
for p in self.parent_remote:
p.send(['reset', None])
self._obs = np.stack([p.recv() for p in self.parent_remote])
return self._obs
else:
if np.isscalar(id):
id = [id]
for i in id:
self.parent_remote[i].send(['reset', None])
for i in id:
self._obs[i] = self.parent_remote[i].recv()
return self._obs
def seed(self, seed=None):
if np.isscalar(seed):
seed = [seed + _ for _ in range(self.env_num)]
elif seed is None:
seed = [seed] * self.env_num
for p, s in zip(self.parent_remote, seed):
p.send(['seed', s])
return [p.recv() for p in self.parent_remote]
def render(self, **kwargs):
for p in self.parent_remote:
p.send(['render', kwargs])
return [p.recv() for p in self.parent_remote]
def close(self):
if self.closed:
return
for p in self.parent_remote:
p.send(['close', None])
self.closed = True
for p in self.processes:
p.join()
class RayVectorEnv(BaseVectorEnv):
"""docstring for RayVectorEnv"""
def __init__(self, env_fns):
super().__init__(env_fns)
try:
if not ray.is_initialized():
ray.init()
except NameError:
raise ImportError(
'Please install ray to support RayVectorEnv: pip3 install ray')
self.envs = [
ray.remote(EnvWrapper).options(num_cpus=0).remote(e())
for e in env_fns]
def step(self, action):
assert len(action) == self.env_num
result_obj = [e.step.remote(a) for e, a in zip(self.envs, action)]
result = [ray.get(r) for r in result_obj]
self._obs, self._rew, self._done, self._info = zip(*result)
self._obs = np.stack(self._obs)
self._rew = np.stack(self._rew)
self._done =
|
np.stack(self._done)
|
numpy.stack
|
"""
Slightly modified from https://github.com/peter3125/sentence2vec with
some new methods for preprocessing sentences, calculating cosine similarity
with sklearn, and a pipeline for comparing an input sentence against a corpus
for some braindead question answering purposes
"""
from __future__ import print_function
import time
import gensim
from gensim.corpora.wikicorpus import WikiCorpus
from gensim.models.doc2vec import Doc2Vec
import spacy
import math
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import cosine_similarity
from typing import List
nlp = spacy.load('en')
gnews_model = gensim.models.KeyedVectors.load_word2vec_format('~/Downloads/GoogleNews-vectors-negative300.bin', binary=True)
class Word:
def __init__(self, text, vector):
self.text = text
self.vector = vector
class Sentence:
def __init__(self, word_list):
self.word_list = word_list
# return the length of a sentence
def len(self):
return(len(self.word_list))
def __str__(self):
word_str_list = [word.text for word in self.word_list]
return ' '.join(word_str_list)
def __repr__(self):
return self.__str__()
def get_word_frequency(word_text, vec_model):
wf = vec_model.vocab[word_text].count
return(wf)
def preloading_sentences(sentence_list, model):
"""
Converts a list of sentences into a list of Sentence (and Word) objects
Pretty similar to what peter3125/sentence2vec.git does
input: a list of sentences, embedding_size
output: a list of Sentence objects, containing Word objects, which contain 'text' and word vector attributes
"""
embedding_size = 300
all_sent_info = []
for sentence in sentence_list:
sent_info = []
spacy_sentence = nlp(sentence)
for word in spacy_sentence:
if word.text in model.vocab:
sent_info.append(Word(word.text, model[word.text]))
# todo: if sent_info > 0, append, else don't
all_sent_info.append(Sentence(sent_info))
return(all_sent_info)
def sentence_to_vec(sentence_list, embedding_size, a=1e-3):
"""
A SIMPLE BUT TOUGH TO BEAT BASELINE FOR SENTENCE EMBEDDINGS
<NAME>, <NAME>, <NAME>
Princeton University
"""
sentence_set = [] # intermediary list of sentence vectors before PCA
sent_list = [] # return list of input sentences in the output
for sentence in sentence_list:
this_sent = []
vs = np.zeros(embedding_size) # add all w2v values into one vector for the sentence
sentence_length = sentence.len()
for word in sentence.word_list:
this_sent.append(word.text)
word_freq = get_word_frequency(word.text, gnews_model)
a_value = a / (a + word_freq) # smooth inverse frequency, SIF
vs = np.add(vs, np.multiply(a_value, word.vector)) # vs += sif * word_vector
vs = np.divide(vs, sentence_length) # weighted average, normalized by sentence length
        sentence_set.append(vs) # add to our existing re-calculated set of sentences
sent_list.append(' '.join(this_sent))
# calculate PCA of this sentence set
pca = PCA(n_components=embedding_size)
pca.fit(np.array(sentence_set))
u = pca.components_[0] # the PCA vector
u = np.multiply(u, np.transpose(u)) # u x uT
    # pad the vector? (occurs if we have fewer sentences than embedding_size)
if len(u) < embedding_size:
for i in range(embedding_size - len(u)):
u = np.append(u, 0)
# resulting sentence vectors, vs = vs -u * uT * vs
sentence_vecs = []
for vs in sentence_set:
sub =
|
np.multiply(u, vs)
|
numpy.multiply
|
"""The svm theta kernel as defined in :cite:`johansson2014global`."""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import collections
import warnings
import numpy as np
from scipy.linalg import eigvalsh
from sklearn.svm import OneClassSVM
from grakel.kernels import Kernel
from grakel.graph import Graph
from grakel.tools import distribute_samples
positive_eigenvalue_limit = float("+1e-6")
min_weight = float("1e-10")
class SvmTheta(Kernel):
"""Calculate the SVM theta kernel.
See :cite:`johansson2014global`.
Parameters
----------
X,Y : *valid-graph-format*
The pair of graphs on which the kernel is applied.
n_samples : int, default=50
Number of samples.
subsets_size_range : tuple, len=2, default=(2,8)
(min, max) size of the vertex set of sampled subgraphs.
metric : function (number, number -> number), default=:math:`f(x,y)=x*y`
The applied metric between the svm_theta numbers of the two graphs.
Attributes
----------
_n_samples : int
Number of samples drawn for the computation of lovasz theta.
_ssr : tuple, len=2
A tuple containing two integers designating the minimum and the maximum
size of the vertex set of considered subgraphs.
_base_kernel : function (number, number -> number)
The applied base_kernel between features of the mean lovasz_theta
numbers samples for all levels of the two graphs.
"""
_graph_format = "adjacency"
def __init__(self, n_jobs=None, normalize=False,
verbose=False, random_seed=42, n_samples=50,
subsets_size_range=(2, 8),
base_kernel=lambda x, y: x.T.dot(y)):
"""Initialise a lovasz_theta kernel."""
# setup valid parameters and initialise from parent
super(SvmTheta, self).__init__(n_jobs=n_jobs,
normalize=normalize,
verbose=verbose)
self.n_samples = n_samples
self.subsets_size_range = subsets_size_range
self.base_kernel = base_kernel
self.random_seed = random_seed
self.initialized_.update({"n_samples": False, "subsets_size_range": False,
"base_kernel": False, "random_seed": False})
    def initialize_(self):
"""Initialize all transformer arguments, needing initialization."""
super(SvmTheta, self).initialize_()
if not self.initialized_["n_samles"]:
if self.n_samples <= 0 or type(self.n_samples) is not int:
raise TypeError('n_samples must an integer be bigger '
'than zero')
self.initialized_["n_samles"] = True
if not self.initialized_["subsets_size_range"]:
if (type(self.subsets_size_range) is not tuple
or len(self.subsets_size_range) != 2
or any(type(i) is not int for i in self.subsets_size_range)
or self.subsets_size_range[0] > self.subsets_size_range[1]
or self.subsets_size_range[0] <= 0):
                raise TypeError('subsets_size_range: subset size range '
                                'must be a tuple of two integers in '
                                'increasing order, bigger than 1')
self.initialized_["subsets_size_range"] = True
if not self.initialized_["base_kernel"]:
if not callable(self.base_kernel):
                raise TypeError('base_kernel between arguments '
                                'must be a function')
self.initialized_["base_kernel"] = True
if not self.initialized_["random_seed"]:
np.random.seed(self.random_seed)
self.initialized_["random_seed"] = True
def parse_input(self, X):
"""Parse and create features for svm_theta kernel.
Parameters
----------
X : iterable
For the input to pass the test, we must have:
Each element must be an iterable with at most three features and at
least one. The first that is obligatory is a valid graph structure
(adjacency matrix or edge_dictionary) while the second is
node_labels and the third edge_labels (that correspond to the given
graph format). A valid input also consists of graph type objects.
Returns
-------
out : list
            The svm_theta metrics for the given input.
"""
if not isinstance(X, collections.Iterable):
raise TypeError('input must be an iterable\n')
else:
i = 0
out = list()
for (idx, x) in enumerate(iter(X)):
is_iter = False
if isinstance(x, collections.Iterable):
x, is_iter = list(x), True
if is_iter and len(x) in [0, 1, 2, 3]:
if len(x) == 0:
warnings.warn('Ignoring empty element ' +
'on index: '+str(idx))
continue
else:
x = Graph(x[0], {}, {}, self._graph_format)
elif type(x) is not Graph:
raise TypeError('each element of X must be either a ' +
'graph or an iterable with at least 1 ' +
'and at most 3 elements\n')
i += 1
A = x.get_adjacency_matrix()
dual_coeffs = _calculate_svm_theta_(A)
out.append(self._calculate_svm_theta_levels_(A, dual_coeffs))
if i == 0:
raise ValueError('parsed input is empty')
return out
def pairwise_operation(self, x, y):
"""Lovasz theta kernel as proposed in :cite:`johansson2014global`.
Parameters
----------
x, y : dict
Subgraph samples metric dictionaries for all levels.
Returns
-------
kernel : number
The kernel value.
"""
return self.base_kernel(x, y)
def _calculate_svm_theta_levels_(self, A, dual_coefs):
"""Calculate the svm_theta by levels for amaximum number of samples.
Parameters
----------
A : np.array, ndim=2
The adjacency matrix.
Returns
-------
level_values : np.array, shape=(num_of_levels, 1)
Returns for all levels the mean of lovasz_numbers for
sampled subsets.
"""
# Calculate subsets
n = A.shape[0]
samples_on_subsets = distribute_samples(n, self.subsets_size_range, self.n_samples)
# Calculate level dictionary with lovasz values
phi = np.zeros(shape=(self.subsets_size_range[1] -
self.subsets_size_range[0]+1, 1))
for (i, level) in enumerate(range(self.subsets_size_range[0],
self.subsets_size_range[1]+1)):
v = samples_on_subsets.get(level, None)
if v is not None:
level_values = list()
for k in range(v):
if level <= n:
indexes =
|
np.random.choice(n, level, replace=False)
|
numpy.random.choice
|
#!/usr/bin/env python
'''
mcu: Modeling and Crystallographic Utilities
Copyright (C) 2019 <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: <NAME> <<EMAIL>>
'''
# This is the only place needed to be modified
# The path for the libwannier90 library
W90LIB = '/panfs/roc/groups/6/gagliard/phamx494/pyWannier90/src'
import sys
sys.path.append(W90LIB)
import importlib
found = importlib.util.find_spec('libwannier90') is not None
if found == True:
import libwannier90
else:
print('WARNING: Check the installation of libwannier90 and its path in pyscf/pbc/tools/pywannier90.py')
print('libwannier90 path: ' + W90LIB)
print('libwannier90 can be found at: https://github.com/hungpham2017/pyWannier90')
raise ImportError
import numpy as np
import scipy
import mcu
from mcu.vasp import const
from mcu.cell import utils as cell_utils
def angle(v1, v2):
'''
    Return the angle (in radians) between v1 and v2
'''
v1 = np.asarray(v1)
v2 = np.asarray(v2)
cosa = v1.dot(v2)/ np.linalg.norm(v1) / np.linalg.norm(v2)
return np.arccos(cosa)
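# Illustrative check (added, not part of the original module): orthogonal unit
# vectors give an angle of pi/2.
assert abs(angle([1, 0, 0], [0, 1, 0]) - np.pi / 2) < 1e-12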
def transform(x_vec, z_vec):
'''
Construct a transformation matrix to transform r_vec to the new coordinate system defined by x_vec and z_vec
'''
x_vec = x_vec/np.linalg.norm(np.asarray(x_vec))
z_vec = z_vec/np.linalg.norm(np.asarray(z_vec))
assert x_vec.dot(z_vec) == 0 # x and z have to be orthogonal to one another
y_vec = np.cross(x_vec,z_vec)
new = np.asarray([x_vec, y_vec, z_vec])
original = np.asarray([[1,0,0],[0,1,0],[0,0,1]])
tran_matrix = np.empty([3,3])
for row in range(3):
for col in range(3):
tran_matrix[row,col] = np.cos(angle(original[row],new[col]))
return tran_matrix.T
def cartesian_prod(arrays, out=None, order = 'C'):
'''
This function is similar to lib.cartesian_prod of PySCF, except the output can be in Fortran or in C order
'''
arrays = [np.asarray(x) for x in arrays]
dtype = np.result_type(*arrays)
nd = len(arrays)
dims = [nd] + [len(x) for x in arrays]
if out is None:
out = np.empty(dims, dtype)
else:
out = np.ndarray(dims, dtype, buffer=out)
tout = out.reshape(dims)
shape = [-1] + [1] * nd
for i, arr in enumerate(arrays):
tout[i] = arr.reshape(shape[:nd-i])
return tout.reshape((nd,-1),order=order).T
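# Illustrative example (added, not part of the original module): in C order the
# last array varies fastest, matching PySCF's lib.cartesian_prod.
assert np.array_equal(cartesian_prod([np.arange(2), np.arange(3)]),
                      np.array([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]))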
def periodic_grid(lattice, grid = [50,50,50], supercell = [1,1,1], order = 'C'):
'''
Generate a periodic grid for the unit/computational cell in F/C order
Note: coords has the same unit as lattice
'''
ngrid = np.asarray(grid)
qv = cartesian_prod([np.arange(-ngrid[i]*(supercell[i]//2),ngrid[i]*((supercell[i]+1)//2)) for i in range(3)], order=order)
a_frac = np.einsum('i,ij->ij', 1./ngrid, lattice)
coords = np.dot(qv, a_frac)
# Compute weight
ngrids = np.prod(grid)
ncells = np.prod(supercell)
weights = np.empty(ngrids*ncells)
vol = abs(np.linalg.det(lattice))
weights[:] = vol / ngrids / ncells
return coords, weights
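# Illustrative check (added, not part of the original module): the integration
# weights of the periodic grid should sum to the unit-cell volume.
_coords_demo, _weights_demo = periodic_grid(10.0 * np.eye(3), grid=[4, 4, 4], supercell=[1, 1, 1])
assert _coords_demo.shape == (64, 3) and abs(_weights_demo.sum() - 1000.0) < 1e-8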
def R_r(r_norm, r = 1, zona = 1):
'''
    Radial functions R_r(r) used, together with \Theta_{l,m_r}(\theta,\phi), to build the trial orbitals
Note: r_norm has the unit of Bohr
'''
if r == 1:
R_r = 2 * zona**(3/2) * np.exp(-zona*r_norm)
elif r == 2:
R_r = 1 / 2 / np.sqrt(2) * zona**(3/2) * (2 - zona*r_norm) * np.exp(-zona*r_norm/2)
else:
R_r = np.sqrt(4/27) * zona**(3/2) * (1 - 2*zona*r_norm/3 + 2*(zona**2)*(r_norm**2)/27) * np.exp(-zona*r_norm/3)
return R_r
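# Worked values (added for illustration): at r_norm = 0 with zona = 1 the three
# radial functions reduce to 2, 1/sqrt(2) and sqrt(4/27), i.e. about 2.0, 0.707 and 0.385.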
def theta(func, cost, phi):
'''
Basic angular functions (s,p,d,f) used to compute \Theta_{l,m_r}(\theta,\phi)
'''
if func == 's': # s
theta = 1 / np.sqrt(4 * np.pi) * np.ones([cost.shape[0]])
elif func == 'pz':
theta = np.sqrt(3 / 4 / np.pi) * cost
elif func == 'px':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.cos(phi)
elif func == 'py':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(3 / 4 / np.pi) * sint * np.sin(phi)
elif func == 'dz2':
theta = np.sqrt(5 / 16 / np.pi) * (3*cost**2 - 1)
elif func == 'dxz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.cos(phi)
elif func == 'dyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 4 / np.pi) * sint * cost * np.sin(phi)
elif func == 'dx2-y2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.cos(2*phi)
elif func == 'pxy':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(15 / 16 / np.pi) * (sint**2) * np.sin(2*phi)
elif func == 'fz3':
theta = np.sqrt(7) / 4 / np.sqrt(np.pi) * (5*cost**3 - 3*cost)
elif func == 'fxz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.cos(phi)
elif func == 'fyz2':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(21) / 4 / np.sqrt(2*np.pi) * (5*cost**2 - 1) * sint * np.sin(phi)
elif func == 'fz(x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.cos(2*phi)
elif func == 'fxyz':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(105) / 4 / np.sqrt(np.pi) * sint**2 * cost * np.sin(2*phi)
elif func == 'fx(x2-3y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (np.cos(phi)**2 - 3*np.sin(phi)**2) * np.cos(phi)
elif func == 'fy(3x2-y2)':
sint = np.sqrt(1 - cost**2)
theta = np.sqrt(35) / 4 / np.sqrt(2*np.pi) * sint**3 * (3*np.cos(phi)**2 - np.sin(phi)**2) * np.sin(phi)
return theta
def theta_lmr(l, mr, cost, phi):
'''
Compute the value of \Theta_{l,m_r}(\theta,\phi)
ref: Table 3.1 and 3.2 of Chapter 3, wannier90 User Guide
'''
assert l in [0,1,2,3,-1,-2,-3,-4,-5]
assert mr in [1,2,3,4,5,6,7]
if l == 0: # s
theta_lmr = theta('s', cost, phi)
elif (l == 1) and (mr == 1): # pz
theta_lmr = theta('pz', cost, phi)
elif (l == 1) and (mr == 2): # px
theta_lmr = theta('px', cost, phi)
elif (l == 1) and (mr == 3): # py
theta_lmr = theta('py', cost, phi)
elif (l == 2) and (mr == 1): # dz2
theta_lmr = theta('dz2', cost, phi)
elif (l == 2) and (mr == 2): # dxz
theta_lmr = theta('dxz', cost, phi)
elif (l == 2) and (mr == 3): # dyz
theta_lmr = theta('dyz', cost, phi)
elif (l == 2) and (mr == 4): # dx2-y2
theta_lmr = theta('dx2-y2', cost, phi)
elif (l == 2) and (mr == 5): # pxy
theta_lmr = theta('pxy', cost, phi)
elif (l == 3) and (mr == 1): # fz3
theta_lmr = theta('fz3', cost, phi)
elif (l == 3) and (mr == 2): # fxz2
theta_lmr = theta('fxz2', cost, phi)
elif (l == 3) and (mr == 3): # fyz2
theta_lmr = theta('fyz2', cost, phi)
elif (l == 3) and (mr == 4): # fz(x2-y2)
theta_lmr = theta('fz(x2-y2)', cost, phi)
elif (l == 3) and (mr == 5): # fxyz
theta_lmr = theta('fxyz', cost, phi)
elif (l == 3) and (mr == 6): # fx(x2-3y2)
theta_lmr = theta('fx(x2-3y2)', cost, phi)
elif (l == 3) and (mr == 7): # fy(3x2-y2)
theta_lmr = theta('fy(3x2-y2)', cost, phi)
elif (l == -1) and (mr == 1): # sp-1
theta_lmr = 1/np.sqrt(2) * (theta('s', cost, phi) + theta('px', cost, phi))
elif (l == -1) and (mr == 2): # sp-2
theta_lmr = 1/np.sqrt(2) * (theta('s', cost, phi) - theta('px', cost, phi))
elif (l == -2) and (mr == 1): # sp2-1
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) + 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -2) and (mr == 2): # sp2-2
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) - 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -2) and (mr == 3): # sp2-3
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) + 2/np.sqrt(6) *theta('px', cost, phi)
elif (l == -3) and (mr == 1): # sp3-1
theta_lmr = 1/2 * (theta('s', cost, phi) + theta('px', cost, phi) + theta('py', cost, phi) + theta('pz', cost, phi))
elif (l == -3) and (mr == 2): # sp3-2
theta_lmr = 1/2 * (theta('s', cost, phi) + theta('px', cost, phi) - theta('py', cost, phi) - theta('pz', cost, phi))
elif (l == -3) and (mr == 3): # sp3-3
theta_lmr = 1/2 * (theta('s', cost, phi) - theta('px', cost, phi) + theta('py', cost, phi) - theta('pz', cost, phi))
elif (l == -3) and (mr == 4): # sp3-4
theta_lmr = 1/2 * (theta('s', cost, phi) - theta('px', cost, phi) - theta('py', cost, phi) + theta('pz', cost, phi))
elif (l == -4) and (mr == 1): # sp3d-1
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) + 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -4) and (mr == 2): # sp3d-2
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) - 1/np.sqrt(6) *theta('px', cost, phi) - 1/np.sqrt(2) * theta('py', cost, phi)
elif (l == -4) and (mr == 3): # sp3d-3
theta_lmr = 1/np.sqrt(3) * theta('s', cost, phi) + 2/np.sqrt(6) * theta('px', cost, phi)
elif (l == -4) and (mr == 4): # sp3d-4
        theta_lmr = 1/np.sqrt(2) * (theta('pz', cost, phi) + theta('dz2', cost, phi))
elif (l == -4) and (mr == 5): # sp3d-5
        theta_lmr = 1/np.sqrt(2) * (-theta('pz', cost, phi) + theta('dz2', cost, phi))
elif (l == -5) and (mr == 1): # sp3d2-1
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('px', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
+ 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 2): # sp3d2-2
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/np.sqrt(2) *theta('px', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
+ 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 3): # sp3d2-3
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('py', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
- 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 4): # sp3d2-4
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/np.sqrt(2) *theta('py', cost, phi) - 1/np.sqrt(12) *theta('dz2', cost, phi) \
- 1/2 *theta('dx2-y2', cost, phi)
elif (l == -5) and (mr == 5): # sp3d2-5
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) - 1/np.sqrt(2) *theta('pz', cost, phi) + 1/np.sqrt(3) *theta('dz2', cost, phi)
elif (l == -5) and (mr == 6): # sp3d2-6
theta_lmr = 1/np.sqrt(6) * theta('s', cost, phi) + 1/np.sqrt(2) *theta('pz', cost, phi) + 1/
|
np.sqrt(3)
|
numpy.sqrt
|
# With inspiration from https://medium.com/@tuzzer/cart-pole-balancing-with-q-learning-b54c6068d947
# -*- coding: utf-8 -*-
import gym
import gym_martyswing
import numpy as np
import time, math, random
import matplotlib.pyplot as plt
import matplotlib
import itertools
from PIL import Image
import matplotlib.pyplot as plt
# Create the MartySwing environment
env = gym.make('MartySwing-v0')
# Discrete actions
numActions = env.action_space.n # (straight, kick)
actionNames = ["Straight", "Kick", ""]
ACTION_STRAIGHT = 0
ACTION_KICK = 1
# Bounds for each state
stateBounds = (env.observation_space.low, env.observation_space.high)
# Discrete bounds for observation
xAccNumBins = 9
xAccBinBounds = np.linspace(stateBounds[0], stateBounds[1], xAccNumBins-1)
xAccBinBounds = xAccBinBounds.flatten()
# Directions
numDirections = 2
# Q Table indexed by state-action pair
qTable = np.zeros((xAccNumBins * numDirections, numActions))
# Learning rate and exploration settings
EXPLORATION_RATE_MAX = 1
EXPLORATION_RATE_MIN = 0.01
EXPLORATION_RATE_DECAY_FACTOR = 10
LEARN_RATE_MAX = 1
LEARN_RATE_MIN = 0.1
LEARN_RATE_DECAY_FACTOR = 50
DISCOUNT_FACTOR = 0.9
# Goal and debug settings
EPISODE_MAX = 2000
TIME_MAX = 1000
STREAK_LEN_WHEN_DONE = 50
REWARD_SUM_GOAL = 2500
LOG_DEBUG = False
LOG_DEBUG_FILE = "testruns/martySwingQLearnSegLog.txt"
SHOW_ALL_RENDERS = False
GEN_GIF = True
FIXED_ACTION = False
PERMUTE_ACTION = True
RENDER_LAST = True
RENDER_BEST_PERMUTE = True
GIF_BEST = True
PERMUTE_BEST_INDEX = 124
MAX_SWING = False
# Debug
learnRateVals = []
exploreRateVals = []
# Main
def learnToSwing():
# Set the learning and explore rates initially
learningRate = getLearningRate(0)
explorationRate = getExplorationRate(0)
# Track progress in learning
streaksNum = 0
rewardTotal = []
# Debug
logDebugFile = None
if LOG_DEBUG:
debugActPrev = -1
try:
logDebugFile = open(LOG_DEBUG_FILE, "w+")
except:
print(f"Cannot write to log file {LOG_DEBUG_FILE}")
exit(0)
# Iterate episodes
for episode in range(EPISODE_MAX):
# Reset the environment
observation = env.reset()
if MAX_SWING:
env.l2 = .2
episodeRewardSum = 0
# Initial state
statePrev = getObservationBinned(observation[0], xAccBinBounds)
state = statePrev
# Setup for permutation
if PERMUTE_ACTION:
permuteTableSetup(episode)
# Run the experiment over time steps
t = 0
rewardInState = 0
action = ACTION_STRAIGHT
while True:
# Render the scene
if SHOW_ALL_RENDERS or (RENDER_LAST and streaksNum == STREAK_LEN_WHEN_DONE-1) or (PERMUTE_ACTION and RENDER_BEST_PERMUTE and episode == PERMUTE_BEST_INDEX):
doRender(episode, streaksNum)
# Execute the action
observation, reward, done, info = env.step(action)
t += 1
state = getObservationBinned(observation[0], xAccBinBounds)
# Accumulate rewards in this state
rewardInState += reward
# Log data
if logDebugFile is not None:
logDebugFile.write(f"{actionNames[action]} --- Ep {episode} t {t} statePrev {statePrev} state {state} rew {rewardInState:.2f} {'[+]' if rewardInState > 0 else ('[~]' if rewardInState > -1 else '[-]')} PE {info['PE']:.2f} KE {info['KE']:.2f} TE {info['PE']+info['KE']:.2f} theta {info['theta']:.2f} thetaMax {info['thetaMax']:.2f} v {info['v']:.2f} explRate {explorationRate} learnRate {learningRate} Streaks {streaksNum} \n")
# Check if there has been a change of state
if state != statePrev:
if not PERMUTE_ACTION:
# Update the Q Table using the Bellman equation
best_q = np.amax(qTable[state])
qTable[statePrev, action] += learningRate*(rewardInState + DISCOUNT_FACTOR*(best_q) - qTable[statePrev, action])
# Debug
if logDebugFile is not None:
logDebugFile.write(dumpQTable(qTable))
# Select a new action
if FIXED_ACTION:
action = actionSelectFix(episode, state, explorationRate)
elif PERMUTE_ACTION:
action = actionSelectPermute(episode, state, explorationRate)
else:
action = actionSelect(episode, state, explorationRate)
# Sum rewards in episode
episodeRewardSum += rewardInState
rewardInState = 0
# Ready for next iteration
statePrev = state
# Add frame to GIF
if GEN_GIF:
addFrame(episode, streaksNum, info)
# Check for episode done
if done or t > TIME_MAX or (MAX_SWING and t > 120):
rewardTotal.append(episodeRewardSum)
logStr = f"Episode {episode} finished after {t} episodeRewardSum {episodeRewardSum:.2f} thetaMax {info['thetaMax']:.2f} learnRate {learningRate:.2f} exploreRate {explorationRate:.2f} streakLen {streaksNum}"
if logDebugFile:
logDebugFile.write("....." + logStr + "\n")
if episode % 100 == 0:
print(dumpQTable(qTable))
print(logStr)
if (episodeRewardSum >= REWARD_SUM_GOAL):
streaksNum += 1
else:
streaksNum = 0
break
        # It's considered done when the reward goal is hit more than STREAK_LEN_WHEN_DONE times consecutively (or all permutations have been tried)
if (streaksNum > STREAK_LEN_WHEN_DONE) or (PERMUTE_ACTION and permutesDone(episode)):
break
# Update parameters
learnRateVals.append(learningRate)
exploreRateVals.append(explorationRate)
explorationRate = getExplorationRate(episode)
learningRate = getLearningRate(episode)
# Close debug log
if logDebugFile:
logDebugFile.close()
# Save GIF
if GEN_GIF:
saveGIF()
print(dumpQTable(qTable))
plt.plot(rewardTotal, 'p')
if PERMUTE_ACTION:
plt.xlabel('Pattern Permutation', fontsize=16)
plt.suptitle("Marty Swing Exhaustive", fontsize=20)
else:
plt.suptitle("Marty Swing Q-Learning", fontsize=20)
plt.xlabel('Training Episode', fontsize=16)
plt.ylabel('Total Reward', fontsize=16)
plt.show()
# plt.plot(learnRateVals, 'g')
# plt.plot(exploreRateVals, 'b')
# plt.show()
def actionSelect(episode, state, explorationRate):
# The exploration rate determines the likelihood of taking a random
# action vs the action with the best Q
if random.random() < explorationRate:
# Random action
action = env.action_space.sample()
else:
# Action with best Q for current state
action = np.argmax(qTable[state])
return action
def actionSelectFix(episode, state, explorationRate):
if state == 4 or state == 13:
return ACTION_KICK
return ACTION_STRAIGHT
# Permutations of actions
def actionSelectPermute(episode, state, explorationRate):
return np.argmax(qTable[state])
# This is a list of tables which represent the action to perform in a specific state
# So each table is like a Q-Table except that it contains the action number for the best action
# It is used to populate the Q-Table
permuteUsedBinStart = 2
permuteSecondDirectionBinEnd = xAccNumBins * 2 - permuteUsedBinStart - 1
permuteUsedBinCount = 10
permutationsTable = [perm for perm in itertools.product(range(numActions), repeat=permuteUsedBinCount)]
def permuteTableSetup(episode):
for i, perm in enumerate(permutationsTable[episode % len(permutationsTable)]):
if i < permuteUsedBinCount // 2:
qTable[i+permuteUsedBinStart][1] = perm
else:
qTable[permuteSecondDirectionBinEnd-(i-permuteUsedBinCount // 2)][1] = perm
def permutesDone(episode):
return episode > len(permutationsTable)
def getExplorationRate(t):
# Exploration rate is a log function reducing over time
return max(EXPLORATION_RATE_MIN, EXPLORATION_RATE_MAX * (1.0 - math.log10(t/EXPLORATION_RATE_DECAY_FACTOR+1)))
def getLearningRate(t):
# Learning rate is a log function reducing over time
return max(LEARN_RATE_MIN, LEARN_RATE_MAX * (1.0 - math.log10(t/LEARN_RATE_DECAY_FACTOR+1)))
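# Worked example (added for illustration): with the settings above,
# getExplorationRate(0) == 1.0, and the rate hits EXPLORATION_RATE_MIN once
# t/EXPLORATION_RATE_DECAY_FACTOR + 1 reaches 10, e.g. getExplorationRate(90) == 0.01.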
# Sensing direction (using a moving average)
obsList = []
obsSum = 0
obsWindowLen = 3
def getObservationBinned(val, bins):
global obsList, obsSum
# Smooth the observations
obsSumPrev = obsSum
if len(obsList) >= obsWindowLen:
obsSum -= obsList[0]
else:
obsList = [val] * (obsWindowLen-1)
obsSum = val * (obsWindowLen-1)
obsSumPrev = obsSum + val
obsList = obsList[-(obsWindowLen-1):]
obsList.append(val)
obsSum += val
discreteVal = np.digitize(val, bins)
if obsSum >= obsSumPrev:
return discreteVal
return xAccNumBins * 2 - 1 - discreteVal
def dumpQTable(qTable):
dumpStr = ""
for i, st in enumerate(qTable):
for ac in st:
dumpStr += f"{ac:0.4f}\t"
if st[0] == st[1]:
bestAct = 2
else:
bestAct = np.argmax(st)
dumpStr += f"{'RL ' if i < len(qTable)/2 else 'LR '} {actionNames[bestAct]}\n"
return dumpStr
indHueMin = 0/360
indHueMax = 100/360
kickIndicators = []
binBoundsAngles = [np.arcsin(np.clip(binBound / 9.81, -1, 1)) for binBound in xAccBinBounds]
binCentreAngles = [(binBoundsAngles[binBoundsIdx]+binBoundsAngles[binBoundsIdx-1])/2 for binBoundsIdx in range(1,len(binBoundsAngles))]
def doRender(episode, numStreaks, mode='human'):
from gym.envs.classic_control import rendering
oldViewer = env.viewer
env.render(mode)
if oldViewer is None:
lineStart = -0.5
lineEnd = -1
textPosns = [-0.6, -0.9]
for binBoundsAngle in binBoundsAngles:
# Draw line
x1 = np.sin(binBoundsAngle) * lineStart
y1 =
|
np.cos(binBoundsAngle)
|
numpy.cos
|
import numpy as np
import math
def vec(matin): return matin.T.ravel()
def mat(vecin, nr, nc): return np.reshape(vecin, (nc, nr)).T
_sqrt2 = math.sqrt(2)
def X_to_vec(X):
n = X.shape[0]
return X.T[np.tri(n, dtype=np.bool).T]
def vec_to_X(v_X):
n = int(math.sqrt(2 * len(v_X)))
if len(v_X) != n * (n + 1) / 2:
raise ValueError(
"v_X is not the right shape for a vectorized lower triangular matrix. Tried to turn vector of size {} into matrix with width {} ".format(len(v_X), n))
Y = np.zeros((n, n))
Y[np.tri(n, dtype=np.bool).T] = v_X
return Y + np.triu(Y, 1).T
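# Illustrative round trip (added, not part of the original module): for a symmetric
# matrix, vec_to_X(X_to_vec(X)) recovers X from its n*(n+1)/2 packed entries.
_X_demo = np.array([[1.0, 2.0], [2.0, 3.0]])
assert np.allclose(vec_to_X(X_to_vec(_X_demo)), _X_demo)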
def dot(A, B, axes=(1, 0)):
# I should document what happens to the order of the remaining axes
if not isinstance(axes, tuple) or len(axes) != 2:
raise ValueError("Incorrect axes parameter.")
if A.shape[axes[0]] != B.shape[axes[1]]:
raise ValueError("Dimension mismatch")
rolled_A = np.rollaxis(A.transpose(), A.ndim - 1 - axes[0]).transpose()
rolled_B = np.rollaxis(B.transpose(), B.ndim - 1 - axes[1], 1).transpose()
return rolled_A.dot(rolled_B)
def multiply_diag(A, D):
# multiply a matrix A by diagonal tensor D
n1, n2 = A.shape
n3, n4, n5 = D.shape
assert(n2 == n3)
out = np.zeros((n1, n2, n4, n5))
for i in range(n4):
for j in range(n5):
out[:, :, i, j] = A * D[:, i, j]
return out
def P(v_X):
X = vec_to_X(v_X)
n = X.shape[0]
# perform scaling
_i = tuple(range(n))
X[_i, _i] *= _sqrt2
Lam, U = np.linalg.eigh(X)
idx = np.argsort(Lam)
Lam = Lam[idx]
U = U[:, idx]
Lam_max = np.maximum(Lam, 0.0)
out = U.dot(np.diag(Lam_max).dot(U.T))
# undo scaling
out[_i, _i] /= _sqrt2
return X_to_vec(out)
def dU_dL_dX(Lam, U):
# compute dU_dX and dL_dX given U and L
n = len(Lam)
_i = tuple(range(n))
idx = np.argsort(Lam)
Lam = Lam[idx]
U = U[:, idx]
dU_dX = np.zeros((n, n, n, n))
for i, (l0, u0) in enumerate(zip(Lam, U.T)):
d = (l0 - Lam)
d[d != 0] = 1. / d[d != 0]
inv = (U.dot(np.diag(d)).dot(U.T))
tmp = np.multiply.outer(inv, u0)
tmp += np.rollaxis(tmp, 2, 1)
tmp[:, _i, _i] /= 2.0
# for j in range(n):
# tmp[:,j,j] /= 2.0
# set the ith column
dU_dX[:, i, :, :] = tmp
dL_dX = np.zeros((n, n, n))
for i, u0 in enumerate(U.T):
tmp = 2 * np.multiply.outer(u0, u0)
for j in range(n):
tmp[j, j] /= 2.0
dL_dX[i, :, :] = tmp
return dU_dX, dL_dX
def J(v_X):
X = vec_to_X(v_X)
n = X.shape[0]
# perform scaling
_i = tuple(range(n))
X[_i, _i] *= _sqrt2
Lam, U = np.linalg.eigh(X)
idx = np.argsort(Lam)
Lam = Lam[idx]
U = U[:, idx]
L = np.diag(Lam)
L_max =
|
np.maximum(L, 0.0)
|
numpy.maximum
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import os as os
from jams.date2dec import date2dec
from jams.dec2date import dec2date
import re
def timestepcheck(indir, pat, outfile, begin, end, numhead=1, timeint=30,
format='ascii', empty='-9999', delimiter=',', skiprows=0):
'''
    Checks ascii data files whose first column is a time stamp in ascii
    or eng style for correctness. If time steps are missing for a given
    time interval, they will be filled with the correct time stamp and
    empty values. A beginning and end can be set for the output file, to
    which the data are cut or filled. pat defines the file name pattern to
    consider in the check. Multiple files which match pat are concatenated.
Definition
----------
timestepcheck(indir, pat, outfile, begin, end, numhead=1, timeint=30,
format='ascii', empty='-9999', delimiter=',', skiprows=0):
Input
-----
indir str, path of the folder where the input files are
pat str, name or regular expression of the input files
outfile str, path and name of the output file
begin str, start time of the output file, must be in the same format
as time stamps in the input files
end str, end time of the output file, must be in the same format
as time stamps in the input files
Optional Input
--------------
numhead int, number of header lines in the input files (default: 1)
timeint int, time interval of the input file in minutes (default: 30)
format str, format of time stamps in input files. 'ascii' or 'eng' is
possible (default: 'ascii')
empty str, value for missing values (default: '-9999')
delimiter str, delimiter of the input files (default: ',')
skiprows int, rows to skip in input files, e.g. logger fuzzle before
actual data header starts (default: 0)
Output
------
outfile file with missing time steps filled with empty values cut from
begin to end
Restrictions
------------
TODO: tested thoroughly only for timeint=30
TODO: more bad value checks can be included, see sternchen and tuedelchen
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2014 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, AP, Aug 2014
'''
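    # Example call (added illustration; directory, pattern and dates below are
    # assumptions, not taken from the original):
    # timestepcheck('raw_data', 'CR1000.*\.dat', 'filled.dat',
    #               '01.01.2014 00:00', '31.12.2014 23:30', timeint=30)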
###########################################################################
# time interval list
interval = range(0,60,timeint)
#jdint = date2dec(yr=-4712,mo=1,dy=1,hr=12,mi=timeint)
#jdmin = date2dec(yr=-4712,mo=1,dy=1,hr=12,mi=0,sc=30)# (=precision)
jdint = date2dec(yr=1,mo=1,dy=1,hr=12,mi=timeint) % 1
jdmin = date2dec(yr=1,mo=1,dy=1,hr=12,mi=0,sc=30) % 1# (=precision)
if format == 'ascii':
jdbegin = date2dec(ascii = np.array([begin]))
jdend = date2dec(ascii = np.array([end]))
elif format == 'eng':
jdbegin = date2dec(eng = np.array([begin]))
jdend = date2dec(eng = np.array([end]))
###########################################################################
# reading input directory
pat = re.compile(pat)
new = True
filelist = os.listdir(indir)
for file in filelist:
if re.search(pat, file):
if new:
data = np.loadtxt('./%s/%s'%(indir, file), dtype='|S21',\
delimiter=delimiter, skiprows=skiprows)
if data.shape[0] == 0:
print('Warning: File %s is empty!' %(file))
else:
if np.shape(data.shape)[0] == 1:
data = data.reshape((1,-1))
new = False
else:
add_data = np.loadtxt('./%s/%s'%(indir, file), dtype='|S21',
delimiter=delimiter,
skiprows=numhead+skiprows)
if add_data.shape[0] == 0:
print('Warning: File %s is empty!' %(file))
elif np.shape(add_data.shape)[0] == 1:
add_data = add_data.reshape((1,-1))
data = np.append(data, add_data, 0)
else:
data = np.append(data, add_data, 0)
###########################################################################
    # asterisk ('sternchen') check :-D
    # replace with regular expression check
    data[data=='***********'] = empty #!!! check length and use a regex
    data[data=='********'] = empty #!!! check length and use a regex
data[data=='*********'] = empty
###########################################################################
    # quotation mark ('tuedelchen') check :-D
# replace with regular expression check
if data[numhead,0][0] == '"':
data[numhead:,0] = np.array([x[1:-1] for x in data[numhead:,0]])
###########################################################################
# "NAN" check :-D
# replace with regular expression check
data[data=='"NAN"'] = empty #!!! uberprufen auf lange und re
###########################################################################
    # blank line ('leerzeilen') check
blankline = np.where(data[0:2,0]=='')[0]
data = np.delete(data, blankline, 0)
###########################################################################
# missing values check
data[data==''] = empty
data[data=='""'] = empty
columns = np.shape(data)[1]-1
###########################################################################
# calculate julian date
if format == 'ascii':
import time
jd = date2dec(ascii = data[numhead:,0])
elif format == 'eng':
jd = date2dec(eng = data[numhead:,0])
###########################################################################
# wrong time stamp check
diff = jd[1:] - jd[:-1]
minute = np.array([x.split()[1][3:5] for x in data[numhead:,0]]).astype(int)
nii = np.nonzero(~np.in1d(minute, interval))[0]
ts = np.nonzero(np.less(diff, jdint-jdmin))[0]
wrong = np.unique(np.append(nii, ts))
if data.shape[0]-numhead-2 in wrong:
wrong = np.append(wrong, [data.shape[0]-numhead-1], 0)
delete = []
for i in wrong:
print('\nHERE IS SOMETHING WRONG:\n')
print('BOF' if numhead+i-2<0 else data[numhead+i-2,:4])
print('BOF' if numhead+i-1<0 else data[numhead+i-1,:4])
print('-----------------------------------------------')
print(data[numhead+i,:4])
print('-----------------------------------------------')
print('EOF' if numhead+i+1>=np.shape(data)[0] else data[numhead+i+1,:4])
print('EOF' if numhead+i+2>=np.shape(data)[0] else data[numhead+i+2,:4])
do = raw_input("\n(d)elete entry, (s)et to empty, (t)ype in date, (i)gnore: ")
if do == 'd':
delete += [numhead+i]
elif do == 's':
data[numhead+i,1:] = empty
elif do == 't':
newdate = str(raw_input("\nreplace with: "))
data[numhead+i,0] = newdate
# newmin = str(raw_input("\n%s"%(data[numhead+i,0][:-2])))
# data[numhead+i,0] = data[numhead+i,0][:-2] + newmin
elif do == 'i':
pass
data = np.delete(data, delete, 0)
###########################################################################
# calculate julian date again
if format == 'ascii':
jd = date2dec(ascii = data[numhead:,0])
elif format == 'eng':
jd = date2dec(eng = data[numhead:,0])
###########################################################################
# check time step
diff = jd[1:] - jd[:-1]
ingap = np.where(np.greater(diff, jdint+jdmin))[0]
nugap = np.rint((diff[ingap]/jdint)-1)
###########################################################################
# insert missing time steps
for i in range(np.size(ingap))[::-1]:
where = np.ones(int(nugap[i]), dtype=int)*(ingap[i]+1+numhead)
if format == 'ascii':
span = np.arange(1,nugap[i]+1)*jdint + jd[ingap[i]]
what = dec2date(span.astype('|S16').astype(float), ascii=True)
elif format == 'eng':
span = np.arange(1,nugap[i]+1)*jdint + jd[ingap[i]]
what = dec2date(span.astype('|S16').astype(float), eng=True)
what = np.array([x[:-3] for x in what])
miss = np.empty((int(nugap[i]),columns), dtype='|S11')
miss[:] = empty
what = np.append(np.reshape(what, (-1,1)), miss, 1)
data = np.insert(data, where, what, 0)
###########################################################################
# fill/cut up/off beginning and end
start = np.where(data[:,0]==begin)[0]
if start == numhead:
pass
elif start > numhead:
data = np.delete(data, np.arange(numhead, start), 0)
else:
if format == 'ascii':
tofill = int((date2dec(ascii = data[numhead,0]) - jdbegin)/jdint)
span =
|
np.arange(0,tofill)
|
numpy.arange
|
"""
In the tabular MDP setting, evaluates learning of the optimal policy using different guidance discount factors.
On-policy means we run episodes; in each episode we generate roll-outs/trajectories of the current policy
and run the algorithm to improve the policy.
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
from copy import deepcopy
import timeit
import time
from main_MRP import run_main_mrp
from utils.common_utils import set_random_seed, create_result_dir, save_run_data, load_run_data, write_to_log, get_grid, start_ray, set_default_plot_params, save_fig
set_default_plot_params()
# -------------------------------------------------------------------------------------------
# Run mode
# -------------------------------------------------------------------------------------------
load_run_data_flag = False  # False/True. If True, just load results from result_dir_to_load; otherwise run the simulation
result_dir_to_load = './saved/2020_02_04_06_19_35' # '2020_02_04_06_19_35' | '2020_02_03_21_45_36'
save_PDF = False # False/True - save figures as PDF file
local_mode = False # True/False - run non-parallel to get error messages and debugging
# -------------------------------------------------------------------------------------------
# Set Parameters
# -------------------------------------------------------------------------------------------
args = argparse.Namespace()
# ----- Run Parameters ---------------------------------------------#
args.run_name = '' # 'Name of dir to save results in (if empty, name by time)'
args.seed = 1 # random seed
args.n_reps = 1000 # default 1000 # number of experiment repetitions
# how to create parameter grid:
args.gam_grid_def = {'type': 'gamma_guidance', 'spacing': 'linspace', 'start': 0.9, 'stop': 0.99, 'num': 11, 'decimals': 10}
args.l2_grid_def = {'type': 'l2_factor', 'spacing': 'linspace', 'start': 0., 'stop': 0.1, 'num': 11, 'decimals': 10}
# ----- Problem Parameters ---------------------------------------------#
args.mrp_def = {'type': 'GridWorld', 'N0': 4, 'N1': 4, 'reward_std': 0.5, 'forward_prob_distrb': 'uniform', 'goal_reward': 1, 'R_low': -0.5, 'R_high': 0.5, 'policy': 'uniform'}
args.depth = 50 # default: 10 for 'chain', 100 for 'GridWorld' # Length of trajectory
args.gammaEval = 0.99 # default: 0.99 # gammaEval
args.initial_state_distrb_type = 'uniform' # 'uniform' | 'middle'
args.n_trajectories = 2 #
args.train_sampling_def = {'type': 'Trajectories'}
# args.train_sampling_def = {'type': 'Generative_uniform'}
# args.train_sampling_def = {'type': 'sample_all_s'}
args.config_grid_def = {'type': 'None', 'spacing': 'list', 'list': [None]}
args.evaluation_loss_type = 'L2_uni_weight' # 'rankings_kendalltau' | 'L2_uni_weight | 'L2' | 'one_pol_iter_l2_loss'
# ----- Algorithm Parameters ---------------------------------------------#
args.default_gamma = None # default: None # The default guidance discount factor (if None use gammaEval)
args.alg_type = 'LSTD' # 'LSTD' | 'LSTD_Nested' | 'batch_TD_value_evaluation' | 'LSTD_Nested_Standard' | 'model_based_pol_eval' | 'model_based_known_P'
args.use_reward_scaling = False # False | True. set False for LSTD
args.base_lstd_l2_fp = 1e-5
args.base_lstd_l2_proj = 1e-4
# if batch_TD_value_evaluation is used:
args.default_l2_TD = None # default: None # The default L2 factor for TD (if using discount regularization)
args.TD_Init_type = 'zero' # How to initialize V # Options: 'Vmax' | 'zero' | 'random_0_1' | 'random_0_Vmax' | '0.5_'Vmax' |
args.n_TD_iter = 5000 # Default: 500 for RandomMDP, 5000 for GridWorld # number of TD iterations
args.learning_rate_def = {'type': 'a/(b+i_iter)', 'a': 500, 'b': 1000, 'scale': False}
# -------------------------------------------------------------------------------------------
def run_simulations(args, local_mode):
start_ray(local_mode)
create_result_dir(args)
write_to_log('local_mode == {}'.format(local_mode), args)
start_time = timeit.default_timer()
create_result_dir(args)
set_random_seed(args.seed)
l2_grid = get_grid(args.l2_grid_def)
gam_grid = get_grid(args.gam_grid_def)
grid_shape = (len(l2_grid), len(gam_grid))
loss_avg = np.zeros(grid_shape)
loss_std = np.zeros(grid_shape)
run_idx = 0
for i0 in range(grid_shape[0]):
for i1 in range(grid_shape[1]):
args_run = deepcopy(args)
args_run.param_grid_def = {'type': 'l2_factor', 'spacing': 'list', 'list': [l2_grid[i0]]}
args_run.default_gamma = gam_grid[i1]
info_dict = run_main_mrp(args_run, save_result=False, plot=False, local_mode=local_mode)
loss_avg[i0, i1] = info_dict['loss_avg'][0]
loss_std[i0, i1] = info_dict['loss_std'][0]
run_idx += 1
print("Finished {}/{}".format(run_idx, loss_avg.size))
# end for
# end for
grid_results_dict = {'l2_grid': l2_grid, 'gam_grid': gam_grid, 'loss_avg': loss_avg,
'loss_std': loss_std}
save_run_data(args, grid_results_dict)
stop_time = timeit.default_timer()
write_to_log('Total runtime: ' +
time.strftime("%H hours, %M minutes and %S seconds", time.gmtime(stop_time - start_time)), args)
return grid_results_dict
# -------------------------------------------------------------------------------------------
if __name__ == "__main__":
if load_run_data_flag:
args, grid_results_dict = load_run_data(result_dir_to_load)
else:
grid_results_dict = run_simulations(args, local_mode)
l2_grid = grid_results_dict['l2_grid']
gam_grid = grid_results_dict['gam_grid']
loss_avg = grid_results_dict['loss_avg']
loss_std = grid_results_dict['loss_std']
ci_factor = 1.96 /
|
np.sqrt(args.n_reps)
|
numpy.sqrt
|
import numpy as np
import matplotlib.pyplot as plt
pageSpeeds = np.random.normal(3.0, 1.0, 1000)
purchaseAmount = np.random.normal(50.0, 10.0, 1000) / pageSpeeds
plt.scatter(pageSpeeds, purchaseAmount)
# fit the data using 4th degree polynomial
x =
|
np.array(pageSpeeds)
|
numpy.array
|
# coding: utf-8
# # Weather Prediction Using Recurrent Neural Networks
#
# ## Adrian, Ben, and Sai
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
from functools import reduce
import datetime
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
from sklearn.preprocessing import MinMaxScaler
import timeit
import random
import sys
import os
# # Preprocessing
###########################################################################################################################
############# Preprocessing ##############################################################################################
###########################################################################################################################
# ### Read in the files
# In[2]:
# Filenames
city_file = 'city_attributes.csv'
temp_file = 'temperature.csv'
humid_file = 'humidity.csv'
press_file = 'pressure.csv'
desc_file = 'weather_description.csv'
wdir_file = 'wind_direction.csv'
wspeed_file = 'wind_speed.csv'
# Load the files
city_df = pd.read_csv(city_file)
city_df.rename(str.lower, axis = 'columns', inplace = True)
city_df.drop(['country'], axis = 1, inplace = True)
city_df.set_index(['city'], inplace = True)
temp_df = pd.read_csv(temp_file)
humid_df = pd.read_csv(humid_file)
press_df = pd.read_csv(press_file)
desc_df = pd.read_csv(desc_file)
wdir_df = pd.read_csv(wdir_file)
wspeed_df = pd.read_csv(wspeed_file)
# In[3]:
# These are the cities that universally have > 1% missing across all weather values
drop_city = (set(temp_df.columns[temp_df.isna().sum() > 500])
             & set(humid_df.columns[humid_df.isna().sum() > 500])
             & set(press_df.columns[press_df.isna().sum() > 500])
             & set(desc_df.columns[desc_df.isna().sum() > 500])
             & set(wdir_df.columns[wdir_df.isna().sum() > 500])
             & set(wspeed_df.columns[wspeed_df.isna().sum() > 500]))
# In[4]:
# Remove the undesired cities and melt the tables to be conducive for joining
alt_temp_df = pd.melt(temp_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'temperature')
alt_humid_df = pd.melt(humid_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'humidity')
alt_press_df = pd.melt(press_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'pressure')
alt_desc_df = pd.melt(desc_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'weather_description')
alt_wdir_df = pd.melt(wdir_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'wind_direction')
alt_wspeed_df = pd.melt(wspeed_df.drop(drop_city, axis = 1), id_vars = ['datetime'], var_name = 'city', value_name = 'wind_speed')
# Set proper indices
alt_temp_df = alt_temp_df.set_index(['city', 'datetime'])
alt_humid_df = alt_humid_df.set_index(['city', 'datetime'])
alt_press_df = alt_press_df.set_index(['city', 'datetime'])
alt_desc_df = alt_desc_df.set_index(['city', 'datetime'])
alt_wdir_df = alt_wdir_df.set_index(['city', 'datetime'])
alt_wspeed_df = alt_wspeed_df.set_index(['city', 'datetime'])
# ### Join tables together
# In[5]:
# Join tables on the city and datetime info
dfs = [city_df, alt_temp_df, alt_humid_df, alt_press_df, alt_wspeed_df, alt_wdir_df, alt_desc_df]
df_final = reduce(lambda left, right : pd.merge(left, right, left_index = True, right_index = True), dfs)
# ### Deal with Missing Values
# In[6]:
# Get number of nulls for Charlotte - SUPER CONVOLUTED, but it works
temp = df_final.reset_index()
temp = temp[temp.city == "Charlotte"]
temp.isnull().sum()
#city 0
#datetime 0
#latitude 0
#longitude 0
#temperature 3
#humidity 589
#pressure 3
#wind_speed 2
#wind_direction 1
#weather_description 1
#dtype: int64
# INTERPOLATION HAPPENS HERE -- Break up by city
df_final = df_final.groupby('city').apply(lambda group: group.interpolate(limit_direction = 'both'))
# Need to do something special for weather_description
arr, cat = df_final['weather_description'].factorize()
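# Added note: factorize() gives integer codes (-1 for missing); the codes are
# interpolated with 'nearest' after mapping -1 to NaN, then mapped back to the
# original category labels as strings.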
df_final['weather_description'] = pd.Series(arr).replace(-1, np.nan).interpolate(method = 'nearest', limit_direction = 'both').interpolate(limit_direction = 'both').astype('category').cat.rename_categories(cat).astype('str').values
# In[7]:
# The whole purpose here is to encode wind direction. It's not continuous so don't really want to scale it
# Also have more granularity in wind dir if need be.
#dir_df = pd.DataFrame({'dir' : ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW', 'N'],
# 'lower' : [348.75, 11.25, 33.75, 56.25, 78.75, 101.25, 123.75, 146.25, 168.75, 191.25, 213.75, 236.25, 258.75, 281.25, 303.75, 326.25, 0],
# 'upper' : [360, 33.75, 56.25, 78.75, 101.25, 123.75, 146.25, 168.75, 191.25, 213.75, 236.25, 258.75, 281.25, 303.75, 326.25, 348.75, 11.25]})
dir_df = pd.DataFrame({'dir' : ['N', 'NE', 'E', 'SE', 'S', 'SW', 'W', 'NW', 'N'],
'lower' : [337.5, 22.5, 67.5, 112.5, 157, 202.5, 247.5, 292.5, 0],
'upper' : [360, 67.5, 112.5, 157, 202.5, 247.5, 292.5, 337.5, 22.5]})
# Make a copy to fool around in
fill_this = df_final['wind_direction'].copy()
# And overwrite the copy
for i in reversed(range(len(dir_df))):
# print(str(dir_df.loc[i,'lower']) + " and " + str(dir_df.loc[i,'upper']))
fill_this.loc[df_final['wind_direction'].between(dir_df.loc[i,'lower'], dir_df.loc[i,'upper'])] = i
# This is a bit ugly here; but it maintains any missing values nicely
df_final['wind_direction'] = dir_df.loc[fill_this, 'dir'].values
# In[8]:
# Go ahead and drop lat and long, we wont need them for now
df_final.drop(["latitude", "longitude"], inplace=True, axis=1)
# In[12]:
# Convert the data to Fahrenheit and note the min and max values
df_final["temperature"] = df_final["temperature"] * 9/5 - 459.67
# ### Normalize data through min-max scaling
# In[13]:
# Scaling happens here -- IMPUTATION MUST HAPPEN FIRST
scale_df = df_final[['temperature', 'humidity', 'pressure', 'wind_speed']].values
scaler = MinMaxScaler()
# We have access to min and max so we can transform back and forth
scale_df = scaler.fit_transform(scale_df)
df_final_scaled = df_final.copy()
df_final_scaled[['temperature', 'humidity', 'pressure', 'wind_speed']] = scale_df
df_final_scaled.head()
# In[14]:
# Collapse a lot of these groupings
weather_dict = {'scattered clouds' : 'partly_cloudy', 'sky is clear' : 'clear',
'few clouds' : 'partly_cloudy', 'broken clouds' : 'partly_cloudy',
'overcast clouds' : 'cloudy', 'mist' : 'cloudy', 'haze' : 'cloudy',
'dust' : 'other', 'fog' : 'cloudy', 'moderate rain' : 'rain',
'light rain' : 'rain', 'heavy intensity rain' : 'rain', 'light intensity drizzle' : 'rain',
'heavy snow' : 'snow', 'snow' : 'snow', 'light snow' : 'snow', 'very heavy rain' : 'rain',
'thunderstorm' : 'tstorm', 'proximity thunderstorm' : 'tstorm', 'smoke' : 'other', 'freezing rain' : 'snow',
'thunderstorm with light rain' : 'tstorm', 'drizzle' : 'rain', 'sleet' : 'snow',
'thunderstorm with rain' : 'tstorm', 'thunderstorm with heavy rain' : 'tstorm',
'squalls' : 'rain', 'heavy intensity drizzle' : 'rain', 'light shower snow' : 'snow',
'light intensity shower rain' : 'rain', 'shower rain' : 'rain',
'heavy intensity shower rain' : 'rain', 'proximity shower rain' : 'rain',
'proximity sand/dust whirls' : 'other', 'proximity moderate rain' : 'rain', 'sand' : 'other',
'shower snow' : 'snow', 'proximity thunderstorm with rain' : 'tstorm',
'sand/dust whirls' : 'other', 'proximity thunderstorm with drizzle' : 'tstorm',
'thunderstorm with drizzle' : 'tstorm', 'thunderstorm with light drizzle' : 'tstorm',
'light rain and snow' : 'snow', 'thunderstorm with heavy drizzle' : 'tstorm',
'ragged thunderstorm' : 'tstorm', 'tornado' : 'other', 'volcanic ash' : 'other', 'shower drizzle' : 'rain',
'heavy shower snow' : 'snow', 'light intensity drizzle rain' : 'rain',
'light shower sleet' : 'snow', 'rain and snow' : 'snow'}
# In[15]:
adj_weather = [weather_dict[val] for val in df_final_scaled['weather_description']]
df_final_scaled['adj_weather'] = adj_weather
df_final_scaled = df_final_scaled.drop('weather_description', axis = 1)
# ### Make weather and wind direction dummy variables
# In[16]:
# And one-hot encode the wind_directions and weather
df_final_scaled = pd.get_dummies(df_final_scaled, prefix=['wind_dir', 'weather'],
columns=['wind_direction', 'adj_weather'])
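# After this call the dummy columns are appended at the end of the frame: eight wind_dir_*
# columns followed by seven weather_* columns (clear, cloudy, other, partly_cloudy, rain,
# snow, tstorm). Later code that slices iloc[:, -7:] relies on the weather dummies being
# the last seven columns.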
# In[17]:
df_final_scaled = df_final_scaled.reset_index('city')
# In[18]:
# Write for distribution
df_final_scaled.to_csv('df_weather_scaled_encoded.csv')
# In[124]:
# Clean up the local environment
get_ipython().run_line_magic('reset', '')
###########################################################################################################################
############# Part 1: Temperature Prediction ##############################################################################
###########################################################################################################################
# In[2]:
# ## Split into train, test, and validation
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import rnn
from sklearn.metrics import f1_score
import random
import timeit
import sys
from collections import defaultdict
# In[2]:
full_df = pd.read_csv("df_weather_scaled_encoded.csv")
# In[3]:
# Filter by the city of interest
current_city = "Charlotte"
full_df = full_df[full_df["city"] == current_city]
# Dataset-wide min and max temperature [F], used later to undo the min-max scaling
min_dataset = 0.515
max_dataset = 99.95
# In[4]:
# Extract the year from each timestamp and split the data chronologically
years = np.array([y[0:4] for y in full_df.datetime])
train = full_df[years < '2016']
valid = full_df[years == '2016']
test = full_df[years > '2016']
if(train.shape[0] + valid.shape[0] + test.shape[0] != years.shape[0]):
raise Exception("Partition did not work")
# Drop the city and timestamp for all three (reassign rather than drop in place to avoid
# pandas' SettingWithCopyWarning on these slices)
train = train.drop(["city", "datetime"], axis=1)
valid = valid.drop(["city", "datetime"], axis=1)
test = test.drop(["city", "datetime"], axis=1)
# In[ ]:
# Wrapper for data object
# Modified from <NAME>
class DataSet(object):
def __init__(self, x, y, shuffle=True):
self._num_examples = len(x)
self._x = x
self._y = y
self._epochs_done = 0
self._index_in_epoch = 0
if shuffle:
np.random.seed(123456)
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._x = [self._x[i] for i in perm]
self._y = [self._y[i] for i in perm]
random.seed(123456)
@property
def features(self):
return self._x
@property
def response(self):
return self._y
@property
def num_examples(self):
return self._num_examples
@property
def epochs_done(self):
return self._epochs_done
def reset_batch_index(self):
self._index_in_epoch = 0
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
done = False
        if self._index_in_epoch > self._num_examples:
            # After each epoch we update this
            self._epochs_done += 1
            # Reshuffle the data for the next epoch (and actually apply the permutation)
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._x = [self._x[i] for i in perm]
            self._y = [self._y[i] for i in perm]
            start = 0
            self._index_in_epoch = batch_size
            done = True
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._x[start:end], self._y[start:end], done
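# Minimal usage sketch (hypothetical variables, not part of the original script): wrap the
# (sequence, target) lists and pull shuffled mini-batches until the epoch flag comes back True.
# demo = DataSet(x=list_of_sequences, y=list_of_targets)
# while True:
#     batch_x, batch_y, epoch_done = demo.next_batch(64)
#     # ... feed batch_x / batch_y to the network ...
#     if epoch_done:
#         break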
# ## Create baselines
# ### Create observations using a sliding sequence window
# In[26]:
# Wrapper function to perform the entire creation of observations given the subset
# data. Can specify seq_len (the sliding window size), lookahead, response (temp means 'temperature'),
# and whether you want a greedy baseline.
def create_observations(train, test, valid, seq_len = 24, lookahead = 1, temp = True, baseline=False):
train_x = []
train_y = []
# If we are doing the temperature variable, extract that feature
if temp:
for i in range(train.shape[0] - seq_len - lookahead + 1):
# Slide over input, storing each "sequence size" window
train_x.append([x for x in train.iloc[i:i+seq_len, :].values])
train_y.append([y for y in train.iloc[i+lookahead:i+seq_len+lookahead, 0]])
# Otherwise, extract out the weather type
else:
for i in range(train.shape[0] - seq_len - lookahead + 1):
train_x.append([x for x in train.iloc[i:i+seq_len, :].values])
train_y.append([y for y in train.iloc[i+lookahead:i+seq_len+lookahead, -7:].values])
# Convert to a Dataset object
train_data = DataSet(train_x, train_y)
# Repeat the above process on the validation set
valid_x = []
valid_y = []
# If we are doing the temperature variable, extract that feature
if temp:
for i in range(valid.shape[0] - seq_len - lookahead + 1):
# Slide over input, storing each "sequence size" window
valid_x.append([x for x in valid.iloc[i:i+seq_len, :].values])
valid_y.append([y for y in valid.iloc[i+lookahead:i+seq_len+lookahead, 0]])
# Otherwise, extract out the weather type
else:
for i in range(valid.shape[0] - seq_len - lookahead + 1):
valid_x.append([x for x in valid.iloc[i:i+seq_len, :].values])
valid_y.append([y for y in valid.iloc[i+lookahead:i+seq_len+lookahead, -7:].values])
valid_data = DataSet(valid_x, valid_y)
# Repeat for test except also track the baseline prediction error
test_x = []
test_y = []
test_baseline_err = []
if temp:
for i in range(test.shape[0] - seq_len - lookahead + 1):
test_x.append([x for x in test.iloc[i:i+seq_len, :].values])
test_y.append([y for y in test.iloc[i+lookahead:i+seq_len+lookahead, 0]])
# Get the baseline prediction error by taking the MSE between the current hour and the
# temperature of the next hour. This is the trivial case where our prediction for temp
# is just the current temp
if baseline:
test_baseline_err.append((
|
np.mean(test.iloc[i:i+seq_len, 0]*(max_dataset-min_dataset)+min_dataset)
|
numpy.mean
|
import argparse, random
import numpy as np
import glob
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
from tqdm import tqdm
import multiprocessing as mp
import os, pickle
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import data_loader_nba
import data_loader_carla
import data_loader_boid
from model_GDSW import GDSW
from model_GVCRN import GVCRN
from model_RNN import RNN
from utils import batch_error, compute_x_ind, std_ste
from torch.multiprocessing import Pool, Process, set_start_method
HIDDEN_SIZE = 32
CUDA = True
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["HDF5_USE_FILE_LOCKING"]="FALSE"
def compute_loss(a_or_ipw_outputs,f_outcome_out,x_out,fr_targets,targets,x_inputs_,L_kl,lengths,Pred_X,criterion,args,obs_w,batchSize,device,inference=False):
a_or_ipw_loss = torch.zeros(1).to(device)
outcome_loss = torch.zeros(1).to(device)
fail_loss = torch.zeros(1).to(device)
curve_loss = torch.zeros(1).to(device)
L_recX = torch.zeros(1).to(device)
time_length = x_inputs_.shape[1]-1
if not args.variable_length:
non_nan = None
mean_time_length = time_length-args.burn_in0-1
else:
mean_time_length = torch.mean(lengths-1)-args.burn_in0-1
t_pred = 1 if args.y_pred else 0
if "B" in args.model or "RNN" in args.model: # GradientReversal or negative gradient
if "B" in args.model:
a_or_ipw_outputs = torch.cat(a_or_ipw_outputs, dim=1)
ps = torch.sigmoid(a_or_ipw_outputs)
for i in range(args.burn_in0+1,time_length): # time
# propensity score
if args.variable_length:
non_nan = lengths>=obs_w-i
if "Ne" in args.model:
a_or_ipw_loss -= criterion(non_nan*ps[:,i], non_nan*fr_targets[:, i].float(), reduction='sum')
else:
a_or_ipw_loss += criterion(non_nan*ps[:,i], non_nan*fr_targets[:, i].float(), reduction='sum')
else:
if "Ne" in args.model:
a_or_ipw_loss -= criterion(ps[:,i], fr_targets[:, i].float(), reduction='sum')
else:
a_or_ipw_loss += criterion(ps[:,i], fr_targets[:, i].float(), reduction='sum')
a_or_ipw_loss = a_or_ipw_loss/mean_time_length/batchSize
# y,x,fail
x_dim = int(args.x_dim_permuted//args.n_agents)
n_agents = args.n_agents
n_agents_ = n_agents if "T" in args.model and args.dim_rec_global == 0 else n_agents+1
for i in range(args.burn_in0+1,time_length): # time
if args.variable_length:
non_nan = lengths>=obs_w-i
outcome_loss += torch.sum(non_nan.squeeze()*torch.abs(f_outcome_out[:,i] - targets[:,i+t_pred]) )
if Pred_X:
for k in range(n_agents_): # n_agents+args.dim_rec_global):
if k < n_agents:
if 'carla' in args.data:
L_recX += batch_error(x_out[:,i,k*x_dim:k*x_dim+3], x_inputs_[:,i,k*x_dim:k*x_dim+3],index=non_nan)
else: # nba
L_recX += batch_error(x_out[:,i,k*x_dim:(k+1)*x_dim], x_inputs_[:,i,k*x_dim:(k+1)*x_dim],index=non_nan)
else:
k2 = args.x_dim_permuted+args.dim_rec_global
L_recX += batch_error(x_out[:,i,args.x_dim_permuted:k2+1], x_inputs_[:,i,args.x_dim_permuted:k2+1],index=non_nan)
if 'carla' in args.data and Pred_X:
fail_loss += torch.sum(non_nan.squeeze()*torch.abs(x_out[:,i,-1] - x_inputs_[:,i+1,-1]) )
for k in range(n_agents_):
curve_loss += batch_error(x_out[:,i,k*x_dim+3:k*x_dim+5], x_out[:,i-1,k*x_dim+3:k*x_dim+5],index=non_nan)
else:
outcome_loss += torch.sum(torch.abs(f_outcome_out[:,i] - targets[:,i+t_pred]) )
if Pred_X:
for k in range(n_agents+args.dim_rec_global):
if k < n_agents:
if 'boid' in args.data:
try: L_recX += batch_error(x_out[:,i,k*x_dim+2:k*x_dim+4], x_inputs_[:,i,k*x_dim+2:k*x_dim+4])
except: import pdb; pdb.set_trace()
else:
k2 = args.x_dim_permuted+k-n_agents
L_recX += batch_error(x_out[:,i,k2:k2+1], x_inputs_[:,i,k2:k2+1])
L_recX /= mean_time_length*(n_agents+args.dim_rec_global)*batchSize #
fail_loss /= mean_time_length*(n_agents-1)*batchSize
curve_loss /= mean_time_length*(n_agents-1)*batchSize
else:
a_or_ipw_outputs = torch.cat(a_or_ipw_outputs, dim=1)
ps = torch.sigmoid(a_or_ipw_outputs)
weights = torch.zeros(a_or_ipw_outputs.shape)
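        # Inverse probability of treatment weighting: each time step of each sample is weighted
        # by p_treated / ps when treated and (1 - p_treated) / (1 - ps) otherwise, with the
        # propensity floored at 1e-3 and the resulting weights clipped to the range [0.01, 100].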
for i in range(args.burn_in0+1,time_length):
a_or_ipw_pred_norm = a_or_ipw_outputs[:,i]
if args.variable_length:
non_nan = lengths>=obs_w-i
a_or_ipw_loss += criterion(non_nan*a_or_ipw_pred_norm, non_nan*fr_targets[:, i].float(), reduction='sum')
else:
a_or_ipw_loss += criterion(a_or_ipw_pred_norm, fr_targets[:, i].float(), reduction='sum')
for j in range(a_or_ipw_outputs.size(0)): # batch
p_treated = torch.where(fr_targets[j] == 1)[0].size(0) / fr_targets.size(1)
if fr_targets[j,i] != 0:
weights[j,i] += p_treated / ps[j,i] if ps[j,i] > 1e-3 else p_treated / 1e-3
else:
weights[j,i] += (1 - p_treated) / (1 - ps[j,i]) if 1-ps[j,i] > 1e-3 else (1 - p_treated) / 1e-3
weights2 = torch.where(weights[:,i] >= 100, torch.Tensor([100]), weights[:,i])
weights3 = torch.where(weights2 <= 0.01, torch.Tensor([0.01]), weights2)
if args.variable_length:
outcome_loss += torch.sum(weights3*torch.abs(non_nan*f_outcome_out[:,i] - non_nan*targets[:,i+t_pred]) )
else:
outcome_loss += torch.sum(weights3*torch.abs(f_outcome_out[:,i] - targets[:,i+t_pred]) )
a_or_ipw_loss = a_or_ipw_loss/mean_time_length/batchSize
outcome_loss = outcome_loss/mean_time_length/batchSize
L_kl /= batchSize
if torch.sum(torch.isnan(outcome_loss))>0:
import pdb; pdb.set_trace()
return a_or_ipw_loss, outcome_loss, fail_loss, curve_loss, L_recX, L_kl
def display_loss(a_or_ipw_losses, outcome_losses, L_recXs, L_fails, L_curves, L_kls, epoch, args, Pred_X):
epoch_losses_a_or_ipw = np.mean(a_or_ipw_losses)
outcome_losses = np.mean(outcome_losses)
if Pred_X and 'carla' in args.data:
str_fail_loss = ', L_fail: {:.4f}'.format(np.mean(L_fails))
str_curve_loss = ', L_curve: {:.4f}'.format(np.mean(L_curves))
else:
str_fail_loss = ''
str_curve_loss = ''
if not 'RNN' in args.model:
print('Epoch: {}, a_or_ipw loss: {:.4f}, Outcome loss: {:.4f}'.format(epoch, epoch_losses_a_or_ipw, outcome_losses), flush=True)
if Pred_X:
L_recXs = np.mean(np.sqrt(L_recXs))
if 'V' in args.model:
L_kls = np.mean(L_kls)
print('Epoch: {}, L_kls: {:.4f}, L_recXs: {:.4f}'.format(epoch, L_kls, L_recXs)+str_fail_loss+str_curve_loss, flush=True)
else:
print('Epoch: {}, L_recXs: {:.4f}'.format(epoch, L_recXs)+str_fail_loss+str_curve_loss, flush=True)
else:
L_recXs = np.mean(L_recXs)
print('Epoch: {}, Outcome train loss: {:.4f}, L_recXs: {:.4f}'.format(epoch, outcome_losses, L_recXs)+str_fail_loss+str_curve_loss, flush=True)
def trainInitIPTW(train_loader, val_loader,test_loader, model, args, epochs, optimizer, criterion,
use_cuda=False, save_model=None,TEST=False):
if use_cuda:
print("====> Using CUDA device: ", torch.cuda.current_device(), flush=True)
model.cuda()
model = model.to('cuda')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
burn_in = args.burn_in
obs_w = args.observation_window
# Train network
best_loss_val = torch.tensor(float('inf')).to(device)
if not TEST and not args.has_GT:
CF = False
else:
CF = True
Pred_X = True if 'V' in args.model or 'RNN' in args.model or 'X' in args.model else False
if args.cont:
print('args.cont = True')
if os.path.exists(save_model):
model = torch.load(save_model)
print('best model was loaded')
else:
print('args.cont = True but file did not exist')
no_update = 0
if not TEST:
for epoch in range(epochs):
a_or_ipw_losses = []
outcome_losses = []
L_kls = []
L_recXs = []
L_fails = []
L_curves = []
for x_inputs, x_static_inputs, x_fr_inputs, targets, lengths, _,_,_,_,_ in tqdm(train_loader): # train_loader # tqdm(train_loader): # x_all, x_fr_all, targets_all,x_fr_opt
model.train()
# train
optimizer.zero_grad()
fr_targets = x_fr_inputs
if use_cuda:
x_inputs, x_static_inputs, x_fr_inputs = x_inputs.cuda(), x_static_inputs.cuda(), x_fr_inputs.cuda()
targets, fr_targets = targets.cuda(), fr_targets.cuda()
if args.variable_length:
lengths = lengths.cuda()
if args.data == 'nba':# or args.data == 'carla':
x_inputs_ = x_inputs[:,:,:-1]
else:
x_inputs_ = x_inputs
x_inputs__ = x_inputs_
if not 'V' in args.model and not 'RNN' in args.model:
a_or_ipw_outputs, f_outcome_out, _, _, x_out,_ = model(x_inputs_, x_static_inputs, fr_targets, targets, cf_treatment=None, lengths=lengths)
L_kl = torch.zeros(1).to(device)
else:
a_or_ipw_outputs, f_outcome_out, _, _, L_kl, x_out,_ = model(x_inputs_, x_static_inputs, fr_targets, targets, cf_treatment=None, Train=True, lengths=lengths)
f_treatment = torch.where(fr_targets.sum(1) > 0, torch.Tensor([1]), torch.Tensor([0]))
f_outcome_out = torch.stack(f_outcome_out,dim=1).squeeze()
batchSize = f_outcome_out.shape[0]
a_or_ipw_loss, outcome_loss, fail_loss, curve_loss, L_recX, L_kl = compute_loss(a_or_ipw_outputs,f_outcome_out,x_out,fr_targets,targets,x_inputs__,L_kl,lengths,Pred_X,criterion,args,obs_w,batchSize,device)
if not 'RNN' in args.model:
loss = a_or_ipw_loss * args.lambda_weight + outcome_loss
else:
loss = outcome_loss
if Pred_X:
loss += L_recX[0]*args.lambda_X
if 'V' in args.model:
loss += L_kl[0]*args.lambda_KL
if 'carla' in args.data:
loss += fail_loss*args.lambda_event
loss += curve_loss*args.lambda_event
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 20)
optimizer.step()
a_or_ipw_losses.append(a_or_ipw_loss.item())
outcome_losses.append(outcome_loss.item())
if Pred_X:
L_recXs.append(L_recX.item())
if 'V' in args.model:
L_kls.append(L_kl.item())
if 'carla' in args.data:
L_fails.append(fail_loss.item())
L_curves.append(curve_loss.item())
display_loss(a_or_ipw_losses, outcome_losses, L_recXs, L_fails, L_curves, L_kls, epoch, args, Pred_X)
# validation
print('Validation:')
pehe_val, _, mse_val, loss_val = model_eval(model, val_loader, criterion, args, epoch, eval_use_cuda=use_cuda, CF=CF)
if loss_val < best_loss_val:
best_loss_val = loss_val
if save_model:
print('Best model. Saving...\n')
torch.save(model, save_model)
elif np.isnan(loss_val) or (epoch==0 and loss_val == best_loss_val):
print('loss is nan or inf')
import pdb; pdb.set_trace()
else:
no_update += 1
if no_update >= 3:
try: model = torch.load(save_model)
except: import pdb; pdb.set_trace()
print('since no update continues, best model was loaded')
no_update = 0
else:
epoch = 0
print('Test:')
model = torch.load(save_model)
rmse_y_CF_max,rmse_best_timing,rmse,_ = model_eval(model, test_loader,criterion, args, epoch, eval_use_cuda=use_cuda, save=True, TEST=True)
def detach(data,eval_use_cuda):
if eval_use_cuda:
return data.to('cpu').detach().data.numpy()
else:
return data.detach().data.numpy()
def transfer_data(model, dataloader, criterion, args, epoch, eval_use_cuda=False, save=False, TEST=False):
burn_in = args.burn_in
burn_in_test = args.burn_in_test
obs_w = args.observation_window
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('TEST ='+str(TEST))
Pred_X = True if 'V' in args.model or 'RNN' in args.model or 'X' in args.model else False
with torch.no_grad():
model.eval()
a_or_ipw_losses = []
outcome_losses = []
L_kls = []
L_recXs = []
L_fails = []
L_curves = []
f_outcome_outputs = []
cf_outcome_outputs = []
f_outcome_outputs_long = []
f_outcome_true = []
cf_outcome_true = []
treatment_f = []
treatment_all = []
treatment_opts = []
a_or_ipw_true = []
loss_all = []
x_outs = []
x_outs_cf = []
x_outs_true = []
x_inputs_f, x_static_inputs_f, lengths_all,indices_all = [],[],[],[]
for x_inputs, x_static_inputs, x_fr_inputs, targets, lengths, indices, x_fr_all, x_all, targets_all, x_fr_opt in dataloader:
fr_targets = x_fr_inputs
if eval_use_cuda:
x_inputs, x_static_inputs, x_fr_inputs = x_inputs.cuda(), x_static_inputs.cuda(), x_fr_inputs.cuda()
x_fr_all, targets, fr_targets = x_fr_all.cuda(), targets.cuda(), fr_targets.cuda()
if args.variable_length:
lengths = lengths.cuda()
if args.has_GT:
x_all, targets_all = x_all.cuda(), targets_all.cuda()
if args.data == 'nba':# or args.data == 'carla':
x_inputs_ = x_inputs[:,:,:-1].clone()
x_all = x_all[:,:,:-1]
else:
x_inputs_ = x_inputs.clone()
x_inputs__ = x_inputs_.clone()
if not TEST and not args.has_GT:
x_fr_all_ = None
CF = False
else:
x_fr_all_ = x_fr_all
CF = True
# counterfactual treatments
if not 'V' in args.model and not 'RNN' in args.model:
a_or_ipw_outputs, f_outcome_out, cf_outcome_out, _, x_out, x_out_cf = model(x_inputs_, x_static_inputs, fr_targets, targets, cf_treatment=x_fr_all_, burn_in=burn_in, lengths=lengths)
if not args.has_GT: # nba
a_or_ipw_outputs, f_outcome_out2, _, _, x_out, _ = model(x_inputs_, x_static_inputs, fr_targets, targets, cf_treatment=None, burn_in=burn_in_test, lengths=lengths)
L_kl = torch.zeros(1).to(device)
else:
a_or_ipw_outputs, f_outcome_out, cf_outcome_out, _, L_kl, x_out, x_out_cf = model(x_inputs_, x_static_inputs, fr_targets, targets, cf_treatment=x_fr_all_, burn_in=burn_in, lengths=lengths)
if not args.has_GT: # nba
a_or_ipw_outputs, f_outcome_out2, _, _, L_kl, x_out, _ = model(x_inputs_, x_static_inputs, fr_targets, targets, cf_treatment=None, burn_in=burn_in_test, lengths=lengths)
if CF:
cf_outcome_out = cf_outcome_out.permute(2,1,0,3)
f_outcome_out = torch.stack(f_outcome_out,dim=1).squeeze()
if not args.has_GT: # nba
f_outcome_out_long = f_outcome_out.clone()
f_outcome_out = torch.stack(f_outcome_out2,dim=1).squeeze()
# x_out = torch.stack(x_out,dim=1)
batchSize = f_outcome_out.shape[0]
a_or_ipw_loss, outcome_loss, fail_loss, curve_loss, L_recX, L_kl = compute_loss(a_or_ipw_outputs,f_outcome_out,x_out,fr_targets,targets,x_inputs__,L_kl,lengths,Pred_X,criterion,args,obs_w,batchSize,device,inference=True)
a_or_ipw_losses.append(a_or_ipw_loss.item())
outcome_losses.append(outcome_loss.item())
if Pred_X:
L_recXs.append(L_recX.item())
if 'V' in args.model:
L_kls.append(L_kl.item())
if 'carla' in args.data:
L_fails.append(fail_loss.item())
L_curves.append(curve_loss.item())
if not 'RNN' in args.model:
loss = a_or_ipw_loss * args.lambda_weight + outcome_loss
else:
loss = outcome_loss
if Pred_X:
loss += L_recX[0]*args.lambda_X
if 'V' in args.model:
loss += L_kl[0]*args.lambda_KL
if 'carla' in args.data:
loss += fail_loss*args.lambda_event
loss += curve_loss*args.lambda_event
a_or_ipw_losses.append(a_or_ipw_loss.item())
outcome_losses.append(outcome_loss.item())
if Pred_X:
L_recXs.append(L_recX.item())
if 'V' in args.model:
L_kls.append(L_kl.item())
if 'carla' in args.data:
L_fails.append(fail_loss.item())
L_curves.append(curve_loss.item())
# detach
for i in range(len(a_or_ipw_outputs)):
try: a_or_ipw_outputs[i] = detach(a_or_ipw_outputs[i],eval_use_cuda)
except: import pdb; pdb.set_trace()
fr_targets = detach(fr_targets,eval_use_cuda)
targets = detach(targets,eval_use_cuda)
f_outcome_out = detach(f_outcome_out,eval_use_cuda)
if CF:
cf_outcome_out = detach(cf_outcome_out,eval_use_cuda)
if not args.has_GT: # nba
f_outcome_out_long = detach(f_outcome_out_long,eval_use_cuda)
loss = detach(loss,eval_use_cuda)
if args.has_GT:
targets_all = detach(targets_all,eval_use_cuda)
x_all = detach(x_all,eval_use_cuda)
x_fr_all = detach(x_fr_all,eval_use_cuda)
if Pred_X:
x_out = detach(x_out,eval_use_cuda)
x_out_cf = detach(x_out_cf,eval_use_cuda)
x_inputs = detach(x_inputs,eval_use_cuda)
x_static_inputs = detach(x_static_inputs,eval_use_cuda)
# x_fr_inputs = detach(x_fr_inputs,eval_use_cuda)
if args.variable_length:
lengths = detach(lengths,eval_use_cuda)
if save:
indices = detach(indices,eval_use_cuda)
# append
treatment_f.append(fr_targets)
treatment_all.append(x_fr_all)
if args.has_GT:
x_fr_opt = x_fr_opt.detach().data.numpy()
treatment_opts.append(x_fr_opt)
a_or_ipw_true.append(np.where(fr_targets.sum(1) > 0, 1, 0))
f_outcome_true.append(targets)
if args.has_GT:
cf_outcome_true.append(targets_all)
f_outcome_outputs.append(f_outcome_out)
if CF:
cf_outcome_outputs.append(cf_outcome_out)
if not args.has_GT:
f_outcome_outputs_long.append(f_outcome_out_long)
loss_all.append(loss)
if Pred_X:
x_outs.append(x_out)
x_outs_cf.append(x_out_cf)
x_outs_true.append(x_all)
x_inputs_f.append(x_inputs)
x_static_inputs_f.append(x_static_inputs)
# x_fr_inputs_f.append(x_fr_inputs)
if args.variable_length:
lengths_all.append(lengths)
indices_all.append(indices)
display_loss(a_or_ipw_losses, outcome_losses, L_recXs, L_fails, L_curves, L_kls, epoch, args, Pred_X)
# concatenate
a_or_ipw_true = np.concatenate(a_or_ipw_true).transpose()
f_outcome_true = np.concatenate(f_outcome_true)
if args.has_GT:
cf_outcome_true = np.concatenate(cf_outcome_true)
treatment_opts = np.concatenate(treatment_opts)
else:
cf_outcome_true = None
treatment_opts = None
f_outcome_outputs = np.concatenate(f_outcome_outputs)
if CF:
cf_outcome_outputs = np.concatenate(cf_outcome_outputs)
if not args.has_GT:
f_outcome_outputs_long = np.concatenate(f_outcome_outputs_long)
# loss_all = np.concatenate(loss_all)
loss_all = np.mean(loss_all)
if Pred_X:
x_outs = np.concatenate(x_outs,0)
x_outs_cf = np.concatenate(x_outs_cf,1)
x_outs_true = np.concatenate(x_outs_true)
else:
x_outs = None
x_outs_true = None
x_inputs_f = np.concatenate(x_inputs_f)
x_static_inputs_f = np.concatenate(x_static_inputs_f)
if args.data == 'nba' or args.data == 'carla':
lengths_all = np.concatenate(lengths_all)
if save:
indices_all = np.concatenate(indices_all)
else:
lengths_all,indices_all = [],[]
# for saving
if args.has_GT:
outcomes = [f_outcome_true, cf_outcome_true, f_outcome_outputs, cf_outcome_outputs]
else:
outcomes = [f_outcome_true, cf_outcome_true, f_outcome_outputs, cf_outcome_outputs, f_outcome_outputs_long]
covariates = [x_inputs_f, x_outs, x_outs_true, x_static_inputs_f, x_outs_cf]
if save:
others = [treatment_f, treatment_all, treatment_opts, lengths_all, indices_all, loss_all]
else:
others = [treatment_f, treatment_all, treatment_opts, lengths_all, loss_all]
return outcomes, covariates, others
def model_eval(model, dataloader, criterion, args, epoch, eval_use_cuda=False, save=False, TEST=False, CF=True):
burn_in = args.burn_in #+ 1
burn_in_test = args.burn_in_test #+ 1
burn_in_ = burn_in if args.has_GT else burn_in_test
Pred_X = True if 'V' in args.model or 'RNN' in args.model or 'X' in args.model else False
outcomes, covariates, others = transfer_data(model, dataloader, criterion, args, epoch, eval_use_cuda, save=save, TEST=TEST)
if args.has_GT:
f_outcome_true, cf_outcome_true, f_outcome_outputs, cf_outcome_outputs = outcomes
else:
f_outcome_true, cf_outcome_true, f_outcome_outputs, cf_outcome_outputs, _ = outcomes
x_inputs_f, x_outs, x_outs_true, x_static_inputs_f, x_outs_cf = covariates
if save:
treatment_f, treatment_all, treatment_opts, lengths_all, indices_all, loss_all = others
else:
treatment_f, treatment_all, treatment_opts, lengths_all, loss_all = others
if args.data == 'nba':
n_agents = args.n_agents - 1
x_dim_permuted_ = n_agents*2 if not args.vel else n_agents*4 # 4 args.x_dim_permuted-2
else:
n_agents = args.n_agents
x_dim_permuted_ = args.x_dim_permuted
n_dim_each_permuted = int(x_dim_permuted_//n_agents)
model = args.model
std = False
t_pred = 1 if args.y_pred else 0
if args.has_GT:
# ouctome
if args.y_pred:
y_all_true = cf_outcome_true[:,burn_in_+1:].reshape((cf_outcome_true.shape[0],-1))
else:
y_all_true = cf_outcome_true[:,burn_in_:-1].reshape((cf_outcome_true.shape[0],-1))
y_pred_true = cf_outcome_outputs[:,burn_in_:,:,0].reshape((cf_outcome_true.shape[0],-1))
rmse = mean_squared_error(y_all_true,y_pred_true, multioutput='raw_values',squared=False)
# best timing
if 'boid' in args.data:
if args.y_pred:
cfo_true_last = cf_outcome_true[:,burn_in_+1:,:]
tau_true = cf_outcome_true[:,burn_in_+1:,:5] - np.repeat(cf_outcome_true[:,burn_in_+1:,-1,np.newaxis],5,axis=2)
else:
cfo_true_last = cf_outcome_true[:,burn_in_:-1,:]
tau_true = cf_outcome_true[:,burn_in_:-1,:5] - np.repeat(cf_outcome_true[:,burn_in_:-1,-1,np.newaxis],5,axis=2)
cfo_last = cf_outcome_outputs[:,burn_in_:,:,0]
cfo_true_last = np.max(np.abs(cfo_true_last),1)
cfo_last = np.max(np.abs(cfo_last),1)
cfo_last_max_ind = np.argmax(cfo_last,1)
best_timing_true = np.argmax(cfo_true_last,1)
timing_or_diff = np.sqrt((best_timing_true-cfo_last_max_ind)**2)
tau_pred = cf_outcome_outputs[:,burn_in_:,:5,0] - np.repeat(cf_outcome_outputs[:,burn_in_:,-1],5,axis=2)
if args.data == 'carla':
diff_f = cf_outcome_true[:,-2+t_pred,0] - cf_outcome_true[:,-2+t_pred,1]
diff_cf = cf_outcome_outputs[:,-1,0,0] - cf_outcome_outputs[:,-1,1,0]
timing_or_diff = np.abs(diff_f-diff_cf).reshape((cf_outcome_true.shape[0],-1))
if args.y_pred:
tau_true = cf_outcome_true[:,burn_in_+1:,0:1] - cf_outcome_true[:,burn_in_+1:,1:]
else:
tau_true = cf_outcome_true[:,burn_in_:-1,0:1] - cf_outcome_true[:,burn_in_:-1,1:]
tau_pred = cf_outcome_outputs[:,burn_in_:,0] - cf_outcome_outputs[:,burn_in_:,1]
# PEHE
N = tau_true.shape[0]
PEHE = np.sqrt(np.sum((tau_true-tau_pred)**2,axis=0)/N).reshape(-1,)
# ATE
ATE = np.abs(np.sum(tau_true,axis=0)/N - np.sum(tau_pred,axis=0)/N).reshape(-1,)
if args.data == 'carla':
timing_or_diff = PEHE
if not Pred_X:
# rmse_max,rmse_best_timing_,rmse_ = result
#if 'boid' in args.data:
# timing_or_diff = rmse_best_timing
print(model+': '
+' ' + '{:.3f}'.format(np.mean(rmse))+' $\pm$ '+'{:.3f}'.format(std_ste(rmse,std=std))+' &'
+' ' + '{:.3f}'.format(np.mean(PEHE))+' $\pm$ '+'{:.3f}'.format(std_ste(PEHE,std=std))+' &'
+' ' + '{:.3f}'.format(np.mean(timing_or_diff))+' $\pm$ '+'{:.3f}'.format(std_ste(timing_or_diff,std=std))+' & ---'
)
rmse_cf_x = None
if args.data == 'carla':
loss_all = np.mean(rmse) + np.mean(timing_or_diff) # 1/10
elif 'boid' in args.data:
loss_all = np.mean(rmse) + np.mean(PEHE) + np.mean(timing_or_diff) # 2/1
else:
# rmse_max,rmse_best_timing_,rmse_,rmse_cf_x_ = result
#if 'boid' in args.data:
# timing_or_diff = rmse_best_timing
n_dim_x = 2
n_agents = args.n_agents
if args.data == 'carla':
n_dim_agent = 7
ind_x = np.concatenate([np.arange(1,n_agents*n_dim_agent,n_dim_agent)[:,np.newaxis],
|
np.arange(2,n_agents*n_dim_agent,n_dim_agent)
|
numpy.arange
|
from __future__ import print_function
from __future__ import division
import numpy as np
from pygame.transform import scale
import config
import espControl as led
from virtualLed import VirtualLedTable
phase = 0
amplitude = 3
reverseAmp = False
reverseRGB = False
count_frames = 1
rgb = [np.linspace(0, 128, 256),
np.linspace(0, 128, 256),
np.linspace(128, 0, 256)]
def map1dto2d(pixels):
width = 8
mapped_pixels = []
for color in pixels:
rgb =
|
np.zeros(config.N_PIXELS)
|
numpy.zeros
|
# line plot
# Print the last item from year and pop
print(year[-1])
print(pop[-1])
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Make a line plot: year on the x-axis, pop on the y-axis
plt.plot(year, pop)
plt.show()
# Print the last item of gdp_cap and life_exp
print(gdp_cap[-1])
print(life_exp[-1])
# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis
plt.plot(gdp_cap, life_exp)
# Display the plot
plt.show()
# Change the line plot below to a scatter plot
plt.scatter(gdp_cap, life_exp)
# Put the x-axis on a logarithmic scale
plt.xscale('log')
# Show plot
plt.show()
# Build Histogram
# Create histogram of life_exp data
plt.hist(life_exp)
# Display histogram
plt.show()
# Build histogram with 5 bins
plt.hist(life_exp, bins=5)
# Show and clean up plot
plt.show()
plt.clf()
# Build histogram with 20 bins
plt.hist(life_exp, bins=20)
# Show and clean up again
plt.show()
plt.clf()
# Histogram of life_exp, 15 bins
plt.hist(life_exp, bins=15)
# Show and clear plot
plt.show()
plt.clf()
# Histogram of life_exp1950, 15 bins
plt.hist(life_exp1950, bins=15)
# Show and clear plot again
plt.show()
plt.clf()
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Strings
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development in 2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# After customizing, display the plot
plt.show()
# Scatter plot
plt.scatter(gdp_cap, life_exp)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
# Definition of tick_val and tick_lab
tick_val = [1000,10000,100000]
tick_lab = ['1k','10k','100k']
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
# After customizing, display the plot
plt.show()
# Import numpy as np
import numpy as np
# Store pop as a numpy array: np_pop
np_pop = np.array(pop)
# Double np_pop
np_pop = np_pop * 2
# Update: set s argument to np_pop
plt.scatter(gdp_cap, life_exp, s = np_pop)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000, 10000, 100000],['1k', '10k', '100k'])
# Display the plot
plt.show()
dict = {
'Asia':'red',
'Europe':'green',
'Africa':'blue',
'Americas':'yellow',
'Oceania':'black'
}
# Specify c and alpha inside plt.scatter(); col is assumed to be a pre-defined list of
# per-country colors (e.g. built from the continent-to-color dict above)
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Show the plot
plt.show()
# Scatter plot
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Additional customizations
plt.text(1550, 71, 'India')
plt.text(5700, 80, 'China')
# Add grid() call
plt.grid(True)
# Show the plot
plt.show()
## intro to python for datascience
### numpy
# Create list baseball
baseball = [180, 215, 210, 210, 188, 176, 209, 200]
# Import the numpy package as np
import numpy as np
# Create a Numpy array from baseball: np_baseball
np_baseball = np.array(baseball)
# Print out type of np_baseball
print(type(np_baseball))
# height is available as a regular list
# Import numpy
import numpy as np
# Create a Numpy array from height: np_height
np_height = np.array(height)
# Print out np_height
print(np_height)
# Convert np_height to m: np_height_m
np_height_m = np_height * 0.0254
# Print np_height_m
print(np_height_m)
# height and weight are available as a regular lists
# Import numpy
import numpy as np
# Create array from height with correct units: np_height_m
np_height_m = np.array(height) * 0.0254
# Create array from weight with correct units: np_weight_kg
np_weight_kg = np.array(weight) * 0.453592
# Calculate the BMI: bmi
bmi = np_weight_kg / np_height_m ** 2
# Print out bmi
print (bmi)
# height and weight are available as a regular lists
# Import numpy
import numpy as np
# Calculate the BMI: bmi
np_height_m = np.array(height) * 0.0254
np_weight_kg = np.array(weight) * 0.453592
bmi = np_weight_kg / np_height_m ** 2
# Create the light array
light = bmi < 21
# Print out light
print(light)
# Print out BMIs of all baseball players whose BMI is below 21
print(bmi[light])
|
np.array([True, 1, 2])
|
numpy.array
|
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script."""
import argparse
import os
import csv
import random
import json
import logging
import pandas as pd
import shutil
import spacy
import time
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from tensorboardX import SummaryWriter
from pytorch_pretrained_bert import (OpenAIGPTDoubleHeadsModel, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
OpenAIAdam, WEIGHTS_NAME, CONFIG_NAME,
GPT2DoubleHeadsModel, GPT2LMHeadModel, GPT2Tokenizer)
from pytorch_pretrained_bert.optimization import WarmupLinearSchedule
DATA_DIR = '../data'
q_sep = 'Q'
a_sep = 'A'
mc_task_names = {'rocstories'}
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
outputs =
|
np.argmax(out, axis=1)
|
numpy.argmax
|
import collections
import numpy as np
import time
import datetime
import os
import networkx as nx
import pytz
import cloudvolume
import pandas as pd
from multiwrapper import multiprocessing_utils as mu
from . import mincut
from google.api_core.retry import Retry, if_exception_type
from google.api_core.exceptions import Aborted, DeadlineExceeded, \
ServiceUnavailable
from google.auth import credentials
from google.cloud import bigtable
from google.cloud.bigtable.row_filters import TimestampRange, \
TimestampRangeFilter, ColumnRangeFilter, ValueRangeFilter, RowFilterChain, \
ColumnQualifierRegexFilter, RowFilterUnion, ConditionalRowFilter, \
PassAllFilter, BlockAllFilter, RowFilter
from google.cloud.bigtable.column_family import MaxVersionsGCRule
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
# global variables
HOME = os.path.expanduser("~")
N_DIGITS_UINT64 = len(str(np.iinfo(np.uint64).max))
LOCK_EXPIRED_TIME_DELTA = datetime.timedelta(minutes=2, seconds=00)
UTC = pytz.UTC
# Setting environment wide credential path
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = \
HOME + "/.cloudvolume/secrets/google-secret.json"
def compute_indices_pandas(data) -> pd.Series:
""" Computes indices of all unique entries
Make sure to remap your array to a dense range starting at zero
https://stackoverflow.com/questions/33281957/faster-alternative-to-numpy-where
:param data: np.ndarray
:return: pandas dataframe
"""
d = data.ravel()
f = lambda x: np.unravel_index(x.index, data.shape)
return pd.Series(d).groupby(d).apply(f)
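# Example sketch (illustrative values): for data = np.array([[1, 1], [2, 1]]),
# compute_indices_pandas(data)[1] returns (array([0, 0, 1]), array([0, 1, 1])) --
# the row and column indices of every entry equal to 1.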
def log_n(arr, n):
""" Computes log to base n
:param arr: array or float
:param n: int
base
:return: return log_n(arr)
"""
if n == 2:
return np.log2(arr)
elif n == 10:
return np.log10(arr)
else:
return np.log(arr) / np.log(n)
def pad_node_id(node_id: np.uint64) -> str:
""" Pad node id to 20 digits
:param node_id: int
:return: str
"""
return "%.20d" % node_id
def serialize_uint64(node_id: np.uint64) -> bytes:
""" Serializes an id to be ingested by a bigtable table row
:param node_id: int
:return: str
"""
return serialize_key(pad_node_id(node_id)) # type: ignore
def deserialize_uint64(node_id: bytes) -> np.uint64:
""" De-serializes a node id from a BigTable row
:param node_id: bytes
:return: np.uint64
"""
return np.uint64(node_id.decode()) # type: ignore
def serialize_key(key: str) -> bytes:
""" Serializes a key to be ingested by a bigtable table row
:param key: str
:return: bytes
"""
return key.encode("utf-8")
def deserialize_key(key: bytes) -> str:
""" Deserializes a row key
:param key: bytes
:return: str
"""
return key.decode()
def row_to_byte_dict(row: bigtable.row.Row, f_id: str = None, idx: int = None
) -> Dict[int, Dict]:
""" Reads row entries to a dictionary
:param row: row
:param f_id: str
:param idx: int
:return: dict
"""
row_dict = {}
for fam_id in row.cells.keys():
row_dict[fam_id] = {}
for row_k in row.cells[fam_id].keys():
if idx is None:
row_dict[fam_id][deserialize_key(row_k)] = \
[c.value for c in row.cells[fam_id][row_k]]
else:
row_dict[fam_id][deserialize_key(row_k)] = \
row.cells[fam_id][row_k][idx].value
if f_id is not None and f_id in row_dict:
return row_dict[f_id]
elif f_id is None:
return row_dict
else:
raise Exception("Family id not found")
def compute_bitmasks(n_layers: int, fan_out: int) -> Dict[int, int]:
"""
:param n_layers: int
:return: dict
layer -> bits for layer id
"""
bitmask_dict = {}
for i_layer in range(n_layers, 0, -1):
if i_layer == 1:
# Lock this layer to an 8 bit layout to maintain compatibility with
# the exported segmentation
# n_bits_for_layers = np.ceil(log_n(fan_out**(n_layers - 2), fan_out))
n_bits_for_layers = 8
else:
n_bits_for_layers = max(1,
np.ceil(log_n(fan_out**(n_layers - i_layer),
fan_out)))
# n_bits_for_layers = fan_out ** int(np.ceil(log_n(n_bits_for_layers, fan_out)))
n_bits_for_layers = int(n_bits_for_layers)
assert n_bits_for_layers <= 8
bitmask_dict[i_layer] = n_bits_for_layers
return bitmask_dict
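# Example sketch (illustrative parameters): with n_layers=10 and fan_out=2 the lowest layer
# is pinned to 8 bits per dimension and higher layers need progressively fewer bits, e.g.
# compute_bitmasks(10, 2) -> {1: 8, 2: 8, 3: 7, 4: 6, ..., 9: 1, 10: 1}.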
class ChunkedGraph(object):
def __init__(self,
table_id: str,
instance_id: str = "pychunkedgraph",
project_id: str = "neuromancer-seung-import",
chunk_size: Tuple[int, int, int] = None,
fan_out: Optional[int] = None,
n_layers: Optional[int] = None,
credentials: Optional[credentials.Credentials] = None,
client: bigtable.Client = None,
cv_path: str = None,
is_new: bool = False) -> None:
if client is not None:
self._client = client
else:
self._client = bigtable.Client(project=project_id, admin=True,
credentials=credentials)
self._instance = self.client.instance(instance_id)
self._table_id = table_id
self._table = self.instance.table(self.table_id)
if is_new:
self.check_and_create_table()
self._n_layers = self.check_and_write_table_parameters("n_layers",
n_layers)
self._fan_out = self.check_and_write_table_parameters("fan_out",
fan_out)
self._cv_path = self.check_and_write_table_parameters("cv_path",
cv_path)
self._chunk_size = self.check_and_write_table_parameters("chunk_size",
chunk_size)
self._bitmasks = compute_bitmasks(self.n_layers, self.fan_out)
self._cv = None
# Hardcoded parameters
self._n_bits_for_layer_id = 8
self._cv_mip = 3
@property
def client(self) -> bigtable.Client:
return self._client
@property
def instance(self) -> bigtable.instance.Instance:
return self._instance
@property
def table(self) -> bigtable.table.Table:
return self._table
@property
def table_id(self) -> str:
return self._table_id
@property
def instance_id(self):
return self.instance.instance_id
@property
def project_id(self):
return self.client.project
@property
def family_id(self) -> str:
return "0"
@property
def incrementer_family_id(self) -> str:
return "1"
@property
def log_family_id(self) -> str:
return "2"
@property
def cross_edge_family_id(self) -> str:
return "3"
@property
def fan_out(self) -> int:
return self._fan_out
@property
def chunk_size(self) -> np.ndarray:
return self._chunk_size
@property
def n_layers(self) -> int:
return self._n_layers
@property
def bitmasks(self) -> Dict[int, int]:
return self._bitmasks
@property
def cv_path(self) -> str:
return self._cv_path
@property
def cv_mip(self) -> int:
return self._cv_mip
@property
def cv(self) -> cloudvolume.CloudVolume:
if self._cv is None:
self._cv = cloudvolume.CloudVolume(self.cv_path, mip=self._cv_mip)
return self._cv
@property
def root_chunk_id(self):
return self.get_chunk_id(layer=int(self.n_layers), x=0, y=0, z=0)
def check_and_create_table(self) -> None:
""" Checks if table exists and creates new one if necessary """
table_ids = [t.table_id for t in self.instance.list_tables()]
if not self.table_id in table_ids:
self.table.create()
f = self.table.column_family(self.family_id)
f.create()
f_inc = self.table.column_family(self.incrementer_family_id,
gc_rule=MaxVersionsGCRule(1))
f_inc.create()
f_log = self.table.column_family(self.log_family_id)
f_log.create()
f_ce = self.table.column_family(self.cross_edge_family_id,
gc_rule=MaxVersionsGCRule(1))
f_ce.create()
print("Table created")
def check_and_write_table_parameters(self, param_key: str,
value: Optional[np.uint64] = None
) -> np.uint64:
""" Checks if a parameter already exists in the table. If it already
exists it returns the stored value, else it stores the given value. It
raises an exception if no value is passed and the parameter does not
exist, yet.
:param param_key: str
:param value: np.uint64
:return: np.uint64
value
"""
ser_param_key = serialize_key(param_key)
row = self.table.read_row(serialize_key("params"))
if row is None or ser_param_key not in row.cells[self.family_id]:
assert value is not None
if param_key in ["fan_out", "n_layers"]:
val_dict = {param_key: np.array(value,
dtype=np.uint64).tobytes()}
elif param_key in ["cv_path"]:
val_dict = {param_key: serialize_key(value)}
elif param_key in ["chunk_size"]:
val_dict = {param_key: np.array(value,
dtype=np.uint64).tobytes()}
else:
raise Exception("Unknown type for parameter")
row = self.mutate_row(serialize_key("params"), self.family_id,
val_dict)
self.bulk_write([row])
else:
value = row.cells[self.family_id][ser_param_key][0].value
if param_key in ["fan_out", "n_layers"]:
value = np.frombuffer(value, dtype=np.uint64)[0]
elif param_key in ["cv_path"]:
value = deserialize_key(value)
elif param_key in ["chunk_size"]:
value = np.frombuffer(value, dtype=np.uint64)
else:
raise Exception("Unknown key")
return value
def get_serialized_info(self):
""" Rerturns dictionary that can be used to load this ChunkedGraph
:return: dict
"""
info = {"table_id": self.table_id,
"instance_id": self.instance_id,
"project_id": self.project_id}
try:
info["credentials"] = self.client.credentials
except:
info["credentials"] = self.client._credentials
return info
def get_chunk_layer(self, node_or_chunk_id: np.uint64) -> int:
""" Extract Layer from Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: int
"""
return int(node_or_chunk_id) >> 64 - self._n_bits_for_layer_id
def get_chunk_coordinates(self, node_or_chunk_id: np.uint64
) -> np.ndarray:
""" Extract X, Y and Z coordinate from Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: Tuple(int, int, int)
"""
layer = self.get_chunk_layer(node_or_chunk_id)
bits_per_dim = self.bitmasks[layer]
x_offset = 64 - self._n_bits_for_layer_id - bits_per_dim
y_offset = x_offset - bits_per_dim
z_offset = y_offset - bits_per_dim
x = int(node_or_chunk_id) >> x_offset & 2 ** bits_per_dim - 1
y = int(node_or_chunk_id) >> y_offset & 2 ** bits_per_dim - 1
z = int(node_or_chunk_id) >> z_offset & 2 ** bits_per_dim - 1
return np.array([x, y, z])
def get_chunk_id(self, node_id: Optional[np.uint64] = None,
layer: Optional[int] = None,
x: Optional[int] = None,
y: Optional[int] = None,
z: Optional[int] = None) -> np.uint64:
""" (1) Extract Chunk ID from Node ID
(2) Build Chunk ID from Layer, X, Y and Z components
:param node_id: np.uint64
:param layer: int
:param x: int
:param y: int
:param z: int
:return: np.uint64
"""
assert node_id is not None or \
all(v is not None for v in [layer, x, y, z])
if node_id is not None:
layer = self.get_chunk_layer(node_id)
bits_per_dim = self.bitmasks[layer]
if node_id is not None:
chunk_offset = 64 - self._n_bits_for_layer_id - 3 * bits_per_dim
return np.uint64((int(node_id) >> chunk_offset) << chunk_offset)
else:
if not(x < 2 ** bits_per_dim and
y < 2 ** bits_per_dim and
z < 2 ** bits_per_dim):
raise Exception("Chunk coordinate is out of range for"
"this graph on layer %d with %d bits/dim."
"[%d, %d, %d]; max = %d."
% (layer, bits_per_dim, x, y, z,
2 ** bits_per_dim))
layer_offset = 64 - self._n_bits_for_layer_id
x_offset = layer_offset - bits_per_dim
y_offset = x_offset - bits_per_dim
z_offset = y_offset - bits_per_dim
return np.uint64(layer << layer_offset | x << x_offset |
y << y_offset | z << z_offset)
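    # Round-trip sketch (assumes a ChunkedGraph instance `cg` whose bitmasks cover the values):
    # cid = cg.get_chunk_id(layer=2, x=1, y=2, z=3)
    # cg.get_chunk_layer(cid)        -> 2
    # cg.get_chunk_coordinates(cid)  -> array([1, 2, 3])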
def get_chunk_ids_from_node_ids(self, node_ids: Iterable[np.uint64]
) -> np.ndarray:
""" Extract a list of Chunk IDs from a list of Node IDs
:param node_ids: np.ndarray(dtype=np.uint64)
:return: np.ndarray(dtype=np.uint64)
"""
# TODO: measure and improve performance(?)
return np.array(list(map(lambda x: self.get_chunk_id(node_id=x),
node_ids)), dtype=np.uint64)
def get_segment_id_limit(self, node_or_chunk_id: np.uint64) -> np.uint64:
""" Get maximum possible Segment ID for given Node ID or Chunk ID
:param node_or_chunk_id: np.uint64
:return: np.uint64
"""
layer = self.get_chunk_layer(node_or_chunk_id)
bits_per_dim = self.bitmasks[layer]
chunk_offset = 64 - self._n_bits_for_layer_id - 3 * bits_per_dim
return np.uint64(2 ** chunk_offset - 1)
def get_segment_id(self, node_id: np.uint64) -> np.uint64:
""" Extract Segment ID from Node ID
:param node_id: np.uint64
:return: np.uint64
"""
return node_id & self.get_segment_id_limit(node_id)
def get_node_id(self, segment_id: np.uint64,
chunk_id: Optional[np.uint64] = None,
layer: Optional[int] = None,
x: Optional[int] = None,
y: Optional[int] = None,
z: Optional[int] = None) -> np.uint64:
""" (1) Build Node ID from Segment ID and Chunk ID
(2) Build Node ID from Segment ID, Layer, X, Y and Z components
:param segment_id: np.uint64
:param chunk_id: np.uint64
:param layer: int
:param x: int
:param y: int
:param z: int
:return: np.uint64
"""
if chunk_id is not None:
return chunk_id | segment_id
else:
return self.get_chunk_id(layer=layer, x=x, y=y, z=z) | segment_id
def get_unique_segment_id_range(self, chunk_id: np.uint64, step: int = 1
) -> np.ndarray:
""" Return unique Segment ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:param step: int
:return: np.uint64
"""
counter_key = serialize_key('counter')
# Incrementer row keys start with an "i" followed by the chunk id
row_key = serialize_key("i%s" % pad_node_id(chunk_id))
append_row = self.table.row(row_key, append=True)
append_row.increment_cell_value(self.incrementer_family_id,
counter_key, step)
# This increments the row entry and returns the value AFTER incrementing
latest_row = append_row.commit()
max_segment_id_b = latest_row[self.incrementer_family_id][counter_key][0][0]
max_segment_id = int.from_bytes(max_segment_id_b, byteorder="big")
min_segment_id = max_segment_id + 1 - step
segment_id_range = np.array(range(min_segment_id, max_segment_id + 1),
dtype=np.uint64)
return segment_id_range
def get_unique_segment_id(self, chunk_id: np.uint64) -> np.uint64:
""" Return unique Segment ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:param step: int
:return: np.uint64
"""
return self.get_unique_segment_id_range(chunk_id=chunk_id, step=1)[0]
def get_unique_node_id_range(self, chunk_id: np.uint64, step: int = 1
) -> np.ndarray:
""" Return unique Node ID range for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:param step: int
:return: np.uint64
"""
segment_ids = self.get_unique_segment_id_range(chunk_id=chunk_id,
step=step)
node_ids = np.array([self.get_node_id(segment_id, chunk_id)
for segment_id in segment_ids], dtype=np.uint64)
return node_ids
def get_unique_node_id(self, chunk_id: np.uint64) -> np.uint64:
""" Return unique Node ID for given Chunk ID
atomic counter
:param chunk_id: np.uint64
:return: np.uint64
"""
return self.get_unique_node_id_range(chunk_id=chunk_id, step=1)[0]
def get_max_node_id(self, chunk_id: np.uint64) -> np.uint64:
""" Gets maximal node id in a chunk based on the atomic counter
        This is an approximation. It is not guaranteed that all ids smaller
        than or equal to this id exist. However, it is guaranteed that no
        larger id exists at the time this function is executed.
:return: uint64
"""
counter_key = serialize_key('counter')
# Incrementer row keys start with an "i"
row_key = serialize_key("i%s" % pad_node_id(chunk_id))
row = self.table.read_row(row_key)
# Read incrementer value
if row is not None:
max_node_id_b = row.cells[self.incrementer_family_id][counter_key][0].value
max_node_id = int.from_bytes(max_node_id_b, byteorder="big")
else:
max_node_id = 0
return np.uint64(max_node_id)
def get_unique_operation_id(self) -> np.uint64:
""" Finds a unique operation id
atomic counter
Operations essentially live in layer 0. Even if segmentation ids might
live in layer 0 one day, they would not collide with the operation ids
because we write information belonging to operations in a separate
family id.
:return: str
"""
counter_key = serialize_key('counter')
# Incrementer row keys start with an "i"
row_key = serialize_key("ioperations")
append_row = self.table.row(row_key, append=True)
append_row.increment_cell_value(self.incrementer_family_id,
counter_key, 1)
# This increments the row entry and returns the value AFTER incrementing
latest_row = append_row.commit()
operation_id_b = latest_row[self.incrementer_family_id][counter_key][0][0]
operation_id = int.from_bytes(operation_id_b, byteorder="big")
return np.uint64(operation_id)
def get_max_operation_id(self) -> np.uint64:
""" Gets maximal operation id based on the atomic counter
        This is an approximation. It is not guaranteed that all ids smaller
        than or equal to this id exist. However, it is guaranteed that no
        larger id exists at the time this function is executed.
:return: uint64
"""
counter_key = serialize_key('counter')
# Incrementer row keys start with an "i"
row_key = serialize_key("ioperations")
row = self.table.read_row(row_key)
# Read incrementer value
if row is not None:
max_operation_id_b = row.cells[self.incrementer_family_id][counter_key][0].value
max_operation_id = int.from_bytes(max_operation_id_b,
byteorder="big")
else:
max_operation_id = 0
return np.uint64(max_operation_id)
def get_cross_chunk_edges_layer(self, cross_edges):
if len(cross_edges) == 0:
return np.array([], dtype=np.int)
cross_chunk_edge_layers = np.ones(len(cross_edges), dtype=np.int) * 2
cross_edge_coordinates = []
for cross_edge in cross_edges:
cross_edge_coordinates.append(
[self.get_chunk_coordinates(cross_edge[0]),
self.get_chunk_coordinates(cross_edge[1])])
cross_edge_coordinates = np.array(cross_edge_coordinates, dtype=np.int)
for layer in range(3, self.n_layers):
cross_edge_coordinates = cross_edge_coordinates // self.fan_out
edge_diff = np.sum(np.abs(cross_edge_coordinates[:, 0] -
cross_edge_coordinates[:, 1]), axis=1)
cross_chunk_edge_layers[edge_diff > 0] += 1
return cross_chunk_edge_layers
def get_cross_chunk_edge_dict(self, cross_edges):
cce_layers = self.get_cross_chunk_edges_layer(cross_edges)
u_cce_layers = np.unique(cce_layers)
cross_edge_dict = {}
for l in range(2, self.n_layers):
cross_edge_dict[l] = \
np.array([], dtype=np.uint64).reshape(-1, 2)
val_dict = {}
for cc_layer in u_cce_layers:
layer_cross_edges = cross_edges[cce_layers == cc_layer]
if len(layer_cross_edges) > 0:
val_dict["atomic_cross_edges_%d" % cc_layer] = \
layer_cross_edges.tobytes()
cross_edge_dict[cc_layer] = layer_cross_edges
return cross_edge_dict
def read_row(self, node_id: np.uint64, key: str, idx: int = 0,
dtype: type = np.uint64, get_time_stamp: bool = False,
fam_id: str = None) -> Any:
""" Reads row from BigTable and takes care of serializations
:param node_id: uint64
:param key: table column
:param idx: column list index
:param dtype: np.dtype
:param get_time_stamp: bool
:param fam_id: str
:return: row entry
"""
key = serialize_key(key)
if fam_id is None:
fam_id = self.family_id
row = self.table.read_row(serialize_uint64(node_id),
filter_=ColumnQualifierRegexFilter(key))
if row is None or key not in row.cells[fam_id]:
if get_time_stamp:
return None, None
else:
return None
cell_entries = row.cells[fam_id][key]
if dtype is None:
cell_value = cell_entries[idx].value
else:
cell_value = np.frombuffer(cell_entries[idx].value, dtype=dtype)
if get_time_stamp:
return cell_value, cell_entries[idx].timestamp
else:
return cell_value
def mutate_row(self, row_key: bytes, column_family_id: str, val_dict: dict,
time_stamp: Optional[datetime.datetime] = None
) -> bigtable.row.Row:
""" Mutates a single row
:param row_key: serialized bigtable row key
:param column_family_id: str
serialized column family id
:param val_dict: dict
:param time_stamp: None or datetime
:return: list
"""
row = self.table.row(row_key)
for column, value in val_dict.items():
row.set_cell(column_family_id=column_family_id, column=column,
value=value, timestamp=time_stamp)
return row
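    # Usage sketch (the column name is hypothetical): build a mutation and persist it in bulk.
    # row = cg.mutate_row(serialize_uint64(node_id), cg.family_id,
    #                     {"children": children.tobytes()})
    # cg.bulk_write([row])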
def bulk_write(self, rows: Iterable[bigtable.row.DirectRow],
root_ids: Optional[Union[np.uint64,
Iterable[np.uint64]]] = None,
operation_id: Optional[np.uint64] = None,
slow_retry: bool = True,
block_size: int = 2000) -> bool:
""" Writes a list of mutated rows in bulk
WARNING: If <rows> contains the same row (same row_key) and column
key two times only the last one is effectively written to the BigTable
(even when the mutations were applied to different columns)
--> no versioning!
:param rows: list
list of mutated rows
        :param root_ids: list of uint64
:param operation_id: uint64 or None
operation_id (or other unique id) that *was* used to lock the root
the bulk write is only executed if the root is still locked with
the same id.
:param slow_retry: bool
:param block_size: int
"""
if slow_retry:
initial = 5
else:
initial = 1
retry_policy = Retry(
predicate=if_exception_type((Aborted,
DeadlineExceeded,
ServiceUnavailable)),
initial=initial,
maximum=15.0,
multiplier=2.0,
deadline=LOCK_EXPIRED_TIME_DELTA.seconds)
if root_ids is not None and operation_id is not None:
if isinstance(root_ids, int):
root_ids = [root_ids]
if not self.check_and_renew_root_locks(root_ids, operation_id):
return False
for i_row in range(0, len(rows), block_size):
status = self.table.mutate_rows(rows[i_row: i_row + block_size],
retry=retry_policy)
if not all(status):
raise Exception(status)
return True
def _range_read_execution(self, start_id, end_id,
row_filter: RowFilter = None,
n_retries: int = 100):
""" Executes predefined range read (read_rows)
:param start_id: np.uint64
:param end_id: np.uint64
:param row_filter: BigTable RowFilter
:param n_retries: int
:return: dict
"""
# Set up read
range_read = self.table.read_rows(
start_key=serialize_uint64(start_id),
end_key=serialize_uint64(end_id),
# allow_row_interleaving=True,
end_inclusive=True,
filter_=row_filter)
        # Execute read
        consume_success = False
        # Retry reading if the read fails
        i_tries = 0
while not consume_success and i_tries < n_retries:
try:
range_read.consume_all()
consume_success = True
except:
time.sleep(i_tries)
i_tries += 1
if not consume_success:
raise Exception("Unable to consume range read: "
"%d - %d -- n_retries = %d" %
(start_id, end_id, n_retries))
return range_read.rows
def range_read(self, start_id: np.uint64, end_id: np.uint64,
n_retries: int = 100, max_block_size: int = 50000,
row_keys: Optional[Iterable[str]] = None,
row_key_filters: Optional[Iterable[str]] = None,
time_stamp: datetime.datetime = datetime.datetime.max
) -> Union[
bigtable.row_data.PartialRowData,
Dict[bytes, bigtable.row_data.PartialRowData]]:
""" Reads all ids within a given range
:param start_id: np.uint64
:param end_id: np.uint64
:param n_retries: int
:param max_block_size: int
:param row_keys: list of str
more efficient read through row filters
:param row_key_filters: list of str
rows *with* this column will be ignored
:param time_stamp: datetime.datetime
:return: dict
"""
        # Comply with the resolution of BigTable's TimeRange (milliseconds)
time_stamp -= datetime.timedelta(
microseconds=time_stamp.microsecond % 1000)
# Create filters: time and id range
time_filter = TimestampRangeFilter(TimestampRange(end=time_stamp))
if row_keys is not None:
filters = []
for k in row_keys:
filters.append(ColumnQualifierRegexFilter(serialize_key(k)))
if len(filters) > 1:
row_filter = RowFilterUnion(filters)
else:
row_filter = filters[0]
else:
row_filter = None
if row_filter is None:
row_filter = time_filter
else:
row_filter = RowFilterChain([time_filter, row_filter])
if row_key_filters is not None:
for row_key in row_key_filters:
key_filter = ColumnRangeFilter(
column_family_id=self.family_id,
start_column=row_key,
end_column=row_key,
inclusive_start=True,
inclusive_end=True)
row_filter = ConditionalRowFilter(base_filter=key_filter,
false_filter=row_filter,
true_filter=BlockAllFilter(True))
max_block_size = np.uint64(max_block_size)
block_start_ids = range(start_id, end_id, max_block_size)
row_dict = {}
for block_start_id in block_start_ids:
block_end_id = np.uint64(block_start_id + max_block_size)
if block_end_id > end_id:
block_end_id = end_id
block_row_dict = self._range_read_execution(start_id=block_start_id,
end_id=block_end_id,
row_filter=row_filter,
n_retries=n_retries)
row_dict.update(block_row_dict)
return row_dict
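    # --- Usage sketch for range_read (illustration only; column names hypothetical) ---
    #   rows = cg.range_read(np.uint64(1000), np.uint64(2000),
    #                        row_keys=["some_column"],         # only fetch this column
    #                        row_key_filters=["removed_flag"]) # skip rows carrying this column
    #   for row_key, row_data in rows.items():
    #       cells = row_data.cells[cg.family_id]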
def range_read_chunk(self, layer: int, x: int, y: int, z: int,
n_retries: int = 100, max_block_size: int = 1000000,
row_keys: Optional[Iterable[str]] = None,
row_key_filters: Optional[Iterable[str]] = None,
time_stamp: datetime.datetime = datetime.datetime.max,
) -> Union[
bigtable.row_data.PartialRowData,
Dict[bytes, bigtable.row_data.PartialRowData]]:
""" Reads all ids within a chunk
:param layer: int
:param x: int
:param y: int
:param z: int
:param n_retries: int
:param max_block_size: int
:param row_keys: list of str
more efficient read through row filters
:param row_key_filters: list of str
rows *with* this column will be ignored
:param time_stamp: datetime.datetime
:return: dict
"""
chunk_id = self.get_chunk_id(layer=layer, x=x, y=y, z=z)
if layer == 1:
max_segment_id = self.get_segment_id_limit(chunk_id)
max_block_size = max_segment_id + 1
else:
max_segment_id = self.get_max_node_id(chunk_id=chunk_id)
# Define BigTable keys
start_id = self.get_node_id(
|
np.uint64(0)
|
numpy.uint64
|
import os
import sys
import glob
import pandas as pd
import numpy as np
import simplejson as json
import hi_processing.images as hip
import matplotlib as mpl
import matplotlib.pyplot as plt
import PIL.Image as Image
import scipy.stats as sps
from datetime import datetime, timedelta
import misc
from data_helcats import HELCATS
from data_stereo_hi import STEREOHI
class CompareSolarStormsWorkflow:
def __init__(self, data_loc, fig_loc):
# set dirs
for loc in [data_loc, fig_loc]:
if not os.path.exists(loc):
raise ValueError("path " + loc + " doesn't exist")
self.data_loc = data_loc
self.fig_loc = os.path.join(fig_loc, 'POPFSS')
self.root = os.path.join(data_loc, 'POPFSS')
for loc in [self.root, self.fig_loc]:
if not os.path.exists(loc):
os.mkdir(loc)
# Zooniverse details
self.workflow_name = 'Compare Solar Storms'
self.workflow_id = 6496
# project data comes saved with '-' instead of ' ' in the name
self.project_name = 'Protect our Planet from Solar Storms'
self.project_save_name = 'protect-our-planet-from-solar-storms'
self.project_id = 6480
# details of the different project phases, to split classifications
self.diff = dict({'phase' : 1,
'workflow_version' : 19.32,
'subject_id_min' : 21571858,
'subject_id_max' : 27198377,
'classification_id_min' : 107482001,
'classification_id_max' : 134398497})
self.diff_be = dict({'phase' : 2,
'workflow_version' : 19.32,
'subject_id_min' : 34364886,
'subject_id_max' : 34387958,
'classification_id_min' : 172529512,
'classification_id_max' : 240608296})
self.norm = dict({'phase' : 3,
'workflow_version' : 19.32,
'subject_id_min' : 44304478,
'subject_id_max' : 46571066,
'classification_id_min' : 251146634})
############### setting up the project
def make_assets(self, img_type, camera='hi1', background_type=1):
"""
Function to loop over the HELCATS CMEs, find all relevant HI1A and HI1B 1-day background images, and produce
plain, differenced and relative difference images.
"""
helcats = HELCATS(self.data_loc)
hi_data = STEREOHI(self.data_loc)
cme_list = helcats.find_cmes(te_track=True)
for n, cme in enumerate(cme_list):
print(cme)
craft, time = helcats.get_cme_details(cme)
start, mid, end, mid_el = helcats.get_te_track_times(cme,
camera=camera)
hi_data.make_img(cme, mid, craft, 'POPFSS', img_type,
camera=camera, background_type=background_type)
def find_comps(self, n, cycles='all', rounds=1, rn=0):
"""Finds pairwise comparisons to use in the manifest file.
:param: n: number of objects to compare
        :param: cycles: number of cycles with n comparisons, must be between 1 and
            ceil(n/2)-1
:param: rounds: number of files to split total comparisons over
:param: rn: current round number, adds an offset to the cycle numbers run
:returns: lists containing indexes of each asset to compare
"""
# calculate maximum values
        max_cycles = int(np.ceil(n/2))-1
        max_ccs = n*max_cycles
        max_comps = int((n/2)*(n-1))
# if no number of cycles chosen, set at maximum
if cycles == 'all':
cycles = max_cycles
        if cycles != int(cycles):
raise ValueError("number of cycles must be an integer")
if cycles < 1:
raise ValueError("must be at least one cycle")
if cycles > max_cycles:
raise ValueError("number of cycles cannot be greater than ceil(n/2)-1")
if (cycles * rounds) > max_cycles:
raise ValueError("cycles*rounds must be less than %s" %(max_cycles))
if rn > rounds:
raise ValueError("round number cannot exceed number of rounds")
# build nxn matrix
matrix = np.zeros((n,n), dtype=int)
        spacing = int(np.floor(max_cycles/cycles))
        cycle_nos = np.arange(1, int(np.ceil(n/2)), spacing)[0:cycles]
        # change dependent on round number
for c in range(len(cycle_nos)):
cycle_nos[c] = cycle_nos[c] + rn - 1
# each s is a loop with n comparisons
# starts at diagonal under 1, as the 0 diagonal is the origin
for s in cycle_nos:
print(s)
# change 0s to 1s for comparisons in this loop
for i in range(0, n):
j = np.mod(s+i, n)
# Check this hasn't been compared already...
if matrix[j, i] == 0:
# Do this comparison
matrix[i, j] = 1
print('cycles run: %s out of %s' %(cycles, max_cycles))
print('comparisons generated: %s out of %s' %(np.sum(matrix), max_ccs))
m = self.matrix_to_list(matrix)
return m
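    # --- Standalone sketch of the cycle-offset pairing used above (toy numbers) ---
    # Each cycle s pairs image i with image (i + s) mod n; a pair is kept only if the
    # mirrored comparison has not already been generated. For n=6 and cycles (1, 2)
    # this yields 12 of the 15 possible pairs (the s=3 "opposite" diagonal is skipped):
    #   n = 6
    #   matrix = np.zeros((n, n), dtype=int)
    #   for s in (1, 2):
    #       for i in range(n):
    #           j = np.mod(s + i, n)
    #           if matrix[j, i] == 0:
    #               matrix[i, j] = 1
    #   print(np.sum(matrix))   # -> 12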
def matrix_to_list(self, matrix):
"""
Takes a matrix and returns a list of the rows/columns of the non-zero
values.
"""
first = []
second = []
n = len(matrix)
# loop over rows
for i in range(0, n):
# loop over values in row
for j in range(0, n):
if matrix[i, j] == 1:
first.append(i)
second.append(j)
return first, second
def make_manifest(self, img_type, cycles=16, m_files=30):
"""
This function produces the manifest to serve the ssw assets. This has the format of a CSV file with:
asset_name,file1,file2,...fileN.
Asset names will be given the form of sswN_helcatsM_craft_type_t1_t3, where t1 and t3 correspond to the times of the
first and third image in sets of three.
This works by searching the 'out_data/comp_assets' folder for assets and
creating a manifest file for these assets.
:param: m_files: number of manifest files to split comparisons into
:return: Outputs a "manifest.csv" file in the event/craft/type directory of these images, or multiple files
"""
# Want list of all images from both craft
sta_data_dir = os.path.join(self.data_loc, 'STEREO_HI', 'Images',
'POPFSS', img_type, 'sta')
stb_data_dir = os.path.join(self.data_loc, 'STEREO_HI', 'Images',
'POPFSS', img_type, 'stb')
sta_files = glob.glob(os.path.join(sta_data_dir, '*'))
stb_files = glob.glob(os.path.join(stb_data_dir, '*'))
# get only filename not full path, exclude extension
sta_files = [os.path.basename(f) for f in sta_files]
stb_files = [os.path.basename(f) for f in stb_files]
images = np.append(sta_files, stb_files)
images.sort()
print("found %s images, generating comparisons..." %(len(images)))
# Create manifest files
for r in range(m_files):
# Make the manifest file
manifest_path = os.path.join(self.root, 'manifest'+str(r+1)+'.csv')
with open(manifest_path, 'w') as manifest:
# Add in manifest headers
manifest.write("subject_id,asset_0,asset_1\n")
# Get comparisons list for this manifest file
comps = self.find_comps(len(images), cycles=cycles, rounds=m_files, rn=r+1)
# returns lists of left and right images to compare
# Write comparisons list into correct columns
# loop over each comparison
i = 0
# give each comparison a subject id
sub_id = 0
while i < len(comps[0]):
manifest_elements = [str(sub_id), images[comps[0][i]], images[comps[1][i]]]
i = i + 1
sub_id += 1
# Write out as comma sep list
manifest.write(",".join(manifest_elements) + "\n")
def get_helcats_names(self, image_list):
"""returns HELCATS name string given image name
e.g.ssw_067_helcats_HCME_B__20131128_02_stb_diff_20131129_005001.jpg
returns HCME_B__20131128_02
"""
helcats_name_list = []
for img in image_list:
parts = img.split('_')
hn = parts[3] + '_' + parts[4] + '__' + parts[6] + '_' + parts[7]
helcats_name_list.append(hn)
return helcats_name_list
def analyse_manifest(self, manifest_name):
df = pd.read_csv(os.path.join(self.root, manifest_name))
hc = HELCATS(self.data_loc)
# add columns for helcats names, dates and craft of each image
for n, side in enumerate(['left', 'right']):
df[side + '_helcats_name'] = self.get_helcats_names(df['asset_' + str(n)])
craft_list, time_list = hc.get_cme_details_list(df[side + '_helcats_name'])
df[side + '_craft'] = pd.Series(craft_list, index=df.index)
df[side + '_time'] = pd.Series(time_list, index=df.index)
        # CME occurrence as left or right image
l_occurences = []
r_occurences = []
for cme in np.unique(df['left_helcats_name']):
l = df[df['left_helcats_name'] == cme]
r = df[df['right_helcats_name'] == cme]
l_occurences.append(len(l))
r_occurences.append(len(r))
plt.figure(figsize=[9, 9])
plt.scatter(l_occurences, r_occurences)
plt.xlabel("# times CME shown as left image", fontsize=16)
plt.ylabel("# times CME shown as right image", fontsize=16)
totals = [sum(x) for x in zip(l_occurences, r_occurences)]
print("Each CME is compared to between %s and %s different CMEs." %(
|
np.min(totals)
|
numpy.min
|
from __future__ import division, print_function
import os
import numpy as np
""" This module contains:
- Polar: class to represent a polar (computes steady/unsteady parameters, corrections etc.)
- blend: function to blend two polars
- thicknessinterp_from_one_set: interpolate polars at different thicknesses based on one set of polars
JPJ 7/20 : This class can probably be combined with Polar() from airfoilprep.py.
They do not have one-to-one matching for the methods.
Because both are not tested extensively, we first need to write tests for both
before attempting to combine them.
"""
class Polar(object):
"""
Defines section lift, drag, and pitching moment coefficients as a
function of angle of attack at a particular Reynolds number.
Different parameters may be computed and different corrections applied.
Available routines:
- cl_interp : cl at given alpha values
- cd_interp : cd at given alpha values
- cm_interp : cm at given alpha values
- cn_interp : cn at given alpha values
- f_st_interp : separation function (compared to fully separated polar)
- cl_fs_interp : cl fully separated at given alpha values
    - cl_inv_interp : inviscid (linear) cl at given alpha values
    - correction3D : apply 3D rotational correction
- extrapolate : extend polar data set using Viterna's method
- unsteadyParams : computes unsteady params e.g. needed by AeroDyn15
- plot : plots the polar
- alpha0 : computes and returns alpha0, also stored in _alpha0
- linear_region : determines the alpha and cl values in the linear region
- cl_max : cl_max
- cl_linear_slope : linear slope and the linear region
- cl_fully_separated : fully separated cl
- dynaStallOye_DiscreteStep : compute aerodynamical force from aerodynamic data
"""
def __init__(self, Re, alpha, cl, cd, cm, compute_params=False, radians=None):
"""Constructor
Parameters
----------
Re : float
Reynolds number
alpha : ndarray (deg)
angle of attack
cl : ndarray
lift coefficient
cd : ndarray
drag coefficient
cm : ndarray
moment coefficient
"""
self.Re = Re
self.alpha = np.array(alpha)
self.cl = np.array(cl)
self.cd = np.array(cd)
self.cm = np.array(cm)
self.f_st = None # separation function
self.cl_fs = None # cl_fully separated
self._linear_slope = None
self._alpha0 = None
if radians is None:
            # If the mean absolute alpha exceeds pi/2, the values are most likely in degrees
self._radians = np.mean(np.abs(self.alpha)) <= np.pi / 2
else:
self._radians = radians
# NOTE: method needs to be in harmony for linear_slope and the one used in cl_fully_separated
if compute_params:
self._linear_slope, self._alpha0 = self.cl_linear_slope(method="max")
self.cl_fully_separated()
self.cl_inv = self._linear_slope * (self.alpha - self._alpha0)
def cl_interp(self, alpha):
return np.interp(alpha, self.alpha, self.cl)
def cd_interp(self, alpha):
return np.interp(alpha, self.alpha, self.cd)
def cm_interp(self, alpha):
return np.interp(alpha, self.alpha, self.cm)
def cn_interp(self, alpha):
return np.interp(alpha, self.alpha, self.cn)
def f_st_interp(self, alpha):
if self.f_st is None:
self.cl_fully_separated()
return np.interp(alpha, self.alpha, self.f_st)
def cl_fs_interp(self, alpha):
if self.cl_fs is None:
self.cl_fully_separated()
return np.interp(alpha, self.alpha, self.cl_fs)
def cl_inv_interp(self, alpha):
if (self._linear_slope is None) and (self._alpha0 is None):
self._linear_slope, self._alpha0 = self.cl_linear_slope()
return self._linear_slope * (alpha - self._alpha0)
@property
def cn(self):
"""returns : Cl cos(alpha) + Cd sin(alpha)
NOT: Cl cos(alpha) + (Cd-Cd0) sin(alpha)
"""
if self._radians:
return self.cl * np.cos(self.alpha) + self.cd * np.sin(self.alpha)
else:
return self.cl * np.cos(self.alpha * np.pi / 180) + self.cd * np.sin(self.alpha * np.pi / 180)
def correction3D(self, r_over_R, chord_over_r, tsr, alpha_max_corr=30, alpha_linear_min=-5, alpha_linear_max=5):
"""Applies 3-D corrections for rotating sections from the 2-D data.
Parameters
----------
r_over_R : float
local radial position / rotor radius
chord_over_r : float
local chord length / local radial location
tsr : float
tip-speed ratio
alpha_max_corr : float, optional (deg)
maximum angle of attack to apply full correction
alpha_linear_min : float, optional (deg)
angle of attack where linear portion of lift curve slope begins
alpha_linear_max : float, optional (deg)
angle of attack where linear portion of lift curve slope ends
Returns
-------
polar : Polar
A new Polar object corrected for 3-D effects
Notes
-----
The Du-Selig method :cite:`Du1998A-3-D-stall-del` is used to correct lift, and
the Eggers method :cite:`Eggers-Jr2003An-assessment-o` is used to correct drag.
"""
# rename and convert units for convenience
alpha = np.radians(self.alpha)
cl_2d = self.cl
cd_2d = self.cd
alpha_max_corr = np.radians(alpha_max_corr)
alpha_linear_min = np.radians(alpha_linear_min)
alpha_linear_max = np.radians(alpha_linear_max)
# parameters in Du-Selig model
a = 1
b = 1
d = 1
lam = tsr / (1 + tsr ** 2) ** 0.5 # modified tip speed ratio
expon = d / lam / r_over_R
# find linear region
idx = np.logical_and(alpha >= alpha_linear_min, alpha <= alpha_linear_max)
p = np.polyfit(alpha[idx], cl_2d[idx], 1)
m = p[0]
alpha0 = -p[1] / m
# correction factor
fcl = 1.0 / m * (1.6 * chord_over_r / 0.1267 * (a - chord_over_r ** expon) / (b + chord_over_r ** expon) - 1)
# not sure where this adjustment comes from (besides AirfoilPrep spreadsheet of course)
adj = ((np.pi / 2 - alpha) / (np.pi / 2 - alpha_max_corr)) ** 2
adj[alpha <= alpha_max_corr] = 1.0
# Du-Selig correction for lift
cl_linear = m * (alpha - alpha0)
cl_3d = cl_2d + fcl * (cl_linear - cl_2d) * adj
# JPJ 7/20 :
# This drag correction is what differs between airfoilprep's Polar and
# this class. If we use the Du-Selig correction for drag here,
# the `test_stall` results match exactly.
# I'm leaving it as-is so dac.py and other untested scripts are not affected.
# Eggers 2003 correction for drag
delta_cl = cl_3d - cl_2d
delta_cd = delta_cl * (np.sin(alpha) - 0.12 * np.cos(alpha)) / (np.cos(alpha) + 0.12 * np.sin(alpha))
cd_3d = cd_2d + delta_cd
return type(self)(self.Re, np.degrees(alpha), cl_3d, cd_3d, self.cm)
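    # --- Usage sketch for correction3D (illustration only; station values are made up) ---
    # Build a synthetic 2-D polar with a clipped lift curve and apply the correction;
    # above the clip the Du-Selig term pushes cl_3d back towards the linear curve:
    #   alpha = np.linspace(-10.0, 20.0, 61)                          # deg
    #   cl = np.clip(2 * np.pi * np.radians(alpha + 2.0), -1.2, 1.3)
    #   cd = 0.01 + 0.02 * np.radians(alpha) ** 2
    #   pol2d = Polar(1e6, alpha, cl, cd, np.zeros_like(alpha))
    #   pol3d = pol2d.correction3D(r_over_R=0.3, chord_over_r=0.12, tsr=7.0)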
def extrapolate(self, cdmax, AR=None, cdmin=0.001, nalpha=15):
"""Extrapolates force coefficients up to +/- 180 degrees using Viterna's method
:cite:`Viterna1982Theoretical-and`.
Parameters
----------
cdmax : float
maximum drag coefficient
AR : float, optional
aspect ratio = (rotor radius / chord_75% radius)
if provided, cdmax is computed from AR
cdmin: float, optional
minimum drag coefficient. used to prevent negative values that can sometimes occur
with this extrapolation method
nalpha: int, optional
number of points to add in each segment of Viterna method
Returns
-------
polar : Polar
a new Polar object
Notes
-----
If the current polar already supplies data beyond 90 degrees then
this method cannot be used in its current form and will just return itself.
If AR is provided, then the maximum drag coefficient is estimated as
>>> cdmax = 1.11 + 0.018*AR
"""
if cdmin < 0:
raise Exception("cdmin cannot be < 0")
        # lift coefficient adjustment to account for asymmetry
cl_adj = 0.7
# estimate CD max
if AR is not None:
cdmax = 1.11 + 0.018 * AR
self.cdmax = max(max(self.cd), cdmax)
# extract matching info from ends
alpha_high = np.radians(self.alpha[-1])
cl_high = self.cl[-1]
cd_high = self.cd[-1]
cm_high = self.cm[-1]
alpha_low = np.radians(self.alpha[0])
cl_low = self.cl[0]
cd_low = self.cd[0]
        if alpha_high > np.pi / 2:
            raise Exception("alpha[-1] > pi/2")
        if alpha_low < -np.pi / 2:
            raise Exception("alpha[0] < -pi/2")
# parameters used in model
sa = np.sin(alpha_high)
ca = np.cos(alpha_high)
self.A = (cl_high - self.cdmax * sa * ca) * sa / ca ** 2
self.B = (cd_high - self.cdmax * sa * sa) / ca
# alpha_high <-> 90
alpha1 = np.linspace(alpha_high, np.pi / 2, nalpha)
alpha1 = alpha1[1:] # remove first element so as not to duplicate when concatenating
cl1, cd1 = self.__Viterna(alpha1, 1.0)
# 90 <-> 180-alpha_high
alpha2 = np.linspace(np.pi / 2, np.pi - alpha_high, nalpha)
alpha2 = alpha2[1:]
cl2, cd2 = self.__Viterna(np.pi - alpha2, -cl_adj)
# 180-alpha_high <-> 180
alpha3 = np.linspace(np.pi - alpha_high, np.pi, nalpha)
alpha3 = alpha3[1:]
cl3, cd3 = self.__Viterna(np.pi - alpha3, 1.0)
cl3 = (alpha3 - np.pi) / alpha_high * cl_high * cl_adj # override with linear variation
if alpha_low <= -alpha_high:
alpha4 = []
cl4 = []
cd4 = []
alpha5max = alpha_low
else:
# -alpha_high <-> alpha_low
# Note: this is done slightly differently than AirfoilPrep for better continuity
alpha4 = np.linspace(-alpha_high, alpha_low, nalpha)
alpha4 = alpha4[1:-2] # also remove last element for concatenation for this case
cl4 = -cl_high * cl_adj + (alpha4 + alpha_high) / (alpha_low + alpha_high) * (cl_low + cl_high * cl_adj)
cd4 = cd_low + (alpha4 - alpha_low) / (-alpha_high - alpha_low) * (cd_high - cd_low)
alpha5max = -alpha_high
# -90 <-> -alpha_high
alpha5 = np.linspace(-np.pi / 2, alpha5max, nalpha)
alpha5 = alpha5[1:]
cl5, cd5 = self.__Viterna(-alpha5, -cl_adj)
# -180+alpha_high <-> -90
alpha6 = np.linspace(-np.pi + alpha_high, -np.pi / 2, nalpha)
alpha6 = alpha6[1:]
cl6, cd6 = self.__Viterna(alpha6 + np.pi, cl_adj)
# -180 <-> -180 + alpha_high
alpha7 = np.linspace(-np.pi, -np.pi + alpha_high, nalpha)
cl7, cd7 = self.__Viterna(alpha7 + np.pi, 1.0)
cl7 = (alpha7 + np.pi) / alpha_high * cl_high * cl_adj # linear variation
alpha = np.concatenate((alpha7, alpha6, alpha5, alpha4, np.radians(self.alpha), alpha1, alpha2, alpha3))
cl = np.concatenate((cl7, cl6, cl5, cl4, self.cl, cl1, cl2, cl3))
cd = np.concatenate((cd7, cd6, cd5, cd4, self.cd, cd1, cd2, cd3))
cd = np.maximum(cd, cdmin) # don't allow negative drag coefficients
# Setup alpha and cm to be used in extrapolation
cm1_alpha = np.floor(self.alpha[0] / 10.0) * 10.0
cm2_alpha = np.ceil(self.alpha[-1] / 10.0) * 10.0
alpha_num = abs(int((-180.0 - cm1_alpha) / 10.0 - 1))
alpha_cm1 = np.linspace(-180.0, cm1_alpha, alpha_num)
alpha_cm2 = np.linspace(cm2_alpha, 180.0, int((180.0 - cm2_alpha) / 10.0 + 1))
alpha_cm = np.concatenate(
(alpha_cm1, self.alpha, alpha_cm2)
) # Specific alpha values are needed for cm function to work
cm1 = np.zeros(len(alpha_cm1))
cm2 = np.zeros(len(alpha_cm2))
cm_ext = np.concatenate((cm1, self.cm, cm2))
if np.count_nonzero(self.cm) > 0:
cmCoef = self.__CMCoeff(cl_high, cd_high, cm_high) # get cm coefficient
cl_cm = np.interp(alpha_cm, np.degrees(alpha), cl) # get cl for applicable alphas
cd_cm = np.interp(alpha_cm, np.degrees(alpha), cd) # get cd for applicable alphas
alpha_low_deg = self.alpha[0]
alpha_high_deg = self.alpha[-1]
for i in range(len(alpha_cm)):
cm_new = self.__getCM(i, cmCoef, alpha_cm, cl_cm, cd_cm, alpha_low_deg, alpha_high_deg)
if cm_new is None:
pass # For when it reaches the range of cm's that the user provides
else:
cm_ext[i] = cm_new
cm = np.interp(np.degrees(alpha), alpha_cm, cm_ext)
return type(self)(self.Re, np.degrees(alpha), cl, cd, cm)
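    # --- Usage sketch for extrapolate (illustration only) ---
    # A polar limited to +/-20 deg is extended to +/-180 deg; with AR given, cdmax is
    # recomputed internally as 1.11 + 0.018*AR:
    #   alpha = np.linspace(-20.0, 20.0, 41)
    #   pol = Polar(5e5, alpha, 0.11 * (alpha + 2.0),
    #               0.008 + 0.0005 * alpha ** 2, np.zeros_like(alpha))
    #   pol_ext = pol.extrapolate(cdmax=1.3, AR=17.0)
    #   print(pol_ext.alpha.min(), pol_ext.alpha.max())   # ~ -180 ... 180 deg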
def __Viterna(self, alpha, cl_adj):
"""private method to perform Viterna extrapolation"""
alpha = np.maximum(alpha, 0.0001) # prevent divide by zero
cl = self.cdmax / 2 * np.sin(2 * alpha) + self.A * np.cos(alpha) ** 2 / np.sin(alpha)
cl = cl * cl_adj
cd = self.cdmax * np.sin(alpha) ** 2 + self.B * np.cos(alpha)
return cl, cd
def __CMCoeff(self, cl_high, cd_high, cm_high):
"""private method to obtain CM0 and CMCoeff"""
found_zero_lift = False
for i in range(len(self.cm) - 1):
if abs(self.alpha[i]) < 20.0 and self.cl[i] <= 0 and self.cl[i + 1] >= 0:
p = -self.cl[i] / (self.cl[i + 1] - self.cl[i])
cm0 = self.cm[i] + p * (self.cm[i + 1] - self.cm[i])
found_zero_lift = True
break
if not found_zero_lift:
p = -self.cl[0] / (self.cl[1] - self.cl[0])
cm0 = self.cm[0] + p * (self.cm[1] - self.cm[0])
self.cm0 = cm0
alpha_high = np.radians(self.alpha[-1])
XM = (-cm_high + cm0) / (cl_high * np.cos(alpha_high) + cd_high * np.sin(alpha_high))
cmCoef = (XM - 0.25) / np.tan((alpha_high - np.pi / 2))
return cmCoef
def __getCM(self, i, cmCoef, alpha, cl_ext, cd_ext, alpha_low_deg, alpha_high_deg):
"""private method to extrapolate Cm"""
cm_new = 0
if alpha[i] >= alpha_low_deg and alpha[i] <= alpha_high_deg:
return
if alpha[i] > -165 and alpha[i] < 165:
if abs(alpha[i]) < 0.01:
cm_new = self.cm0
else:
if alpha[i] > 0:
x = cmCoef * np.tan(np.radians(alpha[i]) - np.pi / 2) + 0.25
cm_new = self.cm0 - x * (
cl_ext[i] * np.cos(np.radians(alpha[i])) + cd_ext[i] * np.sin(np.radians(alpha[i]))
)
else:
x = cmCoef * np.tan(-np.radians(alpha[i]) - np.pi / 2) + 0.25
cm_new = -(
self.cm0
- x * (-cl_ext[i] * np.cos(-np.radians(alpha[i])) + cd_ext[i] * np.sin(-np.radians(alpha[i])))
)
else:
if alpha[i] == 165:
cm_new = -0.4
elif alpha[i] == 170:
cm_new = -0.5
elif alpha[i] == 175:
cm_new = -0.25
elif alpha[i] == 180:
cm_new = 0
elif alpha[i] == -165:
cm_new = 0.35
elif alpha[i] == -170:
cm_new = 0.4
elif alpha[i] == -175:
cm_new = 0.2
elif alpha[i] == -180:
cm_new = 0
else:
print("Angle encountered for which there is no CM table value " "(near +/-180 deg). Program will stop.")
return cm_new
def unsteadyParams(self, window_offset=None):
"""compute unsteady aero parameters used in AeroDyn input file
TODO Questions to solve:
- Is alpha 0 defined at zero lift or zero Cn?
- Are Cn1 and Cn2 the stall points of Cn or the regions where Cn deviates from the linear region?
- Is Cd0 Cdmin?
- Should Cd0 be used in cn?
- Should the TSE points be used?
- If so, should we use the linear points or the points on the cn-curve
- Should we prescribe alpha0cn when determining the slope?
NOTE:
        alpha0Cl and alpha0Cn are usually within 0.005 deg of each other, less than 0.3% difference, with alpha0Cn > alpha0Cl. The difference increases, though, towards the root of the blade.
        Using the f=0.7 points does not change much for the lower point
        but it has quite an impact on the upper point
%
Parameters
----------
        window_offset: the linear region will be looked for in the region alpha0 + window_offset
Returns
-------
alpha0 : lift or 0 cn (TODO TODO) angle of attack (deg)
alpha1 : angle of attack at f=0.7 (approximately the stall angle) for AOA>alpha0 (deg)
alpha2 : angle of attack at f=0.7 (approximately the stall angle) for AOA<alpha0 (deg)
cnSlope : slope of 2D normal force coefficient curve (1/rad)
Cn1 : Critical value of C0n at leading edge separation. It should be extracted from airfoil data at a given Mach and Reynolds number. It can be calculated from the static value of Cn at either the break in the pitching moment or the loss of chord force at the onset of stall. It is close to the condition of maximum lift of the airfoil at low Mach numbers.
Cn2 : As Cn1 for negative AOAs.
Cd0 : Drag coefficient at zero lift TODO
Cm0 : Moment coefficient at zero lift TODO
"""
        if window_offset is None:
            dwin = np.array([-5, 10])
            if self._radians:
                dwin = np.radians(dwin)
        else:
            # use the user-provided window offset as-is
            dwin = np.asarray(window_offset)
cl = self.cl
cd = self.cd
alpha = self.alpha
if self._radians:
cn = cl * np.cos(alpha) + cd * np.sin(alpha)
else:
cn = cl * np.cos(alpha * np.pi / 180) + cd * np.sin(alpha * np.pi / 180)
# --- Zero lift
alpha0 = self.alpha0()
cd0 = self.cd_interp(alpha0)
cm0 = self.cm_interp(alpha0)
# --- Zero cn
if self._radians:
window = [np.radians(-20), np.radians(20)]
else:
window = [-20, 20]
alpha0cn = _find_alpha0(alpha, cn, window)
        # checks for inappropriate data (like cylinders)
if len(np.unique(cl)) == 1:
return (alpha0, 0.0, 0.0, 0.0, 0.0, 0.0, cd0, cm0)
# --- cn "inflection" or "Max" points
        # These points are detected from slope changes of cn, positive or negative inflections
# The upper stall point is the first point after alpha0 with a "hat" inflection
# The lower stall point is the first point below alpha0 with a "v" inflection
a_MaxUpp, cn_MaxUpp, a_MaxLow, cn_MaxLow = _find_max_points(alpha, cn, alpha0, method="inflections")
# --- cn slope
        # Different methods may be used. The max method ensures that the curve is always below its tangent
# Leastsquare fit in the region alpha0cn+window_offset
        # (a variant constrained through alpha0cn, x0=alpha0cn, could be used instead)
        cnSlope_poly, a0cn_poly = _find_slope(alpha, cn, window=alpha0cn + dwin, method="leastsquare")
# Max (KEEP ME)
# cnSlope_max,a0cn_max = _find_slope(alpha, cn, window=[alpha0cn,a_StallUpp], method='max', xi=alpha0cn)
# Optim
# cnSlope_optim,a0cn_optim = _find_slope(alpha, cn, window=[alpha0-5,alpha0+20], method='optim', x0=alpha0cn)
## FiniteDiff
# cnSlope_FD,a0cn_FD = _find_slope(alpha, cn, method='finitediff_1c', xi=alpha0cn)
# slopesRel=np.array([cnSlope_poly,cnSlope_max,cnSlope_optim,cnSlope_FD])*180/np.pi/(2*np.pi)
cnSlope = cnSlope_poly
# --- cn at "stall onset" (Trailling Edge Separation) locations, when cn deviates from the linear region
a_TSELow, a_TSEUpp = _find_TSE_region(alpha, cn, cnSlope, alpha0cn, deviation=0.05)
cn_TSEUpp_lin = cnSlope * (a_TSEUpp - alpha0cn)
cn_TSELow_lin = cnSlope * (a_TSELow - alpha0cn)
cn_TSEUpp = np.interp(a_TSEUpp, alpha, cn)
cn_TSELow = np.interp(a_TSELow, alpha, cn)
# --- cn at points where f=0.7
cn_f = cnSlope * (alpha - alpha0cn) * ((1 + np.sqrt(0.7)) / 2) ** 2
xInter, _ = _intersections(alpha, cn_f, alpha, cn)
if len(xInter) == 3:
a_f07_Upp = xInter[2]
a_f07_Low = xInter[0]
else:
raise Exception("cn_f does not ntersect cn 3 times.")
# alpha1 = abs(xInter[0])
# alpha2 = -abs(xInter[0])
# --- DEBUG plot
# import matplotlib.pyplot as plt
# plt.plot(alpha, cn,label='cn')
# plt.xlim([-50,50])
# plt.ylim([-3,3])
# plt.plot([alpha0-5,alpha0-5] ,[-3,3],'k--')
# plt.plot([alpha0+10,alpha0+10],[-3,3],'k--')
# plt.plot([alpha0,alpha0],[-3,3],'r-')
# plt.plot([alpha0cn,alpha0cn],[-3,3],'b-')
#
# plt.plot(alpha, cn_f,label='cn_f')
# plt.plot(a_f07_Upp,self.cn_interp(a_f07_Upp),'d',label='Cn f07 Up')
# plt.plot(a_f07_Low,self.cn_interp(a_f07_Low),'d',label='Cn f07 Low')
# plt.plot(a_TSEUpp,cn_TSEUpp,'o',label='Cn TSEUp')
# plt.plot(a_TSELow,cn_TSELow,'o',label='Cn TSELow')
# plt.plot(a_TSEUpp,cn_TSEUpp_lin,'+',label='Cn TSEUp lin')
# plt.plot(a_TSELow,cn_TSELow_lin,'+',label='Cn TSELow lin')
# plt.plot(alpha,cnSlope *(alpha-alpha0cn),'--', label ='Linear')
# # plt.plot(a_MaxUpp,cnMaxUpp,'o',label='Cn MaxUp')
# # plt.plot(a_MaxLow,cnMaxLow,'o',label='Cn MaxLow')
# # plt.plot(alpha,cnSlope_poly *(alpha-a0cn_poly),'--', label ='Polyfit '+sSlopes[0])
# # plt.plot(alpha,cnSlope_max *(alpha-a0cn_max),'--', label ='Max '+sSlopes[1])
# # plt.plot(alpha,cnSlope_optim*(alpha-a0cn_optim),'--', label ='Optim '+sSlopes[2])
# # plt.plot(alpha,cnSlope_FD *(alpha-a0cn_FD),'--', label ='FiniteDiff'+sSlopes[3])
# # # plt.plot(alpha , np.pi/180*cnSlope*(alpha-alpha0),label='cn lin')
# # # plt.plot(alpha1, np.pi/180*cnSlope*(alpha1-alpha0),'o',label='cn Stall')
# # # plt.plot(alpha2, np.pi/180*cnSlope*(alpha2-alpha0),'o',label='cn Stall')
# plt.legend()
# mng=plt.get_current_fig_manager()
# mng.full_screen_toggle()
# plt.show()
# raise Exception()
# --- Deciding what we return
# Critical value of C0n at leading edge separation
# cn1 = cn_TSEUpp_lin
# cn2 = cn_TSELow_lin
cn1 = cn_MaxUpp
cn2 = cn_MaxLow
# Alpha at f=0.7
# alpha1= a_TSEUpp
# alpha2= a_TSELow
alpha1 = a_f07_Upp
alpha2 = a_f07_Low
#
if self._radians:
alpha0 = np.degrees(alpha0)
alpha1 = np.degrees(alpha1)
alpha2 = np.degrees(alpha2)
cnSlope = cnSlope
else:
cnSlope = cnSlope * 180 / np.pi
return (alpha0, alpha1, alpha2, cnSlope, cn1, cn2, cd0, cm0)
def plot(self):
"""plot cl/cd/cm polar
Returns
-------
figs : list of figure handles
"""
import matplotlib.pyplot as plt
p = self
figs = []
# plot cl
fig = plt.figure()
figs.append(fig)
ax = fig.add_subplot(111)
plt.plot(p.alpha, p.cl, label="Re = " + str(p.Re / 1e6) + " million")
ax.set_xlabel("angle of attack (deg)")
ax.set_ylabel("lift coefficient")
ax.legend(loc="best")
# plot cd
fig = plt.figure()
figs.append(fig)
ax = fig.add_subplot(111)
ax.plot(p.alpha, p.cd, label="Re = " + str(p.Re / 1e6) + " million")
ax.set_xlabel("angle of attack (deg)")
ax.set_ylabel("drag coefficient")
ax.legend(loc="best")
# plot cm
fig = plt.figure()
figs.append(fig)
ax = fig.add_subplot(111)
ax.plot(p.alpha, p.cm, label="Re = " + str(p.Re / 1e6) + " million")
ax.set_xlabel("angle of attack (deg)")
ax.set_ylabel("moment coefficient")
ax.legend(loc="best")
return figs
def alpha0(self, window=None):
""" Finds alpha0, angle of zero lift """
if window is None:
if self._radians:
window = [np.radians(-20), np.radians(20)]
else:
window = [-20, 20]
window = _alpha_window_in_bounds(self.alpha, window)
# print(window)
# print(self.alpha)
# print(self._radians)
# print(self.cl)
# print(window)
return _find_alpha0(self.alpha, self.cl, window)
def linear_region(self):
slope, alpha0 = self.cl_linear_slope()
alpha_linear_region = np.asarray(_find_TSE_region(self.alpha, self.cl, slope, alpha0, deviation=0.05))
cl_linear_region = (alpha_linear_region - alpha0) * slope
return alpha_linear_region, cl_linear_region, slope, alpha0
def cl_max(self, window=None):
""" Finds cl_max , returns (Cl_max,alpha_max) """
if window is None:
if self._radians:
window = [np.radians(-40), np.radians(40)]
else:
window = [-40, 40]
# Constant case or only one value
if np.all(self.cl == self.cl[0]) or len(self.cl) == 1:
return self.cl, self.alpha
# Ensuring window is within our alpha values
window = _alpha_window_in_bounds(self.alpha, window)
# Finding max within window
iwindow = np.where((self.alpha >= window[0]) & (self.alpha <= window[1]))
alpha = self.alpha[iwindow]
cl = self.cl[iwindow]
        i_max = np.argmax(cl)
        if i_max == len(cl) - 1:
            raise Exception(
                "Max cl is at the window boundary ([{};{}]), increase window (TODO automatically)".format(
                    window[0], window[1]
                )
            )
cl_max = cl[i_max]
alpha_cl_max = alpha[i_max]
# alpha_zc,i_zc = _zero_crossings(x=alpha,y=cl,direction='up')
# if len(alpha_zc)>1:
# raise Exception('Cannot find alpha0, {} zero crossings of Cl in the range of alpha values: [{} {}] '.format(len(alpha_zc),window[0],window[1]))
# elif len(alpha_zc)==0:
# raise Exception('Cannot find alpha0, no zero crossing of Cl in the range of alpha values: [{} {}] '.format(window[0],window[1]))
#
# alpha0=alpha_zc[0]
return cl_max, alpha_cl_max
def cl_linear_slope(self, window=None, method="optim", radians=False):
"""Find slope of linear region
        Outputs: a 2-tuple of:
slope (in inverse units of alpha, or in radians-1 if radians=True)
alpha_0 in the same unit as alpha, or in radians if radians=True
"""
# --- Return function
def myret(sl, a0):
# wrapper function to return degrees or radians
if radians:
return np.rad2deg(sl), np.deg2rad(a0)
else:
return sl, a0
# finding our alpha0
alpha0 = self.alpha0()
# Constant case or only one value
if np.all(self.cl == self.cl[0]) or len(self.cl) == 1:
return myret(0, alpha0)
if window is None:
if np.nanmin(self.cl) > 0 or np.nanmax(self.cl) < 0:
window = [self.alpha[0], self.alpha[-1]]
else:
# define a window around alpha0
if self._radians:
window = alpha0 + np.radians(np.array([-5, +20]))
else:
window = alpha0 + np.array([-5, +20])
# Ensuring window is within our alpha values
window = _alpha_window_in_bounds(self.alpha, window)
if method == "max":
slope, off = _find_slope(self.alpha, self.cl, xi=alpha0, window=window, method="max")
elif method == "leastsquare":
slope, off = _find_slope(self.alpha, self.cl, xi=alpha0, window=window, method="leastsquare")
elif method == "leastsquare_constraint":
slope, off = _find_slope(self.alpha, self.cl, x0=alpha0, window=window, method="leastsquare")
elif method == "optim":
# Selecting range of values within window
idx = np.where((self.alpha >= window[0]) & (self.alpha <= window[1]) & ~np.isnan(self.cl))[0]
cl, alpha = self.cl[idx], self.alpha[idx]
# Selecting within the min and max of this window to improve accuracy
imin = np.where(cl == np.min(cl))[0][-1]
idx = np.arange(imin, np.argmax(cl) + 1)
window = [alpha[imin], alpha[np.argmax(cl)]]
cl, alpha = cl[idx], alpha[idx]
# Performing minimization of slope
slope, off = _find_slope(alpha, cl, x0=alpha0, window=None, method="optim")
else:
raise Exception("Method unknown for lift slope determination: {}".format(method))
# --- Safety checks
if len(self.cl) > 10:
# Looking at slope around alpha 0 to see if we are too far off
slope_FD, off_FD = _find_slope(self.alpha, self.cl, xi=alpha0, window=window, method="finitediff_1c")
if abs(slope - slope_FD) / slope_FD * 100 > 20:
raise Exception(
"Warning: More than 20% error between estimated slope ({:.4f}) and the slope around alpha0 ({:.4f}). The window for the slope search ([{} {}]) is likely wrong.".format(
slope, slope_FD, window[0], window[-1]
)
)
# print('slope ',slope,' Alpha range: {:.3f} {:.3f} - nLin {} nMin {} nMax {}'.format(alpha[iStart],alpha[iEnd],len(alpha[iStart:iEnd+1]),nMin,len(alpha)))
return myret(slope, off)
def cl_fully_separated(self):
alpha0 = self.alpha0()
(
cla,
_,
) = self.cl_linear_slope(method="max")
if cla == 0:
cl_fs = self.cl # when f_st ==1
f_st = self.cl * 0
else:
cl_ratio = self.cl / (cla * (self.alpha - alpha0))
cl_ratio[np.where(cl_ratio < 0)] = 0
f_st = (2 * np.sqrt(cl_ratio) - 1) ** 2
f_st[np.where(f_st < 1e-15)] = 0
# Initialize to linear region (in fact only at singularity, where f_st=1)
cl_fs = self.cl / 2.0 # when f_st ==1
# Region where f_st<1, merge
I = np.where(f_st < 1)
cl_fs[I] = (self.cl[I] - cla * (self.alpha[I] - alpha0) * f_st[I]) / (1.0 - f_st[I])
# Outside region, use steady data
iHig = np.ma.argmin(np.ma.MaskedArray(f_st, self.alpha < alpha0))
iLow = np.ma.argmin(np.ma.MaskedArray(f_st, self.alpha > alpha0))
cl_fs[0 : iLow + 1] = self.cl[0 : iLow + 1]
cl_fs[iHig + 1 : -1] = self.cl[iHig + 1 : -1]
# Ensuring everything is in harmony
cl_inv = cla * (self.alpha - alpha0)
f_st = (self.cl - cl_fs) / (cl_inv - cl_fs + 1e-10)
f_st[np.where(f_st < 1e-15)] = 0
# Storing
self.f_st = f_st
self.cl_fs = cl_fs
return cl_fs, f_st
def dynaStallOye_DiscreteStep(self, alpha_t, tau, fs_prev, dt):
# compute aerodynamical force from aerodynamic data
# interpolation from data
f_st = self.f_st_interp(alpha_t)
Clinv = self.cl_inv_interp(alpha_t)
Clfs = self.cl_fs_interp(alpha_t)
# dynamic stall model
fs = f_st + (fs_prev - f_st) * np.exp(-dt / tau)
Cl = fs * Clinv + (1 - fs) * Clfs
return Cl, fs
def blend(pol1, pol2, weight):
"""Blend this polar with another one with the specified weighting
Parameters
----------
pol1: (class Polar or array) first polar
pol2: (class Polar or array) second polar
weight: (float) blending parameter between 0 (first polar) and 1 (second polar)
Returns
-------
polar : (class Polar or array) a blended Polar
"""
bReturnObject = False
if hasattr(pol1, "cl"):
bReturnObject = True
alpha1 = pol1.alpha
M1 = np.zeros((len(alpha1), 4))
M1[:, 0] = pol1.alpha
M1[:, 1] = pol1.cl
M1[:, 2] = pol1.cd
M1[:, 3] = pol1.cm
else:
alpha1 = pol1[:, 0]
M1 = pol1
if hasattr(pol2, "cl"):
bReturnObject = True
alpha2 = pol2.alpha
M2 = np.zeros((len(alpha2), 4))
M2[:, 0] = pol2.alpha
M2[:, 1] = pol2.cl
M2[:, 2] = pol2.cd
M2[:, 3] = pol2.cm
else:
alpha2 = pol2[:, 0]
M2 = pol2
# Define range of alpha, merged values and truncate if one set beyond the other range
alpha = np.union1d(alpha1, alpha2)
min_alpha = max(alpha1.min(), alpha2.min())
max_alpha = min(alpha1.max(), alpha2.max())
alpha = alpha[np.logical_and(alpha >= min_alpha, alpha <= max_alpha)]
# alpha = np.array([a for a in alpha if a >= min_alpha and a <= max_alpha])
# Creating new output matrix to store polar
M = np.zeros((len(alpha), M1.shape[1]))
M[:, 0] = alpha
# interpolate to new alpha and linearly blend
for j in np.arange(1, M.shape[1]):
v1 = np.interp(alpha, alpha1, M1[:, j])
v2 = np.interp(alpha, alpha2, M2[:, j])
M[:, j] = (1 - weight) * v1 + weight * v2
if hasattr(pol1, "Re"):
Re = pol1.Re + weight * (pol2.Re - pol1.Re)
else:
Re = np.nan
if bReturnObject:
return type(pol1)(Re, M[:, 0], M[:, 1], M[:, 2], M[:, 3])
else:
return M
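# --- Usage sketch for blend (illustration only; array-form polars) ---
# Columns are alpha, cl, cd, cm; weight=0.25 keeps the result closer to the first polar:
#   alpha = np.linspace(-10.0, 10.0, 21)
#   p_thin = np.column_stack([alpha, 0.11 * alpha, np.full_like(alpha, 0.006), np.zeros_like(alpha)])
#   p_thick = np.column_stack([alpha, 0.09 * alpha, np.full_like(alpha, 0.012), np.zeros_like(alpha)])
#   p_mix = blend(p_thin, p_thick, weight=0.25)
#   print(p_mix[10])   # alpha=0 row: cd = 0.75*0.006 + 0.25*0.012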
def thicknessinterp_from_one_set(thickness, polarList, polarThickness):
"""Returns a set of interpolated polars from one set of polars at known thicknesses and a list of thickness
The nearest polar is used when the thickness is beyond the range of values of the input polars.
"""
thickness = np.asarray(thickness)
polarThickness = np.asarray(polarThickness)
polarList = np.asarray(polarList)
tmax_in = np.max(thickness)
tmax_pol = np.max(polarThickness)
if (tmax_in > 1.2 and tmax_pol <= 1.2) or (tmax_in <= 1.2 and tmax_pol > 1.2):
raise Exception(
"Thicknesses of polars and input thickness need to be both in percent ([0-120]) or in fraction ([0-1.2])"
)
# sorting thickness
Isort = np.argsort(polarThickness)
polarThickness = polarThickness[Isort]
polarList = polarList[Isort]
polars = []
for it, t in enumerate(thickness):
ihigh = len(polarThickness) - 1
for ip, tp in enumerate(polarThickness):
if tp > t:
ihigh = ip
break
ilow = 0
for ip, tp in reversed(list(enumerate(polarThickness))):
if tp < t:
ilow = ip
break
if ihigh == ilow:
polars.append(polarList[ihigh])
print("[WARN] Using nearest polar for section {}, t={} , t_near={}".format(it, t, polarThickness[ihigh]))
else:
if (polarThickness[ilow] > t) or (polarThickness[ihigh] < t):
raise Exception("Implementation Error")
weight = (t - polarThickness[ilow]) / (polarThickness[ihigh] - polarThickness[ilow])
# print(polarThickness[ilow],'<',t,'<',polarThickness[ihigh],'Weight',weight)
pol = blend(polarList[ilow], polarList[ihigh], weight)
polars.append(pol)
# import matplotlib.pyplot as plt
# fig=plt.figure()
# plt.plot(polarList[ilow][: ,0],polarList[ilow][: ,2],'b',label='thick'+str(polarThickness[ilow]))
# plt.plot(pol[:,0],pol[:,2],'k--',label='thick'+str(t))
# plt.plot(polarList[ihigh][:,0],polarList[ihigh][:,2],'r',label='thick'+str(polarThickness[ihigh]))
# plt.legend()
# plt.show()
return polars
def _alpha_window_in_bounds(alpha, window):
"""Ensures that the window of alpha values is within the bounds of alpha
Example: alpha in [-30,30], window=[-20,20] => window=[-20,20]
Example: alpha in [-10,10], window=[-20,20] => window=[-10,10]
Example: alpha in [-30,30], window=[-40,10] => window=[-40,10]
"""
IBef = np.where(alpha <= window[0])[0]
if len(IBef) > 0:
im = IBef[-1]
else:
im = 0
IAft = np.where(alpha >= window[1])[0]
if len(IAft) > 0:
ip = IAft[0]
else:
ip = len(alpha) - 1
window = [alpha[im], alpha[ip]]
return window
def _find_alpha0(alpha, coeff, window):
"""Finds the point where coeff(alpha)==0 using interpolation.
    The search is narrowed to a window that can be specified by the user. The default window is usually wide enough for cases that make physical sense.
The angle alpha0 is found by looking at a zero up crossing in this window, and interpolation is used to find the exact location.
"""
# Constant case or only one value
if np.all(coeff == coeff[0]) or len(coeff) == 1:
if coeff[0] == 0:
return 0
else:
return np.nan
# Ensuring window is within our alpha values
window = _alpha_window_in_bounds(alpha, window)
# Finding zero up-crossing within window
iwindow = np.where((alpha >= window[0]) & (alpha <= window[1]))
alpha = alpha[iwindow]
coeff = coeff[iwindow]
alpha_zc, i_zc = _zero_crossings(x=alpha, y=coeff, direction="up")
if len(alpha_zc) > 1:
raise Exception(
"Cannot find alpha0, {} zero crossings of Coeff in the range of alpha values: [{} {}] ".format(
len(alpha_zc), window[0], window[1]
)
)
elif len(alpha_zc) == 0:
raise Exception(
"Cannot find alpha0, no zero crossing of Coeff in the range of alpha values: [{} {}] ".format(
window[0], window[1]
)
)
alpha0 = alpha_zc[0]
return alpha0
def _find_TSE_region(alpha, coeff, slope, alpha0, deviation):
"""Find the Trailing Edge Separation points, when the coefficient separates from its linear region
These points are defined as the points where the difference is equal to +/- `deviation`
Typically deviation is about 0.05 (absolute value)
The linear region is defined as coeff_lin = slope (alpha-alpha0)
returns:
a_TSE: values of alpha at the TSE point (upper and lower)
"""
# How off are we from the linear region
DeltaLin = slope * (alpha - alpha0) - coeff
# Upper and lower regions
bUpp = alpha >= alpha0
bLow = alpha <= alpha0
# Finding the point where the delta is equal to `deviation`
a_TSEUpp = np.interp(deviation, DeltaLin[bUpp], alpha[bUpp])
a_TSELow = np.interp(-deviation, DeltaLin[bLow], alpha[bLow])
return a_TSELow, a_TSEUpp
def _find_max_points(alpha, coeff, alpha0, method="inflections"):
"""Find upper and lower max points in `coeff` vector.
    if `method` is "inflections":
        These points are detected from slope changes of `coeff`, positive or negative inflections
The upper stall point is the first point after alpha0 with a "hat" inflection
The lower stall point is the first point below alpha0 with a "v" inflection
"""
if method == "inflections":
dC = np.diff(coeff)
IHatInflections = np.where(np.logical_and.reduce((dC[1:] < 0, dC[0:-1] > 0, alpha[1:-1] > alpha0)))[0]
IVeeInflections = np.where(np.logical_and.reduce((dC[1:] > 0, dC[0:-1] < 0, alpha[1:-1] < alpha0)))[0]
if len(IHatInflections) <= 0:
raise Exception("Not able to detect upper stall point of curve")
if len(IVeeInflections) <= 0:
raise Exception("Not able to detect lower stall point of curve")
a_MaxUpp = alpha[IHatInflections[0] + 1]
c_MaxUpp = coeff[IHatInflections[0] + 1]
a_MaxLow = alpha[IVeeInflections[-1] + 1]
c_MaxLow = coeff[IVeeInflections[-1] + 1]
else:
raise NotImplementedError()
return (a_MaxUpp, c_MaxUpp, a_MaxLow, c_MaxLow)
# --------------------------------------------------------------------------------}
# --- Generic curve handling functions
# --------------------------------------------------------------------------------{
def _find_slope(x, y, xi=None, x0=None, window=None, method="max", opts=None):
"""Find the slope of a curve at x=xi based on a given method.
INPUTS:
x: array of x values
y: array of y values
xi: point where the slope is to be computed
x0: point where y(x0)=0
if provided the constraint y(x0)=0 is added.
window:
If a `window` is provided the search is restrained to this region of x values.
Typical windows for airfoils are: window=[alpha0,Clmax], or window=[-5,5]+alpha0
If window is None, the whole extent is used (window=[min(x),max(x)])
The methods available are:
'max' : returns the maximum slope within the window. Needs `xi`
'leastsquare': use leastsquare (or polyfit), to fit the curve within the window
'finitediff_1c': first order centered finite difference. Needs `xi`
        'optim': find the slope by looking at all possible slope values, and tries to find an optimum where the length of the linear region is maximized.
returns:
(a,x0): such that the slope is a(x-x0)
(x0=-b/a where y=ax+b)
"""
if window is not None:
I = np.where(np.logical_and(x >= window[0], x <= window[1]))
x = x[I]
y = y[I]
if len(y) <= 0:
raise Exception("Cannot find slope, no data in y (after window selection)")
if len(y) < 4 and method == "optim":
method = "leastsquare"
# print('[WARN] Not enought data to find slope with optim method, using leastsquare')
if method == "max":
if xi is not None:
I = np.nonzero(x - xi)
yi = np.interp(xi, x, y)
a = max((y[I] - yi) / (x[I] - xi))
x0 = xi - yi / a
else:
raise Exception("For now xi needs to be set to find a slope with the max method")
elif method == "finitediff_1c":
# First order centered finite difference
if xi is not None:
im = np.where(x < xi)[0][-1]
dx = x[im + 1] - x[im - 1]
if np.abs(dx) > 1e-7:
a = (y[im + 1] - y[im - 1]) / dx
yi = np.interp(xi, x, y)
x0 = xi - yi / a
else:
a = np.inf
x0 = xi
else:
raise Exception("For now xi needs to be set to find a slope with the finite diff method")
elif method == "leastsquare":
if x0 is not None:
try:
a = np.linalg.lstsq((x - x0).reshape((-1, 1)), y.reshape((-1, 1)), rcond=None)[0][0][0]
except:
a = np.linalg.lstsq((x - x0).reshape((-1, 1)), y.reshape((-1, 1)))[0][0][0]
else:
p = np.polyfit(x, y, 1)
a = p[0]
x0 = -p[1] / a
elif method == "optim":
if opts is None:
nMin = max(3, int(len(x) / 2))
else:
nMin = opts["nMin"]
a, x0, iStart, iEnd = _find_linear_region(x, y, nMin, x0)
else:
raise NotImplementedError()
return a, x0
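# --- Self-check sketch for _find_slope (illustration only) ---
# On an exactly linear curve the least-square method recovers the slope and the
# zero crossing x0 (here slope 0.11 around x0 = 2):
#   x = np.linspace(-5.0, 15.0, 41)
#   y = 0.11 * (x - 2.0)
#   a, x0 = _find_slope(x, y, window=[-5.0, 10.0], method="leastsquare")
#   print(round(a, 3), round(x0, 3))   # 0.11 2.0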
def _find_linear_region(x, y, nMin, x0=None):
"""Find a linear region by computing all possible slopes for all possible extent.
The objective function tries to minimize the error with the linear slope
and maximize the length of the linear region.
    nMin is the minimum number of points to be present in the region
If x0 is provided, the function a*(x-x0) is fitted
returns:
slope :
offset:
iStart: index of start of linear region
iEnd : index of end of linear region
"""
if x0 is not None:
x = x.reshape((-1, 1)) - x0
y = y.reshape((-1, 1))
n = len(x) - nMin + 1
err = np.zeros((n, n)) * np.nan
slp = np.zeros((n, n)) * np.nan
off = np.zeros((n, n)) * np.nan
spn = np.zeros((n, n)) * np.nan
for iStart in range(n):
for j in range(iStart, n):
iEnd = j + nMin
if x0 is not None:
sl = np.linalg.lstsq(x[iStart:iEnd], y[iStart:iEnd], rcond=None)[0][0]
slp[iStart, j] = sl
off[iStart, j] = x0
y_lin = x[iStart:iEnd] * sl
else:
coefs = np.polyfit(x[iStart:iEnd], y[iStart:iEnd], 1)
slp[iStart, j] = coefs[0]
off[iStart, j] = -coefs[1] / coefs[0]
y_lin = x[iStart:iEnd] * coefs[0] + coefs[1]
err[iStart, j] = np.mean((y[iStart:iEnd] - y_lin) ** 2)
spn[iStart, j] = iEnd - iStart
spn = 1 / (spn - nMin + 1)
err = (err) / (np.nanmax(err))
obj = np.multiply(spn, err)
obj = err
(iStart, j) = np.unravel_index(
|
np.nanargmin(obj)
|
numpy.nanargmin
|
"""Training and evaluation script"""
import pickle
import argparse
import numpy as np
import torch
from cnn import YKCNNClassifier
from utils import create_dataloader
from train import train_model
from evaluation import eval_model, accuracy
def cli_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"pickle_path", help="Path to pickle file produced by `process_data.py`"
)
parser.add_argument(
"mode",
default="static",
choices=["static", "non_static", "random"],
)
parser.add_argument("--cv_folds", type=int, default=10)
parser.add_argument("--use_gpu", type=bool, default=False)
return parser.parse_args()
def load_data(pickle_file):
"""
For loading the pickle file created with process_data.py
"""
with open(pickle_file, "rb") as f:
contents = pickle.load(f)
return contents
def get_id_from_sequence(
sequence, word2id, max_sequence_length=56, pad_index=0
):
"""
    Transforms a sentence into a list of word indices and pads with zeroes.
"""
x = np.zeros(max_sequence_length) + pad_index
index = 0
for word in sequence.split():
if word in word2id:
x[index] = word2id[word]
index += 1
return x
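# --- Usage sketch for get_id_from_sequence (illustration only; toy vocabulary) ---
# Words missing from word2id are skipped and the remainder stays zero-padded:
#   word2id = {"great": 3, "movie": 7}
#   ids = get_id_from_sequence("a great movie", word2id, max_sequence_length=6)
#   print(ids)   # [3. 7. 0. 0. 0. 0.]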
def get_train_test_inds(cv, splits):
"""
Returns training and test indices based on the split
digit stored in the review object
"""
    id_split = np.array(splits, dtype=int)
bool_mask = id_split == cv
train_inds =
|
np.where(~bool_mask)
|
numpy.where
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.geo.line` defines :class:`Line`.
"""
import numpy
from openquake.hazardlib.geo import geodetic
from openquake.hazardlib.geo import utils
class Line(object):
"""
This class represents a geographical line, which is basically
a sequence of geographical points.
A line is defined by at least one point.
:param points:
The sequence of points defining this line.
:type points:
list of :class:`~openquake.hazardlib.geo.point.Point` instances
"""
def __init__(self, points):
self.points = utils.clean_points(points)
if len(self.points) < 1:
raise ValueError("One point needed to create a line!")
def __eq__(self, other):
"""
>>> from openquake.hazardlib.geo.point import Point
>>> points = [Point(1, 2), Point(3, 4)]; Line(points) == Line(points)
True
>>> Line(points) == Line(list(reversed(points)))
False
"""
return self.points == other.points
def __ne__(self, other):
"""
>>> from openquake.hazardlib.geo.point import Point
>>> Line([Point(1, 2)]) != Line([Point(1, 2)])
False
>>> Line([Point(1, 2)]) != Line([Point(2, 1)])
True
"""
return not self.__eq__(other)
def __len__(self):
return len(self.points)
def __getitem__(self, key):
return self.points.__getitem__(key)
def on_surface(self):
"""
Check if this line is defined on the surface (i.e. all points
        are on the surface, depth=0.0).
:returns bool:
True if this line is on the surface, false otherwise.
"""
return all(point.on_surface() for point in self.points)
def horizontal(self):
"""
Check if this line is horizontal (i.e. all depths of points
are equal).
:returns bool:
True if this line is horizontal, false otherwise.
"""
return all(p.depth == self[0].depth for p in self)
def average_azimuth(self):
"""
Calculate and return weighted average azimuth of all line's segments
in decimal degrees.
Uses formula from
http://en.wikipedia.org/wiki/Mean_of_circular_quantities
>>> from openquake.hazardlib.geo.point import Point as P
>>> '%.1f' % Line([P(0, 0), P(1e-5, 1e-5)]).average_azimuth()
'45.0'
>>> '%.1f' % Line([P(0, 0), P(0, 1e-5), P(1e-5, 1e-5)]).average_azimuth()
'45.0'
>>> line = Line([P(0, 0), P(-2e-5, 0), P(-2e-5, 1.154e-5)])
>>> '%.1f' % line.average_azimuth()
'300.0'
"""
if len(self.points) == 2:
return self.points[0].azimuth(self.points[1])
lons = numpy.array([point.longitude for point in self.points])
lats = numpy.array([point.latitude for point in self.points])
azimuths = geodetic.azimuth(lons[:-1], lats[:-1], lons[1:], lats[1:])
distances = geodetic.geodetic_distance(lons[:-1], lats[:-1],
lons[1:], lats[1:])
azimuths = numpy.radians(azimuths)
# convert polar coordinates to Cartesian ones and calculate
# the average coordinate of each component
avg_x = numpy.mean(distances * numpy.sin(azimuths))
avg_y = numpy.mean(distances * numpy.cos(azimuths))
# find the mean azimuth from that mean vector
azimuth = numpy.degrees(
|
numpy.arctan2(avg_x, avg_y)
|
numpy.arctan2
|
import os
from sys import byteorder
import numpy as np
from psana.psexp.smdreader_manager import SmdReaderManager
from psana.psexp.eventbuilder_manager import EventBuilderManager
from psana.psexp.event_manager import EventManager
from psana.psexp.packet_footer import PacketFooter
from psana.event import Event
from psana.psexp.step import Step
from psana import dgram
from mpi4py import MPI
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
group = comm.Get_group()  # This is the world group
# Setting up group communications
# Ex. PS_SMD_NODES=3 mpirun -n 13
# 1 4 7 10
# 0 2 5 8 11
# 3 6 9 12
#-smd_group-
# -bd_main_group-
# color
# 0 0 0 0
# 1 1 1 1
# 2 2 2 2
# bd_main_rank bd_rank
# 0 3 6 9 0 1 2 3
# 1 4 7 10 0 1 2 3
# 2 5 8 11 0 1 2 3
PS_SMD_NODES = int(os.environ.get('PS_SMD_NODES', 1))
smd_group = group.Incl(range(PS_SMD_NODES + 1))
bd_main_group = group.Excl([0])
smd_comm = comm.Create(smd_group)
smd_rank = 0
smd_size = 0
if smd_comm != MPI.COMM_NULL:
smd_rank = smd_comm.Get_rank()
smd_size = smd_comm.Get_size()
bd_main_comm = comm.Create(bd_main_group)
bd_main_rank = 0
bd_main_size = 0
bd_rank = 0
bd_size = 0
color = 0
nodetype = None
if bd_main_comm != MPI.COMM_NULL:
bd_main_rank = bd_main_comm.Get_rank()
bd_main_size = bd_main_comm.Get_size()
# Split bigdata main comm to PS_SMD_NODES groups
color = bd_main_rank % PS_SMD_NODES
bd_comm = bd_main_comm.Split(color, bd_main_rank)
bd_rank = bd_comm.Get_rank()
bd_size = bd_comm.Get_size()
if bd_rank == 0:
nodetype = 'smd'
else:
nodetype = 'bd'
if nodetype is None:
nodetype = 'smd0' # if no nodetype assigned, I must be smd0
class UpdateManager(object):
""" Keeps epics data and their send history. """
def __init__(self, client_size, n_smds):
self.n_smds = n_smds
self.bufs = [bytearray() for i in range(self.n_smds)]
self.send_history = []
# Initialize no. of sent bytes to 0 for evtbuilder
# [[offset_update0, offset_update1, ], [offset_update0, offset_update1, ], ...]
# [ ---------evtbuilder0------------ , ---------evtbuilder1------------ ,
for i in range(1, client_size):
self.send_history.append([0]*self.n_smds)
def extend_buffers(self, views):
for i, view in enumerate(views):
self.bufs[i].extend(view)
def get_buffer(self, client_id):
""" Returns new epics data (if any) for this client
then updates the sent record."""
update_chunk = bytearray()
if self.n_smds: # do nothing if no epics data found
indexed_id = client_id - 1 # rank 0 has no send history.
pf = PacketFooter(self.n_smds)
for i, buf in enumerate(self.bufs):
current_buf = self.bufs[i]
current_offset = self.send_history[indexed_id][i]
current_buf_size = memoryview(current_buf).shape[0]
pf.set_size(i, current_buf_size - current_offset)
update_chunk.extend(current_buf[current_offset:])
self.send_history[indexed_id][i] = current_buf_size
update_chunk.extend(pf.footer)
return update_chunk
class Smd0(object):
""" Sends blocks of smds to smd_node
Identifies limit timestamp of the slowest detector then
sends all smds within that timestamp to an smd_node.
"""
def __init__(self, run):
self.smdr_man = SmdReaderManager(run.smd_dm.fds, run.max_events)
self.run = run
self.epics_man = UpdateManager(smd_size, self.run.epics_store.n_files)
self.run_mpi()
def run_mpi(self):
rankreq = np.empty(1, dtype='i')
for (smd_chunk, update_chunk) in self.smdr_man.chunks():
# Creates a chunk from smd and epics data to send to SmdNode
# Anatomy of a chunk (pf=packet_footer):
# [ [smd0][smd1][smd2][pf] ][ [epics0][epics1][epics2][pf] ][ pf ]
# ----- smd_chunk ------ ---------epics_chunk-------
# -------------------------- chunk ------------------------------
# Read new epics data as available in the queue
# then send only unseen portion of data to the evtbuilder rank.
update_pf = PacketFooter(view=update_chunk)
self.epics_man.extend_buffers(update_pf.split_packets())
smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
epics_chunk = self.epics_man.get_buffer(rankreq[0])
pf = PacketFooter(2)
pf.set_size(0, memoryview(smd_chunk).shape[0])
pf.set_size(1, memoryview(epics_chunk).shape[0])
chunk = smd_chunk + epics_chunk + pf.footer
smd_comm.Send(chunk, dest=rankreq[0])
for i in range(PS_SMD_NODES):
smd_comm.Recv(rankreq, source=MPI.ANY_SOURCE)
smd_comm.Send(bytearray(), dest=rankreq[0])
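# Minimal sketch (assumption: mirrors the chunk anatomy documented in Smd0.run_mpi
# above): how a receiver can split a chunk back into its smd and epics parts with
# PacketFooter. The function name is illustrative only.
def _example_split_chunk(chunk):
    pf = PacketFooter(view=chunk)
    smd_chunk, epics_chunk = pf.split_packets()
    return smd_chunk, epics_chunk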
class SmdNode(object):
"""Handles both smd_0 and bd_nodes
Receives blocks of smds from smd_0 then assembles
offsets and dgramsizes into a numpy array. Sends
this np array to bd_nodes that are registered to it."""
def __init__(self, run):
self.n_bd_nodes = bd_comm.Get_size() - 1
self.run = run
self.epics_man = UpdateManager(bd_size, self.run.epics_store.n_files)
self._update_dgram_pos = 0 # bookkeeping for running in scan mode
def pack(self, *args):
pf = PacketFooter(len(args))
batch = bytearray()
for i, arg in enumerate(args):
pf.set_size(i, memoryview(arg).shape[0])
batch += arg
batch += pf.footer
return batch
def run_mpi(self):
        rankreq = np.empty(1, dtype='i')
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the TensorBox functional API in pennylane.math.fn
"""
import itertools
import numpy as onp
import pytest
import pennylane as qml
from pennylane import numpy as np
from pennylane.math import fn
tf = pytest.importorskip("tensorflow", minversion="2.1")
torch = pytest.importorskip("torch")
class TestGetMultiTensorbox:
"""Tests for the _get_multi_tensorbox utility function"""
def test_exception_tensorflow_and_torch(self):
"""Test that an exception is raised if the sequence of tensors contains
tensors from incompatible dispatch libraries"""
x = tf.Variable([1.0, 2.0, 3.0])
y = onp.array([0.5, 0.1])
z = torch.tensor([0.6])
with pytest.raises(ValueError, match="Tensors contain mixed types"):
fn._get_multi_tensorbox([x, y, z])
def test_warning_tensorflow_and_autograd(self):
"""Test that a warning is raised if the sequence of tensors contains
both tensorflow and autograd tensors."""
x = tf.Variable([1.0, 2.0, 3.0])
y = np.array([0.5, 0.1])
with pytest.warns(UserWarning, match="Consider replacing Autograd with vanilla NumPy"):
fn._get_multi_tensorbox([x, y])
def test_warning_torch_and_autograd(self):
"""Test that a warning is raised if the sequence of tensors contains
both torch and autograd tensors."""
x = torch.tensor([1.0, 2.0, 3.0])
y = np.array([0.5, 0.1])
with pytest.warns(UserWarning, match="Consider replacing Autograd with vanilla NumPy"):
fn._get_multi_tensorbox([x, y])
def test_return_tensorflow_box(self):
"""Test that TensorFlow is correctly identified as the dispatching library."""
x = tf.Variable([1.0, 2.0, 3.0])
y = onp.array([0.5, 0.1])
res = fn._get_multi_tensorbox([y, x])
assert res.interface == "tf"
def test_return_torch_box(self):
"""Test that Torch is correctly identified as the dispatching library."""
x = torch.tensor([1.0, 2.0, 3.0])
y = onp.array([0.5, 0.1])
res = fn._get_multi_tensorbox([y, x])
assert res.interface == "torch"
def test_return_autograd_box(self):
"""Test that autograd is correctly identified as the dispatching library."""
x = np.array([1.0, 2.0, 3.0])
y = [0.5, 0.1]
res = fn._get_multi_tensorbox([y, x])
assert res.interface == "autograd"
def test_return_numpy_box(self):
"""Test that NumPy is correctly identified as the dispatching library."""
x = onp.array([1.0, 2.0, 3.0])
y = [0.5, 0.1]
res = fn._get_multi_tensorbox([y, x])
assert res.interface == "numpy"
test_abs_data = [
(1, -2, 3 + 4j),
[1, -2, 3 + 4j],
onp.array([1, -2, 3 + 4j]),
np.array([1, -2, 3 + 4j]),
torch.tensor([1, -2, 3 + 4j], dtype=torch.complex128),
tf.Variable([1, -2, 3 + 4j], dtype=tf.complex128),
tf.constant([1, -2, 3 + 4j], dtype=tf.complex128),
]
@pytest.mark.parametrize("t", test_abs_data)
def test_abs(t):
"""Test that the absolute function works for a variety
of input"""
res = fn.abs_(t)
assert fn.allequal(res, [1, 2, 5])
test_data = [
(1, 2, 3),
[1, 2, 3],
onp.array([1, 2, 3]),
np.array([1, 2, 3]),
torch.tensor([1, 2, 3]),
tf.Variable([1, 2, 3]),
tf.constant([1, 2, 3]),
]
@pytest.mark.parametrize("t1,t2", list(itertools.combinations(test_data, r=2)))
def test_allequal(t1, t2):
"""Test that the allequal function works for a variety of inputs."""
res = fn.allequal(t1, t2)
if isinstance(t1, tf.Variable):
t1 = tf.convert_to_tensor(t1)
if isinstance(t2, tf.Variable):
t2 = tf.convert_to_tensor(t2)
expected = all(float(x) == float(y) for x, y in zip(t1, t2))
assert res == expected
@pytest.mark.parametrize("t1,t2", list(itertools.combinations(test_data, r=2)))
def test_allclose(t1, t2):
"""Test that the allclose function works for a variety of inputs."""
res = fn.allclose(t1, t2)
if isinstance(t1, tf.Variable):
t1 = tf.convert_to_tensor(t1)
if isinstance(t2, tf.Variable):
t2 = tf.convert_to_tensor(t2)
expected = all(float(x) == float(y) for x, y in zip(t1, t2))
assert res == expected
test_angle_data = [
    (1.0, 1.0j, 1+1j),
[1.0, 1.0j, 1+1j],
onp.array([1.0, 1.0j, 1+1j]),
np.array([1.0, 1.0j, 1+1j]),
torch.tensor([1.0, 1.0j, 1+1j], dtype=torch.complex128),
tf.Variable([1.0, 1.0j, 1+1j], dtype=tf.complex128),
tf.constant([1.0, 1.0j, 1+1j], dtype=tf.complex128),
]
@pytest.mark.parametrize("t", test_angle_data)
def test_angle(t):
"""Test that the angle function works for a variety
of input"""
res = fn.angle(t)
assert fn.allequal(res, [0, np.pi / 2, np.pi / 4])
test_arcsin_data = [
(1, 0.2, -0.5),
[1, 0.2, -0.5],
onp.array([1, 0.2, -0.5]),
np.array([1, 0.2, -0.5]),
torch.tensor([1, 0.2, -0.5], dtype=torch.float64),
tf.Variable([1, 0.2, -0.5], dtype=tf.float64),
tf.constant([1, 0.2, -0.5], dtype=tf.float64),
]
@pytest.mark.parametrize("t", test_arcsin_data)
def test_arcsin(t):
"""Test that the arcsin function works for a variety
of input"""
res = fn.arcsin(t)
assert fn.allequal(res, np.arcsin([1, 0.2, -0.5]))
class TestCast:
"""Tests for the cast function"""
@pytest.mark.parametrize("t", test_data)
def test_cast_numpy(self, t):
"""Test that specifying a NumPy dtype results in proper casting
behaviour"""
res = fn.cast(t, onp.float64)
assert fn.get_interface(res) == fn.get_interface(t)
if hasattr(res, "numpy"):
# if tensorflow or pytorch, extract view of underlying data
res = res.numpy()
t = t.numpy()
assert onp.issubdtype(onp.asarray(t).dtype, onp.integer)
assert res.dtype.type is onp.float64
@pytest.mark.parametrize("t", test_data)
def test_cast_numpy_dtype(self, t):
"""Test that specifying a NumPy dtype object results in proper casting
behaviour"""
res = fn.cast(t, onp.dtype("float64"))
assert fn.get_interface(res) == fn.get_interface(t)
if hasattr(res, "numpy"):
# if tensorflow or pytorch, extract view of underlying data
res = res.numpy()
t = t.numpy()
assert onp.issubdtype(onp.asarray(t).dtype, onp.integer)
assert res.dtype.type is onp.float64
@pytest.mark.parametrize("t", test_data)
def test_cast_numpy_string(self, t):
"""Test that specifying a NumPy dtype via a string results in proper casting
behaviour"""
res = fn.cast(t, "float64")
assert fn.get_interface(res) == fn.get_interface(t)
if hasattr(res, "numpy"):
# if tensorflow or pytorch, extract view of underlying data
res = res.numpy()
t = t.numpy()
assert onp.issubdtype(onp.asarray(t).dtype, onp.integer)
assert res.dtype.type is onp.float64
def test_cast_tensorflow_dtype(self):
"""If the tensor is a TensorFlow tensor, casting using a TensorFlow dtype
will also work"""
t = tf.Variable([1, 2, 3])
res = fn.cast(t, tf.complex128)
assert isinstance(res, tf.Tensor)
assert res.dtype is tf.complex128
def test_cast_torch_dtype(self):
"""If the tensor is a Torch tensor, casting using a Torch dtype
will also work"""
t = torch.tensor([1, 2, 3], dtype=torch.int64)
res = fn.cast(t, torch.float64)
assert isinstance(res, torch.Tensor)
assert res.dtype is torch.float64
cast_like_test_data = [
(1, 2, 3),
[1, 2, 3],
onp.array([1, 2, 3], dtype=onp.int64),
np.array([1, 2, 3], dtype=np.int64),
torch.tensor([1, 2, 3], dtype=torch.int64),
tf.Variable([1, 2, 3], dtype=tf.int64),
tf.constant([1, 2, 3], dtype=tf.int64),
(1.0, 2.0, 3.0),
[1.0, 2.0, 3.0],
onp.array([1, 2, 3], dtype=onp.float64),
np.array([1, 2, 3], dtype=np.float64),
torch.tensor([1, 2, 3], dtype=torch.float64),
tf.Variable([1, 2, 3], dtype=tf.float64),
tf.constant([1, 2, 3], dtype=tf.float64),
]
@pytest.mark.parametrize("t1,t2", list(itertools.combinations(cast_like_test_data, r=2)))
def test_cast_like(t1, t2):
"""Test that casting t1 like t2 results in t1 being cast to the same datatype as t2"""
res = fn.cast_like(t1, t2)
# if tensorflow or pytorch, extract view of underlying data
if hasattr(res, "numpy"):
res = res.numpy()
if hasattr(t2, "numpy"):
t2 = t2.numpy()
assert fn.allequal(res, t1)
    assert onp.asarray(res).dtype.type is onp.asarray(t2).dtype.type
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.lines import Line2D
class Casino:
def __init__(self):
self.player = Player()
self.sum = np.array([0] * 10)
self.usable_ace = {"exist": False, "pos": -1, "flipped_pos": -1}
self.threshold = 17
self.cards = np.append([1.0 / 13.0] * 9, (4.0 / 13.0))
def clear_hand_n_sum(self):
self.sum = np.array([0] * 10)
self.usable_ace = \
{"exist": False, "pos": -1, "flipped_pos": -1}
def get_card(self):
        card = int(np.random.choice(10, 1, p=self.cards))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from tensorlayer.models import Model
from tensorflow.python.ops.rnn_cell import LSTMCell
import numpy as np
from tests.utils import CustomTestCase
class LayerNode_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def test_net1(self):
print('-' * 20, 'test_net1', '-' * 20)
def get_model(input_shape):
ni = Input(input_shape)
nii = Conv2d(32, filter_size=(3, 3), strides=(1, 1), name='conv1')(ni)
nn = Dropout(keep=0.9, name='drop1')(nii)
conv = Conv2d(32, filter_size=(3, 3), strides=(1, 1), name='conv2')
tt = conv(nn) # conv2_node_0
nn = conv(nn) # conv2_node_1
# a branch
na = Conv2d(64, filter_size=(3, 3), strides=(1, 1), name='conv3')(nn)
na = MaxPool2d(name='pool1')(na)
# b branch
nb = MaxPool2d(name='pool2')(nn)
nb = conv(nb) # conv2_node_2
out = Concat(name='concat')([na, nb])
M = Model(inputs=ni, outputs=[out, nn, nb])
gg = conv(nii) # this node will not be added since model fixed
return M
net = get_model([None, 24, 24, 3])
for k, v in enumerate(net._node_by_depth):
print(k, [x.name for x in v], [x.in_tensors_idxes for x in v])
all_node_names = []
for k, v in enumerate(net._node_by_depth):
all_node_names.extend([x.name for x in v])
self.assertNotIn('conv2_node_0', all_node_names)
self.assertNotIn('conv2_node_3', all_node_names)
self.assertEqual(len(net.all_layers), 8)
print(net.all_layers)
data = np.random.normal(size=[2, 24, 24, 3]).astype(np.float32)
out, nn, nb = net(data, is_train=True)
self.assertEqual(nn.shape, [2, 24, 24, 32])
self.assertEqual(nb.shape, [2, 12, 12, 32])
def test_net2(self):
print('-' * 20, 'test_net2', '-' * 20)
def get_unstack_model(input_shape):
ni = Input(input_shape)
nn = Dropout(keep=0.9)(ni)
a, b, c = UnStack(axis=-1)(nn)
b = Flatten()(b)
b = Dense(10)(b)
c = Flatten()(c)
M = Model(inputs=ni, outputs=[a, b, c])
return M
net = get_unstack_model([None, 24, 24, 3])
for k, v in enumerate(net._node_by_depth):
print(k, [x.name for x in v], [x.in_tensors_idxes for x in v])
data = np.random.normal(size=[2, 24, 24, 3]).astype(np.float32)
out = net(data, is_train=True)
self.assertEqual(len(out), 3)
def test_word2vec(self):
print('-' * 20, 'test_word2vec', '-' * 20)
def get_word2vec():
vocabulary_size = 800
batch_size = 10
embedding_size = 60
num_sampled = 25
inputs = tl.layers.Input([batch_size], dtype=tf.int32)
labels = tl.layers.Input([batch_size, 1], dtype=tf.int32)
emb_net = tl.layers.Word2vecEmbedding(
vocabulary_size=vocabulary_size,
embedding_size=embedding_size,
num_sampled=num_sampled,
activate_nce_loss=True, # nce loss is activated
nce_loss_args={},
E_init=tl.initializers.random_uniform(minval=-1.0, maxval=1.0),
                nce_W_init=tl.initializers.truncated_normal(stddev=float(1.0 / np.sqrt(embedding_size))),
#
# Solver class using sundials with the KLU sparse linear solver
#
import pybamm
import numpy as np
import scipy.sparse as sparse
import importlib
idaklu_spec = importlib.util.find_spec("idaklu")
if idaklu_spec is not None:
idaklu = importlib.util.module_from_spec(idaklu_spec)
idaklu_spec.loader.exec_module(idaklu)
def have_idaklu():
return idaklu_spec is not None
class IDAKLUSolver(pybamm.BaseSolver):
"""Solve a discretised model, using sundials with the KLU sparse linear solver.
Parameters
----------
rtol : float, optional
The relative tolerance for the solver (default is 1e-6).
atol : float, optional
The absolute tolerance for the solver (default is 1e-6).
    root_method : str, optional
        The method to use to find initial conditions (default is "casadi")
    root_tol : float, optional
        The tolerance for the initial-condition solver (default is 1e-6).
max_steps: int, optional
The maximum number of steps the solver will take before terminating
(default is 1000).
"""
def __init__(
self, rtol=1e-6, atol=1e-6, root_method="casadi", root_tol=1e-6, max_steps=1000
):
if idaklu_spec is None:
raise ImportError("KLU is not installed")
super().__init__("ida", rtol, atol, root_method, root_tol, max_steps)
self.name = "IDA KLU solver"
def set_atol_by_variable(self, variables_with_tols, model):
"""
A method to set the absolute tolerances in the solver by state variable.
This method attaches a vector of tolerance to the model. (i.e. model.atol)
Parameters
----------
variables_with_tols : dict
A dictionary with keys that are strings indicating the variable you
wish to set the tolerance of and values that are the tolerances.
model : :class:`pybamm.BaseModel`
The model that is going to be solved.
"""
size = model.concatenated_initial_conditions.size
atol = self._check_atol_type(self._atol, size)
for var, tol in variables_with_tols.items():
variable = model.variables[var]
if isinstance(variable, pybamm.StateVector):
atol = self.set_state_vec_tol(atol, variable, tol)
elif isinstance(variable, pybamm.Concatenation):
for child in variable.children:
if isinstance(child, pybamm.StateVector):
atol = self.set_state_vec_tol(atol, child, tol)
else:
raise pybamm.SolverError(
"""Can only set tolerances for state variables
or concatenations of state variables"""
)
else:
raise pybamm.SolverError(
"""Can only set tolerances for state variables or
concatenations of state variables"""
)
model.atol = atol
def set_state_vec_tol(self, atol, state_vec, tol):
"""
A method to set the tolerances in the atol vector of a specific
state variable. This method modifies self._atol
Parameters
----------
state_vec : :class:`pybamm.StateVector`
The state vector to apply to the tolerance to
tol: float
The tolerance value
"""
slices = state_vec.y_slices[0]
atol[slices] = tol
return atol
def _check_atol_type(self, atol, size):
"""
This method checks that the atol vector is of the right shape and
type.
Parameters
----------
atol: double or np.array or list
Absolute tolerances. If this is a vector then each entry corresponds to
the absolute tolerance of one entry in the state vector.
size: int
The length of the atol vector
"""
if isinstance(atol, float):
atol = atol * np.ones(size)
elif isinstance(atol, list):
            atol = np.array(atol)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
#Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
from scipy.constants import codata
from pylab import *
from scipy.optimize import curve_fit
import mpmath as mp
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
#from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
#Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
import matplotlib.ticker as mtick
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rcParams.update({'axes.labelsize':22})
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
mpl.rc('legend',fontsize=14)
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
Rg = codata.physical_constants['molar gas constant'][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
'''
    Frequency Generator with logspaced frequencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(np.log10(f_start), np.log10(f_stop), num=np.around(pts_decade*f_decades).astype(int), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
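### Usage sketch (illustrative, not part of the original module): a logspaced
### range from 1 MHz down to 10 mHz with the default 7 points/decade.
def _example_freq_gen():
    f_range, w_range = freq_gen(f_start=10**6, f_stop=10**-2, pts_decade=7)
    return f_range, w_range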
### Simulation Element Functions
##
#
def elem_L(w, L):
'''
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
'''
return 1j*w*L
def elem_C(w,C):
'''
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
'''
return 1/(C*(w*1j))
def elem_Q(w,Q,n):
'''
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return 1/(Q*(w*1j)**n)
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
'''
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
'''
return Rs + 1/(C*(w*1j))
def cir_RsQ(w, Rs, Q, n):
'''
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return Rs + 1/(Q*(w*1j)**n)
def cir_RQ(w, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -RQ-
    Return the impedance of an RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return (R/(1+R*Q*(w*1j)**n))
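# Worked sketch (illustrative values): the summit-frequency relation used above,
# R = 1/(Q*(2*pi*fs)**n), lets any one of R, Q or fs be left as 'none'. Passing
# fs instead of Q gives the same impedance.
def _example_RQ_summit():
    R, n, fs = 100.0, 0.9, 1000.0
    Q = 1/(R*(2*np.pi*fs)**n)                 # back out Q from R and fs
    f_range, w_range = freq_gen(10**6, 10**-2)
    Z_a = cir_RQ(w_range, R=R, Q=Q, n=n)      # explicit Q
    Z_b = cir_RQ(w_range, R=R, n=n, fs=fs)    # same impedance via fs
    return np.allclose(Z_a, Z_b)              # True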
def cir_RsRQ(w, Rs='none', R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RC(w, C='none', R='none', fs='none'):
'''
Simulation Function: -RC-
    Returns the impedance of an RC circuit, using RQ definitions where n=1. See cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
'''
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(w, Rs, R='none', Q='none', n='none', fs='none', R2='none', Q2='none', n2='none', fs2='none'):
'''
Simulation Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if R2 == 'none':
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif Q2 == 'none':
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif n2 == 'none':
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_RsRQQ(w, Rs, Q, n, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = Summit frequency of RQ circuit [Hz]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w,Q,n)
def cir_RsRQC(w, Rs, C, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = summit frequency of RQ circuit [Hz]
    C = Capacitance of series C [F]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
'''
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
    C1 = Capacitance in (RC) circuit [F]
    C = Capacitance of series C [F]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
'''
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
    C1 = Capacitance in (RC) circuit [F]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
def Randles_coeff(w, n_electron, A, E='none', E0='none', D_red='none', D_ox='none', C_red='none', C_ox='none', Rg=Rg, F=F, T=298.15):
'''
Returns the Randles coefficient sigma [ohm/s^1/2].
Two cases: a) ox and red are both present in solution here both Cred and Dred are defined, b) In the particular case where initially
only Ox species are present in the solution with bulk concentration C*_ox, the surface concentrations may be calculated as function
of the electrode potential following Nernst equation. Here C_red and D_red == 'none'
Ref.:
- <NAME>., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. R. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
D_ox = Diffusion coefficent of oxidized specie [cm2/s]
D_red = Diffusion coefficent of reduced specie [cm2/s]
    C_ox = Bulk concentration of oxidized species [mol/cm3]
    C_red = Bulk concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = formal potential [V]
if reduced specie is absent == 'none'
Returns
----------
Randles coefficient [ohm/s^1/2]
'''
if C_red != 'none' and D_red != 'none':
sigma = ((Rg*T) / ((n_electron**2) * A * (F**2) * (2**(1/2)))) * ((1/(D_ox**(1/2) * C_ox)) + (1/(D_red**(1/2) * C_red)))
elif C_red == 'none' and D_red == 'none' and E!='none' and E0!= 'none':
f = F/(Rg*T)
x = (n_electron*f*(E-E0))/2
func_cosh2 = (np.cosh(2*x)+1)/2
sigma = ((4*Rg*T) / ((n_electron**2) * A * (F**2) * C_ox * ((2*D_ox)**(1/2)) )) * func_cosh2
else:
print('define E and E0')
Z_Aw = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Z_Aw
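# Worked sketch (illustrative values, case (a): both ox and red species present in
# solution, so E and E0 are not needed).
def _example_Randles_coeff():
    f_range, w_range = freq_gen(10**4, 10**-1)
    Z_Aw = Randles_coeff(w_range, n_electron=1, A=1.0,
                         D_red=1e-6, D_ox=1e-6, C_red=1e-5, C_ox=1e-5)
    return Z_Aw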
def cir_Randles(w, n_electron, D_red, D_ox, C_red, C_ox, Rs, Rct, n, E, A, Q='none', fs='none', E0=0, F=F, Rg=Rg, T=298.15):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with the full complexity of the Warburg coefficient
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
D_ox = Diffusion coefficent of oxidized specie [cm2/s]
D_red = Diffusion coefficent of reduced specie [cm2/s]
    C_ox = Concentration of oxidized species [mol/cm3]
    C_red = Concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = Formal potential [V]
if reduced specie is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
    n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
'''
Z_Rct = Rct
Z_Q = elem_Q(w,Q,n)
Z_w = Randles_coeff(w, n_electron=n_electron, E=E, E0=E0, D_red=D_red, D_ox=D_ox, C_red=C_red, C_ox=C_ox, A=A, T=T, Rg=Rg, F=F)
return Rs + 1/(1/Z_Q + 1/(Z_Rct+Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q='none', fs='none'):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a simplified Randles circuit, where the Warburg element is described by the coefficient sigma
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
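# Simulation sketch (illustrative parameter values): Nyquist coordinates of a
# simplified Randles response over a generated frequency range.
def _example_Randles_simplified():
    f_range, w_range = freq_gen(10**5, 10**-1)
    Z = cir_Randles_simplified(w_range, Rs=10, R=250, n=0.85, sigma=20, Q=5e-5)
    return Z.real, -Z.imag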
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb='none', Rb='none', fsb='none'):
'''
Simulation Function: -C-(RC)-C-
    This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an immobile ionic species in the bulk that gives a capacitance contribution
to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
'''
Z_C = elem_C(w,C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
def cir_Q_RQ_Q(w, Qe, ne, Qb='none', Rb='none', fsb='none', nb='none'):
'''
Simulation Function: -Q-(RQ)-Q-
    A modified cir_C_RC_C() circuit that can be used if the electrodes and bulk do not behave like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Qe = Interfacial capacitance modeled with a CPE [F]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
'''
Z_Q = elem_Q(w,Q=Qe,n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
'''
    As numpy gives errors when tanh becomes very large, above 10^250, this function is used instead of np.tanh
'''
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
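# Sketch (illustrative): this formulation only evaluates exp(-2x), so it cannot
# overflow for large positive x, and it agrees with np.tanh for moderate values.
def _example_tanh_check():
    x = np.array([0.5, 5.0, 50.0])
    return np.allclose(tanh(x), np.tanh(x))   # True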
def cir_RCRCZD(w, L, D_s, u1, u2, Cb='none', Rb='none', fsb='none', Ce='none', Re='none', fse='none'):
'''
Simulation Function: -RC_b-RC_e-Z_D
    This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte containing both a mobile and an immobile ionic species in the bulk, mixed with an
    ionically conducting salt. This behavior yields an impedance response that consists of the interfacial impedances -(RC_e)-, the ionically conducting polymer -(RC_b)-,
    and the diffusional impedance from the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
'''
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(x=alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q='none', n='none'):
'''
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
'''
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
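# Usage sketch (illustrative parameter values): simplified transmission line with
# a blocking (non-faradaic) interfacial CPE.
def _example_RsTLsQ():
    f_range, w_range = freq_gen(10**4, 10**-2)
    Z = cir_RsTLsQ(w_range, Rs=5, L=0.01, Ri=50, Q=1e-4, n=0.95)
    return Z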
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1='none'):
'''
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
    Impedance of Rs-(RQ)1-TLsQ
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
'''
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1='none', Q2='none'):
'''
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
'''
    As numpy gives errors when sinh becomes very large, above 10^250, this function is used instead of np/mp.sinh()
'''
return (1 - np.exp(-2*x))/(2*np.exp(-x))
def coth(x):
'''
    As numpy gives errors when coth becomes very large, above 10^250, this function is used instead of np/mp.coth()
'''
return (1 + np.exp(-2*x))/(1 - np.exp(-2*x))
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
'''
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1='none'):
'''
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q='none'):
'''
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
    n = Exponent for interfacial RQ element [-]
    Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = Electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1='none', Q2='none'):
'''
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
    n2 = exponent for interfacial RQ element [-]
    Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
'''
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
    Q = Constant phase element in the modified Randles element of a particle [s^n/ohm]
    n = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w,Q=Q,n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid(w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1='none'):
'''
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
    fs1 = summit frequency of the interfacial RQ element [Hz]
    n1 = exponent for the interfacial RQ element [-]
    R2 = particle charge transfer resistance [ohm*cm^2]
    Q2 = Constant phase element in the modified Randles element of a particle [s^n/ohm]
    n2 = exponent for the internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
'''
Fit Function: -C-
'''
C = params['C']
return 1/(C*(w*1j))
def elem_Q_fit(params, w):
'''
Fit Function: -Q-
Constant Phase Element for Fitting
'''
Q = params['Q']
n = params['n']
return 1/(Q*(w*1j)**n)
def cir_RsC_fit(params, w):
'''
Fit Function: -Rs-C-
'''
Rs = params['Rs']
C = params['C']
return Rs + 1/(C*(w*1j))
def cir_RsQ_fit(params, w):
'''
Fit Function: -Rs-Q-
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
return Rs + 1/(Q*(w*1j)**n)
def cir_RC_fit(params, w):
    '''
    Fit Function: -RC-
    Returns the impedance of an RC circuit, using RQ definitions where n=1.
    Any one of R, C, or fs may be left out of params and is recovered from the other two through fs = 1/(2*pi*R*C)
    '''
    if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
        C = params['C']
        fs = params['fs']
        R = (1/(C*(2*np.pi*fs)))
    elif str(params.keys())[10:].find("'C'") == -1: #elif C == 'none':
        R = params['R']
        fs = params['fs']
        C = (1/(R*(2*np.pi*fs)))
    else:
        R = params['R']
        C = params['C']
    return cir_RQ(w, R=R, Q=C, n=1, fs='none')
def cir_RQ_fit(params, w):
'''
Fit Function: -RQ-
    Return the impedance of an RQ circuit:
    Z(w) = R / (1 + R*Q*(j*w)^n)
See Explanation of equations under cir_RQ()
    The params.keys()[10:] call finds the names of the user-defined parameters that should be iterated over; if find(X) == -1, the parameter was not given and is treated as 'none'
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
return R/(1+R*Q*(w*1j)**n)
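# Fitting sketch (illustrative; assumes lmfit Parameter objects support direct
# arithmetic, as the fit functions above rely on). Leaving 'Q' out of params makes
# cir_RQ_fit recover it from R, n and fs through the key inspection described above.
def _example_RQ_fit_params():
    params = Parameters()
    params.add('R', value=100, min=0)
    params.add('n', value=0.9, min=0, max=1)
    params.add('fs', value=1000, min=0)
    f_range, w_range = freq_gen(10**5, 10**-2)
    return cir_RQ_fit(params, w_range)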
def cir_RsRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-
    Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RsRQRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("'R2'") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'Q2'") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'n2'") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("'fs2'") == -1: #elif fs == 'none':
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_Randles_simplified_Fit(params, w):
'''
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
sigma = params['sigma']
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
def cir_RsRQQ_fit(params, w):
'''
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
Z_Q = 1/(Q*(w*1j)**n)
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
'''
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
'''
Rs = params['Rs']
C = params['C']
Z_C = 1/(C*(w*1j))
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
'''
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
C = params['C']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
'''
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
Q = params['Q']
n = params['n']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
'''
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Ce = params['Ce']
Z_C = 1/(Ce*(w*1j))
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RC = (Rb/(1+Rb*Cb*(w*1j)))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
'''
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Qe = params['Qe']
ne = params['ne']
Z_Q = 1/(Qe*(w*1j)**ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Qb = params['Qb']
nb = params['nb']
fsb = params['fsb']
Rb = (1/(Qb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("Qb") == -1: #elif Q == 'none':
Rb = params['Rb']
nb = params['nb']
fsb = params['fsb']
Qb = (1/(Rb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("nb") == -1: #elif n == 'none':
Rb = params['Rb']
Qb = params['Qb']
fsb = params['fsb']
nb = np.log(Qb*Rb)/np.log(1/(2*np.pi*fsb))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
nb = params['nb']
Qb = params['Qb']
Z_RQ = Rb/(1+Rb*Qb*(w*1j)**nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
'''
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: #if R == 'none':
Ce = params['Ce']
fse = params['fse']
Re = (1/(Ce*(2*np.pi*fse)))
if str(params.keys())[10:].find("Ce") == -1: #elif Q == 'none':
Re = params['Re']
fse = params['fse']
Ce = (1/(Re*(2*np.pi*fse)))
if str(params.keys())[10:].find("fse") == -1: #elif fs == 'none':
Re = params['Re']
Ce = params['Ce']
Z_RCe = (Re/(1+Re*Ce*(w*1j)))
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RCb = (Rb/(1+Rb*Cb*(w*1j)))
# Mass transport impedance
L = params['L']
D_s = params['D_s']
u1 = params['u1']
u2 = params['u2']
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
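# Low-frequency sanity check: as w -> 0, alpha -> 0 and tanh(alpha)/alpha -> 1, so the
# mass-transport term reduces to Z_D -> Rb*(u2/u1), i.e. a purely resistive diffusion
# limit (an illustrative limit of the expression above, not an extra model term).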
# Transmission lines
def cir_RsTLsQ_fit(params, w):
'''
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
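# Note on the commented-out mpmath loop above: for extreme |x| = L/Lam the element-wise
# numpy evaluation of coth() can lose accuracy or overflow, which is why the arbitrary-
# precision fallback is kept. A compact sketch of that fallback (assuming mpmath is
# available as mp, as in the commented code):
#
#   coth_mp = np.array([complex(mp.coth(xi)) for xi in x])
#   Z_TLsQ = Lam * X1 * coth_mp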
def cir_RsRQTLsQ_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
'''
Fit Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = R/(1+R*Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
'''
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
# The interfacial impedance is given by a non-faradaic -Q- (CPE) element
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
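# Consistency check: letting Rel -> 0 in Z_TL above makes the first term vanish and
# leaves Lam*Ri*coth(x), with Lam = (Phi/Ri)**(1/2), which is exactly the simplified
# transmission line returned by cir_RsTLsQ_fit().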
def cir_RsRQTLQ_fit(params, w):
'''
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
# The interfacial impedance is given by a non-faradaic -Q- (CPE) element
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
'''
Fit Function: -R-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which includes both Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = (R/(1+R*Q*(w*1j)**n))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
'''
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
elif str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R = params['R']
Q = params['Q']
n = params['n']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The interfacial impedance is given by a Randles equivalent circuit with the finite-space Warburg element in series with R
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
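# Worked example (hypothetical values): the characteristic diffusion time used above is
# time_const = radius**2/D; for radius = 1e-6 m (1 um particles) and D = 1e-14 m**2/s
# this gives time_const = 1e2 s, i.e. the finite-space Warburg response sets in around
# w ~ 1/time_const = 0.01 rad/s.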
def cir_RsRQTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which includes both Ri and Rel. The Warburg element is specific for 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
'''
Sum of squares error function for the complex non-linear least-squares fitting procedure (CNLS). The fitting routine (lmfit's minimize()) iterates over this function
until the total sum of errors is minimized.
During the minimization the fit is weighted, and currently three different weighting options are available:
- modulus
- unity
- proportional
Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- w: angular frequency
- re: real impedance
- im: imaginary impedance
- circuit:
The available circuits are shown below; this parameter must be given as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-RQ-Q
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
- R-RQ-C
- C-RC-C
- Q-RQ-Q
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func: the weighting function, one of
- modulus
- unity
- proportional
'''
if circuit == 'C':
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == 'Q':
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == 'R-C':
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == 'R-Q':
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == 'RC':
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == 'RQ':
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == 'R-RQ':
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == 'R-RQ-RQ':
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == 'R-RC-C':
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == 'R-RC-Q':
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == 'R-RQ-Q':
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == 'R-RQ-C':
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == 'R-(Q(RW))':
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == 'R-(Q(RM))':
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == 'C-RC-C':
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == 'Q-RQ-Q':
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == 'RC-RC-ZD':
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == 'R-TLsQ':
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == 'R-RQ-TLsQ':
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == 'R-TLs':
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == 'R-RQ-TLs':
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == 'R-TLQ':
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == 'R-RQ-TLQ':
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == 'R-TL':
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == 'R-RQ-TL':
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == 'R-TL1Dsolid':
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == 'R-RQ-TL1Dsolid':
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print('Circuit is not defined in leastsq_errorfunc()')
error = [(re-re_fit)**2, (im-im_fit)**2] #sum of squares
#Different weighting options, see Lasia
if weight_func == 'modulus':
weight = [1/((re_fit**2 + im_fit**2)**(1/2)), 1/((re_fit**2 + im_fit**2)**(1/2))]
elif weight_func == 'proportional':
weight = [1/(re_fit**2), 1/(im_fit**2)]
elif weight_func == 'unity':
unity_1s = []
for k in range(len(re)):
unity_1s.append(1) #makes an array of 1's so that the weighting is == 1 * sum of squares
weight = [unity_1s, unity_1s]
else:
print('weight not defined in leastsq_errorfunc()')
S = np.array(weight) * error #weighted sum of squares
return S
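# Usage sketch (hypothetical data and start values): this residual function is intended
# to be handed to lmfit.minimize(), which flattens and minimizes the returned weighted
# residual array, e.g. for an 'R-RQ' circuit with Q omitted (recalculated from fs inside
# cir_RsRQ_fit()):
#
#   from lmfit import minimize, Parameters
#   params = Parameters()
#   params.add('Rs', value=15, min=0)
#   params.add('R', value=100, min=0)
#   params.add('n', value=0.9, min=0.65, max=1.0)
#   params.add('fs', value=30, min=0)
#   # w, re, im = measured angular frequency, Z', and -Z'' as numpy arrays
#   result = minimize(leastsq_errorfunc, params, method='leastsq',
#                     args=(w, re, im, 'R-RQ', 'modulus'))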
### Fitting Class
class EIS_exp:
'''
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
- data: datafile(s) including extension, e.g. ['EIS_data1', 'EIS_data2']
- cycle: Specific cycle numbers can be extracted using this parameter. Default is 'off', which includes all cycle numbers.
Specific cycles are extracted by inserting the cycle numbers in brackets, e.g. cycle=[1,4,6] if cycles 1, 4, and 6 are wanted.
- mask: ['high frequency', 'low frequency']; if only a high- or low-frequency cut-off is desired, use 'none' for the other, e.g. mask=[10**4,'none']
'''
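# Minimal usage sketch (hypothetical path and file name):
#
#   ex = EIS_exp(path='C:/data/', data=['cell01.mpt'], cycle='off', mask=['none','none'])
#   ex.Lin_KK(num_RC='auto', plot='residuals')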
def __init__(self, path, data, cycle='off', mask=['none','none']):
self.df_raw0 = []
self.cycleno = []
for j in range(len(data)):
if data[j].find(".mpt") != -1: #file is a .mpt file
self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".DTA") != -1: #file is a .dta file
self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".z") != -1: #file is a .z file
self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles
else:
print('Data file(s) could not be identified')
self.cycleno.append(self.df_raw0[j].cycle_number)
if j > 0 and np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]): #corrects cycle_number except for the first data file
self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number
# else:
# print('__init__ Error (#1)')
#currently need to append a cycle_number column to gamry files
# adds individual dataframes into one
self.df_raw = pd.concat(self.df_raw0, axis=0) #combines all loaded dataframes into one
self.df_raw = self.df_raw.assign(w = 2*np.pi*self.df_raw.f) #creates a new column with the angular frequency
#Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
if mask == ['none','none'] and cycle == 'off':
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_raw[self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]])
elif mask == ['none','none'] and cycle != 'off':
for i in range(len(cycle)):
self.df.append(self.df_raw[self.df_raw.cycle_number == cycle[i]]) #extracting dataframe for each cycle
elif mask[0] != 'none' and mask[1] == 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_pre.cycle_number.unique())): #Appending data based on cycle number
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] != 'none' and mask[1] == 'none' and cycle != 'off': # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited2[self.df_limited2.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle == 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
self.df.append(self.df_limited2[self.df_limited2.cycle_number == self.df_raw.cycle_number.unique()[i]])
else:
print('__init__ error (#2)')
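# Masking sketch (hypothetical cut-offs): a mask of [1e4, 1.0] keeps only rows with
# 1.0 Hz <= f <= 1e4 Hz; pandas .mask() turns the excluded rows into NaN before they are
# filtered out per cycle, mirroring the branches above:
#
#   df_hf_removed = df_raw.mask(df_raw.f > 1e4)        # high-frequency cut-off
#   df_windowed   = df_hf_removed.mask(df_raw.f < 1.0) # low-frequency cut-off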
def Lin_KK(self, num_RC='auto', legend='on', plot='residuals', bode='off', nyq_xlim='none', nyq_ylim='none', weight_func='Boukamp', savefig='none'):
'''
Plots the Linear Kramers-Kronig (KK) Validity Test
The script is based on Boukamp and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
to the data. A data quality analysis can hereby be made on the basis of the relative residuals
Ref.:
- Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
- Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894
The function performs the KK analysis and by default plots the relative residuals in each subplot.
Note that weight_func should be equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
- num_RC:
- 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
that ensures no under- or over-fitting occurs
- can be hardwired by inserting any number (RC-elements/decade)
- plot:
- 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
- 'w_data' = plots the relative residuals with the experimental data, in Nyquist and Bode plots if desired, see 'bode =' in description
- nyq_xlim/nyq_ylim: change the x/y-axis limits of the Nyquist plot; if not equal to 'none', state a [min,max] value
- legend:
- 'on' = displays cycle number
- 'potential' = displays average potential which the spectra was measured at
- 'off' = off
bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
'''
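# The 'auto' option below relies on the u criterion of Schönleber et al.:
#   u = 1 - |sum(R_k < 0)| / |sum(R_k >= 0)|
# and keeps adding -(RC)- elements while u is outside the 0.75-0.88 window. A minimal
# sketch with hypothetical fitted resistances:
#
#   R_fit = np.array([12.0, 5.0, -0.8, 3.0])
#   u = 1 - np.abs(np.sum(R_fit[R_fit < 0]))/np.abs(np.sum(R_fit[R_fit >= 0]))
#   # u = 1 - 0.8/20.0 = 0.96, so more RC elements would be added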
if num_RC == 'auto':
print('cycle || No. RC-elements || u')
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) #needed for self.KK_R
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC_sort.insert(0,0) #needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC_sort)[i]):int(np.cumsum(self.number_RC_sort)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0) )
self.KK_Rminor.append(np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i]))))
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0,0)
self.Rparam[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0] #Creates initial guesses for R's
self.t_const[i] = KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i])) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) ) #maxfev=99
self.R_names[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1] #creates R names
self.KK_R0 = np.delete(np.array(self.KK_R0), np.s_[0:len(self.KK_R0)])
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value)
self.KK_R_lim = np.cumsum(self.number_RC_sort) #used for KK_R[i]
self.KK_R[i] = self.KK_R0[self.KK_R_lim[i]:self.KK_R_lim[i+1]] #assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0)
self.KK_Rminor[i] = np.where(np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0)
self.KK_u[i] = 1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))
else:
print('['+str(i+1)+']'+' '+str(self.number_RC[i]),' '+str(np.round(self.KK_u[i],2)))
elif num_RC != 'auto': #hardwired number of RC-elements/decade
print('cycle || u')
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
self.number_RC.append(np.round(num_RC * self.decade[i])) #Creates the number of -(RC)- circuits
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC0[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC0.insert(0,0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC0)[i]):int(np.cumsum(self.number_RC0)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0) )
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))) #currently gives incorrect values
print('['+str(i+1)+']'+' '+str(np.round(self.KK_u[i],2)))
else:
print('num_RC incorrectly defined')
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
for i in range(len(self.df)):
#simulate the fitted -(RC)- ladder with the matching hard-coded circuit function KK_RC2() ... KK_RC80()
if 2 <= int(self.number_RC[i]) <= 80:
self.KK_circuit_fit.append(globals()['KK_RC' + str(int(self.number_RC[i]))](w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
else:
print('RC simulation circuit not defined')
print('  Number of RC = ', self.number_RC)
self.KK_rr_re.append(residual_real(re=self.df[i].re, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the real part
self.KK_rr_im.append(residual_imag(im=self.df[i].im, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the imag part
### Plotting Linear_kk results
##
#
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == 'on':
for i in range(len(self.df)):
self.label_re_1.append("Z' (#"+str(i+1)+")")
self.label_im_1.append("Z'' (#"+str(i+1)+")")
self.label_cycleno.append('#'+str(i+1))
elif legend == 'potential':
for i in range(len(self.df)):
self.label_re_1.append("Z' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_im_1.append("Z'' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')
if plot == 'w_data':
fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect='equal')
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df)+2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df)+2)
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i], ls='-', alpha=.7, label=self.label_cycleno[i])
### Bode Plot
if bode == 'on':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(np.log10(self.df[i].f), self.KK_rr_re[i]*100, color=colors_real[i+1], marker='D', ls='--', ms=6, alpha=.7, label=self.label_re_1[i])
ax2.plot(np.log10(self.df[i].f), self.KK_rr_im[i]*100, color=colors_imag[i+1], marker='s', ls='--', ms=6, alpha=.7, label=self.label_im_1[i])
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and write the 'Lin-KK' label on the relative-residual subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_re_min)*100*1.5, np.max(np.abs(self.KK_rr_re_min))*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_re_max)*100*.9], color='k', fontweight='bold')
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_im_min)*100*1.5, np.max(self.KK_rr_im_max)*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_im_max)*100*.9], color='k', fontweight='bold')
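# The y-limits above are stretched to 1.5x the most extreme relative residual (in percent) of
# whichever component spans the wider range, and the 'Lin-KK' tag is placed near the top of the
# visible range at the lowest frequency decade.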
### Figure specifics
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### Illustrating residuals only
elif plot == 'residuals':
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax = fig.add_subplot(231)
ax.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and write the 'Lin-KK' label on the relative-residual subplot
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
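# For a single cycle the residual axis is made symmetric about zero, spanning 1.5x the largest
# absolute residual, so both Delta Z' and Delta -Z'' stay inside the frame regardless of sign.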
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(self.KK_ymin*100*1.5, np.abs(self.KK_ymin)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(np.negative(self.KK_ymax)*100*1.5, np.abs(self.KK_ymax)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
#cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and label the relative-residual subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and label the relative-residual subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and label the relative-residual subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 5 Cycles
elif len(self.df) == 5:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and label the relative-residual subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 6 Cycles
elif len(self.df) == 6:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and label the relative-residual subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 7 Cycles
elif len(self.df) == 7:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 7
ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax7.set_xlabel("log(f) [Hz]")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax7.legend(loc='best', fontsize=10, frameon=False)
ax7.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and label the relative-residual subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:
ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:
ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 8 Cycles
elif len(self.df) == 8:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
ax8 = fig.add_subplot(338)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 7
ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax7.set_xlabel("log(f) [Hz]")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax7.legend(loc='best', fontsize=10, frameon=False)
ax7.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 8
ax8.plot(np.log10(self.df[7].f), self.KK_rr_re[7]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax8.plot(np.log10(self.df[7].f), self.KK_rr_im[7]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax8.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax8.legend(loc='best', fontsize=10, frameon=False)
ax8.axhline(0, ls='--', c='k', alpha=.5)
### Set y-limits and label the relative-residual subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(
|
np.log10(self.df[0].f)
|
numpy.log10
|
import sys
from copy import deepcopy
import pytest
import numpy as np
from numpy.random import random, randint
from flare import env, struc, gp
from flare.kernels.mc_sephyps import _str_to_kernel as stk
from flare.kernels.utils import from_mask_to_args, str_to_kernel_set
from .fake_gp import generate_hm, generate_envs
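# generate_envs / generate_hm come from the local fake_gp test helpers; they are assumed to
# build pairs of slightly perturbed atomic environments (offset by `delta`) and matching
# hyperparameter vectors/masks for the parameter-separated kernels.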
def test_force_en_multi_vs_simple():
"""Check that the analytical kernel matches the one implemented
in mc_simple.py"""
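# Strategy: evaluate the plain multicomponent kernels with a unit hyperparameter vector and
# the hyperparameter-separated (sephyps) kernels with the equivalent masked arguments on the
# same environments, and require every kernel flavour to agree to within `tol`.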
cutoffs = np.ones(3, dtype=np.float64)
delta = 1e-8
env1_1, env1_2, env1_3, env2_1, env2_2, env2_3 = generate_envs(cutoffs, delta)
# set hyperparameters
d1 = 1
d2 = 2
tol = 1e-4
hyps, hm, cut = generate_hm(1, 1, cutoffs, False)
# mc_simple
kernel0, kg0, en_kernel0, force_en_kernel0 = str_to_kernel_set("2+3+mb+mc", False)
hyps = np.ones(7, dtype=np.float64)
args0 = (hyps, cutoffs)
# mc_sephyps
kernel, kg, en_kernel, force_en_kernel = str_to_kernel_set("2+3+mb+mc", True)
args1 = from_mask_to_args(hyps, hm, cutoffs)
funcs = [[kernel0, kg0, en_kernel0, force_en_kernel0],
[kernel, kg, en_kernel, force_en_kernel]]
i = 0
reference = funcs[0][i](env1_1, env2_1, d1, d2, *args0)
result = funcs[1][i](env1_1, env2_1, d1, d2, *args1)
assert(np.isclose(reference, result, atol=tol))
i = 1
reference = funcs[0][i](env1_1, env2_1, d1, d2, *args0)
result = funcs[1][i](env1_1, env2_1, d1, d2, *args1)
assert(np.isclose(reference[0], result[0], atol=tol))
assert(np.isclose(reference[1], result[1], atol=tol).all())
i = 2
reference = funcs[0][i](env1_1, env2_1, *args0)
result = funcs[1][i](env1_1, env2_1, *args1)
assert(np.isclose(reference, result, atol=tol))
i = 3
reference = funcs[0][i](env1_1, env2_1, d1, *args0)
result = funcs[1][i](env1_1, env2_1, d1, *args1)
assert(np.isclose(reference, result, atol=tol))
@pytest.mark.parametrize('kernel_name, nbond, ntriplet, constraint',
[ ('two_body_mc', 2, 0, True),
('two_body_mc', 2, 0, False),
('three_body_mc', 0, 2, True),
('three_body_mc', 0, 2, False),
('two_plus_three_mc', 2, 2, True),
('two_plus_three_mc', 2, 2, False) ]
)
def test_force_en(kernel_name, nbond, ntriplet, constraint):
"""Check that the analytical force/en kernel matches finite difference of
energy kernel."""
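# The force/energy kernel should equal minus the derivative of the energy kernel with respect
# to the perturbed coordinate, so the one-sided finite difference below is compared against
# the analytical value with a sign flip (note the -kern_finite_diff in the final assert).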
cutoffs = np.array([1, 1])
delta = 1e-8
env1_1, env1_2, env1_3, env2_1, env2_2, env2_3 = generate_envs(cutoffs, delta)
# set hyperparameters
d1 = 1
hyps, hm, cut = generate_hm(nbond, ntriplet, cutoffs, constraint)
args0 = from_mask_to_args(hyps, hm, cutoffs)
force_en_kernel = stk[kernel_name+"_force_en"]
en_kernel = stk[kernel_name+"_en"]
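# The finite-difference estimate below perturbs the first environment by `delta`; the
# divisions by 2 and 3 are assumed to undo the per-bond / per-triplet partitioning of the
# local energies so the estimate matches the force/energy kernel convention.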
if bool('two' in kernel_name) != bool('three' in kernel_name):
# finite-difference check of the force/energy kernel
calc1 = en_kernel(env1_2, env2_1, *args0)
calc2 = en_kernel(env1_1, env2_1, *args0)
kern_finite_diff = (calc1 - calc2) / delta
if ('two' in kernel_name):
kern_finite_diff /= 2
else:
kern_finite_diff /= 3
else:
en2_kernel = stk['two_body_mc_en']
en3_kernel = stk['three_body_mc_en']
# check force kernel
hm2 = deepcopy(hm)
hm3 = deepcopy(hm)
if ('map' in hm):
hm2['original'] = np.hstack([hm2['original'][0:nbond*2], hm2['original'][-1]])
hm2['map'] = np.array([1, 3, 4])
hm3['original'] = hm3['original'][nbond*2:]
hm3['map'] = np.array([1, 3, 4])
nbond = 1
hm2['ntriplet']=0
hm3['nbond']=0
args2 = from_mask_to_args(hyps[0:nbond*2], hm2, cutoffs)
calc1 = en2_kernel(env1_2, env2_1, *args2)
calc2 = en2_kernel(env1_1, env2_1, *args2)
kern_finite_diff = (calc1 - calc2) / 2.0 / delta
args3 = from_mask_to_args(hyps[nbond*2:-1], hm3, cutoffs)
calc1 = en3_kernel(env1_2, env2_1, *args3)
calc2 = en3_kernel(env1_1, env2_1, *args3)
kern_finite_diff += (calc1 - calc2) / 3.0 / delta
kern_analytical = force_en_kernel(env1_1, env2_1, d1, *args0)
tol = 1e-4
assert(np.isclose(-kern_finite_diff, kern_analytical, atol=tol))
@pytest.mark.parametrize('kernel_name, nbond, ntriplet, constraint',
[ ('two_body_mc', 2, 0, True),
('two_body_mc', 2, 0, False),
('three_body_mc', 0, 2, True),
('three_body_mc', 0, 2, False),
('two_plus_three_mc', 2, 2, True),
('two_plus_three_mc', 2, 2, False) ]
)
def test_force(kernel_name, nbond, ntriplet, constraint):
"""Check that the analytical force kernel matches finite difference of
energy kernel."""
# create env 1
delta = 1e-5
cutoffs = np.array([1, 1])
env1_1, env1_2, env1_3, env2_1, env2_2, env2_3 = generate_envs(cutoffs, delta)
# set hyperparameters
hyps, hm, cut = generate_hm(nbond, ntriplet, cutoffs, constraint)
args0 = from_mask_to_args(hyps, hm, cutoffs)
d1 = 1
d2 = 2
kernel = stk[kernel_name]
if bool('two' in kernel_name) != bool('three' in kernel_name):
en_kernel = stk[kernel_name+"_en"]
else:
en_kernel = stk['two_plus_three_mc_en']
# check force kernel
calc1 = en_kernel(env1_2, env2_2, *args0)
calc2 = en_kernel(env1_3, env2_3, *args0)
calc3 = en_kernel(env1_2, env2_3, *args0)
calc4 = en_kernel(env1_3, env2_2, *args0)
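# Added note: env1_2/env1_3 and env2_2/env2_3 appear to be the copies of the
# two environments perturbed by +delta/-delta (see generate_envs), so the
# expression below is the central-difference estimate of the mixed second
# derivative d2E/(dx1 dx2) ~ [E(+,+) + E(-,-) - E(+,-) - E(-,+)] / (4*delta^2),
# which the analytical force kernel should reproduce.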
kern_finite_diff = (calc1 + calc2 - calc3 - calc4) / (4*delta**2)
kern_analytical = kernel(env1_1, env2_1,
d1, d2, *args0)
tol = 1e-4
assert(np.isclose(kern_finite_diff, kern_analytical, atol=tol))
@pytest.mark.parametrize('kernel_name, nbond, ntriplet, constraint',
[ ('two_body_mc', 2, 0, True),
('two_body_mc', 2, 0, False),
('three_body_mc', 0, 2, True),
('three_body_mc', 0, 2, False),
('two_plus_three_mc', 2, 2, True),
('two_plus_three_mc', 2, 2, False) ]
)
def test_hyps_grad(kernel_name, nbond, ntriplet, constraint):
np.random.seed(0)
delta = 1e-8
cutoffs =
|
np.array([1, 1])
|
numpy.array
|
"""
Tests for saving helper.
"""
import os
import unittest
import numpy as np
import pytest
pd = pytest.importorskip("pandas")
tables = pytest.importorskip("tables")
from gpso.saving_helper import (
ALL_RUNS_KEY,
EXTRAS_KEY,
RUN_PREFIX,
TableSaver,
table_reader,
)
from gpso.utils import H5_EXT
class TestTableSaver(unittest.TestCase):
FILENAME = "test"
EXTRAS = {
"a": "b",
"c": 12,
"d": 0.54e-4,
"e": [1, 2, 3],
"f": np.array([1, 2, 3]),
}
def test_init(self):
saver = TableSaver(filename=self.FILENAME)
saver.close()
self.assertTrue(os.path.exists(self.FILENAME + H5_EXT))
os.remove(self.FILENAME + H5_EXT)
def test_write_extras(self):
saver = TableSaver(filename=self.FILENAME, extras=self.EXTRAS)
saver.close()
# test saved extras
saved = tables.open_file(self.FILENAME + H5_EXT)
for key, value in self.EXTRAS.items():
saved_val = saved.root[EXTRAS_KEY][key].read()
if isinstance(saved_val, bytes):
saved_val = saved_val.decode()
if isinstance(saved_val, np.ndarray):
np.testing.assert_equal(value, saved_val)
else:
self.assertEqual(value, saved_val)
# proper exit
saved.close()
os.remove(self.FILENAME + H5_EXT)
def test_write_single_result(self):
np.random.seed(42)
ARRAY = np.random.rand(12, 3)
PARAMS = {"a": 1.0, "b": 0.1}
SCORE = 0.8
saver = TableSaver(filename=self.FILENAME)
saver.save_runs(ARRAY, SCORE, PARAMS)
saver.close()
# test saved run
saved = tables.open_file(self.FILENAME + H5_EXT)
# check parameters
for key, value in PARAMS.items():
saved_val = saved.root[ALL_RUNS_KEY][f"{RUN_PREFIX}0"]["params"][
key
].read()
self.assertEqual(saved_val, value)
# check result itself
np.testing.assert_equal(
ARRAY,
saved.root[ALL_RUNS_KEY][f"{RUN_PREFIX}0"]["result"][
"result"
].read(),
)
self.assertEqual(
SCORE,
saved.root[ALL_RUNS_KEY][f"{RUN_PREFIX}0"]["result"][
"score"
].read(),
)
# proper exit
saved.close()
os.remove(self.FILENAME + H5_EXT)
def test_write_multiple_df_results(self):
np.random.seed(42)
PD_COLUMNS = ["a", "b", "c"]
DFS = [
pd.DataFrame(
|
np.random.rand(12, 3)
|
numpy.random.rand
|
'''
This code is adapted from https://www.kaggle.com/chattob/intel-mobileodt-cervical-cancer-screening/cervix-segmentation-gmm/notebook
'''
import matplotlib.pyplot as plt
# matplotlib inline
import numpy as np
import pandas as pd
import cv2
import math
import argparse
from sklearn import mixture
from sklearn.utils import shuffle
from skimage import measure
from glob import glob
import os
from multiprocessing import Pool, cpu_count
from functools import partial
from subprocess import check_output
from tqdm import tqdm
def get_image_data(image_path):
"""
Read the image at image_path and return its data as a np.array
"""
img = cv2.imread(image_path)
assert img is not None, "Failed to read image : %s" % (image_path)
return img
def maxHist(hist):
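# Largest rectangle under a histogram: sweep the bins while keeping a stack
# of increasing heights (and the positions where they started); whenever the
# current bin is lower, pop heights and record the best area seen so far.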
maxArea = (0, 0, 0)
height = []
position = []
for i in range(len(hist)):
if (len(height) == 0):
if (hist[i] > 0):
height.append(hist[i])
position.append(i)
else:
if (hist[i] > height[-1]):
height.append(hist[i])
position.append(i)
elif (hist[i] < height[-1]):
while (height[-1] > hist[i]):
maxHeight = height.pop()
area = maxHeight * (i-position[-1])
if (area > maxArea[0]):
maxArea = (area, position[-1], i)
last_position = position.pop()
if (len(height) == 0):
break
position.append(last_position)
if (len(height) == 0):
height.append(hist[i])
elif(height[-1] < hist[i]):
height.append(hist[i])
else:
position.pop()
while (len(height) > 0):
maxHeight = height.pop()
last_position = position.pop()
area = maxHeight * (len(hist) - last_position)
if (area > maxArea[0]):
maxArea = (area, len(hist), last_position)
return maxArea
def maxRect(img):
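# Largest rectangle of non-zero pixels in a binary image: accumulate
# per-column heights row by row (resetting columns where img is 0) and run
# maxHist on each accumulated row.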
maxArea = (0, 0, 0)
addMat = np.zeros(img.shape)
for r in range(img.shape[0]):
if r == 0:
addMat[r] = img[r]
area = maxHist(addMat[r])
if area[0] > maxArea[0]:
maxArea = area + (r,)
else:
addMat[r] = img[r] + addMat[r-1]
addMat[r][img[r] == 0] *= 0
area = maxHist(addMat[r])
if area[0] > maxArea[0]:
maxArea = area + (r,)
return (int(maxArea[3]+1-maxArea[0]/abs(maxArea[1]-maxArea[2])), maxArea[2], maxArea[3], maxArea[1], maxArea[0])
def cropCircle(img):
'''
Many of the input images are thresholded, i.e. the subject appears
as a circle surrounded by black. This function finds the largest
inscribed rectangle of the thresholded image and then crops the
image to that rectangle.
input: img - image array as returned by cv2.imread
return: img_crop, rectangle, tile_size
'''
if(img.shape[0] > img.shape[1]):
tile_size = (int(img.shape[1]*256/img.shape[0]),256)
else:
tile_size = (256, int(img.shape[0]*256/img.shape[1]))
img = cv2.resize(img, dsize=tile_size)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
_, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
_, contours, _ = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
main_contour = sorted(contours, key = cv2.contourArea, reverse = True)[0]
ff = np.zeros((gray.shape[0],gray.shape[1]), 'uint8')
cv2.drawContours(ff, main_contour, -1, 1, 15)
ff_mask = np.zeros((gray.shape[0]+2,gray.shape[1]+2), 'uint8')
cv2.floodFill(ff, ff_mask, (int(gray.shape[1]/2), int(gray.shape[0]/2)), 1)
rect = maxRect(ff)
rectangle = [min(rect[0],rect[2]), max(rect[0],rect[2]), min(rect[1],rect[3]), max(rect[1],rect[3])]
img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
cv2.rectangle(ff,(min(rect[1],rect[3]),min(rect[0],rect[2])),(max(rect[1],rect[3]),max(rect[0],rect[2])),3,2)
return [img_crop, rectangle, tile_size]
def Ra_space(img, Ra_ratio, a_threshold):
'''
Extract the Ra features by converting RGB to LAB space: R is the
distance of each pixel from the image centre and a is the LAB
a channel (clipped at a_threshold). The higher the a value, the
"redder" the pixel.
'''
imgLab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
w = img.shape[0]
h = img.shape[1]
Ra = np.zeros((w*h, 2))
for i in range(w):
for j in range(h):
R = math.sqrt((w/2-i)*(w/2-i) + (h/2-j)*(h/2-j))
Ra[i*h+j, 0] = R
Ra[i*h+j, 1] = min(imgLab[i][j][1], a_threshold)
Ra[:,0] /= max(Ra[:,0])
Ra[:,0] *= Ra_ratio
Ra[:,1] /= max(Ra[:,1])
return Ra
def get_and_crop_image(image_path):
'''
Input: image_path: the absolute file path of the input image
Return: the rectangle
TODO: add more comments and rename the variables, because the
code is hard to read
'''
# get image
img = get_image_data(image_path)
initial_shape = img.shape
# TODO: review cropCircle
[img, rectangle_cropCircle, tile_size] = cropCircle(img)
# convert RGB to LAB and get the Ra space value
imgLab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
w = img.shape[0]
h = img.shape[1]
Ra = Ra_space(imgLab, 1.0, 150)
a_channel =
|
np.reshape(Ra[:,1], (w,h))
|
numpy.reshape
|
import numpy as np
from scipy import special
import scipy as sp
from tqdm import tqdm
def interpolant(t):
return t * t * t * (t * (t * 6 - 15) + 10)
def norm_to_uniform(im, scale=None):
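# Map the image intensities to an (approximately) uniform distribution:
# z-score the values, push them through the standard normal CDF (the erfc
# expression below), then rescale the result to the requested scale.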
if scale is None:
scale = [im.min(), im.max()]
im = (im - np.mean(im)) / np.std(im)
im = 1 / 2 * sp.special.erfc(-im / np.sqrt(2))
im = (im - im.min()) / (im.max() - im.min())
im = im * (scale[1] - scale[0]) + scale[0]
return im
def generate_perlin_noise_2d(shape, res):
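# 2D Perlin noise: draw a random unit gradient at every lattice point, take
# the dot product of each pixel's offset with the surrounding gradients (the
# "ramps" n00..n11 below), and blend them, presumably with the quintic fade
# curve f(t) = 6t^5 - 15t^4 + 10t^3 defined next.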
def f(t):
return 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3
delta = (res[0] / shape[0], res[1] / shape[1])
d = (shape[0] // res[0], shape[1] // res[1])
grid = np.mgrid[0 : res[0] : delta[0], 0 : res[1] : delta[1]].transpose(1, 2, 0) % 1
# Gradients
angles = 2 * np.pi * np.random.rand(res[0] + 1, res[1] + 1)
gradients = np.dstack((np.cos(angles), np.sin(angles)))
g00 = gradients[0:-1, 0:-1].repeat(d[0], 0).repeat(d[1], 1)
g10 = gradients[1:, 0:-1].repeat(d[0], 0).repeat(d[1], 1)
g01 = gradients[0:-1, 1:].repeat(d[0], 0).repeat(d[1], 1)
g11 = gradients[1:, 1:].repeat(d[0], 0).repeat(d[1], 1)
# Ramps
n00 =
|
np.sum(grid * g00, 2)
|
numpy.sum
|
# -*- coding: utf-8 -*-
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
sys.path.append("..")
from sklearn.metrics import classification_report, plot_confusion_matrix, confusion_matrix
from utils.base import save_fig
from global_variable import MODEL_OUTPUT_IMAGE_PATH
from sklearn.model_selection import train_test_split
class ClassificationWorkflowBase(object):
X = None
y = None
name = None
common_function = ["Model Score", "Confusion Matrix"]
special_function = None
@classmethod
def show_info(cls):
print("*-*" * 2, cls.name, "is running ...", "*-*" * 2)
print("Expected Functionality:")
function = cls.common_function + cls.special_function
for i in range(len(function)):
print("+ ", function[i])
def __init__(self, random_state: int= 42) -> None:
self.random_state = random_state
self.model = None
self.naming = None
@staticmethod
def data_split(X_data, y_data, test_size=0.2, random_state=42):
ClassificationWorkflowBase.X = X_data
ClassificationWorkflowBase.y = y_data
X_train, X_test, y_train, y_test = train_test_split(ClassificationWorkflowBase.X,
ClassificationWorkflowBase.y,
test_size=test_size,
random_state=random_state)
return X_train, X_test, y_train, y_test
def fit(self, X_train, y_train):
self.model.fit(X_train, y_train)
def predict(self, X_test):
y_test_prediction = self.model.predict(X_test)
return y_test_prediction
@staticmethod
def score(y_test, y_test_prediction):
print("-----* Model Score *-----")
print(classification_report(y_test, y_test_prediction))
def confusion_matrix_plot(self, X_test, y_test, y_test_prediction):
print("-----* Confusion Matrix *-----")
print(confusion_matrix(y_test, y_test_prediction))
plot_confusion_matrix(self.model, X_test, y_test)
save_fig(f"Confusion Matrix - {self.naming}", MODEL_OUTPUT_IMAGE_PATH)
class SVMClassification(ClassificationWorkflowBase):
name = "Support Vector Machine"
special_function = []
def __init__(
self,
C=1.0,
kernel="rbf",
degree=3,
gamma="scale",
coef0=0.0,
shrinking=True,
probability=False,
tol=1e-3,
cache_size=200,
class_weight=None,
verbose=False,
max_iter=-1,
decision_function_shape="ovr",
break_ties=False,
random_state=None
):
super().__init__(random_state)
self.C = C
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.shrinking = shrinking
self.probability = probability
self.tol = tol
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.decision_function_shape = decision_function_shape
self.break_ties = break_ties
self.random_state = random_state
self.model = SVC(C=self.C,
kernel=self.kernel,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
shrinking=self.shrinking,
probability=self.probability,
tol=self.tol,
cache_size=self.cache_size,
class_weight=self.class_weight,
verbose=self.verbose,
max_iter=self.max_iter,
decision_function_shape=self.decision_function_shape,
break_ties=self.break_ties,
random_state=self.random_state)
self.naming = SVMClassification.name
def plot_ready(self):
self.X = ClassificationWorkflowBase().X
self.y = ClassificationWorkflowBase().y
y = np.array(self.y)
X = np.array(self.X)
y = np.squeeze(y)
clf = self.model.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', s=50, cmap="rainbow")
return clf
def plot_svc_function(self, data, ax=None):
if ax is None:
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
x = np.linspace(xlim[0], xlim[1], 30)
y = np.linspace(ylim[0], ylim[1], 30)
Y, X =
|
np.meshgrid(y, x)
|
numpy.meshgrid
|
import sys
import numpy as np
import datetime
from collections import defaultdict
import os
# from sklearn.metrics import confusion_matrix
import glob
import keras
from Bio import pairwise2
import _pickle as cPickle
import copy
from ..features.helpers import scale_clean, scale_clean_two
from .helper import lrd
import csv
import keras.backend as K
from ..models.model_reverse import build_models
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--Nbases', type=int, choices=[4, 5, 8], default=4)
parser.add_argument('--root', type=str, default="data/training/")
parser.add_argument('--test', dest='test', action='store_true')
parser.add_argument('--size', type=int, default=20)
parser.add_argument('directories', type=str, nargs='*')
parser.add_argument('--from-pre-trained', dest='from_pre_trained', action='store_true')
parser.add_argument('--pre-trained-weight', dest='pre_trained_weight', type=str, default=None)
parser.add_argument('--pre-trained-dir-list', dest='pre_trained_dir_list', type=str)
parser.add_argument('--deltaseq', dest='deltaseq', type=int, default=10)
parser.add_argument('--forcelength', dest='forcelength', type=float, default=0.5)
parser.add_argument('--oversampleb', dest='oversampleb', type=int, default=3)
parser.add_argument('--ctc', dest='ctc', action="store_true")
parser.add_argument('--n-input', dest="n_input", type=int, default=1)
parser.add_argument('--n-output', dest="n_output", type=int, default=1)
parser.add_argument('--n-output-network', dest="n_output_network", type=int, default=1)
parser.add_argument('--force-clean', dest="force_clean", action="store_true")
parser.add_argument('--filter', nargs='+', dest="filter", type=str, default=[])
parser.add_argument('--ctc-length', dest="ctc_length", type=int, default=20)
parser.add_argument('--lr', dest="lr", type=float, default=0.01)
parser.add_argument('--clean', dest="clean", action="store_true")
parser.add_argument('--attention', dest="attention", action="store_true")
parser.add_argument('--residual', dest="res", action="store_true")
parser.add_argument('--all-file', nargs='+', dest="allignment_files", default=[], type=str)
parser.add_argument('--simple', dest="simple", action="store_true")
parser.add_argument('--all-T', dest="all_T", action="store_true")
parser.add_argument('--hybrid', dest="hybrid", action="store_true")
parser.add_argument('--feat', dest='feat', type=str)
parser.add_argument('--hot', dest='hot', action="store_true")
parser.add_argument('--nepoch', dest="nepoch", default=1000, type=int)
parser.add_argument('--correct-ref', dest="correct_ref", action="store_true")
args = parser.parse_args()
allf = args.allignment_files
root = "data/raw/20170908-R9.5/"
from ..data.dataset import Dataset
from ..features.helpers import scale_simple
D = Dataset(samfile=root + "BTF_AG_ONT_1_FAH14273_A-select.sam",
root_files=root + "AG-basecalled/")
maxf = None
if args.test:
maxf = 3
D.populate(maxf=maxf, filter_not_alligned=True, filter_ch=range(1, 11))
data_x = []
data_y = []
correct_ref = args.correct_ref
for strand in D.strands:
if args.correct_ref:
try:
strand.segmentation(w=8)
transfered = strand.transfer(strand.signal_bc, strand.segments)
# map the transfered sequence:
ref = strand.get_ref("".join(transfered["seq"].replace("N", "")), correct=True)
# align the ref on the transfered sequence
al = strand.score("".join(transfered["seq"]).replace("N", ""), ref, all_info=True)
mapped_ref = strand.give_map("".join(transfered["seq"]), al[:2])
transfered["seq"] = np.array([s for s in mapped_ref])
except:
print("Failed")
else:
strand.segmentation(w=8)
transfered = strand.transfer(strand.signal_bc, strand.segments)
select = transfered["seq"] != "N"
data_x.append(scale_simple(transfered)[select])
data_y.append(
|
np.array(transfered["seq"][select])
|
numpy.array
|
import test_helpers
import numpy as np
import pytest
from trecs.components import Users, Items
from trecs.models import SocialFiltering, ContentFiltering, PopularityRecommender, BassModel
from trecs.metrics import (
InteractionSpread,
MSEMeasurement,
DiffusionTreeMeasurement,
StructuralVirality,
InteractionMeasurement,
RecSimilarity,
InteractionSimilarity,
AverageFeatureScoreRange,
RMSEMeasurement,
)
class MeasurementUtils:
@classmethod
def assert_valid_length(self, measurements, timesteps):
# there are as many states as the timesteps for which we ran the
# system, plus one to account for the initial state
for _, value in measurements.items():
assert len(value) == timesteps + 1
@classmethod
def assert_valid_final_measurements(
self, measurements, model_attribute, key_mappings, timesteps
):
for key, value in key_mappings.items():
if key in measurements.keys():
assert np.array_equal(measurements[key][timesteps], value)
else:
assert value not in model_attribute
@classmethod
def test_generic_metric(self, model, metric, timesteps):
if metric not in model.metrics:
model.add_metrics(metric)
assert metric in model.metrics
for t in range(1, timesteps + 1):
model.run(timesteps=1)
measurements = model.get_measurements()
self.assert_valid_length(measurements, t)
class TestMeasurementModule:
"""Test basic functionalities of MeasurementModule"""
def test_measurement_module(self):
# Create model, e.g., SocialFiltering
s = SocialFiltering()
# Add Interaction Spread
old_metrics = s.metrics.copy()
s.add_metrics(InteractionSpread())
assert len(old_metrics) + 1 == len(s.metrics)
with pytest.raises(ValueError):
s.add_metrics("wrong type")
with pytest.raises(ValueError):
s.add_metrics(MSEMeasurement(), print)
with pytest.raises(ValueError):
s.add_metrics()
assert len(old_metrics) + 1 == len(s.metrics)
def test_system_state_module(self):
s = SocialFiltering()
with pytest.raises(ValueError):
s.add_state_variable("wrong type")
with pytest.raises(ValueError):
s.add_state_variable(MSEMeasurement(), print)
with pytest.raises(ValueError):
s.add_state_variable()
def test_default_measurements(self, timesteps=None):
if timesteps is None:
timesteps = np.random.randint(2, 100)
s = SocialFiltering(record_base_state=True)
for t in range(1, timesteps + 1):
s.run(timesteps=1)
system_state = s.get_system_state()
state_mappings = {
"predicted_users": s.users_hat.value,
"actual_user_scores": s.users.actual_user_scores.value,
"predicted_items": s.items_hat.value,
"predicted_user_scores": s.predicted_scores.value,
}
MeasurementUtils.assert_valid_final_measurements(
system_state, s._system_state, state_mappings, t
)
MeasurementUtils.assert_valid_length(system_state, t)
s = SocialFiltering()
s.add_metrics(MSEMeasurement())
for t in range(1, timesteps + 1):
s.run(timesteps=1)
measurements = s.get_measurements()
MeasurementUtils.assert_valid_length(measurements, t)
class TestInteractionSpread:
def test_generic(self, timesteps=None):
if timesteps is None:
timesteps = np.random.randint(2, 100)
MeasurementUtils.test_generic_metric(SocialFiltering(), InteractionSpread(), timesteps)
MeasurementUtils.test_generic_metric(
PopularityRecommender(), InteractionSpread(), timesteps
)
class TestRecSimilarity:
def test_generic(self, timesteps=None):
if timesteps is None:
timesteps = np.random.randint(2, 100)
# default # of users is 100
pairs = [np.random.choice(100, 2, replace=False) for i in range(50)]
MeasurementUtils.test_generic_metric(SocialFiltering(), RecSimilarity(pairs), timesteps)
MeasurementUtils.test_generic_metric(
PopularityRecommender(), RecSimilarity(pairs), timesteps
)
class TestInteractionSimilarity:
def test_generic(self, timesteps=None):
if timesteps is None:
timesteps = np.random.randint(2, 100)
# default # of users is 100
pairs = [np.random.choice(100, 2, replace=False) for i in range(50)]
MeasurementUtils.test_generic_metric(
SocialFiltering(), InteractionSimilarity(pairs), timesteps
)
MeasurementUtils.test_generic_metric(
PopularityRecommender(), RecSimilarity(pairs), timesteps
)
def test_functionality(self):
num_users = 2
users = np.eye(num_users)
items = np.zeros((2, 1))
# only one item so the jaccard similarity should be 1
pairs = [(0, 1)]
content = ContentFiltering(
actual_user_representation=users, actual_item_representation=items, num_items_per_iter=1
)
content.add_metrics(InteractionSimilarity(pairs))
content.run(5)
final_jacc = content.get_measurements()["interaction_similarity"][-1]
assert final_jacc == 1 # both users have the item interaction index
new_items = np.eye(num_users)
content = ContentFiltering(
user_representation=users,
item_representation=new_items,
actual_user_representation=users,
actual_item_representation=new_items,
num_items_per_iter=2,
)
content.add_metrics(InteractionSimilarity(pairs))
content.run(5)
final_jacc = content.get_measurements()["interaction_similarity"][-1]
assert final_jacc == 0 # users have no interactions in common
# alter items such that both users prefer the first item
new_items[:, 0] = np.ones(2)
new_items[:, 1] = np.zeros(2)
content.items = Items(new_items)
# force users to recalculate scores
content.users.compute_user_scores(new_items)
content.run(1)
final_jacc = content.get_measurements()["interaction_similarity"][-1]
assert final_jacc == 0.5 # users should now have 1 interaction item in common
class TestMSEMeasurement:
def test_generic(self, timesteps=None):
if timesteps is None:
timesteps = np.random.randint(2, 100)
MeasurementUtils.test_generic_metric(SocialFiltering(), MSEMeasurement(), timesteps)
MeasurementUtils.test_generic_metric(PopularityRecommender(), MSEMeasurement(), timesteps)
def test_numeric(self):
pop = PopularityRecommender()
pop.add_metrics(MSEMeasurement())
pop.run(5)
mse = np.array(pop.get_measurements()["mse"][1:])
assert not np.isnan(mse).any()
assert not np.isinf(mse).any()
class TestRMSEMeasurement:
def test_generic(self, timesteps=None):
if timesteps is None:
timesteps = np.random.randint(2, 100)
MeasurementUtils.test_generic_metric(SocialFiltering(), RMSEMeasurement(), timesteps)
MeasurementUtils.test_generic_metric(PopularityRecommender(), RMSEMeasurement(), timesteps)
def test_functionality(self):
num_users, num_attrs, num_items = 100, 20, 100
user_profiles =
|
np.random.randint(2, size=(num_users, num_attrs))
|
numpy.random.randint
|
# -*- coding: utf-8 -*-
"""
Steady-State Memetic Algorithm
"""
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import random
import numpy as np
from sklearn.utils.validation import check_X_y
from sklearn.neighbors.classification import KNeighborsClassifier
from ..base import InstanceReductionMixin
class SSMA(InstanceReductionMixin):
"""Steady State Memetic Algorithm
The Steady-State Memetic Algorithm is an evolutionary prototype
selection algorithm. It uses a memetic algorithm in order to
perform a local search on the candidate prototype subsets.
Parameters
----------
n_neighbors : int, optional (default = 3)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
alpha : float (default = 0.6)
Parameter that ponderates the fitness function.
max_loop : int (default = 1000)
Number of maximum loops performed by the algorithm.
threshold : int (default = 0)
Threshold that regulates the substitution condition.
chromosomes_count: int (default = 10)
number of chromosomes used to find the optimal solution.
Attributes
----------
`X_` : array-like, shape = [indeterminate, n_features]
Selected prototypes.
`y_` : array-like, shape = [indeterminate]
Labels of the selected prototypes.
`reduction_` : float, percentage of reduction.
Examples
--------
>>> from protopy.selection.ssma import SSMA
>>> import numpy as np
>>> X = np.array([[i] for i in range(100)])
>>> y = np.asarray(50 * [0] + 50 * [1])
>>> ssma = SSMA()
>>> ssma.fit(X, y)
SSMA(alpha=0.6, chromosomes_count=10, max_loop=1000, threshold=0)
>>> print ssma.predict([[40],[60]])
[0 1]
>>> print ssma.reduction_
0.98
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
References
----------
<NAME>, <NAME>, and <NAME>. Stratified prototype
selection based on a steady-state memetic algorithm: a study of scalability.
Memetic Computing, 2(3):183–199, 2010.
"""
def __init__(self, n_neighbors=1, alpha=0.6, max_loop=1000, threshold=0, chromosomes_count=10):
self.n_neighbors = n_neighbors
self.alpha = alpha
self.max_loop = max_loop
self.threshold = threshold
self.chromosomes_count = chromosomes_count
self.evaluations = None
self.chromosomes = None
self.best_chromosome_ac = -1
self.best_chromosome_rd = -1
self.classifier = KNeighborsClassifier(n_neighbors = n_neighbors)
def accuracy(self, chromosome, X, y):
mask = np.asarray(chromosome, dtype=bool)
cX, cy = X[mask], y[mask]
#print len(cX), len(cy), sum(chromosome)
self.classifier.fit(cX, cy)
labels = self.classifier.predict(X)
accuracy = (labels == y).sum()
return float(accuracy)/len(y)
def fitness(self, chromosome, X, y):
# TODO: add the possibility of using AUC for factor1
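# Fitness trades off classification accuracy (ac) against the reduction
# rate rd (the fraction of instances removed), weighted by alpha.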
ac = self.accuracy(chromosome, X, y)
rd = 1.0 - (float(sum(chromosome))/len(chromosome))
return self.alpha * ac + (1.0 - self.alpha) * rd
def fitness_gain(self, gain, n):
return self.alpha * (float(gain)/n) + (1 - self.alpha) * (1.0 / n)
def update_threshold(self, X, y):
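# Self-adaptive threshold: raise it when the best chromosome's accuracy has
# stopped improving and lower it when its reduction has stopped improving;
# the threshold regulates the substitution condition (see class docstring).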
best_index = np.argmax(self.evaluations)
chromosome = self.chromosomes[best_index]
best_ac = self.accuracy(chromosome, X, y)
best_rd = 1.0 - float(sum(chromosome))/len(y)
if best_ac <= self.best_chromosome_ac:
self.threshold = self.threshold + 1
if best_rd <= self.best_chromosome_rd:
self.threshold = self.threshold - 1
self.best_chromosome_ac = best_ac
self.best_chromosome_rd = best_rd
def index_nearest_neighbor(self, S, X, y):
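# For every instance in X, find the original index of its nearest neighbour
# among the instances selected by the binary mask S.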
classifier = KNeighborsClassifier(n_neighbors=1)
U = []
S_mask = np.array(S, dtype=bool, copy=True)
indexs = np.asarray(range(len(y)))[S_mask]
X_tra, y_tra = X[S_mask], y[S_mask]
for i in range(len(y)):
real_indexes = np.asarray(range(len(y)))[S_mask]
X_tra, y_tra = X[S_mask], y[S_mask]
#print len(X_tra), len(y_tra)
classifier.fit(X_tra, y_tra)
[[index]] = classifier.kneighbors(X[i], return_distance=False)
U = U + [real_indexes[index]]
return U
def memetic_looper(self, S, R):
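# Return True while at least two selected genes (S[i] == 1) remain outside
# R, i.e. the local search still has candidate positions left to examine.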
c = 0
for i in range(len(S)):
if S[i] == 1 and i not in R:
c = c + 1
if c == 2:
return True
return False
def memetic_select_j(self, S, R):
indexs = []
for i in range(len(S)):
if i not in R and S[i] == 1:
indexs.append(i)
# if the list is empty this will raise an error
return np.random.choice(indexs)
def generate_population(self, X, y):
self.chromosomes = [[np.random.choice([0,1]) for i in range(len(y))]
for c in range(self.chromosomes_count)]
self.evaluations = [self.fitness(c, X, y) for c in self.chromosomes]
self.update_threshold(X, y)
def select_parents(self, X, y):
parents = []
for i in range(2):
samples = random.sample(self.chromosomes, 2)
parents = parents + [samples[0] if self.fitness(samples[0], X, y) >
self.fitness(samples[1], X, y) else samples[1]]
return np.array(parents, copy=True)
def crossover(self, parent_1, parent_2):
size = len(parent_1)
mask = [0] * (size // 2) + [1] * (size - size // 2)
mask = np.asarray(mask, dtype=bool)
np.random.shuffle(mask)
off_1 = parent_1 * mask + parent_2 * ~mask
off_2 = parent_2 * mask + parent_1 * ~mask
return np.asarray([off_1, off_2])
def mutation(self, offspring):
for i in range(len(offspring)):
if
|
np.random.uniform(0,1)
|
numpy.random.uniform
|
import matplotlib.pyplot as plt
import os
import numpy as np
# from nest_elephant_tvb.simulation.file_tvb.Zerlaut import ZerlautAdaptationSecondOrder as model
from tvb.simulator.integrators import HeunDeterministic
test1=False
from nest_elephant_tvb.Tvb.modify_tvb.Zerlaut import ZerlautAdaptationFirstOrder as model
# test1=True
# from nest_elephant_tvb.simulation.file_tvb.Zerlaut_test_1 import ZerlautAdaptationFirstOrder as model
# excitatory parameters should come from the parameter file
excitatory={
'C_m':200.0,
't_ref':5.0,
'V_reset':-64.5,
'E_L':-64.5,
'g_L':10.0,
'I_e':0.0,
'a':0.0,
'b':0.0,
'Delta_T':2.0,
'tau_w':500.0,
'V_th':-50.0,
'E_ex':0.0,
'tau_syn_ex':5.0,
'E_in':-80.0,
'tau_syn_in':5.0,
'V_peak': 10.0,
'N_tot':10**4,
'p_connect':0.05,
'g':0.2,
'Q_e':1.0,
'Q_i':3.5,
}
#inhibitory
inhibitory={
'C_m':200.0,
't_ref':5.0,
'V_reset':-65.0,
'E_L':-65.,
'g_L':10.0,
'I_e':0.0,
'a':0.0,
'b':0.0,
'Delta_T':0.5,
'tau_w':1.0,
'V_th':-50.0,
'E_ex':0.0,
'tau_syn_ex':5.0,
'E_in':-80.0,
'tau_syn_in':5.0,
'V_peak': 10.0,
'N_tot':10**4,
'p_connect':0.05,
'g':0.2,
'Q_e':1.0,
'Q_i':3.5
}
def compute_rate(data,begin,end,nb):
"""
Compute the firing rate of each neuron
:param data: the spikes of all neurons between begin and end
:param begin: the time of the first spike
:param end: the time of the last spike
:param nb: the number of neurons
:return: the firing rate of each neuron
"""
#get data
n_fil = data[:, 0]
n_fil = n_fil.astype(int)
#count the number of the same id
count_of_n = np.bincount(n_fil)
#compute the rate
rate_each_n_incomplet = count_of_n / (end - begin)
#fill the table with the neurons which are not firing
rate_each_n = np.concatenate(
(rate_each_n_incomplet, np.zeros(-np.shape(rate_each_n_incomplet)[0] + nb +1)))
#save the value
return rate_each_n[1:]
def load_event(events):
"""
Get the ids of the spiking neurons and the spike times
:param events: id and time of each spike
:return: the spikes of all neurons
"""
data_concatenated = np.concatenate(([events['senders']],[events['times']]))
if data_concatenated.size < 5:
print('empty file')
return None
data_raw = data_concatenated[np.argsort(data_concatenated[:, 1])]
return np.swapaxes(data_raw,0,1)
def load_spike(path):
"""
Get the ids of the spiking neurons and the spike times from a file
:param path: the path to the file
:return: the spikes of all neurons
"""
if not os.path.exists(path + "/spike_detector.gdf"):
print('no file')
return None
data_concatenated = np.loadtxt(path + "/spike_detector.gdf")
if data_concatenated.size < 5:
print('empty file')
return None
data_raw = data_concatenated[np.argsort(data_concatenated[:, 1])]
return data_raw
def create_model_integration(parameter_ex,parameter_in,k,test1):
'''
Create the mean-field model from the parameters
:param parameter_ex: parameters for excitatory neurons
:param parameter_in: parameters for inhibitory neurons
:param k: number of external connections
:param test1: if True, use the model variant with 15 polynomial coefficients per population (10 otherwise)
:return: function for fitting the polynomial
'''
model_test = model()
model_test.g_L = np.array(parameter_ex['g_L'])
model_test.E_L_e = np.array(parameter_ex['E_L'])
model_test.E_L_i = np.array(parameter_in['E_L'])
model_test.C_m = np.array(parameter_ex['C_m'])
model_test.b_e = np.array(parameter_ex['b'])
model_test.a_e = np.array(parameter_ex['a'])
model_test.b_i = np.array(parameter_in['b'])
model_test.a_i = np.array(parameter_in['a'])
model_test.tau_w_e = np.array(parameter_ex['tau_w'])
model_test.tau_w_i = np.array(parameter_in['tau_w'])
model_test.E_e = np.array(parameter_ex['E_ex'])
model_test.E_i = np.array(parameter_ex['E_in'])
model_test.Q_e = np.array(parameter_ex['Q_e'])
model_test.Q_i = np.array(parameter_ex['Q_i'])
model_test.tau_e = np.array(parameter_ex['tau_syn_ex'])
model_test.tau_i = np.array(parameter_ex['tau_syn_in'])
model_test.N_tot = np.array(parameter_ex['N_tot'])
model_test.p_connect = np.array(parameter_ex['p_connect'])
model_test.g = np.array(parameter_ex['g'])
model_test.T = np.array(parameter_ex['t_ref'])
model_test.external_input_in_in = np.array(0.0)
model_test.external_input_in_ex = np.array(0.0)
model_test.external_input_ex_in = np.array(0.0)
model_test.external_input_ex_ex = np.array(0.0)
model_test.K_ext_e=np.array(k)
model_test.K_ext_i=np.array(0)
integrator = HeunDeterministic(dt=0.1)
integrator.configure()
if test1:
def function(p,fe,fi,f_ext,w):
model_test.P_e = p[:15]
model_test.P_i = p[15:]
x = np.concatenate([fe,fi,w,np.zeros((fe.size))]).reshape((4,fe.size,1,1))
coupling = np.array([f_ext]).reshape((1,f_ext.size,1,1))
local_coupling=np.array([0.])
stimulus=np.array([0.])
return integrator.scheme(x,model_test.dfun,coupling,local_coupling,stimulus)
else:
def function(p,fe,fi,f_ext,w):
model_test.P_e = p[:10]
model_test.P_i = p[10:]
x = np.concatenate([fe,fi,w,np.zeros((fe.size))]).reshape((4,fe.size,1,1))
coupling = np.array([f_ext]).reshape((1,f_ext.size,1,1))
local_coupling=np.array([0.])
stimulus=
|
np.array([0.])
|
numpy.array
|
import itertools
import matplotlib.pyplot as plt
from matplotlib import cm
import networkx as nx
import numpy as np
from scipy.spatial import Voronoi
########################################################################################################################
# #
# Trees #
# #
########################################################################################################################
def make_random_tree(num_nodes,
x_lim,
y_lim,
max_degree=4,
edge_stretch_interval=None,
seed=None
):
"""
Create a tree from random points in a rectangle by using
Kruskal on complete graph with Euclidean edge weights.
Each node can have at most a degree of max_degree.
"""
if seed is not None:
np.random.seed(seed)
points = make_random_points(num_nodes, x_lim, y_lim)
dist = {(i, j): np.linalg.norm(points[i] - points[j]) for i, j in itertools.combinations(range(len(points)), r=2)}
tree = max_degree_kruskal(dist, max_degree)
pos = {i: list(p) for i, p in enumerate(points)}
nx.set_node_attributes(tree, pos, name='pos')
# stretch edge lengths
####################################
if edge_stretch_interval is not None:
random_stretch_edge_lengths(tree, stretch_interval=edge_stretch_interval, seed=seed)
return tree
def max_degree_kruskal(dist, max_degree):
"""
Adjusted from networkx's Kruskal MST implementation to respect max_degree.
"""
subtrees = nx.utils.UnionFind()
edges = sorted((d, e[0], e[1]) for e, d in dist.items())
tree = nx.Graph()
max_degree_nodes = set()
for wt, u, v in edges:
if u in max_degree_nodes or v in max_degree_nodes:
continue
if subtrees[u] != subtrees[v]:
tree.add_edge(u, v, length=round(wt, 2))
for node in [u, v]:
if tree.degree[node] == max_degree:
max_degree_nodes.add(node)
subtrees.union(u, v)
return tree
########################################################################################################################
# #
# Voronoi graphs #
# #
########################################################################################################################
def make_voronoi_graph(num_voronoi_points,
x_lim,
y_lim,
random_leaves_frac=0.4,
num_nodes=None,
edge_stretch_interval=None,
seed=None,
):
"""
Create a Voronoi diagram and consider the graph that is spanned by the ridges between
the Voronoi cells. This is our basic Voronoi graph with Euclidean edge lengths. Now,
we can add additional random leaves (according to random_leaves_frac), split the edges
to obtain a specified number of nodes, or stretch each edge by a random factor.
:param num_voronoi_points:
int: number of points to build Voronoi regions
:param x_lim:
float: upper x-range bound for box in which we generate Voronoi points (lower bound=0)
:param y_lim:
float: upper y-range bound for box in which we generate Voronoi points (lower bound=0)
:param random_leaves_frac:
float: add this percentage of additional leaves to the voronoi graph
:param num_nodes:
int (or None): split edges such that the returned graph has num_nodes many nodes
:param edge_stretch_interval:
tuple of length 2 (or None): choose random stretch factor in this interval for each edge
:param seed:
random seed for reproducible results
:return: networkx.Graph
"""
if seed is not None:
np.random.seed(seed)
# basic Voronoi graph
####################################
points = make_random_points(num_voronoi_points, x_lim, y_lim)
vor = Voronoi(points)
graph = nx.Graph()
in_box_vertices = {i: v for i, v in enumerate(vor.vertices) if all(v >= (0, 0)) and all(v <= (x_lim, y_lim))}
in_box_edges = [(u, v) for u, v in vor.ridge_vertices if u in in_box_vertices and v in in_box_vertices]
my_edges = [(u, v, round(np.linalg.norm(in_box_vertices[u] - in_box_vertices[v]), 2)) for u, v in in_box_edges]
graph.add_weighted_edges_from(my_edges, weight='length')
nx.set_node_attributes(graph, in_box_vertices, name='pos')
graph = nx.convert_node_labels_to_integers(graph)
# additional leaves
####################################
if random_leaves_frac:
_num_nodes = graph.number_of_nodes()
for i in range(int(_num_nodes * random_leaves_frac)):
u = _num_nodes + i + 1
u_pos = make_random_points(1, x_lim, y_lim)[0]
v, d = find_nearest_node_without_intersection(graph, u_pos)
graph.add_node(u, pos=u_pos)
graph.add_edge(u, v, length=round(d, 2))
# split edges for given num_nodes
####################################
if num_nodes is not None:
num_splits = num_nodes - graph.number_of_nodes()
split_edges(graph=graph, num_splits=num_splits)
# stretch edge lengths
####################################
if edge_stretch_interval is not None:
random_stretch_edge_lengths(graph, stretch_interval=edge_stretch_interval, seed=seed)
return graph
########################################################################################################################
# #
# Two-level Voronoi networks #
# #
########################################################################################################################
def make_two_level_voronoi_graph(n_level_1_voronoi_points,
n_level_2_voronoi_points,
x_lim,
y_lim,
random_leaves_frac_l1=0,
random_leaves_frac_l2=0.6,
n_level_1_nodes=None,
num_nodes=None,
edge_stretch_interval=None,
length_factor_level_1=0.5,
seed=None,
):
"""
The two level network consists of two nested Voronoi graphs. The level 1 layer is a
priority network where travelling is generally faster (adjust with length_factor_level_1).
Therefore, an additional edge attribute 'weight' is introduced.
:param n_level_1_voronoi_points:
int: number of points to build level 1 Voronoi regions
:param n_level_2_voronoi_points:
int: number of points to build level 2 Voronoi regions
:param x_lim:
float: upper x-range bound for box in which we generate Voronoi points (lower bound=0)
:param y_lim:
float: upper y-range bound for box in which we generate Voronoi points (lower bound=0)
:param random_leaves_frac_l1:
float: add this percentage of additional leaves to the level 1 voronoi graph
:param random_leaves_frac_l2:
float: add this percentage of additional leaves to the level 2 voronoi graph
:param n_level_1_nodes:
int (or None): split edges such that the level 1 graph has num_nodes many nodes
:param num_nodes:
int (or None): split edges such that the returned graph has num_nodes many nodes
:param edge_stretch_interval:
tuple of length 2 (or None): choose random stretch factor in this interval for each edge
:param length_factor_level_1:
float: travelling on level 1 graph is faster by this factor (a new edge attribute 'weight'
is introduced)
:param seed:
random seed for reproducible results
:return: networkx.Graph
"""
level_1_layer = make_voronoi_graph(num_voronoi_points=n_level_1_voronoi_points, x_lim=x_lim, y_lim=y_lim,
random_leaves_frac=random_leaves_frac_l1, num_nodes=None,
edge_stretch_interval=None, seed=seed)
level_2_layer = make_voronoi_graph(num_voronoi_points=n_level_2_voronoi_points, x_lim=x_lim, y_lim=y_lim,
random_leaves_frac=random_leaves_frac_l2, num_nodes=None,
edge_stretch_interval=None, seed=seed)
# map level_1_layer to level_2_layer
####################################
v2_list, pos2_list = zip(*level_2_layer.nodes(data='pos'))
l1_to_l2_nodes = {}
for v1, pos1 in level_1_layer.nodes(data='pos'):
i = find_closest_point(pos1, pos2_list)
l1_to_l2_nodes[v1] = v2_list[i]
level_1_edges = set()
for u, v in level_1_layer.edges:
sh_path = nx.dijkstra_path(level_2_layer, l1_to_l2_nodes[u], l1_to_l2_nodes[v], weight='length')
for e in zip(sh_path[:-1], sh_path[1:]):
level_1_edges.add(tuple(sorted(e)))
level_2_edges = set(e for e in level_2_layer.edges if e not in level_1_edges)
two_level_network = level_2_layer
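# Edges covered by a mapped level-1 shortest path are tagged level 1, all
# remaining edges level 2.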
level = {e: 1 + int(e in level_2_edges) for e in two_level_network.edges}
nx.set_edge_attributes(two_level_network, level, name='level')
# split level 1 edges
####################################
if n_level_1_nodes is not None:
num_splits = n_level_1_nodes - len(set(l1_to_l2_nodes.values()))
split_edges(graph=two_level_network, num_splits=num_splits, no_split_edges=level_2_edges)
# travelling on level_1_graph is faster
####################################
weight = {(u, v): l * length_factor_level_1 if (u, v) in level_1_edges else l
for u, v, l in two_level_network.edges(data='length')}
nx.set_edge_attributes(two_level_network, weight, name='weight')
# split level 2 edges
####################################
if num_nodes is not None:
num_splits = num_nodes - two_level_network.number_of_nodes()
no_split_edges = [e for e in two_level_network.edges if two_level_network.edges[e]['level'] == 1]
split_edges(graph=two_level_network, num_splits=num_splits, no_split_edges=no_split_edges)
# stretch edge lengths
####################################
if edge_stretch_interval is not None:
random_stretch_edge_lengths(two_level_network, stretch_interval=edge_stretch_interval, seed=seed)
return two_level_network
########################################################################################################################
# #
# general functions #
# #
########################################################################################################################
def make_random_graph(graph_params, seed=None):
"""
Detect based on graph_params, which graph should be built and call the respective function.
"""
make_graph_function = {
'tree': make_random_tree,
'voronoi': make_voronoi_graph,
'two_level_voronoi': make_two_level_voronoi_graph
}
variant = graph_params['variant']
assert variant in make_graph_function
graph_params = {k: v for k, v in graph_params.items() if k != 'variant'}
graph = make_graph_function[variant](**graph_params, seed=seed)
pos = {v: [round(p[0], 4), round(p[1], 4)] for v, p in graph.nodes(data='pos')}
nx.set_node_attributes(graph, pos, 'pos')
return graph
def make_random_points(num_points, x_lim, y_lim):
points = []
for i in range(num_points):
pos_x = round(np.random.random() * x_lim, 4)
pos_y = round(np.random.random() * y_lim, 4)
points.append((pos_x, pos_y))
points = np.array(points)
return points
def find_nearest_node_without_intersection(graph, u_pos):
"""
Return the nearest node in graph to u_pos, such that the
direct line between the two does not intersect any edge
of the graph.
"""
def ccw(A, B, C):
"""
code from: https://stackoverflow.com/questions/3838329/how-can-i-check-if-two-segments-intersect
(ccw stands for counterclockwise)
"""
x, y = 0, 1
return (C[y] - A[y]) * (B[x] - A[x]) > (B[y] - A[y]) * (C[x] - A[x])
def intersect(A, B, C, D):
"""
Returns true if line segments AB and CD intersect.
code from: https://stackoverflow.com/questions/3838329/how-can-i-check-if-two-segments-intersect
"""
return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)
pos = dict(graph.nodes(data='pos'))
dists = sorted([(
|
np.linalg.norm(pos[v] - u_pos)
|
numpy.linalg.norm
|
# Importing the relevant modules
from pyspark import *
from pyspark.sql import SQLContext
from pyspark.sql.types import FloatType
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.optim import lr_scheduler
from torch import nn, device, manual_seed, optim
from torchvision import transforms
from PIL import Image
Image.MAX_IMAGE_PIXELS = 1000000000
import argparse
import numpy as np
import os
import time
import dataset
import model
import utils
def main(args):
model_path = args.output_dir + args.experiment_name + '/'
## Configuring Spark
spark_conf = SparkConf().setAppName("Artist Detector")
spark_context = SparkContext.getOrCreate(conf = spark_conf)
spark_context.setLogLevel("ERROR")
sqlContext = SQLContext(spark_context)
## ETL: Preparing the data
# Read Input annotations and store in dataframe
df = sqlContext.read.format("csv").option("header", "true").load(args.csv_filepath)
df.createOrReplaceTempView('artists')
# Splitting train and test sets
df_train = sqlContext.sql("""SELECT * from artists where in_train='True'""")
df_train.createOrReplaceTempView('train_set')
df_test = sqlContext.sql("""SELECT * from artists where in_train='False'""")
df_test.createOrReplaceTempView('test_set')
# Defining date cleansing function
sqlContext.udf.register("kdo", lambda s: utils.keep_date_only(s), FloatType())
# Cleaning inputs and selecting relevant columns for generating Train, Validation and Test Set
train_val_df_ori = sqlContext.sql("""SELECT date, kdo(date) as kdo_date, new_filename from train_set where date is not null""")
train_val_df_ori.createOrReplaceTempView('train_val_df_ori')
train_val_df = sqlContext.sql("""SELECT kdo_date as date, new_filename as filename from train_val_df_ori where kdo_date is not null and kdo_date > 1000 """)
test_df_ori = sqlContext.sql("""SELECT date, kdo(date) as kdo_date, new_filename from test_set where date is not null""")
test_df_ori.createOrReplaceTempView('test_df_ori')
test_df = sqlContext.sql("""SELECT kdo_date as date, new_filename as filename from test_df_ori where kdo_date is not null and kdo_date > 1000 """)
# Converting dataframes to Pandas
p_test_df = test_df.toPandas()
p_train_val_df = train_val_df.toPandas()
# Splitting Train and Validation Set
p_train_df = p_train_val_df.sample(frac=0.8, random_state=args.random_seed) #random state is a seed value
p_val_df = p_train_val_df.drop(p_train_df.index)
# Let's print some statistiscs
print('TRAINING FIELDS & TYPES: ')
train_val_df.printSchema()
print('\nTRAINING ENTRIES: {}'.format(len(p_train_df)))
print('VALIDATION ENTRIES: {}'.format(len(p_val_df)))
print('TEST ENTRIES: {}'.format(len(p_test_df)))
# Let's normalize the input dates
train_date_mean =
|
np.mean(p_train_df.date)
|
numpy.mean
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import pyLikelihood as pyLike
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.table import Table, Column
from astropy.io import fits
from fermipy import utils
from fermipy import spectrum
from fermipy import irfs
from fermipy import skymap
from fermipy.ltcube import LTCube
class SensitivityCalc(object):
"""Class for evaluating LAT source flux sensitivity.
Parameters
----------
gdiff : `~fermipy.skymap.SkyMap`
Galactic diffuse map cube object.
iso : `~numpy.ndarray`
Array of background isotropic intensity vs. energy.
ltc : `~fermipy.ltcube.LTCube`
ebins : `~numpy.ndarray`
Energy bin edges in MeV used for differential sensitivity.
event_class : str
Name of the IRF/event class (e.g. P8R2_SOURCE_V6).
event_types : list
List of lists of event type strings defining the event type
selection to be used. Each event type list will be combined.
A selection for a combined FRONT/BACK analysis is defined with
[['FRONT','BACK']]. A selection for joint FRONT/BACK analysis
is defined with [['FRONT'],['BACK']].
"""
def __init__(self, gdiff, iso, ltc, ebins, event_class, event_types=None,
gdiff_fit=None, iso_fit=None, spatial_model='PointSource',
spatial_size=None):
self._gdiff = gdiff
self._gdiff_fit = gdiff_fit
self._iso = iso
self._iso_fit = iso_fit
self._ltc = ltc
self._ebins = ebins
self._log_ebins = np.log10(ebins)
self._ectr = np.exp(utils.edge_to_center(np.log(self._ebins)))
self._event_class = event_class
self._spatial_model = spatial_model
self._spatial_size = spatial_size
if event_types is None:
self._event_types = [['FRONT'], ['BACK']]
else:
self._event_types = event_types
self._psf = []
self._exp = []
ebins = 10**np.linspace(1.0, 6.0, 5 * 8 + 1)
skydir = SkyCoord(0.0, 0.0, unit='deg')
for et in self._event_types:
self._psf += [irfs.PSFModel.create(skydir.icrs, self._ltc,
self._event_class, et,
ebins)]
self._exp += [irfs.ExposureMap.create(self._ltc,
self._event_class, et,
ebins)]
@property
def ebins(self):
return self._ebins
@property
def ectr(self):
return self._ectr
@property
def spatial_model(self):
return self._spatial_model
@property
def spatial_size(self):
return self._spatial_size
def compute_counts(self, skydir, fn, ebins=None):
"""Compute signal and background counts for a point source at
position ``skydir`` with spectral parameterization ``fn``.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
ebins : `~numpy.ndarray`
Returns
-------
sig : `~numpy.ndarray`
Signal counts array. Dimensions are energy, angular
separation, and event type.
bkg : `~numpy.ndarray`
Background counts array. Dimensions are energy, angular
separation, and event type.
"""
if ebins is None:
ebins = self.ebins
ectr = self.ectr
else:
ectr = np.exp(utils.edge_to_center(np.log(ebins)))
skydir_cel = skydir.transform_to('icrs')
skydir_gal = skydir.transform_to('galactic')
sig = []
bkg = []
bkg_fit = None
if self._gdiff_fit is not None:
bkg_fit = []
for psf, exp in zip(self._psf, self._exp):
coords0 = np.meshgrid(*[skydir_cel.ra.deg, ectr], indexing='ij')
coords1 = np.meshgrid(*[skydir_cel.dec.deg, ectr], indexing='ij')
# expv = exp.interpolate(skydir_cel.icrs.ra.deg,
# skydir_cel.icrs.dec.deg,
# ectr)
expv = exp.interpolate(coords0[0], coords1[0], coords0[1])
coords0 = np.meshgrid(*[skydir_gal.l.deg, ectr], indexing='ij')
coords1 = np.meshgrid(*[skydir_gal.b.deg, ectr], indexing='ij')
bkgv = self._gdiff.interpolate(np.ravel(coords0[0]),
np.ravel(coords1[0]),
np.ravel(coords0[1]))
bkgv = bkgv.reshape(expv.shape)
# bkgv = self._gdiff.interpolate(
# skydir_gal.l.deg, skydir_gal.b.deg, ectr)
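# Interpolate the isotropic background in log-log space and add it to the
# Galactic diffuse component.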
isov = np.exp(np.interp(np.log(ectr), np.log(self._iso[0]),
np.log(self._iso[1])))
bkgv += isov
s0, b0 = irfs.compute_ps_counts(ebins, expv, psf, bkgv, fn,
egy_dim=1,
spatial_model=self.spatial_model,
spatial_size=self.spatial_size)
sig += [s0]
bkg += [b0]
if self._iso_fit is not None:
isov_fit = np.exp(np.interp(np.log(ectr), np.log(self._iso_fit[0]),
np.log(self._iso_fit[1])))
else:
isov_fit = isov
if self._gdiff_fit is not None:
bkgv_fit = self._gdiff_fit.interpolate(np.ravel(coords0[0]),
np.ravel(coords1[0]),
np.ravel(coords0[1]))
bkgv_fit = bkgv_fit.reshape(expv.shape)
bkgv_fit += isov_fit
s0, b0 = irfs.compute_ps_counts(ebins, expv, psf,
bkgv_fit, fn, egy_dim=1,
spatial_model=self.spatial_model,
spatial_size=self.spatial_size)
bkg_fit += [b0]
sig = np.concatenate([np.expand_dims(t, -1) for t in sig])
bkg = np.concatenate([np.expand_dims(t, -1) for t in bkg])
if self._gdiff_fit is not None:
bkg_fit = np.concatenate([np.expand_dims(t, -1) for t in bkg_fit])
return sig, bkg, bkg_fit
def diff_flux_threshold(self, skydir, fn, ts_thresh, min_counts):
"""Compute the differential flux threshold for a point source at
position ``skydir`` with spectral parameterization ``fn``.
Parameters
----------
skydir : `~astropy.coordinates.SkyCoord`
Sky coordinates at which the sensitivity will be evaluated.
fn : `~fermipy.spectrum.SpectralFunction`
ts_thresh : float
Threshold on the detection test statistic (TS).
min_counts : float
Threshold on the minimum number of counts.
"""
sig, bkg, bkg_fit = self.compute_counts(skydir, fn)
norms = irfs.compute_norm(sig, bkg, ts_thresh,
min_counts, sum_axes=[2, 3],
rebin_axes=[10, 1],
bkg_fit=bkg_fit)
npred = np.squeeze(
|
np.apply_over_axes(np.sum, norms * sig, [2, 3])
|
numpy.apply_over_axes
|
import numpy as np
from mnist import download_mnist, read_mnist
def image_to_vec(images):
"""Flatten image datasets from 3D to 2D."""
return images.reshape((images.shape[0], images.shape[1] * images.shape[2]))
def one_hot_encode(labels, n_classes=10):
n_samples = labels.shape[1]
encoded = np.zeros((n_classes, n_samples))
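# Integer-array indexing below presumably sets encoded[label, i] = 1 for each
# sample i, producing the one-hot matrix of shape (n_classes, n_samples).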
encoded[labels,
|
np.arange(n_samples)
|
numpy.arange
|
"""
Functions and classes for testing data augmentation approaches.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: new BSD
import scipy
import scipy.stats
import pytest
import numpy as np
import fatf
from fatf.exceptions import IncompatibleModelError, IncorrectShapeError
import fatf.utils.array.tools as fuat
import fatf.utils.data.augmentation as fuda
import fatf.utils.distances as fud
import fatf.utils.models as fum
# yapf: disable
NUMERICAL_NP_ARRAY = np.array([
[0, 0, 0.08, 0.69],
[1, 0, 0.03, 0.29],
[0, 1, 0.99, 0.82],
[2, 1, 0.73, 0.48],
[1, 0, 0.36, 0.89],
[0, 1, 0.07, 0.21]])
NUMERICAL_STRUCT_ARRAY = np.array(
[(0, 0, 0.08, 0.69),
(1, 0, 0.03, 0.29),
(0, 1, 0.99, 0.82),
(2, 1, 0.73, 0.48),
(1, 0, 0.36, 0.89),
(0, 1, 0.07, 0.21)],
dtype=[('a', 'i'), ('b', 'i'), ('c', 'f'), ('d', 'f')])
CATEGORICAL_NP_ARRAY = np.array([
['a', 'b', 'c'],
['a', 'f', 'g'],
['b', 'c', 'c'],
['b', 'f', 'c'],
['a', 'f', 'c'],
['a', 'b', 'g']])
CATEGORICAL_STRUCT_ARRAY = np.array(
[('a', 'b', 'c'),
('a', 'f', 'g'),
('b', 'c', 'c'),
('b', 'f', 'c'),
('a', 'f', 'c'),
('a', 'b', 'g')],
dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')])
MIXED_ARRAY = np.array(
[(0, 'a', 0.08, 'a'),
(0, 'f', 0.03, 'bb'),
(1, 'c', 0.99, 'aa'),
(1, 'a', 0.73, 'a'),
(0, 'c', 0.36, 'b'),
(1, 'f', 0.07, 'bb')],
dtype=[('a', 'i'), ('b', 'U1'), ('c', 'f'), ('d', 'U2')])
NUMERICAL_NP_RESULTS = np.array([
[0.370, 0.762, 0.658, 0.829],
[-0.103, -0.117, 0.361, 0.571],
[0.483, -0.117, -0.092, 0.570]])
NUMERICAL_STRUCT_RESULTS = np.array(
[(0.180, -0.281, -0.252, 0.632),
(-1.426, -0.506, -0.437, 0.707),
(-1.286, 0.157, 0.616, 0.324)],
dtype=[('a', 'f'), ('b', 'f'), ('c', 'f'), ('d', 'f')])
NUMERICAL_NP_CAT_RESULTS = np.array([
[0., 0.267, 0.268, 0.986],
[0., 0.723, 0.526, 0.551],
[0., -0.662, 0.334, 1.007]])
NUMERICAL_STRUCT_CAT_RESULTS = np.array(
[(0, -0.362, 0.149, 0.623),
(1, -0.351, 0.458, 0.702),
(2, -0.047, -0.244, 0.860)],
dtype=[('a', 'i'), ('b', 'f'), ('c', 'f'), ('d', 'f')])
CATEGORICAL_NP_RESULTS = np.array([
['a', 'f', 'c'],
['a', 'b', 'c'],
['a', 'b', 'c']])
CATEGORICAL_STRUCT_RESULTS = np.array(
[('a', 'c', 'c'),
('b', 'b', 'g'),
('a', 'f', 'g')],
dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')])
MIXED_RESULTS = np.array(
[(0.254, 'a', 0.429, 'a'),
(0.071, 'c', -0.310, 'aa',),
(0.481, 'f', 0.180, 'bb')],
dtype=[('a', '<f8'), ('b', 'U1'), ('c', '<f8'), ('d', 'U2')])
NUMERICAL_NP_0_CAT_VAL = np.array([0, 1, 2])
NUMERICAL_NP_0_CAT_FREQ = np.array([0.5, 0.3, 0.2])
# yapf: enable
def test_validate_input():
"""
Tests :func:`fatf.utils.data.augmentation._validate_input` function.
"""
incorrect_shape_data = ('The input dataset must be a 2-dimensional numpy '
'array.')
type_error_data = 'The input dataset must be of a base type.'
incorrect_shape_gt = ('The ground_truth array must be 1-dimensional. (Or '
'None if it is not required.)')
type_error_gt = 'The ground_truth array must be of a base type.'
incorrect_shape_instances = ('The number of labels in the ground_truth '
'array is not equal to the number of data '
'points in the dataset array.')
index_error_cidx = ('The following indices are invalid for the input '
'dataset: {}.')
type_error_cidx = ('The categorical_indices parameter must be a Python '
'list or None.')
type_error_itf = 'The int_to_float parameter has to be a boolean.'
with pytest.raises(IncorrectShapeError) as exin:
fuda._validate_input(np.array([0, 4, 3, 0]))
assert str(exin.value) == incorrect_shape_data
with pytest.raises(TypeError) as exin:
fuda._validate_input(np.array([[0, 4], [None, 0]]))
assert str(exin.value) == type_error_data
#
with pytest.raises(IncorrectShapeError) as exin:
fuda._validate_input(MIXED_ARRAY, MIXED_ARRAY)
assert str(exin.value) == incorrect_shape_gt
with pytest.raises(TypeError) as exin:
fuda._validate_input(MIXED_ARRAY, np.array([1, 2, 3, None, 4, 5]))
assert str(exin.value) == type_error_gt
with pytest.raises(IncorrectShapeError) as exin:
fuda._validate_input(MIXED_ARRAY, np.array([1, 2, 3]))
assert str(exin.value) == incorrect_shape_instances
#
with pytest.raises(TypeError) as exin:
fuda._validate_input(NUMERICAL_NP_ARRAY, categorical_indices=0)
assert str(exin.value) == type_error_cidx
with pytest.raises(IndexError) as exin:
fuda._validate_input(MIXED_ARRAY, categorical_indices=['f'])
assert str(exin.value) == index_error_cidx.format(['f'])
with pytest.raises(IndexError) as exin:
fuda._validate_input(MIXED_ARRAY, categorical_indices=[1])
assert str(exin.value) == index_error_cidx.format([1])
#
with pytest.raises(TypeError) as exin:
fuda._validate_input(NUMERICAL_NP_ARRAY, int_to_float='True')
assert str(exin.value) == type_error_itf
#
assert fuda._validate_input(
MIXED_ARRAY,
categorical_indices=['a', 'b'],
ground_truth=np.array([1, 2, 3, 4, 5, 6]),
int_to_float=False)
class TestAugmentation(object):
"""
Tests :class:`fatf.utils.data.augmentation.Augmentation` abstract class.
"""
class BrokenAugmentor1(fuda.Augmentation):
"""
A broken data augmentation implementation.
This class does not have a ``sample`` method.
"""
def __init__(self, dataset, categorical_indices=None):
"""
Dummy init method.
"""
super().__init__( # pragma: nocover
dataset,
categorical_indices=categorical_indices)
class BrokenAugmentor2(fuda.Augmentation):
"""
A broken data augmentation implementation.
This class does not have a ``sample`` method.
"""
class BaseAugmentor(fuda.Augmentation):
"""
A dummy data augmentation implementation.
For :func:`fatf.utils.data.augmentation._validate_input` and
:func:`~fatf.utils.data.augmentation.Augmentation._validate_sample_input`
testing.
"""
def __init__(self,
dataset,
categorical_indices=None,
int_to_float=True):
"""
Dummy init method.
"""
super().__init__(
dataset,
categorical_indices=categorical_indices,
int_to_float=int_to_float)
def sample(self, data_row=None, samples_number=10):
"""
Dummy sample method.
"""
self._validate_sample_input(data_row, samples_number)
return np.ones((samples_number, self.features_number))
def test_augmentation_class_init(self):
"""
Tests :class:`fatf.utils.data.augmentation.Augmentation` class init.
"""
abstract_method_error = ("Can't instantiate abstract class "
'{} with abstract methods sample')
user_warning = (
'Some of the string-based columns in the input dataset were not '
'selected as categorical features via the categorical_indices '
'parameter. String-based columns cannot be treated as numerical '
'features, therefore they will be also treated as categorical '
'features (in addition to the ones selected with the '
'categorical_indices parameter).')
with pytest.raises(TypeError) as exin:
self.BrokenAugmentor1(NUMERICAL_NP_ARRAY)
msg = abstract_method_error.format('BrokenAugmentor1')
assert str(exin.value) == msg
with pytest.raises(TypeError) as exin:
self.BrokenAugmentor2(NUMERICAL_NP_ARRAY)
msg = abstract_method_error.format('BrokenAugmentor2')
assert str(exin.value) == msg
with pytest.raises(TypeError) as exin:
fuda.Augmentation(NUMERICAL_NP_ARRAY)
assert str(exin.value) == abstract_method_error.format('Augmentation')
# Test for a categorical index warning
with pytest.warns(UserWarning) as warning:
augmentor = self.BaseAugmentor(CATEGORICAL_NP_ARRAY, [0])
assert len(warning) == 1
assert str(warning[0].message) == user_warning
assert np.array_equal(augmentor.categorical_indices, [0, 1, 2])
#
with pytest.warns(UserWarning) as warning:
augmentor = self.BaseAugmentor(CATEGORICAL_STRUCT_ARRAY, ['a'])
assert len(warning) == 1
assert str(warning[0].message) == user_warning
assert np.array_equal(augmentor.categorical_indices,
np.array(['a', 'b', 'c']))
#
with pytest.warns(UserWarning) as warning:
augmentor = self.BaseAugmentor(MIXED_ARRAY, ['b'])
assert len(warning) == 1
assert str(warning[0].message) == user_warning
assert np.array_equal(augmentor.categorical_indices, ['b', 'd'])
# Validate internal variables
categorical_np_augmentor = self.BaseAugmentor(CATEGORICAL_NP_ARRAY)
assert np.array_equal(categorical_np_augmentor.dataset,
CATEGORICAL_NP_ARRAY)
assert not categorical_np_augmentor.is_structured
assert categorical_np_augmentor.ground_truth is None
assert categorical_np_augmentor.categorical_indices == [0, 1, 2]
assert categorical_np_augmentor.numerical_indices == []
assert categorical_np_augmentor.features_number == 3
categorical_struct_augmentor = self.BaseAugmentor(
CATEGORICAL_STRUCT_ARRAY)
assert np.array_equal(categorical_struct_augmentor.dataset,
CATEGORICAL_STRUCT_ARRAY)
assert categorical_struct_augmentor.is_structured
assert categorical_struct_augmentor.ground_truth is None
assert (categorical_struct_augmentor.categorical_indices
== ['a', 'b', 'c']) # yapf: disable
assert categorical_struct_augmentor.numerical_indices == []
assert categorical_struct_augmentor.features_number == 3
mixed_augmentor = self.BaseAugmentor(MIXED_ARRAY)
assert np.array_equal(mixed_augmentor.dataset, MIXED_ARRAY)
assert mixed_augmentor.is_structured
assert mixed_augmentor.ground_truth is None
assert mixed_augmentor.categorical_indices == ['b', 'd']
assert mixed_augmentor.numerical_indices == ['a', 'c']
assert mixed_augmentor.features_number == 4
numerical_np_augmentor = self.BaseAugmentor(NUMERICAL_NP_ARRAY, [0, 1])
assert np.array_equal(numerical_np_augmentor.dataset,
NUMERICAL_NP_ARRAY)
assert not numerical_np_augmentor.is_structured
assert numerical_np_augmentor.ground_truth is None
assert numerical_np_augmentor.categorical_indices == [0, 1]
assert numerical_np_augmentor.numerical_indices == [2, 3]
assert numerical_np_augmentor.features_number == 4
# Test type generalisation
assert numerical_np_augmentor.sample_dtype == np.float64
#
dtype = mixed_augmentor.sample_dtype
assert len(dtype) == 4
for i in range(4):
assert len(dtype[i]) == 2
assert dtype[0][0] == 'a'
assert dtype[0][1] == np.float64
assert dtype[1][0] == 'b'
assert dtype[1][1] == 'U1'
assert dtype[2][0] == 'c'
assert dtype[2][1] == np.float64
assert dtype[3][0] == 'd'
assert dtype[3][1] == 'U2'
#
# Test type generalisation
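        # (With int_to_float=True the integer fields 'a' and 'b' of
        # NUMERICAL_STRUCT_ARRAY are generalised to np.float64 in the sampling
        # dtype; with int_to_float=False -- checked further below -- they stay
        # integer-typed.)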
numerical_struct_augmentor_i2f = self.BaseAugmentor(
NUMERICAL_STRUCT_ARRAY, int_to_float=True)
dtype = numerical_struct_augmentor_i2f.sample_dtype
assert len(dtype) == 4
for i, name in enumerate(['a', 'b', 'c', 'd']):
assert len(dtype[i]) == 2
assert dtype[i][0] == name
assert dtype[i][1] == np.float64
#
numerical_struct_augmentor = self.BaseAugmentor(
NUMERICAL_STRUCT_ARRAY, int_to_float=False)
dtype = numerical_struct_augmentor.sample_dtype
assert len(dtype) == 4
for i in range(4):
assert len(dtype[i]) == 2
assert dtype[0][0] == 'a'
assert dtype[0][1] == np.int64
assert dtype[1][0] == 'b'
assert dtype[1][1] == np.int64
assert dtype[2][0] == 'c'
assert dtype[2][1] == np.float64
assert dtype[3][0] == 'd'
assert dtype[3][1] == np.float64
def test_augmentation_sample_validation(self):
"""
Tests :func:`~fatf.utils.data.augmentation.Augmentation.sample` method.
        This function tests input validation for the ``sample`` method.
"""
incorrect_shape_data_row = ('The data_row must either be a '
'1-dimensional numpy array or numpy void '
'object for structured rows.')
type_error_data_row = ('The dtype of the data_row is different to the '
'dtype of the data array used to initialise '
'this class.')
incorrect_shape_features = ('The data_row must contain the same '
'number of features as the dataset used '
'to initialise this class.')
#
value_error_samples_number = ('The samples_number parameter must be a '
'positive integer.')
type_error_samples_number = ('The samples_number parameter must be an '
'integer.')
# Validate sample input rows
numerical_np_augmentor = self.BaseAugmentor(NUMERICAL_NP_ARRAY)
categorical_np_augmentor = self.BaseAugmentor(CATEGORICAL_NP_ARRAY)
numerical_struct_augmentor = self.BaseAugmentor(NUMERICAL_STRUCT_ARRAY)
categorical_struct_augmentor = self.BaseAugmentor(
CATEGORICAL_STRUCT_ARRAY, categorical_indices=['a', 'b', 'c'])
# data_row shape
with pytest.raises(IncorrectShapeError) as exin:
numerical_np_augmentor.sample(NUMERICAL_NP_ARRAY)
assert str(exin.value) == incorrect_shape_data_row
with pytest.raises(IncorrectShapeError) as exin:
numerical_struct_augmentor.sample(NUMERICAL_STRUCT_ARRAY)
assert str(exin.value) == incorrect_shape_data_row
# data_row type
with pytest.raises(TypeError) as exin:
numerical_np_augmentor.sample(np.array(['a', 'b', 'c', 'd']))
assert str(exin.value) == type_error_data_row
with pytest.raises(TypeError) as exin:
numerical_struct_augmentor.sample(MIXED_ARRAY[0])
assert str(exin.value) == type_error_data_row
with pytest.raises(TypeError) as exin:
categorical_np_augmentor.sample(np.array([0.1]))
assert str(exin.value) == type_error_data_row
# Structured too short
with pytest.raises(TypeError) as exin:
numerical_struct_augmentor.sample(MIXED_ARRAY[['a', 'b']][0])
assert str(exin.value) == type_error_data_row
# data_row features number
with pytest.raises(IncorrectShapeError) as exin:
numerical_np_augmentor.sample(np.array([0.1, 1, 2]))
assert str(exin.value) == incorrect_shape_features
with pytest.raises(IncorrectShapeError) as exin:
categorical_np_augmentor.sample(np.array(['a', 'b']))
assert str(exin.value) == incorrect_shape_features
# samples_number type
with pytest.raises(TypeError) as exin:
numerical_np_augmentor.sample(np.array([0, 0, 0.08, 0.69]), 'a')
assert str(exin.value) == type_error_samples_number
with pytest.raises(TypeError) as exin:
numerical_np_augmentor.sample(np.array([0, 0, 0.08, 0.69]), 5.5)
assert str(exin.value) == type_error_samples_number
# samples_number value
with pytest.raises(ValueError) as exin:
numerical_np_augmentor.sample(np.array([0, 0, 0.08, 0.69]), -1)
assert str(exin.value) == value_error_samples_number
with pytest.raises(ValueError) as exin:
numerical_np_augmentor.sample(np.array([0, 0, 0.08, 0.69]), 0)
assert str(exin.value) == value_error_samples_number
# All OK
ones_30 = np.ones((10, 3))
ones_40 = np.ones((10, 4))
ones_300 = np.ones((100, 3))
ones_400 = np.ones((100, 4))
assert np.array_equal(
numerical_np_augmentor.sample(NUMERICAL_NP_ARRAY[0, :]), ones_40)
assert np.array_equal(
numerical_np_augmentor.sample(samples_number=100), ones_400)
assert np.array_equal(
numerical_np_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=100), ones_400)
assert np.array_equal(
categorical_struct_augmentor.sample(CATEGORICAL_STRUCT_ARRAY[0]),
ones_30)
assert np.array_equal(
categorical_struct_augmentor.sample(samples_number=100), ones_300)
assert np.array_equal(
categorical_struct_augmentor.sample(
CATEGORICAL_STRUCT_ARRAY[0], samples_number=100), ones_300)
class TestNormalSampling(object):
"""
Tests :class:`fatf.utils.data.augmentation.NormalSampling` class.
"""
numerical_np_0_augmentor = fuda.NormalSampling(NUMERICAL_NP_ARRAY, [0])
numerical_np_augmentor = fuda.NormalSampling(NUMERICAL_NP_ARRAY)
numerical_struct_a_augmentor = fuda.NormalSampling(NUMERICAL_STRUCT_ARRAY,
['a'])
numerical_struct_augmentor = fuda.NormalSampling(NUMERICAL_STRUCT_ARRAY)
numerical_struct_augmentor_f = fuda.NormalSampling(
NUMERICAL_STRUCT_ARRAY, int_to_float=False)
categorical_np_augmentor = fuda.NormalSampling(CATEGORICAL_NP_ARRAY)
categorical_np_012_augmentor = fuda.NormalSampling(CATEGORICAL_NP_ARRAY,
[0, 1, 2])
categorical_struct_abc_augmentor = fuda.NormalSampling(
CATEGORICAL_STRUCT_ARRAY, ['a', 'b', 'c'])
mixed_augmentor = fuda.NormalSampling(MIXED_ARRAY, ['b', 'd'])
def test_init(self):
"""
Tests :class:`fatf.utils.data.augmentation.NormalSampling` class init.
"""
# Test class inheritance
assert (self.numerical_np_0_augmentor.__class__.__bases__[0].__name__
== 'Augmentation')
# Test calculating numerical and categorical indices
assert self.numerical_np_0_augmentor.categorical_indices == [0]
assert self.numerical_np_0_augmentor.numerical_indices == [1, 2, 3]
#
assert self.numerical_np_augmentor.categorical_indices == []
assert self.numerical_np_augmentor.numerical_indices == [0, 1, 2, 3]
#
assert self.numerical_struct_a_augmentor.categorical_indices == ['a']
assert (self.numerical_struct_a_augmentor.numerical_indices
== ['b', 'c', 'd']) # yapf: disable
#
assert self.categorical_np_augmentor.categorical_indices == [0, 1, 2]
assert self.categorical_np_augmentor.numerical_indices == []
# Test attributes unique to NormalSampling
csv = self.numerical_np_0_augmentor.categorical_sampling_values
nsv = self.numerical_np_0_augmentor.numerical_sampling_values
#
assert len(csv) == 1
assert 0 in csv
assert len(csv[0]) == 2
assert np.array_equal(csv[0][0], np.array([0, 1, 2]))
assert np.allclose(
csv[0][1], np.array([3 / 6, 2 / 6, 1 / 6]), atol=1e-3)
#
assert len(nsv) == 3
assert 1 in nsv and 2 in nsv and 3 in nsv
assert len(nsv[1]) == 2 and len(nsv[2]) == 2 and len(nsv[3]) == 2
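        # (The numerical sampling values are the (mean, std) of each dataset
        # column, e.g. column 1 of NUMERICAL_NP_ARRAY is [0, 0, 1, 1, 0, 1],
        # giving a mean of 0.5 and a population standard deviation of 0.5, as
        # asserted below.)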
assert nsv[1][0] == pytest.approx(.5, abs=1e-3)
assert nsv[1][1] == pytest.approx(.5, abs=1e-3)
assert nsv[2][0] == pytest.approx(.377, abs=1e-3)
assert nsv[2][1] == pytest.approx(.366, abs=1e-3)
assert nsv[3][0] == pytest.approx(.563, abs=1e-3)
assert nsv[3][1] == pytest.approx(.257, abs=1e-3)
def test_sample(self):
"""
Tests :func:`~fatf.utils.data.augmentation.NormalSampling.sample`.
"""
fatf.setup_random_seed()
# Pure numerical sampling of a data point
# ...numpy array results
samples = self.numerical_np_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=3)
assert np.allclose(samples, NUMERICAL_NP_RESULTS, atol=1e-3)
# ...structured array results
samples_struct = self.numerical_struct_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=3)
for i in samples_struct.dtype.names:
assert np.allclose(
samples_struct[i], NUMERICAL_STRUCT_RESULTS[i], atol=1e-3)
# ...numpy array results mean
samples = self.numerical_np_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=1000)
assert np.allclose(
samples.mean(axis=0), NUMERICAL_NP_ARRAY[0, :], atol=1e-1)
assert np.allclose(
samples.std(axis=0), NUMERICAL_NP_ARRAY.std(axis=0), atol=1e-1)
# ...structured array results mean
samples_struct = self.numerical_struct_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=1000)
for i in samples_struct.dtype.names:
assert np.allclose(
np.mean(samples_struct[i]),
NUMERICAL_STRUCT_ARRAY[0][i],
atol=1e-1)
assert np.allclose(
np.std(samples_struct[i]),
np.std(NUMERICAL_STRUCT_ARRAY[i]),
atol=1e-1)
# Pure numerical sampling of the mean of the data
# ...numpy array mean
samples = self.numerical_np_augmentor.sample(samples_number=1000)
assert np.allclose(
samples.mean(axis=0), NUMERICAL_NP_ARRAY.mean(axis=0), atol=1e-1)
assert np.allclose(
samples.std(axis=0), NUMERICAL_NP_ARRAY.std(axis=0), atol=1e-1)
# ...structured array mean
samples_struct = self.numerical_struct_augmentor.sample(
samples_number=1000)
for i in samples_struct.dtype.names:
assert np.allclose(
np.mean(samples_struct[i]),
np.mean(NUMERICAL_STRUCT_ARRAY[i]),
atol=1e-1)
assert np.allclose(
np.std(samples_struct[i]),
np.std(NUMERICAL_STRUCT_ARRAY[i]),
atol=1e-1)
#######################################################################
# Numerical sampling with one categorical index defined
# ...numpy array results
samples = self.numerical_np_0_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=3)
assert np.allclose(samples, NUMERICAL_NP_CAT_RESULTS, atol=1e-3)
# ...structured array results
samples_struct = self.numerical_struct_a_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=3)
for i in samples_struct.dtype.names:
assert np.allclose(
samples_struct[i], NUMERICAL_STRUCT_CAT_RESULTS[i], atol=1e-3)
# ...numpy array results mean
samples = self.numerical_np_0_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=100)
# ......numerical
assert np.allclose(
samples.mean(axis=0)[1:], NUMERICAL_NP_ARRAY[0, 1:], atol=1e-1)
assert np.allclose(
samples.std(axis=0)[1:],
NUMERICAL_NP_ARRAY.std(axis=0)[1:],
atol=1e-1)
# ......categorical
val, freq = np.unique(samples[:, 0], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, NUMERICAL_NP_0_CAT_VAL)
assert np.allclose(freq, NUMERICAL_NP_0_CAT_FREQ, atol=1e-1)
# ...structured array results mean
samples_struct = self.numerical_struct_a_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=100)
# ......numerical
for i in samples_struct.dtype.names[1:]:
assert np.allclose(
np.mean(samples_struct[i]),
NUMERICAL_STRUCT_ARRAY[0][i],
atol=1e-1)
assert np.allclose(
np.std(samples_struct[i]),
np.std(NUMERICAL_STRUCT_ARRAY[i]),
atol=1e-1)
# ......categorical
val_struct, freq_struct = np.unique(
samples_struct['a'], return_counts=True)
freq_struct = freq_struct / freq_struct.sum()
assert np.array_equal(val_struct, NUMERICAL_NP_0_CAT_VAL)
assert np.allclose(freq_struct, NUMERICAL_NP_0_CAT_FREQ, atol=1e-1)
# ...numpy array mean
samples = self.numerical_np_0_augmentor.sample(samples_number=1000)
# ......numerical
assert np.allclose(
samples.mean(axis=0)[1:],
NUMERICAL_NP_ARRAY.mean(axis=0)[1:],
atol=1e-1)
# ......categorical
val, freq = np.unique(samples[:, 0], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, NUMERICAL_NP_0_CAT_VAL)
assert np.allclose(freq, NUMERICAL_NP_0_CAT_FREQ, atol=1e-1)
# ...structured array mean
samples_struct = self.numerical_struct_a_augmentor.sample(
samples_number=1000)
# ......numerical
for i in samples_struct.dtype.names[1:]:
assert np.allclose(
np.mean(samples_struct[i]),
np.mean(NUMERICAL_STRUCT_ARRAY[i]),
atol=1e-1)
assert np.allclose(
np.std(samples_struct[i]),
np.std(NUMERICAL_STRUCT_ARRAY[i]),
atol=1e-1)
# ......categorical
val_struct, freq_struct = np.unique(
samples_struct['a'], return_counts=True)
freq_struct = freq_struct / freq_struct.sum()
assert np.array_equal(val_struct, NUMERICAL_NP_0_CAT_VAL)
assert np.allclose(freq_struct, NUMERICAL_NP_0_CAT_FREQ, atol=1e-1)
#######################################################################
#######################################################################
# Pure categorical sampling
# ...numpy array
samples = self.categorical_np_012_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=3)
assert np.array_equal(samples, CATEGORICAL_NP_RESULTS)
# ...structured array
samples_struct = self.categorical_struct_abc_augmentor.sample(
CATEGORICAL_STRUCT_ARRAY[0], samples_number=3)
assert np.array_equal(samples_struct, CATEGORICAL_STRUCT_RESULTS)
vals = [['a', 'b'], ['b', 'c', 'f'], ['c', 'g']]
# ...numpy array proportions and values
samples = self.categorical_np_012_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=100)
#
proportions = [
np.array([0.62, 0.38]),
np.array([0.31, 0.17, 0.52]),
np.array([0.63, 0.37])
]
for i, index in enumerate([0, 1, 2]):
val, freq = np.unique(samples[:, index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-2)
# ...structured array proportions and values
samples_struct = self.categorical_struct_abc_augmentor.sample(
CATEGORICAL_STRUCT_ARRAY[0], samples_number=100)
#
proportions = [
np.array([0.74, 0.26]),
np.array([0.38, 0.12, 0.50]),
np.array([0.63, 0.37])
]
for i, index in enumerate(['a', 'b', 'c']):
val, freq = np.unique(samples_struct[index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-2)
# No need to check for mean of dataset since categorical features are
# sampled from the distribution of the entire dataset and not centered
# on the data_row.
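        # (For illustration: column 0 of CATEGORICAL_NP_ARRAY is
        # ['a', 'a', 'b', 'b', 'a', 'a'], so 'a' and 'b' are sampled with
        # probabilities 4/6 and 2/6; the ~0.62/0.38 proportions asserted above
        # are the realised frequencies of 100 such draws under the fixed seed.)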
#######################################################################
#######################################################################
# Mixed array with categorical indices auto-discovered
vals = [['a', 'c', 'f'], ['a', 'aa', 'b', 'bb']]
proportions = [
np.array([0.33, 0.33, 0.33]),
np.array([0.33, 0.16, 0.16, 0.33])
]
# Instance
samples = self.mixed_augmentor.sample(MIXED_ARRAY[0], samples_number=3)
# ...categorical
assert np.array_equal(samples[['b', 'd']], MIXED_RESULTS[['b', 'd']])
# ...numerical
for i in ['a', 'c']:
assert np.allclose(samples[i], MIXED_RESULTS[i], atol=1e-3)
# Instance mean
samples = self.mixed_augmentor.sample(
MIXED_ARRAY[0], samples_number=1000)
# ...numerical
for i in ['a', 'c']:
assert np.allclose(
np.mean(samples[i]), MIXED_ARRAY[0][i], atol=1e-1)
assert np.allclose(
np.std(samples[i]), np.std(MIXED_ARRAY[i]), atol=1e-1)
# ...categorical
for i, index in enumerate(['b', 'd']):
val, freq = np.unique(samples[index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-1)
# Dataset mean
samples = self.mixed_augmentor.sample(samples_number=1000)
# ...numerical
for i in ['a', 'c']:
assert np.allclose(
np.mean(samples[i]), np.mean(MIXED_ARRAY[i]), atol=1e-1)
assert np.allclose(
np.std(samples[i]), np.std(MIXED_ARRAY[i]), atol=1e-1)
# ...categorical
for i, index in enumerate(['b', 'd']):
val, freq = np.unique(samples[index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-1)
#######################################################################
# Sample without float cast
samples = self.numerical_struct_augmentor_f.sample(samples_number=5)
samples_answer = np.array(
[(-1, 0, 0.172, 0.624),
(1, 1, 0.343, 0.480),
(0, 0, 0.649, 0.374),
(0, 0, 0.256, 0.429),
(0, 0, 0.457, 0.743)],
dtype=NUMERICAL_STRUCT_ARRAY.dtype) # yapf: disable
for i in ['a', 'b', 'c', 'd']:
assert np.allclose(samples[i], samples_answer[i], atol=1e-3)
        # Cast to float in the tests to compare (this output was generated
        # with self.numerical_struct_augmentor)
samples = self.numerical_struct_augmentor_f.sample(samples_number=5)
samples_answer = np.array(
[(1.250, 0.264, 0.381, 0.479),
(-0.181, 1.600, 0.602, 0.345),
(0.472, 0.609, -0.001, 1.026),
(0.105, 1.091, 0.384, 0.263),
(1.263, -0.007, 0.762, 0.603)],
dtype=NUMERICAL_STRUCT_ARRAY.dtype) # yapf: disable
for i in ['a', 'b', 'c', 'd']:
assert np.allclose(samples[i], samples_answer[i], atol=1e-3)
def test_validate_input_mixup():
"""
Tests :func:`fatf.utils.data.augmentation._validate_input_mixup` function.
"""
type_error_out = ('The beta_parameters parameter has to be a tuple with '
'two numbers or None to use the default parameters '
'value.')
type_error_in = 'The {} beta parameter has to be a numerical type.'
value_error_out = ('The beta_parameters parameter has to be a 2-tuple '
'(a pair) of numbers.')
value_error_in = 'The {} beta parameter cannot be a negative number.'
with pytest.raises(TypeError) as exin:
fuda._validate_input_mixup('tuple')
assert str(exin.value) == type_error_out
with pytest.raises(ValueError) as exin:
fuda._validate_input_mixup(('tuple', ))
assert str(exin.value) == value_error_out
with pytest.raises(TypeError) as exin:
fuda._validate_input_mixup(('1', 2))
assert str(exin.value) == type_error_in.format('first')
with pytest.raises(TypeError) as exin:
fuda._validate_input_mixup((1, '2'))
assert str(exin.value) == type_error_in.format('second')
with pytest.raises(ValueError) as exin:
fuda._validate_input_mixup((0, 0))
assert str(exin.value) == value_error_in.format('first')
with pytest.raises(ValueError) as exin:
fuda._validate_input_mixup((0.1, 0))
assert str(exin.value) == value_error_in.format('second')
assert fuda._validate_input_mixup(None)
assert fuda._validate_input_mixup((.1, .1))
class TestMixup(object):
"""
Tests :class:`fatf.utils.data.augmentation.Mixup` class.
"""
numerical_labels = np.array([0, 1, 0, 0, 0, 1])
categorical_labels = np.array(['b', 'a', 'a', 'a', 'a', 'b'])
numerical_np_augmentor = fuda.Mixup(NUMERICAL_NP_ARRAY, int_to_float=False)
numerical_struct_augmentor = fuda.Mixup(
NUMERICAL_STRUCT_ARRAY,
categorical_labels,
beta_parameters=(3, 6),
int_to_float=False)
categorical_np_augmentor = fuda.Mixup(
CATEGORICAL_NP_ARRAY, int_to_float=False)
categorical_struct_augmentor = fuda.Mixup(
CATEGORICAL_STRUCT_ARRAY,
categorical_indices=['a', 'b', 'c'],
int_to_float=False)
mixed_augmentor = fuda.Mixup(
MIXED_ARRAY, numerical_labels, int_to_float=False)
mixed_augmentor_i2f = fuda.Mixup(MIXED_ARRAY, numerical_labels)
def test_init(self):
"""
Tests :class:`fatf.utils.data.augmentation.Mixup` class initialisation.
"""
# Test class inheritance
assert (self.numerical_np_augmentor.__class__.__bases__[0].__name__
== 'Augmentation') # yapf: disable
# Check threshold
assert self.mixed_augmentor.threshold == 0.5
# Check beta parameters
assert self.numerical_struct_augmentor.beta_parameters == (3, 6)
assert self.mixed_augmentor.beta_parameters == (2, 5)
# Check ground_truth_unique, ground_truth_frequencies,
# indices_per_label and ground_truth_probabilities
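        # (With categorical_labels = ['b', 'a', 'a', 'a', 'a', 'b'] the unique
        # labels are ['a', 'b'] with frequencies [4/6, 2/6];
        # ground_truth_probabilities is the one-hot encoding of the labels in
        # that order, and indices_per_label holds the row indices of each
        # class, as asserted below.)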
assert self.numerical_np_augmentor.ground_truth is None
assert self.numerical_np_augmentor.ground_truth_unique is None
assert self.numerical_np_augmentor.ground_truth_frequencies is None
assert self.numerical_np_augmentor.ground_truth_probabilities is None
assert self.numerical_np_augmentor.indices_per_label is None
#
assert np.array_equal(self.numerical_struct_augmentor.ground_truth,
self.categorical_labels)
assert np.array_equal(
self.numerical_struct_augmentor.ground_truth_unique,
np.array(['a', 'b']))
assert np.array_equal(
self.numerical_struct_augmentor.ground_truth_frequencies,
np.array([4 / 6, 2 / 6]))
assert np.array_equal(
self.numerical_struct_augmentor.ground_truth_probabilities,
np.array([[0, 1], [1, 0], [1, 0], [1, 0], [1, 0], [0, 1]]))
assert len(self.numerical_struct_augmentor.indices_per_label) == 2
assert np.array_equal(
self.numerical_struct_augmentor.indices_per_label[0],
np.array([1, 2, 3, 4]))
assert np.array_equal(
self.numerical_struct_augmentor.indices_per_label[1],
np.array([0, 5]))
def test_sample_errors(self):
"""
Tests for errors in :func:`~fatf.utils.data.augmentation.Mixup.sample`.
"""
not_implemented_error = ('Sampling around the data mean is not yet '
'implemented for the Mixup class.')
type_error_probs = 'return_probabilities parameter has to be boolean.'
type_error_replace = 'with_replacement parameter has to be boolean.'
type_error_target = ('The data_row_target parameter should either be '
'None or a string/number indicating the target '
'class.')
value_error_target = ('The value of the data_row_target parameter is '
'not present in the ground truth labels used to '
'initialise this class. The data row target '
'value is not recognised.')
user_warning = ('This Mixup class has not been initialised with a '
'ground truth vector. The value of the '
'data_row_target parameter will be ignored, therefore '
'target values samples will not be returned.')
with pytest.raises(TypeError) as exin:
self.numerical_np_augmentor.sample(data_row_target=('4', '2'))
assert str(exin.value) == type_error_target
with pytest.raises(ValueError) as exin:
self.numerical_struct_augmentor.sample(data_row_target='1')
assert str(exin.value) == value_error_target
with pytest.warns(UserWarning) as warning:
with pytest.raises(NotImplementedError) as exin:
self.numerical_np_augmentor.sample(data_row_target='1')
assert str(exin.value) == not_implemented_error
assert len(warning) == 1
assert str(warning[0].message) == user_warning
with pytest.raises(TypeError) as exin:
self.numerical_np_augmentor.sample(return_probabilities=1)
assert str(exin.value) == type_error_probs
with pytest.raises(TypeError) as exin:
self.numerical_np_augmentor.sample(with_replacement=1)
assert str(exin.value) == type_error_replace
with pytest.raises(NotImplementedError) as exin:
self.numerical_np_augmentor.sample()
assert str(exin.value) == not_implemented_error
def test_sample(self):
"""
Tests :func:`~fatf.utils.data.augmentation.Mixup.sample` method.
"""
user_warning_gt = (
'This Mixup class has not been initialised with a ground truth '
'vector. The value of the data_row_target parameter will be '
'ignored, therefore target values samples will not be returned.')
user_warning_strat = (
'Since the ground truth vector was not provided while '
'initialising the Mixup class it is not possible to get a '
'stratified sample of data points. Instead, Mixup will choose '
'data points at random, which is equivalent to assuming that the '
'class distribution is balanced.')
fatf.setup_random_seed()
# Mixed array with ground truth and probabilities
samples = self.mixed_augmentor_i2f.sample(
MIXED_ARRAY[0], 0, 5, return_probabilities=True)
assert len(samples) == 2
answer_sample = np.array(
[(0.000, 'a', 0.332, 'a'),
(0.000, 'a', 0.080, 'a'),
(0.780, 'a', 0.587, 'a'),
(0.992, 'a', 0.725, 'a'),
(0.734, 'a', 0.073, 'a')],
dtype=[('a', '<f4'), ('b', '<U1'),
('c', '<f4'), ('d', '<U2')]) # yapf: disable
answer_sample_gt = np.array([[1, 0], [1, 0], [1, 0], [1, 0],
[0.266, 0.734]])
assert np.allclose(samples[1], answer_sample_gt, atol=1e-3)
for i in ['a', 'c']:
assert np.allclose(samples[0][i], answer_sample[i], atol=1e-3)
for i in ['b', 'd']:
assert np.array_equal(samples[0][i], answer_sample[i])
# Mixed array with ground truth and probabilities
samples = self.mixed_augmentor.sample(
MIXED_ARRAY[0], 1, 5, return_probabilities=True)
assert len(samples) == 2
answer_sample = np.array(
[(0, 'a', 0.829, 'a'),
(0, 'a', 0.601, 'a'),
(0, 'a', 0.255, 'a'),
(0, 'a', 0.377, 'a'),
(0, 'a', 0.071, 'a')],
dtype=[('a', '<i4'), ('b', '<U1'),
('c', '<f4'), ('d', '<U2')]) # yapf: disable
answer_sample_gt = np.array([[0.823, 0.177], [0.802, 0.198],
[0.624, 0.376], [0.457, 0.543], [0, 1]])
assert np.allclose(samples[1], answer_sample_gt, atol=1e-3)
for i in ['a', 'c']:
assert np.allclose(samples[0][i], answer_sample[i], atol=1e-3)
for i in ['b', 'd']:
assert np.array_equal(samples[0][i], answer_sample[i])
# Numpy array without ground truth -- categorical
with pytest.warns(UserWarning) as warning:
samples = self.categorical_np_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=5)
assert len(warning) == 1
assert str(warning[0].message) == user_warning_strat
#
answer_sample = np.array([['a', 'b', 'c'], ['a', 'b', 'c'],
['a', 'b', 'c'], ['a', 'b', 'c'],
['a', 'b', 'c']])
assert np.array_equal(samples, answer_sample)
# Numpy array without ground truth -- numerical -- test for warning
with pytest.warns(UserWarning) as warning:
samples = self.numerical_np_augmentor.sample(
NUMERICAL_NP_ARRAY[0], data_row_target=1, samples_number=5)
assert len(warning) == 2
assert str(warning[0].message) == user_warning_gt
assert str(warning[1].message) == user_warning_strat
#
answer_sample = np.array([[0.792, 0.000, 0.040, 0.373],
[0.000, 0.000, 0.080, 0.690],
[1.220, 0.610, 0.476, 0.562],
[0.000, 0.000, 0.080, 0.690],
[1.389, 0.694, 0.531, 0.544]])
assert np.allclose(samples, answer_sample, atol=1e-3)
# Structured array with ground truth -- numerical -- no probabilities
samples = self.numerical_struct_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=5, data_row_target='b')
assert len(samples) == 2
answer_sample = np.array(
[(0, 0, 0.039, 0.358),
(1, 0, 0.544, 0.540),
(1, 0, 0.419, 0.580),
(0, 0, 0.080, 0.690),
(0, 0, 0.080, 0.690)],
dtype=[('a', '<i4'), ('b', '<i4'),
('c', '<f4'), ('d', '<f4')]) # yapf: disable
answer_sample_gt = np.array(['a', 'a', 'a', 'b', 'b'])
assert np.array_equal(samples[1], answer_sample_gt)
for index in ['a', 'b', 'c', 'd']:
assert np.allclose(
samples[0][index], answer_sample[index], atol=1e-3)
def get_truncated_mean_std(minimum, maximum, original_mean, original_std):
"""
Computes the theoretical mean and standard deviation of a truncated
normal distribution from its initialisation parameters: the original
normal mean and standard deviation, and the minimum and maximum within
which values are truncated.
Equations for calculating these -- implemented by this function -- can
be found here_.
.. _here: https://en.wikipedia.org/wiki/Truncated_normal_distribution
"""
def cdf(epsilon):
return (1 / 2) * (1 + scipy.special.erf(epsilon / np.sqrt(2)))
    def norm(epsilon):
        return 1 / np.sqrt(2 * np.pi) * np.exp(-1 / 2 * epsilon**2)
alpha = (minimum - original_mean) / original_std
beta = (maximum - original_mean) / original_std
z_phi = cdf(beta) - cdf(alpha)
n_ab = norm(alpha) - norm(beta)
computed_mean = original_mean + (n_ab / z_phi) * original_std
computed_var = (original_std**2
* (1 + (alpha * norm(alpha) - beta * norm(beta)) / z_phi
- (n_ab / z_phi)**2
)
) # yapf: disable
computed_std = np.sqrt(computed_var)
return computed_mean, computed_std
class TestTruncatedNormalSampling(object):
"""
Tests :class:`fatf.utils.data.augmentation.TruncatedNormalSampling` class.
"""
numerical_np_augmentor = fuda.TruncatedNormalSampling(NUMERICAL_NP_ARRAY)
numerical_np_0_augmentor = fuda.TruncatedNormalSampling(
NUMERICAL_NP_ARRAY, [0])
numerical_struct_augmentor = fuda.TruncatedNormalSampling(
NUMERICAL_STRUCT_ARRAY)
numerical_struct_a_augmentor = fuda.TruncatedNormalSampling(
NUMERICAL_STRUCT_ARRAY, ['a'])
numerical_struct_augmentor_f = fuda.TruncatedNormalSampling(
NUMERICAL_STRUCT_ARRAY, int_to_float=False)
categorical_np_augmentor = fuda.TruncatedNormalSampling(
CATEGORICAL_NP_ARRAY)
categorical_np_012_augmentor = fuda.TruncatedNormalSampling(
CATEGORICAL_NP_ARRAY, [0, 1, 2])
categorical_struct_abc_augmentor = fuda.TruncatedNormalSampling(
CATEGORICAL_STRUCT_ARRAY, ['a', 'b', 'c'])
mixed_augmentor = fuda.TruncatedNormalSampling(MIXED_ARRAY, ['b', 'd'])
def test_init(self):
"""
Tests ``TruncatedNormalSampling`` class initialisation.
"""
# Test class inheritance
assert (self.numerical_np_0_augmentor.__class__.__bases__[0].__name__
== 'Augmentation')
# Test calculating numerical and categorical indices
assert self.numerical_np_0_augmentor.categorical_indices == [0]
assert self.numerical_np_0_augmentor.numerical_indices == [1, 2, 3]
#
assert self.numerical_np_augmentor.categorical_indices == []
assert self.numerical_np_augmentor.numerical_indices == [0, 1, 2, 3]
#
assert self.numerical_struct_a_augmentor.categorical_indices == ['a']
assert (self.numerical_struct_a_augmentor.numerical_indices
== ['b', 'c', 'd']) # yapf: disable
#
assert self.categorical_np_augmentor.categorical_indices == [0, 1, 2]
assert self.categorical_np_augmentor.numerical_indices == []
# Test attributes unique to TruncatedNormalSampling
csv = self.numerical_np_0_augmentor.categorical_sampling_values
nsv = self.numerical_np_0_augmentor.numerical_sampling_values
#
assert len(csv) == 1
assert 0 in csv
assert len(csv[0]) == 2
assert np.array_equal(csv[0][0], np.array([0, 1, 2]))
assert np.allclose(
csv[0][1], np.array([3 / 6, 2 / 6, 1 / 6]), atol=1e-3)
#
assert len(nsv) == 3
assert 1 in nsv and 2 in nsv and 3 in nsv
assert len(nsv[1]) == 4 and len(nsv[2]) == 4 and len(nsv[3]) == 4
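        # (For truncated normal sampling each numerical column stores four
        # values: the dataset mean, population standard deviation, minimum and
        # maximum. E.g. column 2 of NUMERICAL_NP_ARRAY has mean ~0.377,
        # std ~0.366, min 0.03 and max 0.99, matching the assertions below.)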
assert nsv[1][0] == pytest.approx(.5, abs=1e-3)
assert nsv[1][1] == pytest.approx(.5, abs=1e-3)
assert nsv[1][2] == pytest.approx(0., abs=1e-3)
assert nsv[1][3] == pytest.approx(1., abs=1e-3)
assert nsv[2][0] == pytest.approx(.377, abs=1e-3)
assert nsv[2][1] == pytest.approx(.366, abs=1e-3)
assert nsv[2][2] == pytest.approx(0.03, abs=1e-3)
assert nsv[2][3] == pytest.approx(0.99, abs=1e-3)
assert nsv[3][0] == pytest.approx(.563, abs=1e-3)
assert nsv[3][1] == pytest.approx(.257, abs=1e-3)
assert nsv[3][2] == pytest.approx(0.21, abs=1e-3)
assert nsv[3][3] == pytest.approx(0.89, abs=1e-3)
def test_sample(self):
"""
Tests ``sample`` for the ``TruncatedNormalSampling`` augmenter.
"""
fatf.setup_random_seed()
# yapf: disable
numerical_np_truncated_results = np.array([
[0.361, 0.396, 0.0593, 0.731],
[1.423, 0.094, 0.595, 0.258],
[0.816, 0.094, 0.356, 0.871]])
numerical_struct_truncated_results = np.array(
[(1.014, 0.111, 0.254, 0.408),
(0.199, 0.186, 0.178, 0.517),
(0.170, 0.338, 0.364, 0.560)],
dtype=[('a', 'f'), ('b', 'f'), ('c', 'f'), ('d', 'f')])
numerical_np_truncated_cat_results = np.array([
[1., 0.531, 0.269, 0.587],
[1., 0.154, 0.136, 0.751],
[1., 0.696, 0.594, 0.653]])
numerical_struct_truncated_cat_results = np.array(
[(0, 0.243, 0.048, 0.697),
(1, 0.066, 0.591, 0.842),
(1, 0.728, 0.214, 0.418)],
dtype=[('a', 'i'), ('b', 'f'), ('c', 'f'), ('d', 'f')])
categorical_np_results = np.array([
['a', 'f', 'c'],
['a', 'f', 'c'],
['b', 'f', 'g']])
categorical_struct_results = np.array(
[('a', 'b', 'g'),
('a', 'f', 'c'),
('a', 'f', 'c')],
dtype=[('a', 'U1'), ('b', 'U1'), ('c', 'U1')])
mixed_results = np.array(
[(0.668, 'a', 0.522, 'bb'),
(0.195, 'c', 0.075, 'a'),
(0.266, 'f', 0.586, 'b')],
dtype=[('a', '<f8'), ('b', 'U1'), ('c', '<f8'), ('d', 'U2')])
# yapf: enable
# Calculate what the mean and std of truncated normals should be
min_ = NUMERICAL_NP_ARRAY.min(axis=0)
max_ = NUMERICAL_NP_ARRAY.max(axis=0)
mean = NUMERICAL_NP_ARRAY.mean(axis=0)
std = NUMERICAL_NP_ARRAY.std(axis=0)
nt_results_mean, nt_results_std = get_truncated_mean_std(
min_, max_, NUMERICAL_NP_ARRAY[0], std)
nt_results_data_mean, nt_results_data_std = get_truncated_mean_std(
min_, max_, mean, std)
mixed_numerical_values = fuat.structured_to_unstructured(
MIXED_ARRAY[['a', 'c']])
min_ = mixed_numerical_values.min(axis=0)
max_ = mixed_numerical_values.max(axis=0)
std = mixed_numerical_values.std(axis=0)
mean = mixed_numerical_values.mean(axis=0)
nt_mixed_results_mean, nt_mixed_results_std = get_truncated_mean_std(
min_, max_, mixed_numerical_values[0], std)
nt_mixed_results_data = get_truncated_mean_std(min_, max_, mean, std)
nt_mixed_results_data_mean = nt_mixed_results_data[0]
nt_mixed_results_data_std = nt_mixed_results_data[1]
# Pure numerical sampling of a data point
# ...numpy array results
samples = self.numerical_np_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=3)
assert np.allclose(samples, numerical_np_truncated_results, atol=1e-3)
# ...structured array results
samples_struct = self.numerical_struct_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=3)
for i in samples_struct.dtype.names:
assert np.allclose(
samples_struct[i],
numerical_struct_truncated_results[i],
atol=1e-3)
# ...numpy array results mean
samples = self.numerical_np_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=1000)
assert np.allclose(samples.mean(axis=0), nt_results_mean, atol=1e-1)
assert np.allclose(samples.std(axis=0), nt_results_std, atol=1e-1)
# ...structured array results mean
samples_struct = self.numerical_struct_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=1000)
for i, name in enumerate(samples_struct.dtype.names):
assert np.allclose(
np.mean(samples_struct[name]), nt_results_mean[i], atol=1e-1)
assert np.allclose(
np.std(samples_struct[name]), nt_results_std[i], atol=1e-1)
# Pure numerical sampling from the mean of the data
# ...numpy array mean
samples = self.numerical_np_augmentor.sample(samples_number=1000)
assert np.allclose(
samples.mean(axis=0), nt_results_data_mean, atol=1e-1)
assert np.allclose(samples.std(axis=0), nt_results_data_std, atol=1e-1)
# ...structured array mean
samples_struct = self.numerical_struct_augmentor.sample(
samples_number=1000)
for i, name in enumerate(samples_struct.dtype.names):
assert np.allclose(
np.mean(samples_struct[name]),
nt_results_data_mean[i],
atol=1e-1)
assert np.allclose(
np.std(samples_struct[name]),
nt_results_data_std[i],
atol=1e-1)
#######################################################################
# Numerical sampling with one categorical index defined
# ...numpy array results
samples = self.numerical_np_0_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=3)
assert np.allclose(
samples, numerical_np_truncated_cat_results, atol=1e-3)
# ...structured array results
samples_struct = self.numerical_struct_a_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=3)
for i in samples_struct.dtype.names:
assert np.allclose(
samples_struct[i],
numerical_struct_truncated_cat_results[i],
atol=1e-3)
# ...numpy array results mean
# ......numerical
samples = self.numerical_np_0_augmentor.sample(
NUMERICAL_NP_ARRAY[0, :], samples_number=100)
assert np.allclose(
samples.mean(axis=0)[1:], nt_results_mean[1:], atol=1e-1)
assert np.allclose(
samples.std(axis=0)[1:], nt_results_std[1:], atol=1e-1)
# ......categorical
val, freq = np.unique(samples[:, 0], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, NUMERICAL_NP_0_CAT_VAL)
assert np.allclose(freq, NUMERICAL_NP_0_CAT_FREQ, atol=1e-1)
# ...structured array results mean
samples_struct = self.numerical_struct_a_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=100)
# ......numerical
for i, name in enumerate(samples_struct.dtype.names[1:]):
assert np.allclose(
np.mean(samples_struct[name]),
nt_results_mean[1:][i],
atol=1e-1)
assert np.allclose(
np.std(samples_struct[name]), nt_results_std[1:][i], atol=1e-1)
# ......categorical
val_struct, freq_struct = np.unique(
samples_struct['a'], return_counts=True)
freq_struct = freq_struct / freq_struct.sum()
assert np.array_equal(val_struct, NUMERICAL_NP_0_CAT_VAL)
assert np.allclose(freq_struct, NUMERICAL_NP_0_CAT_FREQ, atol=1e-1)
# ...numpy array mean
samples = self.numerical_np_0_augmentor.sample(samples_number=1000)
# ......numerical
assert np.allclose(
samples.mean(axis=0)[1:], nt_results_data_mean[1:], atol=1e-1)
assert np.allclose(
samples.std(axis=0)[1:], nt_results_data_std[1:], atol=1e-1)
# ......categorical
val, freq = np.unique(samples[:, 0], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, NUMERICAL_NP_0_CAT_VAL)
assert np.allclose(freq, NUMERICAL_NP_0_CAT_FREQ, atol=1e-1)
# ...structured array mean
samples_struct = self.numerical_struct_a_augmentor.sample(
samples_number=1000)
# ......numerical
for i, name in enumerate(samples_struct.dtype.names[1:]):
assert np.allclose(
np.mean(samples_struct[name]),
nt_results_data_mean[1:][i],
atol=1e-1)
assert np.allclose(
np.std(samples_struct[name]),
nt_results_data_std[1:][i],
atol=1e-1)
# ......categorical
val_struct, freq_struct = np.unique(
samples_struct['a'], return_counts=True)
freq_struct = freq_struct / freq_struct.sum()
assert np.array_equal(val_struct, NUMERICAL_NP_0_CAT_VAL)
assert np.allclose(freq_struct, NUMERICAL_NP_0_CAT_FREQ, atol=1e-1)
#######################################################################
#######################################################################
# Pure categorical sampling
# ...numpy array
samples = self.categorical_np_012_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=3)
assert np.array_equal(samples, categorical_np_results)
# ...structured array
samples_struct = self.categorical_struct_abc_augmentor.sample(
CATEGORICAL_STRUCT_ARRAY[0], samples_number=3)
assert np.array_equal(samples_struct, categorical_struct_results)
vals = [['a', 'b'], ['b', 'c', 'f'], ['c', 'g']]
# ...numpy array proportions and values
samples = self.categorical_np_012_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=100)
#
proportions = [
np.array([0.62, 0.38]),
np.array([0.31, 0.17, 0.52]),
np.array([0.63, 0.37])
]
for i, index in enumerate(range(CATEGORICAL_NP_ARRAY.shape[1])):
val, freq = np.unique(samples[:, index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-1)
# ...structured array proportions and values
samples_struct = self.categorical_struct_abc_augmentor.sample(
CATEGORICAL_STRUCT_ARRAY[0], samples_number=100)
#
proportions = [
np.array([0.74, 0.26]),
np.array([0.38, 0.12, 0.50]),
np.array([0.63, 0.37])
]
for i, index in enumerate(CATEGORICAL_STRUCT_ARRAY.dtype.names):
val, freq = np.unique(samples_struct[index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-1)
# No need to check for mean of the datasets since categorical features
# are sampled from the distribution of the entire dataset and not
# centered on the data_row.
#######################################################################
#######################################################################
# Mixed array with categorical indices auto-discovered
vals = [['a', 'c', 'f'], ['a', 'aa', 'b', 'bb']]
proportions = [
np.array([0.33, 0.33, 0.33]),
np.array([0.33, 0.16, 0.16, 0.33])
]
mixed_cat, mixed_num = ['b', 'd'], ['a', 'c']
# Instance
samples = self.mixed_augmentor.sample(MIXED_ARRAY[0], samples_number=3)
# ...categorical
assert np.array_equal(samples[mixed_cat], mixed_results[mixed_cat])
# ...numerical
for i in mixed_num:
assert np.allclose(samples[i], mixed_results[i], atol=1e-3)
# Instance mean
samples = self.mixed_augmentor.sample(
MIXED_ARRAY[0], samples_number=1000)
# ...numerical
for i, name in enumerate(mixed_num):
assert np.allclose(
np.mean(samples[name]), nt_mixed_results_mean[i], atol=1e-1)
assert np.allclose(
np.std(samples[name]), nt_mixed_results_std[i], atol=1e-1)
# ...categorical
for i, index in enumerate(mixed_cat):
val, freq = np.unique(samples[index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-1)
# Dataset mean
samples = self.mixed_augmentor.sample(samples_number=1000)
# ...numerical
for i, name in enumerate(mixed_num):
assert np.allclose(
np.mean(samples[name]),
nt_mixed_results_data_mean[i],
atol=1e-1)
assert np.allclose(
np.std(samples[name]), nt_mixed_results_data_std[i], atol=1e-1)
# ...categorical
for i, index in enumerate(mixed_cat):
val, freq = np.unique(samples[index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-1)
#######################################################################
# Sample without float cast
samples = self.numerical_struct_augmentor_f.sample(samples_number=5)
samples_answer = np.array(
[(0, 0, 0.345, 0.442),
(1, 0, 0.311, 0.338),
(1, 0, 0.040, 0.553),
(0, 0, 0.886, 0.822),
(0, 0, 0.164, 0.315)],
dtype=NUMERICAL_STRUCT_ARRAY.dtype) # yapf: disable
for i in NUMERICAL_STRUCT_ARRAY.dtype.names:
assert np.allclose(samples[i], samples_answer[i], atol=1e-3)
        # Compare with the same augmentation but with int_to_float=False and
        # cast to integers afterwards (generated with
        # self.numerical_struct_augmentor).
samples = self.numerical_struct_augmentor_f.sample(samples_number=5)
samples_answer = np.array([(0.718, 0.476, 0.449, 0.615),
(0.047, 0.883, 0.205, 0.329),
(1.255, 0.422, 0.302, 0.627),
(1.024, 0.512, 0.122, 0.790),
(1.123, 0.670, 0.386, 0.471)],
dtype=NUMERICAL_STRUCT_ARRAY.dtype)
for i in NUMERICAL_STRUCT_ARRAY.dtype.names:
assert np.allclose(samples[i], samples_answer[i], atol=1e-3)
def test_validate_input_normalclassdiscovery():
"""
Tests the ``_validate_input_normalclassdiscovery`` function.
Tests the
:func:`fatf.utils.data.augmentation._validate_input_normalclassdiscovery`
function.
"""
predictive_function_model = ('The predictive function must take exactly '
'*one* required parameter: a data array to '
'be predicted.')
predictive_function_type = ('The predictive_function should be a Python '
'callable, e.g., a Python function.')
classes_number_type = ('The classes_number parameter is neither None nor '
'an integer.')
classes_number_value = ('The classes_number parameter has to be an '
'integer larger than 1 (at least a binary '
'classification problem).')
class_proportion_type = ('The class_proportion_threshold parameter is not '
'a number.')
class_proportion_value = ('The class_proportion_threshold parameter must '
'be a number between 0 and 1 (not inclusive).')
standard_deviation_init_type = ('The standard_deviation_init parameter is '
'not a number.')
standard_deviation_init_value = ('The standard_deviation_init parameter '
'must be a positive number (greater than '
'0).')
standard_deviation_increment_type = ('The standard_deviation_increment '
'parameter is not a number.')
standard_deviation_increment_value = ('The standard_deviation_increment '
'parameter must be a positive '
'number (greater than 0).')
def invalid_predict_proba(self, x, y):
pass # pragma: no cover
model = fum.KNN(k=3)
with pytest.raises(TypeError) as exin:
fuda._validate_input_normalclassdiscovery(None, None, None, None, None)
assert str(exin.value) == predictive_function_type
with pytest.raises(IncompatibleModelError) as exin:
fuda._validate_input_normalclassdiscovery(invalid_predict_proba, None,
None, None, None)
assert str(exin.value) == predictive_function_model
with pytest.raises(TypeError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict, '1', None,
None, None)
assert str(exin.value) == classes_number_type
with pytest.raises(ValueError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict, 1, None, None,
None)
assert str(exin.value) == classes_number_value
with pytest.raises(TypeError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict, 2, None, None,
None)
assert str(exin.value) == class_proportion_type
with pytest.raises(ValueError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict, None, 0, None,
None)
assert str(exin.value) == class_proportion_value
with pytest.raises(ValueError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict, None, 1.0,
None, None)
assert str(exin.value) == class_proportion_value
with pytest.raises(TypeError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict_proba, 3, 0.9,
None, None)
assert str(exin.value) == standard_deviation_init_type
with pytest.raises(ValueError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict, None, 0.1, 0,
None)
assert str(exin.value) == standard_deviation_init_value
with pytest.raises(ValueError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict, None, 0.1, -5,
None)
assert str(exin.value) == standard_deviation_init_value
with pytest.raises(TypeError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict_proba, None,
0.5, 6, None)
assert str(exin.value) == standard_deviation_increment_type
with pytest.raises(ValueError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict_proba, None,
0.5, 6, 0)
assert str(exin.value) == standard_deviation_increment_value
with pytest.raises(ValueError) as exin:
fuda._validate_input_normalclassdiscovery(model.predict_proba, None,
0.5, 6, -0.5)
assert str(exin.value) == standard_deviation_increment_value
class TestNormalClassDiscovery(object):
"""
Tests :class:`fatf.utils.data.augmentation.NormalClassDiscovery` class.
"""
numerical_labels = np.array([0, 1, 0, 1, 1, 0])
numerical_classifier = fum.KNN(k=3)
numerical_classifier.fit(NUMERICAL_NP_ARRAY, numerical_labels)
#
numerical_np_augmentor = fuda.NormalClassDiscovery(
NUMERICAL_NP_ARRAY, numerical_classifier.predict_proba)
numerical_np_0_augmentor = fuda.NormalClassDiscovery(
NUMERICAL_NP_ARRAY, numerical_classifier.predict, [0])
numerical_struct_classifier = fum.KNN(k=3)
numerical_struct_classifier.fit(NUMERICAL_STRUCT_ARRAY, numerical_labels)
#
numerical_struct_augmentor = fuda.NormalClassDiscovery(
NUMERICAL_STRUCT_ARRAY,
numerical_struct_classifier.predict_proba,
standard_deviation_init=0.5,
standard_deviation_increment=0.2,
class_proportion_threshold=0.1)
numerical_struct_augmentor_f = fuda.NormalClassDiscovery(
NUMERICAL_STRUCT_ARRAY,
numerical_struct_classifier.predict,
int_to_float=False,
classes_number=2)
numerical_struct_a_augmentor = fuda.NormalClassDiscovery(
NUMERICAL_STRUCT_ARRAY,
numerical_struct_classifier.predict, ['a'],
classes_number=2)
categorical_classifier = fum.KNN(k=3)
categorical_classifier.fit(CATEGORICAL_NP_ARRAY, numerical_labels)
#
categorical_np_augmentor = fuda.NormalClassDiscovery(
CATEGORICAL_NP_ARRAY, categorical_classifier.predict, classes_number=2)
categorical_np_012_augmentor = fuda.NormalClassDiscovery(
CATEGORICAL_NP_ARRAY,
categorical_classifier.predict, [0, 1, 2],
classes_number=2)
categorical_struct_classifier = fum.KNN(k=3)
categorical_struct_classifier.fit(CATEGORICAL_STRUCT_ARRAY,
numerical_labels)
#
categorical_struct_abc_augmentor = fuda.NormalClassDiscovery(
CATEGORICAL_STRUCT_ARRAY,
categorical_struct_classifier.predict, ['a', 'b', 'c'],
classes_number=2)
mixed_classifier = fum.KNN(k=3)
mixed_classifier.fit(MIXED_ARRAY, numerical_labels)
#
mixed_augmentor = fuda.NormalClassDiscovery(
MIXED_ARRAY, mixed_classifier.predict, ['b', 'd'], classes_number=2)
def test_init(self, caplog):
"""
Tests ``NormalClassDiscovery`` class initialisation.
"""
runtime_error_class_n = ('For the specified (classification) '
'predictive function, classifying the input '
'dataset provided only one target class. To '
'use this augmenter please initialise it '
'with the classes_number parameter.')
logger_info = ('The number of classes was not specified by the user. '
'Based on *classification* of the input dataset {} '
'classes were found.')
runtime_error_prop = ('The lower bound on the proportion of each '
'class must be smaller than 1/(the number of '
'classes) for this sampling implementation. '
'(Please see the documentation of the '
'NormalClassDiscovery augmenter for more '
'information.')
# Test class inheritance
assert (self.numerical_np_0_augmentor.__class__.__bases__[0].__name__
== 'Augmentation')
# Test calculating numerical and categorical indices
assert self.numerical_np_0_augmentor.categorical_indices == [0]
assert self.numerical_np_0_augmentor.numerical_indices == [1, 2, 3]
#
assert self.numerical_np_augmentor.categorical_indices == []
assert self.numerical_np_augmentor.numerical_indices == [0, 1, 2, 3]
#
assert self.numerical_struct_a_augmentor.categorical_indices == ['a']
assert (self.numerical_struct_a_augmentor.numerical_indices
== ['b', 'c', 'd']) # yapf: disable
#
assert self.categorical_np_augmentor.categorical_indices == [0, 1, 2]
assert self.categorical_np_augmentor.numerical_indices == []
# Test for non-probabilistic
# ...successful class number inference logging
assert len(caplog.records) == 0
_ = fuda.NormalClassDiscovery(NUMERICAL_NP_ARRAY,
self.numerical_classifier.predict)
assert len(caplog.records) == 1
assert caplog.records[0].levelname == 'INFO'
assert caplog.records[0].getMessage() == logger_info.format(2)
# ...failed class number inference
with pytest.raises(RuntimeError) as exin:
fuda.NormalClassDiscovery(
np.array([[2, 1, 0.73, 0.48], [1, 0, 0.36, 0.89]]),
self.numerical_classifier.predict)
assert str(exin.value) == runtime_error_class_n
# Impossible class proportion threshold
with pytest.raises(RuntimeError) as exin:
fuda.NormalClassDiscovery(
NUMERICAL_NP_ARRAY,
self.numerical_classifier.predict,
class_proportion_threshold=0.5)
assert str(exin.value) == runtime_error_prop
# Test attributes unique to NormalClassDiscovery...
# ...numpy probabilistic
assert (self.numerical_np_augmentor.predictive_function
== self.numerical_classifier.predict_proba) # yapf: disable
assert self.numerical_np_augmentor.is_probabilistic is True
assert self.numerical_np_augmentor.classes_number == 2
assert self.numerical_np_augmentor.standard_deviation_init == 1
assert self.numerical_np_augmentor.standard_deviation_increment == 0.1
assert self.numerical_np_augmentor.class_proportion_threshold == 0.05
assert not self.numerical_np_augmentor.categorical_sampling_values
# ...numpy classifier
assert (self.numerical_np_0_augmentor.predictive_function
== self.numerical_classifier.predict) # yapf: disable
assert self.numerical_np_0_augmentor.is_probabilistic is False
assert self.numerical_np_0_augmentor.classes_number == 2
assert self.numerical_np_0_augmentor.standard_deviation_init == 1
assert self.numerical_np_0_augmentor.standard_deviation_increment == .1
assert self.numerical_np_0_augmentor.class_proportion_threshold == .05
#
csv = self.numerical_np_0_augmentor.categorical_sampling_values
assert len(csv) == 1
assert 0 in csv
assert len(csv[0]) == 2
assert np.array_equal(csv[0][0], np.array([0, 1, 2]))
assert np.allclose(
csv[0][1], np.array([3 / 6, 2 / 6, 1 / 6]), atol=1e-3)
# ...structured probabilistic
assert (
self.numerical_struct_augmentor.predictive_function
== self.numerical_struct_classifier.predict_proba
) # yapf: disable
assert self.numerical_struct_augmentor.is_probabilistic is True
assert self.numerical_struct_augmentor.classes_number == 2
assert (self.numerical_struct_augmentor.standard_deviation_init
== 0.5) # yapf: disable
assert (self.numerical_struct_augmentor.standard_deviation_increment
== 0.2) # yapf: disable
assert (self.numerical_struct_augmentor.class_proportion_threshold
== 0.1) # yapf: disable
assert not self.numerical_struct_augmentor.categorical_sampling_values
# ...structured classifier
assert (self.categorical_struct_abc_augmentor.predictive_function
== self.categorical_struct_classifier.predict) # yapf: disable
assert self.categorical_struct_abc_augmentor.is_probabilistic is False
assert self.categorical_struct_abc_augmentor.classes_number == 2
assert (
self.categorical_struct_abc_augmentor.standard_deviation_init == 1)
assert (
self.categorical_struct_abc_augmentor.standard_deviation_increment
== 0.1) # yapf: disable
assert (
self.categorical_struct_abc_augmentor.class_proportion_threshold
== 0.05) # yapf: disable
csv = self.categorical_struct_abc_augmentor.categorical_sampling_values
assert len(csv) == 3
assert 'a' in csv and 'b' in csv and 'c' in csv
#
assert len(csv['a']) == 2
assert np.array_equal(csv['a'][0], np.array(['a', 'b']))
assert np.allclose(csv['a'][1], np.array([4 / 6, 2 / 6]), atol=1e-3)
#
assert len(csv['b']) == 2
assert np.array_equal(csv['b'][0], np.array(['b', 'c', 'f']))
assert np.allclose(
csv['b'][1], np.array([2 / 6, 1 / 6, 3 / 6]), atol=1e-3)
#
assert len(csv['c']) == 2
assert np.array_equal(csv['c'][0], np.array(['c', 'g']))
assert np.allclose(csv['c'][1], np.array([4 / 6, 2 / 6]), atol=1e-3)
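    # Note (added for readability): the assertions above show that
    # categorical_sampling_values maps each categorical column (index for plain
    # numpy arrays, field name for structured arrays) to a pair of
    # (unique values, empirical proportions) estimated from the input data.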
def test_sample(self):
"""
Tests :func:`fatf.utils.data.augmentation.NormalClassDiscovery.sample`.
"""
fatf.setup_random_seed()
max_iter_type = 'The max_iter parameter is not a positive integer.'
max_iter_value = 'The max_iter parameter must be a positive number.'
runtime_msg = ('The maximum number of iterations was reached '
'without sampling enough data points for each '
'class. Please try increasing the max_iter '
'parameter or decreasing the '
'class_proportion_threshold parameter. '
'Increasing the standard_deviation_init and '
'standard_deviation_increment parameters may also '
'help.')
with pytest.raises(TypeError) as exin:
self.numerical_np_0_augmentor.sample(
NUMERICAL_NP_ARRAY[0], max_iter='a')
assert str(exin.value) == max_iter_type
with pytest.raises(ValueError) as exin:
self.numerical_np_0_augmentor.sample(
NUMERICAL_NP_ARRAY[0], max_iter=-1)
assert str(exin.value) == max_iter_value
# yapf: disable
numerical_samples = np.array([
[0.088, 0.024, -0.505, 0.934],
[-0.175, -0.471, -0.049, -0.155],
[-2.289, -1.651, -0.110, -2.343],
[0.8346353, -1.189, -0.435, -1.269]])
numerical_0_samples = np.array([
[1, -0.196, 1.378, 1.608],
[0, -0.451, -0.908, 1.016],
[0, 1.588, 0.976, 1.275],
[2, 0.033, 2.253, 1.130]])
numerical_struct_samples = np.array(
[(0.637, -1.328, -0.118, 0.916),
(-0.146, 0.173, -0.065, 0.607),
(1.396, -1.405, 1.552, 1.498),
(-2.150, -2.201, 2.599, 2.582)],
dtype=[('a', 'f'), ('b', 'f'), ('c', 'f'), ('d', 'f')])
numerical_struct_0_samples = np.array(
[(0, -1.461, -0.580, -2.496),
(1, -0.728, 1.033, 1.372),
(1, -1.509, -0.972, -0.833),
(0, -0.943, -0.142, -3.236)],
dtype=[('a', 'i'), ('b', 'f'), ('c', 'f'), ('d', 'f')])
categorical_samples = np.array([
['a', 'b', 'g'],
['a', 'f', 'g'],
['b', 'b', 'g'],
['a', 'f', 'c']])
categorical_012_samples = np.array([
['a', 'c', 'c'],
['a', 'f', 'g'],
['a', 'c', 'c'],
['a', 'f', 'g']])
categorical_struct_samples = np.array(
[('a', 'f', 'c'),
('b', 'b', 'c'),
('a', 'b', 'g'),
('a', 'f', 'g')],
dtype=CATEGORICAL_STRUCT_ARRAY.dtype)
mixed_samples = np.array(
[(0.690, 'a', -1.944, 'b'),
(0.124, 'a', -1.102, 'bb'),
(1.445, 'c', -1.224, 'bb'),
(2.122, 'c', -0.028, 'aa')],
dtype=[('a', '<f8'), ('b', 'U1'), ('c', '<f8'), ('d', 'U2')])
numerical_samples_mean = np.array([
[-0.299, 1.016, -1.442, 0.611],
[0.159, -1.271, -0.347, 0.698],
[1.402, -2.630, 0.346, 0.754],
[-1.389, -0.431, 0.716, -0.882]])
numerical_0_samples_mean = np.array([
[0, 0.220, 0.733, 0.239],
[2, 0.325, 0.180, 3.161],
[0, 0.795, -0.818, 3.386],
[1, 0.907, -1.070, 2.265]])
numerical_struct_samples_mean = np.array(
[(0.215, -0.429, 0.723, 0.341),
(-0.808, 0.515, 0.586, 0.570),
(-0.920, 0.673, 0.546, -0.382),
(0.359, 0.131, -0.254, 1.302)],
dtype=[('a', 'f'), ('b', 'f'), ('c', 'f'), ('d', 'f')])
numerical_struct_0_samples_mean = np.array(
[(0, -0.146, -0.832, 1.089),
(0, 0.462, -0.683, 2.174),
(2, 0.439, -1.637, 1.484),
(1, 1.292, -1.461, 4.102)],
dtype=[('a', 'i'), ('b', 'f'), ('c', 'f'), ('d', 'f')])
categorical_samples_mean = np.array([
['b', 'f', 'c'],
['b', 'b', 'c'],
['a', 'f', 'g'],
['a', 'b', 'c']])
categorical_012_samples_mean = np.array([
['a', 'c', 'c'],
['b', 'f', 'c'],
['b', 'f', 'g'],
['a', 'c', 'c']])
categorical_struct_samples_mean = np.array(
[('a', 'f', 'c'),
('a', 'b', 'c'),
('a', 'b', 'c'),
('a', 'f', 'c')],
dtype=CATEGORICAL_STRUCT_ARRAY.dtype)
mixed_samples_mean = np.array(
[(-1.250, 'c', 2.623, 'bb'),
(2.352, 'c', -1.269, 'a'),
(0.489, 'f', -0.604, 'bb'),
(3.556, 'a', -0.741, 'bb')],
dtype=[('a', '<f8'), ('b', 'U1'), ('c', '<f8'), ('d', 'U2')])
# yapf: enable
samples = self.numerical_np_augmentor.sample(
NUMERICAL_NP_ARRAY[0], samples_number=4)
assert np.allclose(samples, numerical_samples, atol=1e-3)
samples = self.numerical_np_0_augmentor.sample(
NUMERICAL_NP_ARRAY[0], samples_number=4)
assert np.allclose(samples, numerical_0_samples, atol=1e-3)
samples = self.numerical_struct_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=4)
for i in samples.dtype.names:
assert np.allclose(
samples[i], numerical_struct_samples[i], atol=1e-3)
samples = self.numerical_struct_a_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=4)
for i in samples.dtype.names:
assert np.allclose(
samples[i], numerical_struct_0_samples[i], atol=1e-3)
samples = self.categorical_np_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=4)
assert np.array_equal(samples, categorical_samples)
samples = self.categorical_np_012_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=4)
assert np.array_equal(samples, categorical_012_samples)
samples = self.categorical_struct_abc_augmentor.sample(
CATEGORICAL_STRUCT_ARRAY[0], samples_number=4)
for i in samples.dtype.names:
assert np.array_equal(samples[i], categorical_struct_samples[i])
samples = self.mixed_augmentor.sample(MIXED_ARRAY[0], samples_number=4)
assert np.array_equal(samples[['b', 'd']], mixed_samples[['b', 'd']])
for i in ['a', 'c']:
assert np.allclose(samples[i], mixed_samples[i], atol=1e-3)
# Test if minimum_per_class works
samples = self.numerical_np_augmentor.sample(
NUMERICAL_NP_ARRAY[0], samples_number=1000)
predictions = self.numerical_classifier.predict(samples)
_, counts = np.unique(predictions, return_counts=True)
assert np.all(counts >= 0.05 * 1000)
samples = self.numerical_np_0_augmentor.sample(
NUMERICAL_NP_ARRAY[0], samples_number=1000)
predictions = self.numerical_classifier.predict(samples)
_, counts = np.unique(predictions, return_counts=True)
assert np.all(counts > 0.05 * 1000)
# Initialised with higher rate
samples = self.numerical_struct_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=1000)
predictions = self.numerical_struct_classifier.predict(samples)
_, counts = np.unique(predictions, return_counts=True)
assert np.all(counts > 0.1 * 1000)
samples = self.numerical_struct_a_augmentor.sample(
NUMERICAL_STRUCT_ARRAY[0], samples_number=1000)
predictions = self.numerical_struct_classifier.predict(samples)
_, counts = np.unique(predictions, return_counts=True)
assert np.all(counts > 0.05 * 1000)
#######################################################################
# Get averages and proportions
vals = [['a', 'b'], ['b', 'c', 'f'], ['c', 'g']]
proportions = [
np.array([0.676, 0.324]),
np.array([0.333, 0.151, 0.516]),
np.array([0.667, 0.333])
]
###
samples = self.categorical_np_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=1000)
predictions = self.categorical_classifier.predict(samples)
_, counts = np.unique(predictions, return_counts=True)
assert np.all(counts > 0.05 * 1000)
for i, index in enumerate(range(CATEGORICAL_NP_ARRAY.shape[1])):
val, freq = np.unique(samples[:, index], return_counts=True)
freq = freq / freq.sum()
assert np.array_equal(val, vals[i])
assert np.allclose(freq, proportions[i], atol=1e-3)
###
proportions = [
np.array([0.665, 0.335]),
np.array([0.357, 0.158, 0.485]),
np.array([0.645, 0.355])
]
samples = self.categorical_np_012_augmentor.sample(
CATEGORICAL_NP_ARRAY[0], samples_number=1000)
predictions = self.categorical_classifier.predict(samples)
_, counts = np.unique(predictions, return_counts=True)
        assert np.all(counts > 0.05 * 1000)
import os
# os.environ['NUMBA_DISABLE_JIT'] = '1' #SHOULD WORK WITH 0 ALSO
import pytest
from pyequion import create_equilibrium, solve_equilibrium
import pyequion
from pyequion import reactions_species_builder as rbuilder
from pyequion import ClosingEquationType
from pyequion import symbolic_computations as mod_sym
# from xtest_pychemeq import assert_solution_result, compare_with_expected_perc_tuple
from utils_tests import (
assert_solution_result,
compare_with_expected_perc_tuple,
)
import numpy as np
import sympy
# from pyequion import caco3_specific_study
# --------------------------------------------
# TAGS BASED GENERATION
# --------------------------------------------
EXAMPLE_SPECIES = [
"H2O",
"NaHCO3",
"CaCl2",
"H+",
"OH-",
"Na+",
"NaOH",
"HCO3-",
"CO2",
"CO2(g)",
"CO3--",
"NaCO3-",
"Na2CO3",
"Ca++",
"CaHCO3+",
"CaCO3",
"CaOH+",
"Cl-",
]
EXAMPLE_REACTIONS = [
{"H+": -1, "H2O": 1, "OH-": -1, "type": "rev"},
{"HCO3-": -1, "Na+": -1, "NaHCO3": 1, "type": "rev"},
{"H+": 1, "H2O": -1, "Na+": -1, "NaOH": 1, "type": "rev"},
{"CO3--": -1, "H+": -1, "HCO3-": 1, "type": "rev"},
{"CO2": -1, "H+": 1, "HCO3-": 1, "type": "rev"},
{"CO2": 1, "CO2(g)": -1, "type": "henry"},
{"CO3--": -1, "Na+": -1, "NaCO3-": 1, "type": "rev"},
{"CO3--": -1, "Na+": -2, "Na2CO3": 1, "type": "rev"},
{"Ca++": -1, "CaCl2": 1, "Cl-": -2, "type": "irrev"},
{"Ca++": -1, "CaHCO3+": 1, "HCO3-": -1, "type": "rev"},
{"CO3--": -1, "Ca++": -1, "CaCO3": 1, "type": "rev"},
{"Ca++": -1, "CaOH+": 1, "H+": 1, "H2O": -1, "type": "rev"},
]
def test_nahco3_open():
initial_comp = ["H2O", "NaHCO3", "CO2(g)"]
E_SPECIES = [
"H2O",
"NaHCO3",
"H+",
"OH-",
"Na+",
"NaOH",
"HCO3-",
"CO2",
"CO2(g)",
"CO3--",
"NaCO3-",
"Na2CO3",
]
E_REACTIONS = [
{"H+": -1, "H2O": 1, "OH-": -1, "id_db": 10, "type": "rev"},
{"HCO3-": -1, "Na+": -1, "NaHCO3": 1, "id_db": 2, "type": "rev"},
{"H+": 1, "H2O": -1, "Na+": -1, "NaOH": 1, "id_db": 0, "type": "rev"},
{"CO3--": -1, "H+": -1, "HCO3-": 1, "id_db": 9, "type": "rev"},
{"CO2": -1, "H+": 1, "HCO3-": 1, "id_db": 8, "type": "rev"},
{"CO2": 1, "CO2(g)": -1, "id_db": 7, "type": "henry"},
{"CO3--": -1, "Na+": -1, "NaCO3-": 1, "id_db": 1, "type": "rev"},
{"CO3--": -1, "Na+": -2, "Na2CO3": 1, "id_db": 3, "type": "rev"},
]
species, reactions = rbuilder.get_species_reactions_from_compounds(
initial_comp
)
for s in species:
assert s in E_SPECIES
assert len(E_REACTIONS) == len(reactions)
assert len(E_SPECIES) == len(species)
def test_cacl2_open():
initial_comp = ["H2O", "CaCl2", "CO2(g)"]
E_SPECIES = [
"H2O",
"CaCl2",
"CO2(g)",
"H+",
"OH-",
"Ca++",
"CaOH+",
"Cl-",
"CO2",
"HCO3-",
"CO3--",
"CaCO3",
"CaHCO3+",
]
E_REACTIONS = [
{"H+": -1, "H2O": 1, "OH-": -1, "id_db": 10, "type": "rev"},
{"Ca++": -1, "CaCl2": 1, "Cl-": -2, "id_db": -1, "type": "irrev"},
{
"Ca++": -1,
"CaOH+": 1,
"H+": 1,
"H2O": -1,
"id_db": 4,
"type": "rev",
},
{"CO2": 1, "CO2(g)": -1, "id_db": 7, "type": "henry"},
{"CO2": -1, "H+": 1, "HCO3-": 1, "id_db": 8, "type": "rev"},
{"CO3--": -1, "H+": -1, "HCO3-": 1, "id_db": 9, "type": "rev"},
{"CO3--": -1, "Ca++": -1, "CaCO3": 1, "id_db": 12, "type": "rev"},
{"Ca++": -1, "CaHCO3+": 1, "HCO3-": -1, "id_db": 11, "type": "rev"},
]
species, reactions = rbuilder.get_species_reactions_from_compounds(
initial_comp
)
for s in species:
assert s in E_SPECIES
assert len(E_REACTIONS) == len(reactions)
assert len(E_SPECIES) == len(species)
def test_cacl2_nahco3():
initial_comp = ["H2O", "NaHCO3", "CaCl2"]
E_SPECIES = [
"H2O",
"NaHCO3",
"CaCl2",
"H+",
"OH-",
"Na+",
"NaOH",
"HCO3-",
"CO2",
"CO2(g)",
"CO3--",
"NaCO3-",
"Na2CO3",
"Ca++",
"CaHCO3+",
"CaCO3",
"CaOH+",
"Cl-",
]
E_REACTIONS = [
{"H+": -1, "H2O": 1, "OH-": -1, "id_db": 10, "type": "rev"},
{"HCO3-": -1, "Na+": -1, "NaHCO3": 1, "id_db": 2, "type": "rev"},
{"H+": 1, "H2O": -1, "Na+": -1, "NaOH": 1, "id_db": 0, "type": "rev"},
{"CO3--": -1, "H+": -1, "HCO3-": 1, "id_db": 9, "type": "rev"},
{"CO2": -1, "H+": 1, "HCO3-": 1, "id_db": 8, "type": "rev"},
{"CO2": 1, "CO2(g)": -1, "id_db": 7, "type": "henry"},
{"CO3--": -1, "Na+": -1, "NaCO3-": 1, "id_db": 1, "type": "rev"},
{"CO3--": -1, "Na+": -2, "Na2CO3": 1, "id_db": 3, "type": "rev"},
{"Ca++": -1, "CaCl2": 1, "Cl-": -2, "id_db": -1, "type": "irrev"},
{"Ca++": -1, "CaHCO3+": 1, "HCO3-": -1, "id_db": 11, "type": "rev"},
{"CO3--": -1, "Ca++": -1, "CaCO3": 1, "id_db": 12, "type": "rev"},
{
"Ca++": -1,
"CaOH+": 1,
"H+": 1,
"H2O": -1,
"id_db": 4,
"type": "rev",
},
]
species, reactions = rbuilder.get_species_reactions_from_compounds(
initial_comp
)
for s in species:
assert s in E_SPECIES
assert len(E_REACTIONS) == len(reactions)
assert len(E_SPECIES) == len(species)
def test_identify_element_in_species_list():
tags_list = {
"H+",
"OH-",
"CO2",
"CO3--",
"HCO3-",
"Na+",
"NaOH",
"NaCO3-",
"NaHCO3",
"Na2CO3",
"CaOH+",
"CaHCO3+",
"CaCO3",
"Ca++",
"Cl-",
"H2O",
}
tags_coefs = rbuilder.get_species_tags_with_an_element(tags_list, "Na")
assert len(tags_coefs) == 5
for el in ["Na+", "NaOH", "NaCO3-", "NaHCO3", "Na2CO3"]:
assert el in tags_coefs
assert tags_coefs["Na2CO3"] == 2
assert tags_coefs["NaHCO3"] == 1
# --------------------------------------------
# ENGINE DEFINITIONS
# --------------------------------------------
# def test_create_list_of_species_engine_nahco3():
# initial_comp = ['H2O', 'NaHCO3', 'CO2(g)']
# known_tag = ['H2O', 'CO2(g)']
# species, reactions = rbuilder.get_species_reactions_from_compounds(initial_comp)
# species_conv = convert_species_tag_for_engine(species)
# engine_species = create_list_of_species_engine(species_conv, species)
# engine_idxs = create_Indexes_instance(species_conv, len(known_tag))
# names = [sp.name for sp in engine_species]
# print(names)
# assert len(engine_species) > 0
# assert engine_idxs.size == 10
# # Deterministic: - Keep order
# assert names == ['H2O', 'NaHCO3', 'CO2g', 'Hp', 'OHm', 'Nap', 'NaOH', 'HCO3m', 'CO2', 'CO3mm', 'NaCO3m', 'Na2CO3']
# def test_create_list_of_reactions_engine_nahco3():
# initial_comp = ['H2O', 'NaHCO3', 'CO2(g)']
# species, reactions = rbuilder.get_species_reactions_from_compounds(initial_comp)
# species_conv = convert_species_tag_for_engine(species)
# reactions_conv = convert_species_tags_for_reactions(reactions)
# # engine_species, engine_idxs = create_list_of_species_engine(species_conv)
# dict_indexes_species = get_dict_indexes_of_species_to_variable_position(species_conv)
# engine_reactions = create_list_of_reactions_engine(reactions_conv, dict_indexes_species)
# assert isinstance(engine_reactions[0], pyequion.EqReaction)
# assert len(engine_reactions) == 7
# assert engine_reactions[0].idx_reaction_db >= 0
# assert engine_reactions[0].idx_species[0] >= 0
# def test_create_list_of_mass_balances_engine_nahco3():
# feed_compounds = ['NaHCO3']
# initial_comp = ['H2O', 'NaHCO3', 'CO2(g)']
# element = ['Na']
# species, reactions = rbuilder.get_species_reactions_from_compounds(initial_comp)
# mb_list_engine = create_list_of_mass_balances_engine(species, element,
# feed_compounds)
# assert len(mb_list_engine) == 1
# assert mb_list_engine[0].idx_feed == [(0, 1.0)]
# species_conv = convert_species_tag_for_engine(species)
# dict_indexes_species = get_dict_indexes_of_species_to_variable_position(species_conv)
# assert_list_in_numba(mb_list_engine[0].idx_species, dict_indexes_species['Nap'])
# pass
# --------------------------------------------
# APPLIED CASES
# --------------------------------------------
def test_engine_nahco3_solve_15mM_open():
EXPECTED = {
"sc": (1336.4, -15.0),
"I": (16.073e-3, 0.1),
"pH": (9.24, 1.0),
"DIC": (1.34e01 * 1e-3, -1),
}
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
args = (np.array([cNaHCO3]), TK, pyequion.pCO2_ref)
feed_compounds = ["NaHCO3"]
initial_feed_mass_balance = None
element_mass_balance = None
closing_equation_type = ClosingEquationType.OPEN
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
element_mass_balance,
initial_feed_mass_balance,
)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
assert_solution_result(solution, EXPECTED)
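# Note on the pattern used throughout these tests: args is a tuple of
# (feed concentrations as a numpy array, temperature in K, closing value),
# where the closing value is a CO2 partial pressure for OPEN systems and the
# total carbon (DIC) for CARBON_TOTAL systems.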
def test_engine_cacl2_solve_open_0p01():
EXPECTED = {
"sc": (1336.4, -200),
"I": (16.073e-3, 200), # ONLY PH KOWN
"pH": (5.61, 1.0),
"DIC": (1.34e01 * 1e-3, -1),
}
TK = 25.0 + 273.15
c_feed = 5 * 1e-3 # Forcing to be an array
args = (np.array([c_feed]), TK, pyequion.pCO2_ref)
"""
Big Issue: How to automate the known-mass-balance
    - A species is removed (CaCl2)
    - A species is still a variable (Ca)
    - A species is placed as known (Cl)
"""
feed_compounds = ["CaCl2"]
initial_feed_mass_balance = ["Cl-"]
closing_equation_type = ClosingEquationType.OPEN
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
initial_feed_mass_balance=initial_feed_mass_balance,
)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
idx = sys_eq.idx_control.idx
x_guess = np.full(idx["size"], -1e-3)
x_guess[idx["Ca++"]] = np.log10(c_feed)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
def test_engine_with_parenthesis_Mn():
EXPECTED = {
"sc": (1336.4, -15.0),
"I": (17.959e-3, 3.0),
"pH": (9.18, 1.0),
"DIC": (1.56e02 * 1e-3, 1.0),
"SI": {
"Pyrochroite": (0.61, 1.0),
"Rhodochrosite": (4.65, 50.0), # error in thisone...
},
}
TK = 25.0 + 273.15
cNaHCO3 = 150e-3
args = (np.array([cNaHCO3]), TK, pyequion.pCO2_ref)
feed_compounds = ["MnCO3"]
initial_feed_mass_balance = None
# initial_feed_mass_balance = ['Cl-']
element_mass_balance = None
closing_equation_type = ClosingEquationType.OPEN
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
element_mass_balance,
initial_feed_mass_balance,
)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
meanMnCO3 = pyequion.get_mean_activity_coeff(solution, feed_compounds[0])
assert np.isclose(
meanMnCO3, solution.gamma[sys_eq.idx_control.idx["Mn++"]], 1e-2
)
assert_solution_result(solution, EXPECTED)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Pyrochroite"], EXPECTED["SI"]["Pyrochroite"]
)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Rhodochrosite"],
EXPECTED["SI"]["Rhodochrosite"],
)
def test_engine_mix_solve_closed():
EXPECTED = {
"sc": (1339.4, 20.0),
"I": (16.102e-3, 1.0),
"pH": (9.24, 1.0),
"DIC": (1.34e01 * 1e-3, -1),
}
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
cCaCl2 = 0.02e-3
carbone_total = EXPECTED["DIC"][0] # 1.34e+01 * 1e-3
args = (
np.array([cNaHCO3, cCaCl2]),
TK,
carbone_total,
) # Instead of pCO2->DIC
feed_compounds = ["NaHCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
initial_feed_mass_balance=initial_feed_mass_balance,
)
x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-4)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
# input('')
assert_solution_result(solution, EXPECTED)
def test_engine_mix_closed_add_IS():
EXPECTED = {
"sc": (1314.9, 15),
"I": (15.083e-3, 1.0),
"pH": (8.2, 1.0),
"DIC": (1.5e01 * 1e-3, -1),
"SI": {
"Calcite": (-0.53, 5.0),
"Aragonite": (-0.67, 5.0),
"Halite": (-7.91, 5.0),
},
}
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
cCaCl2 = 0.02e-3
carbone_total = cNaHCO3
args = (
np.array([cNaHCO3, cCaCl2]),
TK,
carbone_total,
) # Instead of pCO2->DIC
feed_compounds = ["NaHCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
sys_eq = create_equilibrium(
feed_compounds, initial_feed_mass_balance=initial_feed_mass_balance
)
x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-4)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Halite"], EXPECTED["SI"]["Halite"]
)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Calcite"], EXPECTED["SI"]["Calcite"]
)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Aragonite"], EXPECTED["SI"]["Aragonite"]
)
def test_engine_mix_closed_add_IS_dic_modified():
EXPECTED = {
"sc": (1339.4, 15),
"I": (16.102e-3, 1.0),
"pH": (9.24, 1.0),
"DIC": (1.34e01 * 1e-3, -1),
"SI": (0.20, 200.0), # FIXME: High error!
}
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
cCaCl2 = 0.02e-3
carbone_total = EXPECTED["DIC"][0] # 1.34e+01 * 1e-3
args = (
np.array([cNaHCO3, cCaCl2]),
TK,
carbone_total,
) # Instead of pCO2->DIC
feed_compounds = ["NaHCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
initial_feed_mass_balance=initial_feed_mass_balance,
)
x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-4)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Calcite"], EXPECTED["SI"]
)
def test_engine_mix_default_feed_mb_closed():
EXPECTED = {
"sc": (1339.4, 15),
"I": (16.102e-3, 1.0),
"pH": (9.24, 1.0),
"DIC": (1.34e01 * 1e-3, -1),
}
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
cCaCl2 = 0.02e-3
carbone_total = EXPECTED["DIC"][0] # 1.34e+01 * 1e-3
args = (
np.array([cNaHCO3, cCaCl2]),
TK,
carbone_total,
) # Instead of pCO2->DIC
feed_compounds = ["NaHCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
# element_mass_balance = ['Na', 'Ca']
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
element_mass_balance=None,
initial_feed_mass_balance=initial_feed_mass_balance,
)
x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-4)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args,
# jac=nahco3_residual_jacobian
)
assert_solution_result(solution, EXPECTED)
# def test_engine_mix_solve_near_saturation_closed():
# EXPECTED = {
# 'sc': (1488.7, -200),
# 'I': (17.305e-3, 1.0),
# 'pH': (8.1, 1.0),
# 'DIC': (1.40e+01*1e-3, -1),
# 'SI': (1.13, 5.0),
# }
# solution, dict_map_idx = solve_near_saturation_mix_case(EXPECTED['DIC'][0])
# assert_solution_result(solution, EXPECTED)
# assert compare_with_expected_perc_tuple(solution.SI[1], EXPECTED['SI'])
def test_engine_baco3_solve_1mM_open():
EXPECTED = { # AQION
"sc": (198.5, 15), # FIXME: add parameters for BaCO3 conductivity
"I": (2.9652e-3, 4.0),
"pH": (8.48, 2.0),
"DIC": (1.97e-3, 5.0),
"SI": (0.85, 3.0),
}
TK = 25.0 + 273.15
cFeed = 1e-3
args = (np.array([cFeed]), TK, pyequion.pCO2_ref)
feed_compounds = ["BaCO3"]
# initial_feed_mass_balance = None
# element_mass_balance = ['Ba']
closing_equation_type = ClosingEquationType.OPEN
sys_eq = create_equilibrium(
feed_compounds, closing_equation_type=closing_equation_type
)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Witherite"], EXPECTED["SI"]
)
def test_engine_baco3_solve_1mM_closed():
EXPECTED = { # AQION
"sc": (241.8, 15), # FIXME: add parameters for BaCO3 conductivity
"I": (3.092e-3, 4.0),
"pH": (10.48, 2.0),
"DIC": (1e-3, -1),
"SI": (2.0, 3.0),
}
TK = 25.0 + 273.15
cFeed = 1e-3
DIC = EXPECTED["DIC"][0]
args = (np.array([cFeed]), TK, DIC)
feed_compounds = ["BaCO3"]
# initial_feed_mass_balance = None
# element_mass_balance = ['Ba']
# closing_equation_type = ClosingEquationType.CARBONE_TOTAL
sys_eq = create_equilibrium(feed_compounds)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Witherite"], EXPECTED["SI"]
)
def test_engine_baco3_nahco3_solve_1mM_each_closed():
EXPECTED = { # AQION
"sc": (300.1, 15), # FIXME: add parameters for BaCO3 conductivity
"I": (4.16e-3, 4.0),
"pH": (10.04, 2.0),
"DIC": (2e-3, -1),
"SI": (2.08, 3.0),
}
TK = 25.0 + 273.15
DIC = EXPECTED["DIC"][0]
cNaHCO3 = 1e-3
cBaCO3 = 1e-3
args = (np.array([cNaHCO3, cBaCO3]), TK, DIC)
feed_compounds = ["NaHCO3", "BaCO3"]
# initial_feed_mass_balance = None
# element_mass_balance = ['Na', 'Ba']
# closing_equation_type = ClosingEquationType.CARBONE_TOTAL
sys_eq = create_equilibrium(feed_compounds)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
assert_solution_result(solution, EXPECTED)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Witherite"], EXPECTED["SI"]
)
def test_engine_baco3_cacl2_solve_5_2_mM_closed():
EXPECTED = { # AQION
"sc": (1099.5, 15), # FIXME: add parameters for BaCO3 conductivity
"I": (15.457e-3, 15.0),
"pH": (10.71, 2.0),
"DIC": (5e-3, -1),
"SI-CaCO3-Calcite": (2.30, 10.0),
"SI-BaCO3": (2.99, 10.0),
# Include Vaterite, Aragonite, Amorph, Ikaite (6H2O)
}
sys_eq = pyequion.create_equilibrium(
feed_compounds=["CaCl2", "BaCO3"], initial_feed_mass_balance=["Cl-"]
)
TK = 25.0 + 273.15
cCaCl2 = 2e-3
cBaCO3 = 5e-3
DIC = 5e-3
args = (np.array([cCaCl2, cBaCO3]), TK, DIC)
solution = pyequion.solve_equilibrium(sys_eq, args=args)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Calcite"], EXPECTED["SI-CaCO3-Calcite"]
)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Witherite"], EXPECTED["SI-BaCO3"]
)
solution = pyequion.solve_equilibrium(
sys_eq, args=args, allow_precipitation=True
)
pyequion.print_solution(solution)
def test_engine_zemaits_na2so4_caso4():
# flake8: noqa
"Was not able to get good aggrement"
EXPECTED = { # AQION
"sc": (1099.5, 15), # FIXME: add parameters for BaCO3 conductivity
"I": (15.457e-3, 15.0),
"pH": (10.71, 2.0),
"DIC": (5e-3, -1),
"SI-CaCO3-Calcite": (2.30, 10.0),
"SI-BaCO3": (2.99, 10.0),
# Include Vaterite, Aragonite, Amorph, Ikaite (6H2O)
}
sys_eq = pyequion.create_equilibrium(feed_compounds=["Na2SO4", "CaSO4"])
TK = 25.0 + 273.15
cNa2SO4 = 94.8e-3
cCaSO4 = 21.8e-3
args = (np.array([cNa2SO4, cCaSO4]), TK, np.nan)
# solution = pyequion.solve_equilibrium(sys_eq, args=args)
# pyequion.print_solution(solution)
# assert_solution_result(solution, EXPECTED)
# assert compare_with_expected_perc_tuple(solution.saturation_index['Calcite'], EXPECTED['SI-CaCO3-Calcite'])
# assert compare_with_expected_perc_tuple(solution.saturation_index['Witherite'], EXPECTED['SI-BaCO3'])
solution = pyequion.solve_equilibrium(
sys_eq, args=args, allow_precipitation=True
)
pyequion.print_solution(solution)
def test_engine_greg_andr_caso4():
sys_eq = pyequion.create_equilibrium(feed_compounds=["CaSO4"])
TK = 25.0 + 273.15
# cNa2SO4 = 94.8e-3
cCaSO4 = 15.6e-3
args = (np.array([cCaSO4]), TK, np.nan)
solution = pyequion.solve_equilibrium(sys_eq, args=args)
pyequion.print_solution(solution)
# assert_solution_result(solution, EXPECTED)
# assert compare_with_expected_perc_tuple(solution.saturation_index['Calcite'], EXPECTED['SI-CaCO3-Calcite'])
# assert compare_with_expected_perc_tuple(solution.saturation_index['Witherite'], EXPECTED['SI-BaCO3'])
solution = pyequion.solve_equilibrium(
sys_eq, args=args, allow_precipitation=True
)
pyequion.print_solution(solution)
def test_engine_baco3_cacl2_nahco3_solve_1mM_each_closed():
EXPECTED = { # AQION
"sc": (484.5, 15), # FIXME: add parameters for BaCO3 conductivity
"I": (6.213e-3, 15.0),
"pH": (9.89, 2.0),
"DIC": (2e-3, -1),
"SI-Calcite": (1.73, 5.0),
"SI-BaCO3": (1.91, 5.0),
}
TK = 25.0 + 273.15
DIC = EXPECTED["DIC"][0]
cCaCl2 = 1e-3
cBaCO3 = 1e-3
cNaHCO3 = 1e-3
args = (np.array([cCaCl2, cBaCO3, cNaHCO3]), TK, DIC)
feed_compounds = ["CaCl2", "BaCO3", "NaHCO3"]
initial_feed_mass_balance = ["Cl-"]
# element_mass_balance = ['Ca', 'Ba', 'Na']
sys_eq = create_equilibrium(
feed_compounds,
initial_feed_mass_balance=initial_feed_mass_balance,
)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Calcite"], EXPECTED["SI-Calcite"]
)
assert compare_with_expected_perc_tuple(
solution.saturation_index["Witherite"], EXPECTED["SI-BaCO3"]
)
# --------------------------------------------
# APPLIED CASE - WITH ION SPECIES
# --------------------------------------------
def test_engine_nap_hco3m_closed():
EXPECTED = {
"sc": (1310.8, -15.0),
"I": (15.032e-3, 0.1),
"pH": (8.2, 1.0),
"DIC": (15 * 1e-3, -1),
}
TK = 25.0 + 273.15
cNap = 15e-3
cHCO3m = 15e-3
carbone_total = 15e-3
args = (np.array([cNap, cHCO3m]), TK, carbone_total)
feed_compounds = ["Na+", "HCO3-"]
# initial_feed_mass_balance = None
# element_mass_balance = ['Na']
# closing_equation_type = ClosingEquationType.CARBONE_TOTAL
sys_eq = create_equilibrium(feed_compounds)
# closing_equation_type, element_mass_balance,
# initial_feed_mass_balance)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
assert_solution_result(solution, EXPECTED)
@pytest.mark.xfail(reason="Expected value for higher T to be obtained.")
def test_engine_nap_hco3m_higher_T_closed():
EXPECTED = {
"sc": (1310.8, -15.0),
"I": (15.032e-3, 0.1),
"pH": (8.2, 1.0),
"DIC": (15 * 1e-3, -1),
}
TK = 80.0 + 273.15
cNap = 15e-3
cHCO3m = 15e-3
carbone_total = 15e-3
args = (np.array([cNap, cHCO3m]), TK, carbone_total)
feed_compounds = ["Na+", "HCO3-"]
# initial_feed_mass_balance = None
# element_mass_balance = ['Na']
# closing_equation_type = ClosingEquationType.CARBONE_TOTAL
sys_eq = create_equilibrium(feed_compounds)
# closing_equation_type, element_mass_balance,
# initial_feed_mass_balance)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
assert_solution_result(solution, EXPECTED)
def test_engine_nap_hco3m_capp_clm_open():
EXPECTED = {
"sc": (1525.3, 25), # FIXME
"I": (20.3e-3, 3.0),
"pH": (8.06, 1.0),
"DIC": (15.0 * 1e-3, 5),
"IS(s)": (2.39, -1), # TODO nao incluido ainda
}
TK = 25.0 + 273.15
cNap = 15e-3
cHCO3m = 15e-3
cBapp = 2e-3
cClpp = 2 * 2e-3
carbone_total = 15e-3
# args = (np.array([cNap, cHCO3m, cBaqpp]), TK, carbone_total)
args = (np.array([cNap, cHCO3m, cBapp, cClpp]), TK, carbone_total)
feed_compounds = ["Na+", "HCO3-", "Ca++", "Cl-"]
fixed_elements = ["Cl-"]
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
# element_mass_balance,
fixed_elements=fixed_elements,
)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
# assert compare_with_expected_perc_tuple(solution.c_molal[sys_eq.idx.BaCO3s], EXPECTED['BaCO3(s)'])
def test_engine_nap_hco3m_bacl2_open():
EXPECTED = {
"sc": (1525.3, 25), # FIXME
"I": (20.776e-3, 3.0),
"pH": (9.22, 1.0),
"DIC": (13.2 * 1e-3, 5),
"IS(s)": (2.39, -1), # TODO nao incluido ainda
}
TK = 25.0 + 273.15
cNap = 15e-3
cHCO3m = 15e-3
cBaqpp = 2e-3
# carbone_total = 15e-3
# args = (np.array([cNap, cHCO3m, cBaqpp]), TK, carbone_total)
args = (np.array([cNap, cHCO3m, cBaqpp]), TK, pyequion.pCO2_ref)
feed_compounds = ["Na+", "HCO3-", "BaCl2"]
initial_feed_mass_balance = ["Cl-"]
# element_mass_balance = ['Na', 'Ba']
closing_equation_type = ClosingEquationType.OPEN
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
# element_mass_balance,
initial_feed_mass_balance=initial_feed_mass_balance,
)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
# assert compare_with_expected_perc_tuple(solution.c_molal[sys_eq.idx.BaCO3s], EXPECTED['BaCO3(s)'])
def test_engine_water_co2_solve_closed():
EXPECTED = { # from-aqion
"sc": (8.2, -15.0),
"I": (0.021e-3, 1.0),
"pH": (4.68, 1.0),
"DIC": (1e-3, -1),
}
TK = 25.0 + 273.15
# cFeed = 15e-3
carbone_total = EXPECTED["DIC"][0] # mM
args = (np.array([]), TK, carbone_total)
feed_compounds = ["CO2"]
initial_feed_mass_balance = None
element_mass_balance = []
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
element_mass_balance,
initial_feed_mass_balance,
)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
assert_solution_result(solution, EXPECTED)
pass
# --------------------------------------------
# APPLIED CASE - ENGINE - CO2(aq) CO32- AND H2O
# --------------------------------------------
def test_engine_water_co2_solve_fixed_pH_closing_equation():
EXPECTED = { # from-aqion
"sc": (8.2, -15.0),
"I": (0.021e-3, 1.0),
"pH": (4.68, 1.0),
"DIC": (1e-3, -1),
}
TK = 25.0 + 273.15
# cFeed = 15e-3
pH_fixed = EXPECTED["pH"][0] # mM
args = (np.array([]), TK, pH_fixed)
feed_compounds = ["CO2"]
initial_feed_mass_balance = None
element_mass_balance = []
closing_equation_type = ClosingEquationType.PH
# closing_equation_type=ClosingEquationType.NONE
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
element_mass_balance,
initial_feed_mass_balance,
)
x_guess = np.full(sys_eq.idx_control.idx["size"], -2)
solution, fsol, _ = solve_equilibrium(
sys_eq, args=args, x_guess=x_guess, ret_fsol=True
)
print(fsol.message)
assert_solution_result(solution, EXPECTED)
pass
def test_engine_nap_hco3m_Capp_clm_closed():
EXPECTED = {
"sc": (1339.4, -200),
"I": (16.102e-3, 1.0),
"pH": (9.24, 1.0),
"DIC": (1.34e01 * 1e-3, -1),
}
TK = 25.0 + 273.15
cNap = 15e-3
cHCO3m = 15e-3
cCapp = 0.02e-3
cClm = 0.02e-3
carbone_total = EXPECTED["DIC"][0]
# args = (np.array([cNap, cHCO3m, cBaqpp]), TK, carbone_total)
feed_compounds = ["Na+", "HCO3-", "Ca++", "Cl-"]
# initial_feed_mass_balance = ['Cl-']
fixed_elements = ["Cl-"]
allow_precipitation = False
# element_mass_balance = ['Na', 'Ca']
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
# closing_equation_type, element_mass_balance,
closing_equation_type,
allow_precipitation=allow_precipitation,
fixed_elements=fixed_elements,
)
args = (np.array([cNap, cHCO3m, cCapp, cClm]), TK, carbone_total)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
def test_engine_nap_hco3m_Capp_clm_check_hco3_conc_closed():
EXPECTED = {
"sc": (1272.0, -1),
"I": (15.885e-3, 1.0),
"pH": (7.92, 1.0),
}
TK = 25.0 + 273.15
cNap = 1e-3
cHCO3m = 1e-3
cCapp = 5e-3
cClm = 10e-3 # CAREFUL HERE-> !! STOIC CL*2
carbone_total = cHCO3m
# args = (np.array([cNap, cHCO3m, cBaqpp]), TK, carbone_total)
feed_compounds = ["Na+", "HCO3-", "Ca++", "Cl-"]
# initial_feed_mass_balance = ['Cl-']
fixed_elements = ["Cl-"]
allow_precipitation = False
# element_mass_balance = ['Na', 'Ca']
sys_eq = create_equilibrium(
feed_compounds,
# element_mass_balance,
allow_precipitation=allow_precipitation,
fixed_elements=fixed_elements,
)
args = (np.array([cNap, cHCO3m, cCapp, cClm]), TK, carbone_total)
# x_guess = np.full(nahco3_eq.idx.size, -1.0)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
def test_engine_mix_solve_1_and_5_closed():
EXPECTED = {
"sc": (1272.0, -1),
"I": (15.885e-3, 1.0),
"pH": (7.92, 1.0),
}
TK = 25.0 + 273.15
cNaHCO3 = 1e-3
cCaCl2 = 5e-3
carbone_total = cNaHCO3 # 1.34e+01 * 1e-3
args = (
np.array([cNaHCO3, cCaCl2]),
TK,
carbone_total,
) # Instead of pCO2->DIC
feed_compounds = ["NaHCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
# closing_equation_type = ClosingEquationType.CARBONE_TOTAL
sys_eq = create_equilibrium(
feed_compounds, initial_feed_mass_balance=initial_feed_mass_balance
)
x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-4)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
def test_two_irreversible_cacl2_bacl2():
EXPECTED = {
"sc": (3417.7, -1),
"I": (43.02e-3, 3.0),
"pH": (7.91, 1.0),
}
feed_compounds = ["NaHCO3", "CaCl2", "BaCl2"]
initial_feed_mass_balance = ["Cl-"]
# closing_eq = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds, initial_feed_mass_balance=initial_feed_mass_balance
)
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
cCaCl2 = 5e-3
cBaCl2 = 5e-3
carbone_total = cNaHCO3
args = (np.array([cNaHCO3, cCaCl2, cBaCl2]), TK, carbone_total)
solution = solve_equilibrium(sys_eq, args=args)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
def test_engine_mix_solve_1_and_5_closed_precipitation():
EXPECTED = {
"sc": (6977.5, -1),
"I": (99.779e-3, 1.0),
"pH": (8.85, 1.0),
"SI": {
"Calcite": (0.00, 1.0),
"Aragonite": (-0.14, 1.0),
"Vaterite": (-0.57, 1.0),
},
"sat-conc": {
"Calcite": (80.0e-3, 1.0),
"Aragonite": (0.0, -1.0),
"Vaterite": (0.0, -1.0),
},
}
TK = 25.0 + 273.15
cCaCO3 = 80e-3
cCaCl2 = 33.25e-3
carbone_total = cCaCO3 # 1.34e+01 * 1e-3
args = (
np.array([cCaCO3, cCaCl2]),
TK,
carbone_total,
) # Instead of pCO2->DIC
feed_compounds = ["CaCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
allow_precipitation = True
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
initial_feed_mass_balance=initial_feed_mass_balance,
)
    # This used an old API: create_equilibrium builds the system without solid
    # reactions, and only in the solve step is the system adjusted to treat precipitation
# x_guess = np.full(sys_eq.idx_control.idx['size'], -1e-1)
solution = solve_equilibrium(
sys_eq,
args=args,
# jac=nahco3_residual_jacobian
allow_precipitation=allow_precipitation,
)
pyequion.print_solution(solution)
assert_solution_result(solution, EXPECTED)
for tag in EXPECTED["SI"]:
assert compare_with_expected_perc_tuple(
solution.saturation_index[tag], EXPECTED["SI"][tag]
)
assert compare_with_expected_perc_tuple(
solution.preciptation_conc[tag], EXPECTED["sat-conc"][tag]
)
def test_engine_activity_external_ideal():
EXPECTED = {
"pH": (8.2806817914915, 10.0), # IDEAL
}
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
cCaCl2 = 0.02e-3
carbone_total = cNaHCO3
c_feed = np.array([cNaHCO3, cCaCl2])
args = (c_feed, TK, carbone_total) # Instead of pCO2->DIC
feed_compounds = ["NaHCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
element_mass_balance = ["Na", "Ca"]
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
element_mass_balance,
initial_feed_mass_balance,
)
# setup_log_gamma(sys_eq, TK, c_feed)
x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-4)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args,
setup_log_gamma_func=pyequion.setup_log_gamma_ideal,
calc_log_gamma=pyequion.calc_log_gamma_ideal,
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert np.isclose(EXPECTED["pH"][0], solution.pH)
def test_engine_activity_external_debye_huckel():
EXPECTED = {
"pH": (8.2, 1.0),
}
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
cCaCl2 = 0.02e-3
carbone_total = cNaHCO3
c_feed = np.array([cNaHCO3, cCaCl2])
args = (c_feed, TK, carbone_total) # Instead of pCO2->DIC
feed_compounds = ["NaHCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
element_mass_balance = ["Na", "Ca"]
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
element_mass_balance,
initial_feed_mass_balance,
)
# setup_log_gamma(sys_eq, TK, c_feed)
x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-4)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert np.isclose(EXPECTED["pH"][0], solution.pH, 1e-2)
def test_engine_activity_external_debye_huckel_mean_coef_for_neutral():
EXPECTED = {
"pH": (8.2, 1.0),
}
TK = 25.0 + 273.15
cNaHCO3 = 15e-3
cCaCl2 = 0.02e-3
carbone_total = cNaHCO3
c_feed = np.array([cNaHCO3, cCaCl2])
args = (c_feed, TK, carbone_total) # Instead of pCO2->DIC
feed_compounds = ["NaHCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
element_mass_balance = ["Na", "Ca"]
closing_equation_type = ClosingEquationType.CARBON_TOTAL
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
element_mass_balance,
initial_feed_mass_balance,
)
# setup_log_gamma(sys_eq, TK, c_feed)
x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-4)
solution = solve_equilibrium(
sys_eq,
x_guess=x_guess,
args=args,
# setup_log_gamma_func=pyequion.act.setup_log_gamma_bdot_mean_activity_neutral,
# calc_log_gamma=pyequion.act.calc_log_gamma_dh_bdot_mean_activity_neutral
activity_model_type=pyequion.TypeActivityCalculation.DEBYE_MEAN
# jac=nahco3_residual_jacobian
)
pyequion.print_solution(solution)
assert np.isclose(EXPECTED["pH"][0], solution.pH, 1e-2)
def test_engine_caco3_cacl2_pitzer():
TK = 25.0 + 273.15
cNaHCO3 = 80e-3
cCaCl2 = 33.25e-3
args = (np.array([cNaHCO3, cCaCl2]), TK, pyequion.pCO2_ref)
feed_compounds = ["CaCO3", "CaCl2"]
initial_feed_mass_balance = ["Cl-"]
closing_equation_type = ClosingEquationType.OPEN
sys_eq = create_equilibrium(
feed_compounds,
closing_equation_type,
initial_feed_mass_balance=initial_feed_mass_balance,
)
    x_guess = np.full(sys_eq.idx_control.idx["size"], -1e-2)
# Copyright (c) 2020-2021 impersonator.org authors (<NAME> and <NAME>). All rights reserved.
import cv2
import numpy as np
import torchvision
from typing import Union, List
import torch
from scipy.spatial.transform import Rotation as R
def compute_scaled_size(origin_size, control_size):
"""
Args:
origin_size (tuple or List): (h, w) or [h, w]
control_size (int or float): the final size of the min(h, w)
Returns:
scaled_size (tuple or List): (h', w')
"""
scale_rate = np.sqrt(control_size * control_size / (origin_size[0] * origin_size[1]))
scaled_size = (int(origin_size[0] * scale_rate), int(origin_size[1] * scale_rate))
return scaled_size
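# Note (added): the scale factor above is chosen so that h' * w' is approximately
# control_size ** 2 while preserving the aspect ratio. Illustrative call (values
# approximate): compute_scaled_size((720, 1280), 512) -> roughly (384, 682).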
def read_cv2_img(path):
"""
Read color images
Args:
path (str): Path to image
Returns:
img (np.ndarray): color images with RGB channel, and its shape is (H, W, 3).
"""
img = cv2.imread(path, -1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def load_images(images_paths: Union[str, List[str]], image_size):
"""
Args:
images_paths (Union[str, List[str]]):
Returns:
images (np.ndarray): shape is (ns, 3, H, W), channel is RGB, and color space is [-1, 1].
"""
if isinstance(images_paths, str):
images_paths = [images_paths]
images = []
for image_path in images_paths:
image = read_cv2_img(image_path)
image = normalize_img(image, image_size=image_size, transpose=True)
images.append(image)
images = np.stack(images, axis=0) # (ns, 3, H, W)
return images
def read_mask(path, image_size):
"""
Read mask
Args:
path (str): Path to mask
Returns:
mask (np.ndarray): mask image with grayscale, and its shape is (1, H, W) in the range of [0, 1]
"""
mask = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
mask = cv2.resize(mask, (image_size, image_size))
mask = mask.astype(np.float32) / 255
mask = np.expand_dims(mask, 0)
return mask
def load_parse(parse_path, image_size):
mask = cv2.imread(parse_path, cv2.IMREAD_GRAYSCALE)
mask = cv2.resize(mask, (image_size, image_size))
mask = mask.astype(np.float32) / 255
mask = np.expand_dims(mask, 0)
return mask
def load_img_parse(img_path, parse_path, image_size):
image = transform_img(read_cv2_img(img_path), transpose=True)
mask = load_parse(parse_path, image_size)
return image, mask
def save_cv2_img(img, path, image_size=None, normalize=False, transpose=True):
if transpose:
img = np.transpose(img, (1, 2, 0))
if normalize:
img = (img + 1) / 2.0 * 255
img = img.astype(np.uint8)
if len(img.shape) == 3:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if image_size is not None:
img = cv2.resize(img, (image_size, image_size))
cv2.imwrite(path, img)
return img
def transform_img(image, image_size=None, transpose=False):
if image_size is not None and image_size != image.shape[0]:
image = cv2.resize(image, (image_size, image_size))
image = image.astype(np.float32)
image /= 255.0
if transpose:
image = image.transpose((2, 0, 1))
return image
def normalize_img(image, image_size=None, transpose=False):
image = transform_img(image, image_size, transpose)
image *= 2
image -= 1
return image
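# Note (added): transform_img maps uint8 pixels to float32 in [0, 1];
# normalize_img then rescales them to [-1, 1] (x * 2 - 1), matching the color
# space described in the load_images docstring above.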
def resize_img(img, scale_factor):
new_size = (np.floor(np.array(img.shape[0:2]) * scale_factor)).astype(int)
new_img = cv2.resize(img, (new_size[1], new_size[0]))
# This is scale factor of [height, width] i.e. [y, x]
actual_factor = [
new_size[0] / float(img.shape[0]), new_size[1] / float(img.shape[1])
]
return new_img, actual_factor
def tensor2im(img, imtype=np.uint8, unnormalize=True, idx=0, nrows=None):
# select a sample or create grid if img is a batch
if len(img.shape) == 4:
nrows = nrows if nrows is not None else int(np.sqrt(img.size(0)))
img = img[idx] if idx >= 0 else torchvision.utils.make_grid(img, nrows)
img = img.cpu().float()
if unnormalize:
img += 1.0
img /= 2.0
image_numpy = img.numpy()
# image_numpy = np.transpose(image_numpy, (1, 2, 0))
image_numpy *= 255.0
return image_numpy.astype(imtype)
def kp_to_bbox_param(kp, vis_thresh=0, diag_len=150.0):
"""
Finds the bounding box parameters from the 2D keypoints.
Args:
kp (Kx3): 2D Keypoints.
vis_thresh (float): Threshold for visibility.
diag_len(float): diagonal length of bbox of each person
Returns:
[center_x, center_y, scale]
"""
if kp is None:
return
if kp.shape[1] == 3:
vis = kp[:, 2] > vis_thresh
if not np.any(vis):
return
min_pt = np.min(kp[vis, :2], axis=0)
max_pt = np.max(kp[vis, :2], axis=0)
else:
min_pt = np.min(kp, axis=0)
max_pt = np.max(kp, axis=0)
person_height = np.linalg.norm(max_pt - min_pt)
if person_height < 0.5:
return
center = (min_pt + max_pt) / 2.
scale = diag_len / person_height
return np.append(center, scale)
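# Illustrative sketch (hypothetical keypoints, not from any dataset): two visible
# keypoints 300 px apart give scale = 150 / 300 = 0.5 and their midpoint as center.
#   kp = np.array([[100., 200., 1.0], [400., 200., 1.0]])
#   kp_to_bbox_param(kp)  # -> array([250., 200., 0.5])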
def process_hmr_img(im_path, bbox_param, rescale=None, image=None, image_size=256, proc=False):
"""
Args:
im_path (str): the path of image.
image (np.ndarray or None): if it is None, then loading the im_path, else use image.
bbox_param (3,) : [cx, cy, scale].
rescale (float, np.ndarray or None): rescale factor.
proc (bool): the flag to return processed image or not.
image_size (int):
Returns:
proc_img (np.ndarray): if proc is True, return the process image, else return the original image.
"""
if image is None:
image = cv2.imread(im_path)
orig_h, orig_w = image.shape[0:2]
center = bbox_param[:2]
scale = bbox_param[2]
if rescale is not None:
scale = rescale
if proc:
image_scaled, scale_factors = resize_img(image, scale)
resized_h, resized_w = image_scaled.shape[:2]
else:
scale_factors = [scale, scale]
resized_h = orig_h * scale
resized_w = orig_w * scale
    center_scaled = np.round(center * scale_factors).astype(int)
if proc:
# Make sure there is enough space to crop image_size x image_size.
image_padded = np.pad(
array=image_scaled,
pad_width=((image_size,), (image_size,), (0,)),
mode='edge'
)
padded_h, padded_w = image_padded.shape[0:2]
else:
padded_h = resized_h + image_size * 2
padded_w = resized_w + image_size * 2
center_scaled += image_size
# Crop image_size x image_size around the center.
margin = image_size // 2
start_pt = (center_scaled - margin).astype(int)
end_pt = (center_scaled + margin).astype(int)
end_pt[0] = min(end_pt[0], padded_w)
end_pt[1] = min(end_pt[1], padded_h)
if proc:
proc_img = image_padded[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
# height, width = image_scaled.shape[:2]
else:
# height, width = end_pt[1] - start_pt[1], end_pt[0] - start_pt[0]
proc_img = cv2.resize(image, (image_size, image_size))
# proc_img = None
center_scaled -= start_pt
return {
# return original too with info.
'image': proc_img,
'im_path': im_path,
'im_shape': (orig_h, orig_w),
'center': center_scaled,
'scale': scale,
'start_pt': start_pt,
}
def cam_denormalize(cam, N):
# This is camera in crop image coord.
new_cam = np.hstack([N * cam[0] * 0.5, cam[1:] + (2. / cam[0]) * 0.5])
return new_cam
def cam_init2orig(cam, scale, start_pt, N=224):
"""
Args:
cam (3,): (s, tx, ty)
scale (float): scale = resize_h / orig_h
start_pt (2,): (lt_x, lt_y)
N (int): hmr_image_size (224) or IMG_SIZE
Returns:
cam_orig (3,): (s, tx, ty), camera in original image coordinates.
"""
# This is camera in crop image coord.
cam_crop = np.hstack([N * cam[0] * 0.5, cam[1:] + (2. / cam[0]) * 0.5])
# This is camera in orig image coord
cam_orig = np.hstack([
cam_crop[0] / scale,
cam_crop[1:] + (start_pt - N) / cam_crop[0]
])
# print('cam crop', cam_crop)
# print('cam orig', cam_orig)
return cam_orig
def cam_orig2crop_center(cam, scale, start_pt, N=256, normalize=True):
"""
Args:
cam (3,): (s, tx, ty), camera in orginal image coordinates.
scale (float): scale = resize_h / orig_h or (resize_w / orig_w)
start_pt (2,): (lt_x, lt_y)
N (int): hmr_image_size (224) or IMG_SIZE
normalize (bool)
Returns:
"""
cam_recrop = np.hstack([
cam[0] * scale,
cam[1:] + (N - start_pt) / (scale * cam[0])
])
# print('cam re-crop', cam_recrop)
if normalize:
cam_norm = np.hstack([
cam_recrop[0] * (2. / N),
cam_recrop[1:] - N / (2 * cam_recrop[0])
])
        # print('cam norm', cam_norm)
else:
cam_norm = cam_recrop
return cam_norm
def cam_orig2boxcrop(cam, scale, start_pt, N=256, normalize=True):
"""
Args:
cam (3,): (s, tx, ty), camera in orginal image coordinates.
scale (float): scale = resize_h / orig_h or (resize_w / orig_w)
start_pt (2,): (lt_x, lt_y)
N (int): hmr_image_size (224) or IMG_SIZE
normalize (bool)
Returns:
"""
cam_recrop = np.hstack([
cam[0] * scale,
cam[1:] - start_pt / cam[0]
])
# print('cam re-crop', cam_recrop)
if normalize:
cam_norm = np.hstack([
cam_recrop[0] * 2. / N,
cam_recrop[1:] - N / (2 * cam_recrop[0])
])
# print('cam norm', cam_norm)
else:
cam_norm = cam_recrop
return cam_norm
def cam_process(cam_init, scale_150, start_pt_150, scale_proc, start_pt_proc, HMR_IMG_SIZE=224, IMG_SIZE=256):
"""
Args:
cam_init:
scale_150:
start_pt_150:
scale_proc:
start_pt_proc:
HMR_IMG_SIZE:
IMG_SIZE:
Returns:
"""
# print(HMR_IMG_SIZE, IMG_SIZE)
cam_orig = cam_init2orig(cam_init, scale=scale_150, start_pt=start_pt_150, N=HMR_IMG_SIZE)
cam_crop = cam_orig2crop_center(cam_orig, scale=scale_proc, start_pt=start_pt_proc, N=IMG_SIZE, normalize=True)
return cam_orig, cam_crop
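# Note (added): cam_process chains the two transforms above -- the HMR crop-space
# camera is first mapped back to original-image coordinates (cam_init2orig) and
# then into the IMG_SIZE processing crop (cam_orig2crop_center with normalize=True).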
def intrinsic_mtx(f, c):
"""
Obtain intrisic camera matrix.
Args:
f: np.array, 1 x 2, the focus lenth of camera, (fx, fy)
c: np.array, 1 x 2, the center of camera, (px, py)
Returns:
- cam_mat: np.array, 3 x 3, the intrisic camera matrix.
"""
return np.array([[f[1], 0, c[1]],
[0, f[0], c[0]],
[0, 0, 1]], dtype=np.float32)
def extrinsic_mtx(rt, t):
"""
Obtain extrinsic matrix of camera.
Args:
rt: np.array, 1 x 3, the angle of rotations.
t: np.array, 1 x 3, the translation of camera center.
Returns:
- ext_mat: np.array, 3 x 4, the extrinsic matrix of camera.
"""
# R is (3, 3)
R = cv2.Rodrigues(rt)[0]
t = np.reshape(t, newshape=(3, 1))
Rc = np.dot(R, t)
    ext_mat = np.hstack((R, -Rc))
    return ext_mat
# from imports import *
import threading
import pandas as pd
# from plyfile import PlyData
from sklearn.decomposition import PCA
from multiprocessing import Process
import random
import os
from time import time
import numpy as np
import ezdxf
import open3d as o3d
from math import sqrt, floor
import laspy
import h5py
from os import listdir
from os.path import isfile, join
from shapely.geometry.collection import GeometryCollection
from shapely.geometry.polygon import Polygon
from shapely.geometry import Point, MultiPoint, LineString
from shapely import ops
from shapely.geometry.multilinestring import MultiLineString
def SaveRenderOptions(vis):
print("Saving camera parameters")
params = vis.get_view_control().convert_to_pinhole_camera_parameters()
o3d.io.write_pinhole_camera_parameters("./data/camera.json", params)
return False
def MaskTrajectoryFile(vis):
trajectory_file = "./data/camera_trajectory.json"
mask_trajectory_file = "./data/camera_trajectory.json.mask"
if(os.path.exists(trajectory_file)):
os.rename(trajectory_file, mask_trajectory_file)
elif(os.path.exists(mask_trajectory_file)):
os.rename(mask_trajectory_file, trajectory_file)
return False
class TrajectoryRecorder():
def __init__(self):
self.trajectory = []
self.trajectory_file = "./data/camera_trajectory.json"
if(os.path.exists(self.trajectory_file)):
os.remove(self.trajectory_file)
def record(self, vis):
params = vis.get_view_control().convert_to_pinhole_camera_parameters()
self.trajectory.append(params)
def save(self, vis):
trajectory = o3d.camera.PinholeCameraTrajectory()
trajectory.parameters = self.trajectory
o3d.io.write_pinhole_camera_trajectory(self.trajectory_file, trajectory)
def delete(self, vis):
self.trajectory = []
if(os.path.exists(self.trajectory_file)):
os.remove(self.trajectory_file)
def AppendCameraTrajectory(vis):
print("Append camera trajectory.")
params = vis.get_view_control().convert_to_pinhole_camera_parameters()
trajectory = o3d.camera.PinholeCameraTrajectory()
if(os.path.exists("./data/camera_trajectory.json")):
trajectory = o3d.io.read_pinhole_camera_trajectory("./data/camera_trajectory.json")
trajectory.parameters = trajectory.parameters + [params]
o3d.io.write_pinhole_camera_trajectory("./data/camera_trajectory.json", trajectory)
return False
def LoadRenderOptions(vis, returnVis = False):
# time.sleep(1) # sleep 1 second
paramsFile = "./data/camera.json"
if(not os.path.exists(paramsFile)):
return False
print("Loading camera parameters")
params = o3d.io.read_pinhole_camera_parameters(paramsFile)
vis.get_view_control().convert_from_pinhole_camera_parameters(params)
if(returnVis):
return vis
else:
return False
def AnimationCallBack(vis):
ctr = vis.get_view_control()
ctr.rotate(0.2, 0.0)
# ctr.scale(1/80)
return False
class PlayTrajectory():
def __init__(self):
assert(os.path.exists("./data/camera_trajectory.json"))
self.trajectory = o3d.io.read_pinhole_camera_trajectory("./data/camera_trajectory.json").parameters
self.i = 0
self.time = time()
def StepTrajectory(self, vis):
if(self.i < len(self.trajectory)): # and time() - self.time > 1):
ctr = vis.get_view_control()
ctr.convert_from_pinhole_camera_parameters(self.trajectory[self.i])
self.time = time()
self.i += 1
class DataTool:
def __init__(self, piece_size = 1000000, threads_allowed = 1000, pointSize = 5):
self.piece_size = piece_size
self.threads_allowed = threads_allowed
self.vis = None
self.bBox = None
self.pointCloud = None
self.displayCloud = None
self.pointSize = pointSize
def ReadPointCloudTxt(self, path, pcData):
t = time()
pcFile = open(path, 'r')
self.pointCloud = None
threads = [None] * self.threads_allowed
points_read = 0
thread_index = 0
while True:
            if(threads[thread_index] is not None and threads[thread_index].is_alive()):
print("Wait for thread {}".format(thread_index), end=" \r")
threads[thread_index].join()
chunk = pcFile.readlines(self.piece_size)
if(len(chunk) < 1):
break
if(pcData.shape[0] <= points_read + len(chunk)):
if(pcData.shape[0] == 0):
pcData.resize((points_read + len(chunk))*2, axis=0)
else:
pcData.resize(pcData.shape[0]*2, axis=0)
# if(type(self.pointCloud) is np.ndarray):
# self.pointCloud = np.append(self.pointCloud, np.zeros(shape=(len(chunk), 7), dtype="float32"), axis=0)
# else:
# self.pointCloud = np.zeros(shape=(len(chunk), 7), dtype="float32")
threads[thread_index] = threading.Thread(target= self.__ReadPCChunkTxt, args=(chunk, points_read, pcData))
threads[thread_index].start()
points_read += len(chunk)
thread_index += 1
if(thread_index >= self.threads_allowed):
thread_index = 0
print("{0} points read".format(points_read), end='\r')
for i in range(self.threads_allowed):
if(threads[i] is not None):
print("Join thread {}".format(i), end=" \r")
threads[i].join()
pcData.resize(points_read, axis=0)
pcFile.close()
print("PC Finished reading {} points in {:.2f} min".format(pcData.shape[0], (time() - t)/60))
return self.pointCloud
def __ReadPCChunkTxt(self, chunk, start_index, pcData):
for i in range(len(chunk)):
if(chunk[i] != ""):
flts = chunk[i].replace('\n','').split()
# self.pointCloud[start_index + i] = np.array([float(flts[0]), float(flts[1]), float(flts[2]), float(flts[3]),
# float(flts[4]), float(flts[5]), float(flts[6])])
pcData[start_index + i] = np.array([float(flts[0]), float(flts[1]), float(flts[2]), float(flts[3]),
float(flts[4]), float(flts[5]), float(flts[6])])
del chunk
def ReadPointLabelsTxt(self, path):
t = time()
labelsFile = open(path, 'r')
labelsArr = labelsFile.read().split('\n')
if(labelsArr[-1] == ''):
del labelsArr[-1]
self.labels = np.array(labelsArr, dtype='int')
print("Finished reading {} labels in {:.2f} min".format(self.labels.shape[0], (time() - t)/60))
return self.labels
def ConvertToBin(self, path_to_pointcloud, path_to_pointlabels, output_path, extension = ".hdf5"):
if(os.path.isfile(output_path+extension)):
return
else:
print("Converting: ",output_path)
t = time()
pointcloud = np.array(pd.read_csv(path_to_pointcloud, sep=" ", dtype=np.float32, header=None), dtype=np.float32)
h5File = None
if(extension == ".hdf5"):
h5File = h5py.File(output_path+".hdf5", 'w')
h5File.create_dataset("pointcloud", data=pointcloud, dtype='float32', compression="lzf")
del pointcloud
if(path_to_pointlabels):
labels = np.array(pd.read_csv(path_to_pointlabels, dtype=np.int8, header=None))
if(extension == ".hdf5"):
h5File.create_dataset("labels", data=labels, dtype='int8', compression="lzf")
elif(extension == ".npy"):
pointcloud = np.concatenate((pointcloud, labels.astype(np.float32)), 1)
del labels
print("Done reading")
if(extension == ".hdf5"):
h5File.close()
elif(extension == ".npy"):
np.save(output_path, pointcloud, allow_pickle=False)
print("done in {}:{} min.".format(int((time() - t)/60), int((time() - t)%60)))
def ConvertDatasets(self, folder, outputFolder):
pcFiles = [f for f in listdir(folder) if isfile(join(folder, f)) and f.endswith('.txt')]
os.makedirs(outputFolder, exist_ok=True)
for file in pcFiles:
name = file.replace('.txt', '')
if(not isfile(join(folder, name+'.labels'))):
self.ConvertToBin(join(folder, name+'.txt'), None, join(outputFolder, name))
else:
self.ConvertToBin(join(folder, name+'.txt'), join(folder, name+'.labels'), join(outputFolder, name))
def createWindow(self, windowName = "Pointcloud"):
# vis = o3d.visualization.Visualizer()
self.vis = o3d.visualization.VisualizerWithKeyCallback()
self.vis.create_window(windowName, 800, 800)
opt = self.vis.get_render_option()
# opt.line_width = 100
opt.point_size = self.pointSize
# opt.background_color = np.asarray([0, 0, 0])
def addPointCloud(self, pointCloud, downSample = False, color = None):
pc = o3d.geometry.PointCloud()
pc.points = o3d.utility.Vector3dVector(pointCloud)
if(color is not None):
pc.paint_uniform_color(np.asarray(color))
if(downSample):
pc = o3d.geometry.voxel_down_sample(pc, voxel_size=0.02)
self.vis.add_geometry(pc)
def setPointCloud(self, pointCloud, downSample = False, color = None):
pc = o3d.geometry.PointCloud()
pc.points = o3d.utility.Vector3dVector(pointCloud)
if(downSample):
pc = o3d.geometry.voxel_down_sample(pc, voxel_size=0.02)
if(self.pointCloud is None):
self.pointCloud = pc
else:
self.pointCloud.points = pc.points
if(color is not None):
self.pointCloud.paint_uniform_color(np.asarray(color))
def addBoundingBox(self, bBox, color = []):
self.addBbox(self.vis, bBox, color)
def addPolyline(self, points, color = []):
self.addLine(self.vis, points, color)
@staticmethod
def addLine(vis, points, color = []):
lines = []
for i in range(len(points)-1):
lines.append([i, i+1])
colors = [color for _ in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(np.array(points))
line_set.lines = o3d.utility.Vector2iVector(np.array(lines))
line_set.colors = o3d.utility.Vector3dVector(np.array(colors))
vis.add_geometry(line_set)
@staticmethod
def addBbox(vis, bBox, color = []):
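# bBox is expected as [minX, maxX, minY, maxY, minZ, maxZ] (matching BoundingBoxFromVoxel);
# the 12 index pairs below connect the 8 box corners into wireframe edges.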
lines = [[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],[0, 4], [1, 5], [2, 6], [3, 7]]
box = [[bBox[0], bBox[2], bBox[4]],
[bBox[1], bBox[2], bBox[4]],
[bBox[0], bBox[3], bBox[4]],
[bBox[1], bBox[3], bBox[4]],
[bBox[0], bBox[2], bBox[5]],
[bBox[1], bBox[2], bBox[5]],
[bBox[0], bBox[3], bBox[5]],
[bBox[1], bBox[3], bBox[5]]]
if(len(color) == 0):
colors = [[1,0,0] for _ in range(len(lines))]
else:
colors = [color for _ in range(len(lines))]
line_set = o3d.geometry.LineSet()
line_set.points = o3d.utility.Vector3dVector(np.array(box))
line_set.lines = o3d.utility.Vector2iVector(np.array(lines))
line_set.colors = o3d.utility.Vector3dVector(np.array(colors))
vis.add_geometry(line_set)
def setBoundingBox(self, bBox, color = None):
box = [[bBox[0], bBox[2], bBox[4]],
[bBox[1], bBox[2], bBox[4]],
[bBox[0], bBox[3], bBox[4]],
[bBox[1], bBox[3], bBox[4]],
[bBox[0], bBox[2], bBox[5]],
[bBox[1], bBox[2], bBox[5]],
[bBox[0], bBox[3], bBox[5]],
[bBox[1], bBox[3], bBox[5]]]
if(color is None):
colors = [[1,0,0] for _ in range(12)] #len(lines)
else:
colors = [color for _ in range(12)]
if(self.bBox is None):
lines = [[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7],[0, 4], [1, 5], [2, 6], [3, 7]]
line_set = o3d.geometry.LineSet()
line_set.lines = o3d.utility.Vector2iVector(np.array(lines))
line_set.points = o3d.utility.Vector3dVector(np.array(box))
line_set.colors = o3d.utility.Vector3dVector(np.array(colors))
self.bBox = line_set
else:
self.bBox.points = o3d.utility.Vector3dVector(np.array(box))
self.bBox.colors = o3d.utility.Vector3dVector(np.array(colors))
def setDisplayedCloud(self, bBox):
if(self.pointCloud is None or bBox is None):
return
points = np.asarray(self.pointCloud.points)
rows = np.where((points[:,0] >= bBox[0]) &
(points[:,0] <= bBox[1]) &
(points[:,1] >= bBox[2]) &
(points[:,1] <= bBox[3]) &
(points[:,2] >= bBox[4]) &
(points[:,2] <= bBox[5]) )
if(self.displayCloud is None):
self.displayCloud = o3d.geometry.PointCloud()
self.displayCloud.points = o3d.utility.Vector3dVector(points[rows])
def VisualizePointCloudAsync(self, dataset = [], dataColors = None, downSample = False, deleteZeros = False, bBoxes = None, lines = None, boxesColors = [], linesColors = [], windowName = None, animationFunction = None, loadCameraSettings = False, recordTrajectory = False):
p = Process(target=self.VisualizePointCloud, args=(dataset, dataColors, downSample, deleteZeros, bBoxes, lines, boxesColors, linesColors, windowName, animationFunction, loadCameraSettings, recordTrajectory))
p.start()
def VisualizePointCloud(self, dataset, dataColors = None, downSample = False, deleteZeros = False, bBoxes = None, lines = None, boxesColors = [], linesColors = [], windowName = None, animationFunction = None, loadCameraSettings = False, recordTrajectory = False):
# if(len(dataset) == 0):
# return
if(windowName is None):
pointCount = sum(0 if (data is None) else len(data) for data in dataset)
windowName = f"Point count: {pointCount}"
self.createWindow(windowName=windowName)
for i in range(len(dataset)):
if(dataset[i] is None):
continue
if(len(dataset[i]) == 0):
continue
dataset[i] = np.array(dataset[i])
if (deleteZeros):
if(len(dataset[i][0]) == 3):
indexes = np.where((dataset[i][:, 0] == 0.0) & (dataset[i][:, 1] == 0.0) & (dataset[i][:, 2] == 0.0))
else:
indexes = np.where((dataset[i][:, 0] == 0.0) & (dataset[i][:, 1] == 0.0) & (dataset[i][:, 2] == 0.0) & (dataset[i][:, 3] == 0.0))
dataset[i] = np.delete(dataset[i], indexes, axis=0)
print("Adding dataset {}/{} to visualization ".format(i+1, len(dataset)), end = '\r')
pc = o3d.geometry.PointCloud()
if(len(dataset[i][0]) == 3):
pc.points = o3d.utility.Vector3dVector(dataset[i])
else:
pc.points = o3d.utility.Vector3dVector(dataset[i][:,:3])
if(not (dataColors is None)):
if(not (dataColors[i] is None)):
if(len(dataColors[i]) == len(dataset[i]) and len(dataset[i]) != 3):
pc.colors = o3d.utility.Vector3dVector(np.asarray(dataColors[i]))
elif(len(dataColors) == len(dataset)):
pc.paint_uniform_color(np.asarray(dataColors[i]))
if(not (downSample is None) and downSample != False):
if(not isinstance(downSample, float)):
downSample = 0.02
pc = o3d.geometry.PointCloud.voxel_down_sample(pc, voxel_size=downSample)
self.vis.add_geometry(pc)
print("")
if(bBoxes is not None):
print("Adding {} bBoxes to visualization".format(len(bBoxes)), end = '\r')
for i in range(len(bBoxes)):
# print("Adding bBox {}/{} to visualization".format(i+1, len(bBoxes)), end = '\r')
color = []
if(len(boxesColors) > i and boxesColors[i] is not None):
color = boxesColors[i]
self.addBoundingBox(bBoxes[i], color)
if(not lines is None):
opt = self.vis.get_render_option()
opt.point_size = 2
for i in range(len(lines)):
print("Adding {}/{} line to visualization".format(i, len(lines)), end = '\r')
color = [1.0, 0.0, 0.0] if linesColors is None or len(linesColors)-1 < i else linesColors[i]
self.addPolyline(lines[i], color)
self.vis.register_key_callback(ord("s"), SaveRenderOptions)
self.vis.register_key_callback(ord("S"), SaveRenderOptions)
self.vis.register_key_callback(ord("l"), LoadRenderOptions)
self.vis.register_key_callback(ord("L"), LoadRenderOptions)
self.vis.register_key_callback(ord("m"), MaskTrajectoryFile)
self.vis.register_key_callback(ord("M"), MaskTrajectoryFile)
if recordTrajectory:
recorder = TrajectoryRecorder()
self.vis.register_key_callback(ord("a"), recorder.record)
self.vis.register_key_callback(ord("A"), recorder.record)
self.vis.register_key_callback(ord("r"), recorder.save)
self.vis.register_key_callback(ord("R"), recorder.save)
self.vis.register_key_callback(ord("d"), recorder.delete)
self.vis.register_key_callback(ord("D"), recorder.delete)
# paramFiles = "./data/camera.json"
# if(os.path.exists(paramFiles)):
# os.remove(paramFiles)
if not (animationFunction is None):
self.vis.register_animation_callback(animationFunction)
if(loadCameraSettings):
self.vis = LoadRenderOptions(self.vis, returnVis=True)
self.vis.run()
self.vis.destroy_window()
def DoBoxesQA(self, pointcloud = None, bBoxes = None, downSamplePC = False):
if((pointcloud is None or len(pointcloud) == 0) and self.pointCloud is None):
return
elif(pointcloud is not None and len(pointcloud) != 0):
self.setPointCloud(pointcloud, downSamplePC)
acceptedBoxes = []
def darkMode(vis):
opt = vis.get_render_option()
opt.background_color = np.asarray([0, 0, 0])
return False
def acceptBox(vis):
print("Accept")
acceptedBoxes.append(bBoxes[self.boxIndex])
vis.close()
return False
def discardBox(vis):
print("Discard")
vis.close()
return False
key_to_callback = {}
key_to_callback[ord("Y")] = acceptBox
key_to_callback[ord("N")] = discardBox
key_to_callback[ord("D")] = darkMode
self.boxIndex = 0
for box in bBoxes:
self.setDisplayedCloud(box)
self.setBoundingBox(box)
o3d.visualization.draw_geometries_with_key_callbacks([self.displayCloud, self.bBox], key_to_callback, "QA", 800, 800)
self.boxIndex += 1
print("QA done")
return acceptedBoxes
def QAResults(self, dataFolder, boxesFolder, boxesExportFolder, override = True):
pcFiles = [f for f in listdir(boxesFolder) if isfile(join(boxesFolder, f)) and f.endswith('.txt')]
for file in pcFiles:
name = file.replace('.txt', '').replace('BBOXES_', '')
boxesFile = join(boxesFolder, 'BBOXES_'+name+'.txt')
dataFile = join(dataFolder, name+'.hdf5')
newBoxPath = join(boxesExportFolder, 'BBOXES_'+name+'.txt')
if(isfile(dataFile)):
if(override == False and isfile(newBoxPath)):
print("Already done: "+dataFile)
continue
print("QA: "+dataFile)
pc = self.ReadHDF5XYZ(dataFile)
boxes = ReadBoundingBoxes(boxesFile)
newBoxes = self.DoBoxesQA(pc, boxes, True)
SaveBoundingBoxes(newBoxPath, newBoxes)
def SaveHDF5(pointcloud, labels, output_path):
if(not output_path.endswith(".hdf5")):
output_path += ".hdf5"
print("Converting: ",output_path)
t = time()
h5File = h5py.File(output_path, 'w')
h5File.create_dataset("pointcloud", data=pointcloud, dtype='float32', compression="lzf")
h5File.create_dataset("labels", data=labels, dtype='int8', compression="lzf")
h5File.close()
print("done in {}:{} min.".format(int((time() - t)/60), int((time() - t)%60)))
def ReadHDF5(path, with_labels = True):
print("Reading '{}'".format(path))
t=time()
h5File = h5py.File(path, 'r')
pointCloud = np.array(h5File["pointcloud"], dtype="float32")
if(with_labels):
labels = np.array(h5File["labels"], dtype="float32")
labels = np.expand_dims(labels, 1)
pointCloud = np.append(pointCloud, labels, axis=1)
del labels
print("Finished reading in {:.2f} min. Shape = {}".format((time() - t)/60, pointCloud.shape))
return pointCloud
def ReadXYZ(file, dataName = "pointcloud", verbose = False, readFormat=None):
if(verbose):
print("Reading pointcloud of '{}'".format(file))
t=time()
xyz = None
if(file.endswith(".hdf5")):
h5File = h5py.File(file, 'r')
pc = h5File["pointcloud"]
xyz = pc[:, :3]
h5File.close()
if(file.endswith(".npy") or readFormat == ".npy"):
pc = np.load(file)
xyz = pc[:, :3]
elif(file.endswith(".las")):
import laspy
lasFile = laspy.file.File(file, mode = "r")
xyz = np.concatenate((np.expand_dims(lasFile.x,1), np.expand_dims(lasFile.y,1), np.expand_dims(lasFile.z,1)), 1)
# xyz *= np.array(lasFile.header.scale)
lasFile.close()
elif(file.endswith(".ply")):
plydata = PlyData.read(file)
x = plydata["vertex"].data["x"].astype(np.float32)
y = plydata["vertex"].data["y"].astype(np.float32)
z = plydata["vertex"].data["z"].astype(np.float32)
xyz = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1)), axis=1)
if(verbose):
print("Finished reading pointcloud in {:.2f} min. Shape = {}".format((time() - t)/60, xyz.shape))
return xyz
def ReadRGB(file, dataName = "pointcloud", verbose = False):
t=time()
rgb = None
if(file.endswith(".hdf5")):
h5File = h5py.File(file, 'r')
pc = h5File["pointcloud"]
rgb = pc[:, 4:7]
h5File.close()
if(file.endswith(".npy")):
pts = np.load(file)
rgb = pts[:, 3:6]
elif(file.endswith(".las")):
import laspy
lasFile = laspy.file.File(file, mode = "r")
rgb = np.concatenate((np.expand_dims(lasFile.Red,1), np.expand_dims(lasFile.Green,1), np.expand_dims(lasFile.Blue,1)), 1)
rgb = rgb/65536 #[0,1]
lasFile.close()
print("Finished reading RGB values in {:.2f} min. Shape = {}".format((time() - t)/60, rgb.shape))
return rgb
def PointsInBlock(pts, pt, blocksize):
if(not isinstance(blocksize,list) and not isinstance(blocksize, np.ndarray)):
blocksize = [blocksize, blocksize]
mask_x = np.logical_and(pts[:,0]<pt[0]+blocksize[0]/2, pts[:,0]>pt[0]-blocksize[0]/2)
mask_y = np.logical_and(pts[:,1]<pt[1]+blocksize[1]/2, pts[:,1]>pt[1]-blocksize[1]/2)
pts = pts[np.where(mask_x & mask_y)[0]]
if(len(blocksize) == 3):
mask_z = np.logical_and(pts[:,2]<pt[2]+blocksize[2]/2, pts[:,2]>pt[2]-blocksize[2]/2)
return pts[np.where(mask_z)[0]]
else:
return pts
def PointsInRange(pts, pt, pointcloud_range, filter_height = True):
"""
pointcloud_range = [minX, minY, minZ, maxX, maxY, maxZ]
X and Y extents are relative to pt; Z values are just clipped
minX, minY, minZ - negative values
"""
if(len(pointcloud_range) == 6):
minX, minY, minZ, maxX, maxY, maxZ = pointcloud_range
else:
range_X, range_Y, range_Z, = pointcloud_range
minX = -range_X/2
minY = -range_Y/2
minZ = -range_Z/2
maxX = range_X/2
maxY = range_Y/2
maxZ = range_Z/2
mask_x = np.logical_and(pts[:,0]<pt[0]+maxX, pts[:,0]>pt[0]+minX)
mask_y = np.logical_and(pts[:,1]<pt[1]+maxY, pts[:,1]>pt[1]+minY)
pts = pts[np.where(mask_x & mask_y)[0]]
if(len(pts) == 0):
return pts
if(filter_height):
# mask_z = np.logical_and(pts[:,2]<pt[2]+maxZ, pts[:,2]>pt[2]+minZ)
mask_z = pts[:,2]<(min(pts[:,2])+(maxZ-minZ))
pts = pts[np.where(mask_z)[0]]
return pts
def crop_lines(chunk_center, chunk_size, lines, min_z, max_z):
min_x = chunk_center[0]-chunk_size[0]/2
max_x = chunk_center[0]+chunk_size[0]/2
min_y = chunk_center[1]-chunk_size[1]/2
max_y = chunk_center[1]+chunk_size[1]/2
coords = (
(min_x, min_y, max_z),
(min_x, max_y, max_z),
(max_x, max_y, max_z),
(max_x, min_y, max_z),
(min_x, min_y, min_z),
(min_x, max_y, min_z),
(max_x, max_y, min_z),
(max_x, min_y, min_z),
)
polygon = Polygon(coords).convex_hull
# multi_line = MultiLineString([ops.clip_by_rect(LineString(line) for line in lines])
# cropped_lines = np.array(ops.clip_by_rect(multi_line, min_x, min_y, max_x, max_y).coords)
# cropped_lines = [np.array(line.coords) for line in multi_line.geoms]
cropped_lines = []
for line in lines:
# new_line = ops.clip_by_rect(LineString(line), min_x, min_y, max_x, max_y)
linestring = line if isinstance(line, LineString) else LineString(line)
new_line = polygon.intersection(linestring)
if(isinstance(new_line, MultiLineString)):
split_lines = [np.array(geom.coords) for geom in new_line.geoms]
cropped_lines.extend(split_lines)
elif(isinstance(new_line, GeometryCollection)):
for geom in new_line.geoms:
cropped_lines.append(np.array(geom.coords))
else:
cropped_lines.append(np.array(new_line.coords))
# DataTool().VisualizePointCloudAsync([xyz], lines = cropped_lines)
# DataTool().VisualizePointCloudAsync([xyz], lines = lines)
# return [line[np.logical_not(np.isnan(line).any(1))] for line in cropped_lines if len(line)>0]
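# Keep only the interior vertices of every cropped line; the first and last point
# (typically introduced on the crop boundary) are trimmed off.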
return [line[1:-1] for line in cropped_lines if len(line)>0]
def SelectPointsFromBlock(pts, pt, blocksize, npoints = None):
selectedPts = PointsInBlock(pts, pt, blocksize)
if(len(selectedPts) == 0):
return np.zeros((0, 3))
if(npoints is None):
return selectedPts
else:
return selectedPts[np.random.choice(len(selectedPts), npoints, replace=True)]
def TemplatesInBlock(templates, pt, blocksize):
if(not isinstance(blocksize,list) and not isinstance(blocksize, np.ndarray)):
blocksize = [blocksize, blocksize]
mask_x = np.logical_and(templates[:,:,0]<pt[0]+blocksize[0]/2, templates[:,:,0]>pt[0]-blocksize[0]/2)
mask_y = np.logical_and(templates[:,:,1]<pt[1]+blocksize[1]/2, templates[:,:,1]>pt[1]-blocksize[1]/2)
mask = (mask_x & mask_y).any(axis=1)
templates = templates[np.where(mask)[0]]
if(len(blocksize) == 3):
mask_z = np.logical_and(templates[:,:,2]<pt[2]+blocksize[2]/2, templates[:,:,2]>pt[2]-blocksize[2]/2)
mask_z = mask_z.any(axis=1)
return templates[np.where(mask_z)[0]]
else:
return templates
def SelectTemplatesFromBlock(templates, pt, blocksize):
return TemplatesInBlock(templates, pt, blocksize)
def ReadPoints(fileList, curbLabel):
ptsList = []
curbPtsList = []
for file in fileList:
pts = np.load(os.path.join(Paths.Curbs.forDelineation, (file if file.endswith(".npy") else file+".npy")))
curbPtsIdx = np.where(pts[:,3] == curbLabel)
otherPts = np.where(pts[:,3] == 2)
curbPtsList.append(pts[curbPtsIdx][:,:3])
ptsList.append(pts[otherPts][:,:3])
return ptsList, curbPtsList
def ReadLinesPoint(fileList):
linePtsList = []
for file in fileList:
file = os.path.basename(file)
if not file.endswith(".npy"):
file += ".npy"
file = os.path.join(Paths.Curbs.denseLines, file)
linePtsList.append(np.load(file))
return linePtsList
def ReadXYZRGB(file):
xyz = ReadXYZ(file)
rgb = ReadRGB(file)
return xyz.astype(np.float32), rgb.astype(np.float32)
def ReadPolyLines(file):
dwg = ezdxf.readfile(file, errors = "ignore")
lines = []
for entity in dwg.entities:
# print(entity.dxftype())
points = []
if(entity.dxftype() == 'LINE'):
points = [entity.dxf.start.xyz, entity.dxf.end.xyz]
else:
for i, point in enumerate(entity.points()):
points.append(point.xyz)
# print(f"{i} : {x}, {y}, {z}")
lines.append(np.array(points))
return lines
def ReadLine(points_file, line_file):
xyz = ReadXYZ(points_file)
# rgb = ReadRGB(points_file)
line = np.load(line_file)
return xyz, None, line
def GroupTiles(xyz, centers, tile_size):
tiles = []
for pt in centers:
pts = PointsInBlock(xyz, pt, tile_size)
if(len(pts) > 0):
tiles.append(pts)
return tiles
def FillTiles(tiles, numberOfPoints):
return [pts[np.random.randint(0, len(pts), numberOfPoints)] for pts in tiles]
def CalculateTileBoxes(minZ, maxZ, centers, tile_size):
bboxes = [BoundingBoxFromVoxel([pt[0], pt[1], 0], tile_size, minZ, maxZ) for pt in centers]
return bboxes
def CalculateTileCenters(xyz, line, step):
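# Snap the combined cloud and line XY coordinates to a grid of spacing `step`
# and return one centre per occupied grid cell.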
points = np.concatenate([xyz, line], axis=0)[:,:2]
return np.unique(np.round(points[:,:2] / step) * step, axis=0)
def ReadCurbData(line_file, shift = True):
cloud_file = os.path.join(Paths.Curbs.forDelineation, os.path.basename(line_file))
if(not os.path.exists(cloud_file) or not os.path.exists(line_file)):
return None, None, None
xyz, _, line = ReadLine(cloud_file, line_file)
if(shift):
minXYZ = np.concatenate([xyz, line],axis=-2).min(axis=0)
xyz -= minXYZ
line -= minXYZ
# Prepare point cloud tiles
# tiles, centers = SplitCurbIntoTiles(xyz, line, tile_size)
# if(visualize):
# bboxes = CalculateTileBoxes(tiles, centers, tile_size)
# DataTool().VisualizePointCloudAsync(tiles, lines=[line], bBoxes=bboxes)
# org_pts_count = sum([len(tile) for tile in tiles])
# tiles = [tile[np.random.randint(len(tile), size=points_in_tile)] for tile in tiles]
# sampled_pts_count = sum([len(tile) for tile in tiles])
# Prepare curb line
# if(visualize):
# DataTool(pointSize=20).VisualizePointCloudAsync([line], dataColors=[[0,0,1]], lines=[line], bBoxes=bboxes)
# distances = []
# for i in range(len(line)-1):
# distances.append(Distance(line[i], line[i+1]))
# print(f"avg distance between points: {np.mean(distances)}")
return xyz, np.array(line)
def ReadLines(file_name):
files = [os.path.splitext(file)[0] for file in os.listdir(Paths.Curbs.forDelineation) if file.startswith(file_name) and os.path.splitext(file)[0][-1].isdigit()]
points = []
# colors = []
lines = []
for file in files:
pts_file = os.path.join(Paths.Curbs.forDelineation, file+".npy")
line_file = os.path.join(Paths.Curbs.denseLines, file+".npy")
xyz, _, line = ReadLine(pts_file, line_file)
points.append(xyz)
lines.append(line)
DataTool().VisualizePointCloudAsync(points, [np.random.uniform(0,1,(3,)) for _ in range(len(points))], lines=lines)
def NormalizeVector(vec):
vec = np.array(vec)
length = sqrt(np.sum(vec**2))
return vec / length
def Distance2D(a, b):
return Distance(a[:2], b[:2])
def Distance(a, b):
return np.sqrt(np.sum((a - b)**2, axis=-1))
def DensifyLines(lines, maxGap):
newLines = []
for points in lines:
newPoints = [points[0]]
for i in range(len(points)-1):
a = points[i]
b = points[i+1]
distance = Distance(a, b)
addPoints = []
if(distance > maxGap):
dirVec = NormalizeVector(b - a)
addPoints = [a + (dirVec * dist) for dist in np.linspace(0, distance, num=floor(distance / maxGap)+2, dtype=np.float64)[1:-1]]
addPoints.append(b)
newPoints = newPoints + addPoints
newLines.append(np.array(newPoints))
return newLines
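# Hedged usage sketch: DensifyLines([np.array([[0., 0., 0.], [1., 0., 0.]])], 0.25)
# inserts four intermediate points (spacing 0.2) so that no gap exceeds maxGap.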
def ReadTemplates(fileList, nodes:int, densify = True):
templateList = []
separateCurbs = []
for file in fileList:
name = os.path.splitext(os.path.basename(file))[0]
curbs = []
curbIdx = 0
while True:
fileName = os.path.join(Paths.Curbs.denseLines, f"{name}_{curbIdx}.npy")
if(os.path.exists(fileName)):
curbs.append(np.load(fileName))
curbIdx += 1
else:
break
assert(len(curbs) > 0)
templates = np.zeros((0, nodes, 3))
for curb in curbs:
curb_templates = []
for i in range(len(curb[0])):
template = [curb[y][i] for y in range(nodes)]
curb_templates.append(template)
if(densify):
curb_templates = DensifyTemplates(curb_templates)
templates = np.concatenate([templates, np.array(curb_templates)], axis=0)
separateCurbs.append(curb_templates)
templateList.append(templates)
return templateList, separateCurbs
def DensifyTemplates(templates, maxGap = 0.01):
newTemplates = []
for i in range(len(templates)-1):
ta = templates[i]
tb = templates[i+1]
lines = [[ta[i], tb[i]] for i in range(len(ta))]
lines = DensifyLines(lines, maxGap)
# DataTool().VisualizePointCloudAsync(lines, windowName="a")
minLen = np.min([len(line) for line in lines])
lines = [line[np.linspace(0, len(line)-1, minLen, dtype=int)] for line in lines]
# DataTool().VisualizePointCloudAsync(lines, windowName="b")
for i in range(len(lines[0])):
template = [lines[y][i] for y in range(len(lines))]
newTemplates.append(template)
# DataTool().VisualizePointCloudAsync(lines = newTemplates, windowName="c")
return newTemplates
def ReadCurbs(file, selectOnly = None):
nodes = ["node0", "node1", "node2", "node3"]
if(not (selectOnly is None)):
nodes = [selectOnly]
allLines = []
for node in nodes:
dwg = ezdxf.readfile(os.path.join(Paths.Curbs.lines, file+"_"+node+".dxf"))
lines = []
for entity in dwg.entities:
points = []
if(entity.dxftype() == 'LINE'):
points = [entity.dxf.start.xyz, entity.dxf.end.xyz]
else:
for point in entity.points():
points.append(point.xyz)
lines.append(np.array(points))
allLines.append(lines)
curbs = []
for i in range(len(allLines[0])):
curb = np.array([lines[i] for lines in allLines])
curbs.append(curb)
return curbs
def ConstructCurbPatterns(curbs):
patterns = []
for curb in curbs:
for i in range(curb.shape[1]):
pattern = [curb[y][i] for y in range(curb.shape[0])]
patterns.append(np.array(pattern))
return patterns
def ConstructCurbLines(curbs):
lines = []
for curb in curbs:
for points in curb:
lines.append(points)
return lines
def ConstructLinesOutOfTemplates(templates):
lines = np.zeros((templates.shape[1], templates.shape[0], templates.shape[2]))
for i in range(len(lines)):
lines[i] = templates[:, i]
return lines
def WritePolyLines(lines, file):
dwg = ezdxf.new()
msp = dwg.modelspace()
for line in lines:
msp.add_lwpolyline(line)
dwg.saveas(file)
return lines
def NormalizeVector(vec):
vec = np.array(vec)
length = sqrt(np.sum(vec**2))
return vec / length
def sign(points, p2, p3):
return (points[:, 0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (points[:, 1] - p3[1])
def PointInTriangle(points, v1, v2, v3):
b1 = sign(points, v1, v2) < 0
b2 = sign(points, v2, v3) < 0
b3 = sign(points, v3, v1) < 0
return (b1 == b2) & (b2 == b3)
def PointsInRectangle(points, A, B, C, D):
a = PointInTriangle(points, A, B, C)
b = PointInTriangle(points, A, D, C)
return np.where(a | b)[0]
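# Note: the definition below overrides the four-corner variant above; callers in this
# module pass (points, start, end, edge) and let CalculateBox derive the corners.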
def PointsInRectangle(points, start, end, edge):
A, B, C, D = CalculateBox(start, end, edge/2)
a = PointInTriangle(points, A, B, C)
b = PointInTriangle(points, A, D, C)
return np.where(a | b)[0]
def CalculateBox(start, end, edge):
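# Build the four rectangle corners around the segment start->end by offsetting both
# endpoints perpendicular to the segment direction (cross products with +/-z) by
# `edge` (callers pass the half-width).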
dir = NormalizeVector(end - start)
right = np.cross(dir, (0,0,1))
left = np.cross(dir, (0,0,-1))
A = end + left * edge
B = end + right * edge
C = start + right * edge
D = start + left * edge
return A, B, C, D
def LabelWirePoints(points, lines, maxDistance = 2):
labels = np.zeros((len(points),), np.uint8)
goodLbl = []
for line in tqdm(lines):
start = line[0]
end = line[-1]
boxPtsIdx = PointsInRectangle(points, start, end, 5)
boxPts = points[boxPtsIdx]
for i in range(len(line)-1):
a = line[i]
b = line[i+1]
segmentPtsIdx = PointsInRectangle(boxPts, a, b, maxDistance)
segmentPts = boxPts[segmentPtsIdx]
segmentGoodPtsIdx = np.where((segmentPts[:, 2] >= (np.min([a[2], b[2]])-maxDistance/2)) & (segmentPts[:, 2] <= (np.max([a[2], b[2]])+maxDistance/2)))
labels[boxPtsIdx[segmentPtsIdx[segmentGoodPtsIdx]]] = 1
goodLbl += list(boxPtsIdx[segmentPtsIdx[segmentGoodPtsIdx]])
return labels
def CutPointsAroundLine(points, line, width, height):
a = np.logical_and(points[:, 0] >= np.min(line[:,0])-width, points[:, 0] <= np.max(line[:,0])+width)
b = np.logical_and(points[:, 1] >= np.min(line[:,1])-width, points[:, 1] <= np.max(line[:,1])+width)
c = np.logical_and(points[:, 2] >= np.min(line[:,2])-height, points[:, 2] <= np.max(line[:,2])+height)
idx = np.where(np.logical_and(np.logical_and(a,b), c))[0]
pts = points[idx]
return pts, idx
def LabelCurbPoints(labels, points, lines, width = 0.5, height = 0.7, class_number = 1, margin = 0):
if(isinstance(labels, np.ndarray)):
labels = [labels]
assert(isinstance(labels, list)) # multiple numpy arrays of labels with instance numbers
for id, wholeLine in tqdm(enumerate(lines)):
id += 1
tempIdx = list(range(len(wholeLine)))
size = 20
step = size-1 # one point overlap
for lineIdx in [tempIdx[i : i + size] for i in range(0, len(tempIdx), step)]:
line = wholeLine[lineIdx]
pts, idx = CutPointsAroundLine(points, line, width, height)
# for i in tqdm(range(len(line)-1)):
for i in range(len(line)-1):
a = line[i]
b = line[i+1]
if(margin > 0):
vec = NormalizeVector(a - b)*margin
a -= vec
b += vec
segmentPtsIdx = PointsInRectangle(pts, a, b, width)
segmentPts = pts[segmentPtsIdx]
# segmentPtsIdx = PointsInRectangle(points, a, b, width)
# segmentPts = points[segmentPtsIdx]
segmentGoodPtsIdx = np.where((segmentPts[:, 2] >= (np.min([a[2], b[2]])-height/2)) & (segmentPts[:, 2] <= (np.max([a[2], b[2]])+height/2)))
for i in range(len(labels)):
labels[i][idx[segmentPtsIdx[segmentGoodPtsIdx]]] = [class_number, id]
# for pt in tqdm(line):
for pt in line:
dist = np.sqrt(np.sum(np.power(pts - pt, 2), axis=1))
segmentGoodPtsIdx = np.where(dist <= width/2)[0]
for i in range(len(labels)):
labels[i][idx[segmentGoodPtsIdx]] = [class_number, id]
if(len(labels) == 1):
labels = labels[0]
return labels
# import scipy.spatial as spatial
from sklearn.neighbors import KDTree
def BuildPointTree(points):
print("Building KDTree...")
t = time()
tree = KDTree(points, leaf_size=10)
print("Done in {}:{} min.".format(int((time() - t)/60), int((time() - t)%60)))
return tree
def LabelNearestPoints(labels, tree, lines, radius, class_number = 1):
print("Quering KDTree...")
for pt in tqdm(lines):
idx = tree.query_radius(pt[:3].reshape(1, -1), r=radius)[0]
if(isinstance(labels, list)):
for i in range(len(labels)):
labels[i][idx] = class_number
else:
labels[idx] = class_number
return labels
def LabelPointsInDistance(labels, src_pts, lines, radius_lbl):
src_pts = src_pts[:,:3]
for pt in tqdm(lines):
dist = np.sqrt(np.sum(np.power(src_pts - pt, 2), axis=1))
for radius, lbl in radius_lbl:
idx = np.where(dist <= radius)[0]
if(isinstance(labels, list)):
for i in range(len(labels)):
labels[i][idx] = lbl
else:
labels[idx] = lbl
return labels
def CenterOfPoints(pts):
return (np.max(pts, axis=0) + np.min(pts, axis=0))/2
# return np.min(pts, axis=0)
def NormalizePoints(pts, centerPoint):
if(isinstance(pts, list)):
return [points - centerPoint for points in pts]
else:
return pts - centerPoint
def ReadLabels(file, verbose = False, readFormat=None):
if(verbose):
print("Reading labels of '{}'".format(file))
t=time()
lbl = None
if(file.endswith(".hdf5") or readFormat == ".hdf5"):
h5File = h5py.File(file, 'r')
lbl = np.array(h5File["labels"])
h5File.close()
elif(file.endswith(".las") or readFormat == ".las"):
import laspy
lasFile = laspy.file.File(file, mode = "r")
lbl = lasFile.Classification
lasFile.close()
elif(file.endswith(".labels") or file.endswith(".txt") or readFormat == ".txt" or readFormat == ".labels"):
lbl = np.array(pd.read_csv(file, dtype=np.int8, header=None))
elif(file.endswith(".ply") or readFormat == ".ply"):
plydata = PlyData.read(file)
lbl = plydata["vertex"].data["class"].astype(np.float32)
elif(file.endswith(".npy") or readFormat == ".npy"):
pc = np.load(file)
if(pc.shape[1] == 7):
lbl = pc[:, 6]
if(pc.shape[1] == 5):
lbl = pc[:, 4]
if(pc.shape[1] == 4):
lbl = pc[:, 3]
lbl = np.expand_dims(lbl, 1)
if(len(lbl.shape) == 1):
lbl = np.expand_dims(lbl, 1)
print("Finished reading labels in {:.2f} min. Shape = {}".format((time() - t)/60, lbl.shape))
return lbl
def ReadXYZL(file, lblFile = None, verbose = False):
if(verbose):
printline("Reading: '{}'".format(os.path.basename(file)))
t=time()
xyz = None
lbl = None
if(file.endswith(".hdf5")):
h5File = h5py.File(file, 'r')
pc = h5File["pointcloud"]
xyz = pc[:, :3]
if(lblFile is None):
lbl = h5File["labels"]
h5File.close()
elif(file.endswith(".las")):
import laspy
lasFile = laspy.file.File(file, mode = "r")
xyz = np.concatenate((np.expand_dims(lasFile.x,1), np.expand_dims(lasFile.y,1), np.expand_dims(lasFile.z,1)), 1)
if(lblFile is None):
lbl = lasFile.Classification
lasFile.close()
elif(file.endswith(".ply")):
plydata = PlyData.read(file)
x = plydata["vertex"].data["x"].astype(np.float32)
y = plydata["vertex"].data["y"].astype(np.float32)
z = plydata["vertex"].data["z"].astype(np.float32)
xyz = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1)), axis=1)
lbl = plydata["vertex"].data["class"].astype(np.float32)
if(not (lblFile is None) and lblFile.endswith(".labels")):
lbl = ReadLabels(lblFile)
if(len(lbl.shape) == 1):
lbl = np.expand_dims(lbl, 1)
xyzl = np.concatenate((xyz, lbl), 1)
printline("Finished in {:.2f} min. Shape = {}".format((time() - t)/60, xyzl.shape))
return xyzl
def ReadHDF5Boxes(path):
h5File = h5py.File(path, 'r')
boxesPos = np.array(h5File["boxes"])
boundindBoxes = []
for vox in boxesPos:
boundindBoxes.append(BoundingBoxFromVoxel(Point(vox[0], vox[1], vox[2]), Const.voxelSize))
return boundindBoxes
class DataReader:
threads = []
dataset = []
def ReadFiles(self, files, pointsDataSet = "points", silent=True, positionData = False):
if(type(files) is not list):
files = [files]
points = []
labels = []
position = []
t=time()
count = 0
for f in files:
count+=1
h5File = h5py.File(f, 'r')
tempLabels = np.asarray(h5File["labels"], dtype="int8")
if(tempLabels.shape[1] == 1):
tempLabels = np.eye(Const.numOfCategories, dtype="int8")[tempLabels]
tempLabels = np.squeeze(tempLabels, axis=1)
if(len(points) == 0):
points = np.asarray(h5File[pointsDataSet], dtype="float32")
labels = tempLabels
if(positionData):
position = np.asarray(h5File["position"], dtype="float32")
else:
points = np.concatenate((points, np.asarray(h5File[pointsDataSet], dtype="float32")))
labels = np.concatenate((labels, tempLabels))
if(positionData):
position = np.concatenate((position, np.asarray(h5File["position"], dtype="float32")))
if(not silent):
print("Read file {}/{}. Voxels got: {}.".format(count, len(files), len(points)))
if(not silent):
elapsed = round(time() - t)
print("{} dataset read in {:.0f} min {:.0f} sec".format(len(files), (elapsed - (elapsed % 60))/60, elapsed % 60))
if(positionData):
return points, position, labels
else:
return points, labels
class Point:
def __init__(self, x, y, z, label = -1):
self.x = x
self.y = y
self.z = z
self.label = label
@staticmethod
def from_XYZL(XYZL):
return Point(XYZL[0], XYZL[1], XYZL[2], XYZL[3])
@staticmethod
def from_XYZ(XYZ):
return Point(XYZ[0], XYZ[1], XYZ[2])
def GetPointsInBoundingBox(points, boundingBox):
if(len(boundingBox) != 6):
return None
rows = GetPointsIndexInBoundingBox(points, boundingBox)
return points[rows]
def CountPointsInBox(points, boundingBox):
if(len(boundingBox) != 6):
return None
indices = GetPointsIndexInBoundingBox(points, boundingBox)
return len(indices[0])
def GetPointsIndexInBoundingBox(points, boundingBox):
if(len(boundingBox) != 6):
return None
return np.where((points[:,0] >= boundingBox[0]) & (points[:,0] <= boundingBox[1]) &
(points[:,1] >= boundingBox[2]) & (points[:,1] <= boundingBox[3]) &
(points[:,2] >= boundingBox[4]) & (points[:,2] <= boundingBox[5]))
def BoundingBoxFromVoxel(vxlCntr, vxlEdge, minZ = 0, maxZ = 0):
if(not(type(vxlCntr) is Point)):
vxlCntr = Point(vxlCntr[0], vxlCntr[1], vxlCntr[2])
if type(vxlEdge) is int or type(vxlEdge) is float or type(vxlEdge) is np.float64:
subEdgeX = vxlEdge/2
subEdgeY = vxlEdge/2
subEdgeZ = vxlEdge/2
elif(len(vxlEdge) == 3):
subEdgeX = vxlEdge[0]/2
subEdgeY = vxlEdge[1]/2
subEdgeZ = vxlEdge[2]/2
minX = vxlCntr.x - subEdgeX
maxX = vxlCntr.x + subEdgeX
minY = vxlCntr.y - subEdgeY
maxY = vxlCntr.y + subEdgeY
if(minZ == 0):
minZ = vxlCntr.z - subEdgeZ
if(maxZ == 0):
maxZ = vxlCntr.z + subEdgeZ
return [minX, maxX, minY, maxY, minZ, maxZ]
def GetGlobalBoundingBox(points, discardZeros = False):
if(discardZeros):
points = np.array(points)
indexes = np.where((points == [0, 0, 0]).all(axis=1))[0]
points = np.delete(points, indexes, axis=0)
mins = np.amin(points, axis = 0)
maxs = np.amax(points, axis = 0)
return [mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]]
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def LinearGradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
# Initialize a list of the output colors with the starting color
RGB_list = [s]
# Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [
int(s[j] + (float(t)/(n-1))*(f[j]-s[j]))
for j in range(3)
]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return RGB_list
def SaveBoundingBoxes(file_path, bBoxes):
file = open(file_path,"w")
for box in bBoxes:
file.write(str(box[0])+" "+str(box[1])+" "+str(box[2])+" "+str(box[3])+" "+str(box[4])+" "+str(box[5])+"\n")
file.close()
def SaveVoxels(file_path, voxels):
file = open(file_path,"w")
for vox in voxels:
file.write(str(vox[0])+" "+str(vox[1])+" "+str(vox[2])+" "+str(vox[3])+" "+str(vox[4])+" "+str(vox[5])+" "+str(vox[6])+"\n")
file.close()
def ReadBoundingBoxes(file_path):
file = open(file_path,"r")
boundingBoxes = []
for line in file:
fl = line.split()
floats = []
for l in fl:
floats.append(float(l))
boundingBoxes.append(floats)
file.close()
return boundingBoxes
def DownsampleAndAddclass(points, classNum, voxelSize = -1):
if(voxelSize != -1):
pointCloud = o3d.geometry.PointCloud()
pointCloud.points = o3d.utility.Vector3dVector(points)
pointCloud = o3d.geometry.voxel_down_sample(pointCloud, voxel_size=voxelSize)
points = np.asarray(pointCloud.points)
labels = np.full((len(points), 1), classNum)
points = np.append(points, labels, axis = 1)
return points
def PrepPointCloud(dataIN, objectLabel, noObjectLabel, downSampleVoxel = -1, verbose = False):
dataTool = DataTool()
print("Reading: {}".format(dataIN))
worldPoints = ReadXYZ(dataIN)
pointLabels = ReadLabels(dataIN)
indexes = np.nonzero(pointLabels == Label.cars)
carPoints = worldPoints[indexes]
worldPoints = np.delete(worldPoints, indexes, axis=0)
carPoints = DownsampleAndAddclass(carPoints, objectLabel, downSampleVoxel)
worldPoints = DownsampleAndAddclass(worldPoints, noObjectLabel, downSampleVoxel)
pointCloud = np.concatenate((carPoints, worldPoints))
#pointCloud = carPoints
if(verbose):
print("Points left: {}".format(len(pointCloud)))
return pointCloud
def FilterSpecClassVoxels(voxels, classLabel, noClassLabel, minPointCount = 0):
accepted = []
rejected = []
avgPointsCountInVoxel = 0
avgClassPointCount = 0
for vox in voxels:
points = np.array(vox)
indexes = np.where(points[:,3] == classLabel)[0]
avgPointsCountInVoxel += len(vox)
avgClassPointCount += len(indexes)
if(len(indexes) >= minPointCount):
accepted.append(vox)
else:
rejected.append(vox)
return np.array(accepted), np.array(rejected)
def GetBoundingBoxesOfPoint(voxels, verbose = False, discardZeros = True):
boxes = []
maxx = 0
maxy = 0
maxz = 0
for vox in voxels:
box = GetGlobalBoundingBox(vox, discardZeros)
boxes.append(box)
if(verbose):
maxx = max(maxx, box[1]-box[0])
maxy = max(maxy, box[3]-box[2])
maxz = max(maxz, box[5]-box[4])
if(verbose):
print("max x len: {}, max y len: {}, max z len: {}".format(maxx, maxy, maxz))
return boxes
def AddNoise(batch_data):
return batch_data.shape
def GetPointsAndLabels(voxels, numOfPointInSample):
points = []
labels = []
for vox in voxels:
if(len(vox) < numOfPointInSample):
zeros = np.zeros((numOfPointInSample-len(vox), 4))
vox = np.concatenate((vox, zeros))
#shuffle
indexes = np.random.choice(len(vox), numOfPointInSample, replace = False)
vox = vox[indexes]
points.append(vox[..., 0:3])
labels.append(vox[..., 3])
return np.array(points), np.array(labels)
def GetPointsAndAns(voxels, numOfPointInSample, classLabel, minCountOfTrue = 100):
points = []
labels = np.zeros(shape=(len(voxels), 2), dtype = np.float32)
avgPointsInVoxel = 0
acceptedCarVoxels = 0
avgCarPointsInVoxels = 0
i = 0
for vox in voxels:
avgPointsInVoxel += len(vox)
res = np.where(vox[..., 3] == classLabel)
if(len(res[0]) >= minCountOfTrue):
labels[i] = [1.0, 0.0]
avgCarPointsInVoxels += len(res[0])
acceptedCarVoxels += 1
else:
labels[i] = [0.0, 1.0]
if(len(vox) < numOfPointInSample):
zeros = np.zeros((numOfPointInSample-len(vox), 4))
vox = np.concatenate((vox, zeros))
#shuffle
indexes = np.random.choice(len(vox), numOfPointInSample, replace = False)
points.append(vox[indexes, 0:3])
i+=1
avgPointsInVoxel /= len(voxels)
avgCarPointsInVoxels /= acceptedCarVoxels
acceptedCarVoxels /= len(voxels)
return np.array(points), labels, avgPointsInVoxel, acceptedCarVoxels, avgCarPointsInVoxels
def CreateCheckPointFile():
open(Paths.checkPointFilePath, "w")
def CreatePausePointFile():
open(Paths.pausePointFilePath, "w")
def DeleteCheckPointFile():
if(IsCheckPointFileExists()):
remove(Paths.checkPointFilePath)
def DeletePausePointFile():
remove(Paths.pausePointFilePath)
def IsCheckPointFileExists():
return isfile(Paths.checkPointFilePath)
def IsPausePointFileExists():
if(not isfile(Paths.pausePointFilePath)):
print("Press enter to continue")
input()
CreatePausePointFile()
def modelPath():
if(not exists(Paths.dataPath)):
return None
pcFiles = [Paths.dataPath+"/"+f for f in listdir(Paths.dataPath) if isfile(join(Paths.dataPath, f)) and f.startswith('model')]
if(len(pcFiles) == 0):
return None
assert len(pcFiles) == 1, "More than one model in data folder"
return pcFiles[0]
def FireBaseStuff():
# def writeMessageToFirebase():
# from firebase import Firebase
# config = {
# "apiKey": "<KEY>",
# "authDomain": "online-app-600.firebaseapp.com",
# "databaseURL": "https://online-app-600.firebaseio.com",
# "storageBucket": "online-app-600.appspot.com"
# }
# firebase = Firebase(config)
# auth = firebase.auth()
# #auth.create_user_with_email_and_password("<EMAIL>", "<PASSWORD>")
# #user = auth.sign_in_with_email_and_password("<EMAIL>", "<PASSWORD>")
# user = auth.sign_in_with_email_and_password("<EMAIL>", "<PASSWORD>")
# print(auth.get_account_info(user['idToken']))
# #auth.send_email_verification(user['idToken'])
# db = firebase.database()
# data = db.child("duomenys").get()
# #storage = firebase.storage()
# #data = storage.child("images/google-services.json").get_url(None)
# print(data.val())
# def readMessagesFromFirestore():
# users_ref = firestoreDB.collection('duomenys')
# docs = users_ref.stream()
# for doc in docs:
# data = doc.to_dict()
# print('{} => {}'.format(data["name"], data["text"]))
# def writeMessageToFirestore():
# newDocKey = firestoreDB.collection('duomenys').document();
# doc_ref = firestoreDB.collection('duomenys').document(newDocKey.id)
# doc_ref.set({
# 'name': 'Jonas',
# 'test': 'works from python',
# })
return 0
def PostMessage(dictData, training):
message = ""
for key, value in dictData.items():
if(type(value) is float):
message += "{}:{:.3f}. ".format(str(key), value)
else:
message += "{}:{}. ".format(str(key), value)
print(message)
if(training):
file = open(Paths.trainLogPath, "a")
#newDocRef = firestoreDB.collection(FireStroreCollection.train).document()
#col_ref = firestoreDB.collection(FireStroreCollection.train).document(newDocRef.id)
else:
file = open(Paths.dataProcPath, "a")
#newDocRef = firestoreDB.collection(FireStroreCollection.dataProc).document()
#col_ref = firestoreDB.collection(FireStroreCollection.dataProc).document(newDocRef.id)
file.write(message+"\n")
try:
notifyDevice.send(message)
#col_ref.set(dictData)
except:
print("Online message error")
def UpSampleBatchSize(points, labels, numOfPoints):
points = np.array(points)
labels = np.array(labels)
newPoints = []
newLabels = []
for batch, batchLabels in zip(points, labels):
# pad both the points and their labels with zeros before shuffling
padCount = numOfPoints - len(batch)
batch = np.concatenate((batch, np.zeros((padCount, len(batch[0])))))
batchLabels = np.concatenate((batchLabels, np.zeros((padCount, len(batchLabels[0])))))
indexes = np.random.choice(numOfPoints, numOfPoints, replace = False)
newPoints.append(batch[indexes])
newLabels.append(batchLabels[indexes])
return np.array(newPoints), np.array(newLabels)
class memoryCheck():
"""Checks memory of a given system"""
def __init__(self):
if os.name == "posix":
self.value = self.linuxRam()
elif os.name == "nt":
self.value = self.windowsRam()
def windowsRam(self):
"""Uses Windows API to check RAM in this OS"""
kernel32 = ctypes.windll.kernel32
c_ulong = ctypes.c_ulong
class MEMORYSTATUS(ctypes.Structure):
_fields_ = [
("dwLength", c_ulong),
("dwMemoryLoad", c_ulong),
("dwTotalPhys", c_ulong),
("dwAvailPhys", c_ulong),
("dwTotalPageFile", c_ulong),
("dwAvailPageFile", c_ulong),
("dwTotalVirtual", c_ulong),
("dwAvailVirtual", c_ulong)
]
memoryStatus = MEMORYSTATUS()
memoryStatus.dwLength = ctypes.sizeof(MEMORYSTATUS)
kernel32.GlobalMemoryStatus(ctypes.byref(memoryStatus))
return int(memoryStatus.dwTotalPhys/1024**2)
def linuxRam(self):
"""Returns the RAM of a linux system"""
totalMemory = os.popen("free -m").readlines()[1].split()[1]
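# The second line of `free -m` is the 'Mem:' row; column 1 is the total memory in MiB.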
return int(totalMemory)
def GetLabelPoint(points, label):
indexes = np.where(points[:,3] == label)
return points[indexes], indexes
def VisualizeDataset(dataPath, markEachClass = False, pointsDataSet = "points", hasBoxesData = False):
print("Visualize Dataset")
points = []
downSample = False
dataTool = DataTool()
if(pointsDataSet == "points"):
h5File = h5py.File(dataPath, 'r')
pointcloud = np.array(np.asarray(h5File[pointsDataSet]))
pointsCount = len(pointcloud)*len(pointcloud[0])
pointcloud = pointcloud.reshape(pointsCount, pointcloud.shape[2])
if(markEachClass):
labels = np.array(np.asarray(h5File["labels"], dtype="int8"))
if(len(labels[0,0]) == 9):
labels = labels.reshape(pointsCount, 9)
labels = np.argmax(labels, axis=1)
else:
labels = labels.reshape(pointsCount, 1)
labels = np.expand_dims(labels, 1)
pointcloud = np.append(pointcloud, labels, axis=1)
indexes = np.where((pointcloud[:, :3] == [0., 0., 0.]).all(axis=1))[0]
points = np.delete(pointcloud, indexes, axis=0)
print("Nonzero values {:.3f} => {}".format(len(points)/pointsCount, len(points)))
print(GetGlobalBoundingBox(points))
else:
points = ReadXYZL(dataPath)
downSample = True
boundingBoxes = []
if(hasBoxesData):
if(pointsDataSet == "points"):
boundingBoxes = ReadHDF5Boxes(dataPath)
else:
boundingBoxes = ReadHDF5Boxes("G:/PointCloud DataSets/semenatic3d/processedTest/test_bildstein_station3_xyz_intensity_rgb.hdf5")
if(markEachClass):
print("Set colors for each label")
# manMadeTerrain, naturalTerrain, highVegetation, lowVegetation, buildings, hardScape, cars, unlabeled = SeparateEachClass(points)
manMadeTerrain, naturalTerrain, highVegetation, lowVegetation, buildings, hardScape, cars = SeparateEachClass(points)
dataTool.VisualizePointCloud( [manMadeTerrain, naturalTerrain, highVegetation, lowVegetation, buildings, hardScape, cars],
[[1,0.5,0], [0,1,0.5], [0,1,0], [0.2, 1, 0.3], [0,0,1], [0.5,0,1], [1,0,0]], downSample, boundingBoxes)
else:
dataTool.VisualizePointCloud([points], [None], downSample, boundingBoxes)
def SeparateEachClass(points):
count = np.max(points[:,3])
classesPoints = []
for i in range(1, int(count)+1):
pts, _ = GetLabelPoint(points, i)
classesPoints.append(pts)
return classesPoints
def GetOffsetArray(pointCloud):
mins = [0,0,0]
if(len(pointCloud.shape) == 2):
mins = np.amin(pointCloud, axis=0)
elif(len(pointCloud.shape) == 3):
mins = np.amin(pointCloud, axis=1)
assert False
else:
return 0
minX = mins[0]
minY = mins[1]
minZ = mins[2]
offset = np.array([minX, minY, minZ])
for i in range(len(offset)):
if(offset[i] > 0):
offset[i] *= -1
else:
offset[i] = abs(offset[i])
return offset
def printline(message):
addSpaces = max(60 - len(message), 0)
print(message, end = " " * addSpaces + "\r")
def VoxelizeDataset(path, WindowXYZ, MinPointCountInWindow, PointCountInWindow, PointComponents, discardPoints = True):
pointCloud = ReadXYZL(path)
pointCloud = pointCloud.astype(np.float32)
printline("Voxelizing {} points".format(len(pointCloud)))
t=time()
pointLabels = pointCloud[:, 3]
pointCloud = np.delete(pointCloud, 3, 1)
pointCloud = pointCloud + GetOffsetArray(pointCloud)
voxel_index = np.floor(pointCloud[:] / WindowXYZ).astype(np.int32)
indices = np.expand_dims(np.arange(len(voxel_index)), axis=1)
voxel_index = np.concatenate((voxel_index, indices), axis=1)
printline("Sorting points")
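# Lexicographically sort by (x, y, z) voxel index so points of the same voxel become
# contiguous; column 3 carries the original point index used to reorder the cloud.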
voxel_index = voxel_index[np.lexsort((voxel_index[:,2], voxel_index[:,1], voxel_index[:,0]))]
pointCloud = np.concatenate((pointCloud[voxel_index[:,3]], np.expand_dims(pointLabels[voxel_index[:,3]], axis=1)), axis=1)
voxel_index = np.delete(voxel_index, 3, 1)
printline("Dividing points into voxels")
uniqueVoxels, indexes, counts = np.unique(voxel_index, axis=0, return_counts= True, return_index=True)
indexes = indexes[1:]
pointCloud = np.split(pointCloud, indexes, axis=0)
# pointLabels = np.split(pointLabels, indexes, axis=0)
if(discardPoints):
delIndexes =
|
np.where(counts[:] < MinPointCountInWindow)
|
numpy.where
|
import numpy as np
import copy as cp
from scipy.linalg import expm
from . import cmanif
class ManifoldPointArray:
def __init__(self, manifold):
self._manifold = cp.deepcopy(manifold)
self._coords = np.array([])
def __str__(self):
return "Array of {num} points of the manifold: ".format(num=len(self._coords))+ str(self._manifold)
@property
def manifold(self):
return self._manifold
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, coords):
self._coords = self._manifold.project_on_manifold(coords)
class ManifoldPointArrayParameterized(ManifoldPointArray):
def __init__(self, manifold):
assert manifold.parameterized
self._local_coords = np.array([])
ManifoldPointArray.__init__(self,manifold)
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, coords):
ManifoldPointArray.coords.fset(self,coords)
self._local_coords = self._manifold.compute_inverse_parameterization(self._coords)
#self._local_coords = np.empty([coords.shape[0],self._manifold.local_dim])
#inverse_parameterization = self._manifold.compute_inverse_parameterization
#for i, point in enumerate(self._coords):
# self._local_coords[i] = inverse_parameterization(point)
@property
def local_coords(self):
return self._local_coords
@local_coords.setter
def local_coords(self, local_coords):
self._local_coords = np.empty(local_coords.shape)
self._local_coords[:] = local_coords
self._coords = self._manifold.compute_parameterization(local_coords)
class TangentVectorArray:
def __init__(self, manifold_point_array):
self._base_point_array = cp.deepcopy(manifold_point_array)
self._coords = np.zeros(self._base_point_array.coords.shape)
def __str__(self):
return "Array of {num} tangent vectors of the manifold: ".format(num=len(self._coords)) \
+ str(self._base_point_array.manifold)
@property
def base_point_array(self):
return self._base_point_array
@property
def manifold(self):
return self._base_point_array.manifold
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, coords):
self._coords = self.manifold.project_on_tangent_space(self._base_point_array.coords,coords)
def perform_geodesic_step(self, step_length=1):
self._base_point_array._coords, self._coords = self.manifold.geodesic_step(self._base_point_array.coords, self.coords, step=step_length)
def normal_vector_coords(self):
return self.manifold.normal_vector(self._base_point_array._coords,self._coords)
def christoffel_matrix_lin_comb_mult(self, coeffs):
christoffel_lin_comb = self.manifold.christoffel_matrix_lin_comb
base_coords = self._base_point_array._coords
mult_coords = np.empty(self._coords.shape)
for i, tangent_coords in enumerate(self._coords):
matrix = christoffel_lin_comb(base_coords[i], coeffs[i])
mult_coords[i] = np.dot(matrix, tangent_coords)
return mult_coords
class TangentVectorArrayParameterized(TangentVectorArray):
def __init__(self, manifold_point_array):
assert manifold_point_array.manifold.parameterized
TangentVectorArray.__init__(self,manifold_point_array)
self._local_coords = np.zeros(self._base_point_array.local_coords.shape)
def perform_geodesic_step(self, step_length=1):
TangentVectorArray.perform_geodesic_step(self, step_length)
self._base_point_array._local_coords = self.manifold.compute_inverse_parameterization(self._base_point_array._coords)
jacobi_matrix = self.manifold.compute_jacobi_matrix(self._base_point_array._local_coords)
inverse_riemannian_matrix = self.manifold.compute_inverse_riemannian_matrix(self._base_point_array._local_coords)
jacobi_transp_dot_coords = np.zeros([self._coords.shape[0],self.manifold.local_dim])
for i in range(self.manifold.local_dim):
for j in range(self.manifold.ambient_dim):
jacobi_transp_dot_coords[:,i] += jacobi_matrix[:,j,i]*self._coords[:,j]
self._local_coords = np.zeros([self._coords.shape[0],self.manifold.local_dim])
for i in range(self.manifold.local_dim):
for j in range(self.manifold.local_dim):
self._local_coords[:,i] += inverse_riemannian_matrix[:,i,j] * jacobi_transp_dot_coords[:,j]
@property
def coords(self):
return self._coords
@coords.setter
def coords(self, coords):
TangentVectorArray.coords.fset(self,coords)
jacobi_matrix = self.manifold.compute_jacobi_matrix(self._base_point_array._local_coords)
inverse_riemannian_matrix = self.manifold.compute_inverse_riemannian_matrix(self._base_point_array._local_coords)
jacobi_transp_dot_coords = np.zeros([coords.shape[0],self.manifold.local_dim])
for i in range(self.manifold.local_dim):
for j in range(self.manifold.ambient_dim):
jacobi_transp_dot_coords[:,i] += jacobi_matrix[:,j,i]*coords[:,j]
self._local_coords = np.zeros([coords.shape[0],self.manifold.local_dim])
for i in range(self.manifold.local_dim):
for j in range(self.manifold.local_dim):
self._local_coords[:,i] += inverse_riemannian_matrix[:,i,j] * jacobi_transp_dot_coords[:,j]
@property
def local_coords(self):
return self._local_coords
@local_coords.setter
def local_coords(self, local_coords):
self._local_coords = local_coords
jacobi_matrix = self.manifold.compute_jacobi_matrix(self._base_point_array._local_coords)
for i, base_point in enumerate(self._base_point_array.local_coords):
self._coords[i] = np.dot(jacobi_matrix[i,:,:], local_coords[i])
class Manifold:
def __init__(self):
# set to True if a parameterization is implemented (e.g. see Sphere2)
self._parameterized = False
def __str__(self):
return self._description
def __eq__(self, other):
if isinstance(other, Manifold):
return self._description == other._description
return NotImplemented
@property
def ambient_dim(self):
return self._dim
@property
def local_dim(self):
return self._local_dim
@property
def parameterized(self):
return self._parameterized
def christoffel_matrices(self, base_point_coords):
"""
Slow fallback implementation of computing christoffel matrices from normal vectors,
which should be reimplemented explicitly for performance reasons
(see for instance implementations on the Sphere, Rotation Group, or Grassmannian)
"""
dim = self._dim
cm = np.empty([dim,dim,dim])
basis = np.identity(dim)
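# Polarization trick: n(v) is quadratic in the tangent vector, so the mixed term
# B(v_i, v_j) = (n(v_i + v_j) - n(v_i - v_j)) / 4; the entries below store -B,
# consistent with cm[:, i, i] = -n_i on the diagonal.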
for i in range(dim):
v_i = self.project_on_tangent_space(base_point_coords, basis[i])
n_i = self.normal_vector(base_point_coords, v_i)
cm[:,i,i] = - n_i
for j in range(i,dim):
v_j = self.project_on_tangent_space(base_point_coords, basis[j])
n_ipj = self.normal_vector(base_point_coords, v_i + v_j)
n_imj = self.normal_vector(base_point_coords, v_i - v_j)
cm[:,i,j] = (n_imj - n_ipj)/4
cm[:,j,i] = cm[:,i,j]
return cm
def christoffel_matrix_lin_comb(self, base_point_coords, coeffs):
"""
Slow fallback implementation of computing a linear combination of christoffel matrices,
which should be reimplemented explicitly for performance reasons
(see for instance implementations on the Sphere, Rotation Group, or Grassmannian)
"""
cm = self.christoffel_matrices(base_point_coords)
return np.asmatrix(np.tensordot(coeffs, cm, axes=(0,0)))
class EuclideanSpace(Manifold):
def __init__(self, d):
self._dim=d
self._local_dim=d
self._description = "Euclidean Space R^{dim}".format(dim=self._dim)
Manifold.__init__(self)
def project_on_manifold(self, vector):
return np.array(vector)
def project_on_tangent_space(self, base_point_coords, vector):
return np.array(vector)
def geodesic_step(self, base_point_coords, tangent_vector_coords, step=1.0):
new_base_point_coords = base_point_coords + step * tangent_vector_coords
new_tangent_vector_coords = np.array(tangent_vector_coords)
return new_base_point_coords, new_tangent_vector_coords
def normal_vector(self, base_point_coords, tangent_vector_coords):
return
|
np.zeros(tangent_vector_coords.shape)
|
numpy.zeros
|
import os
import time
import numpy as np
import pyccl as ccl
from scipy.interpolate import interp1d
import pytest
T0 = 0.0
T0_CLS = 0.0
@pytest.fixture(scope='module', params=['fftlog', 'bessel'])
def corr_method(request):
errfacs = {'fftlog': 0.2, 'bessel': 0.1}
return request.param, errfacs[request.param]
@pytest.fixture(scope='module', params=['analytic', 'histo'])
def set_up(request):
t0 = time.time()
nztyp = request.param
dirdat = os.path.dirname(__file__) + '/data/'
cosmo = ccl.Cosmology(Omega_c=0.30, Omega_b=0.00, Omega_g=0, Omega_k=0,
h=0.7, sigma8=0.8, n_s=0.96, Neff=0, m_nu=0.0,
w0=-1, wa=0, T_CMB=2.7, transfer_function='bbks',
mass_function='tinker',
matter_power_spectrum='linear')
cosmo.cosmo.gsl_params.INTEGRATION_LIMBER_EPSREL = 2.5E-5
cosmo.cosmo.gsl_params.INTEGRATION_EPSREL = 2.5E-5
# Ell-dependent correction factors
# Set up array of ells
fl = {}
lmax = 10000
nls = (lmax - 400)//20+141
ells = np.zeros(nls)
ells[:101] = np.arange(101)
ells[101:121] = ells[100] + (np.arange(20) + 1) * 5
ells[121:141] = ells[120] + (np.arange(20) + 1) * 10
ells[141:] = ells[140] + (np.arange(nls - 141) + 1) * 20
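# ell sampling: every multipole up to 100, then steps of 5 (to 200), 10 (to 400),
# and 20 up to lmax = 10000.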
fl['lmax'] = lmax
fl['ells'] = ells
# Initialize tracers
if nztyp == 'analytic':
# Analytic case
zmean_1 = 1.0
sigz_1 = 0.15
zmean_2 = 1.5
sigz_2 = 0.15
z1, tmp_a1 = np.loadtxt(dirdat + "ia_amp_analytic_1.txt", unpack=True)
z2, tmp_a2 = np.loadtxt(dirdat + "ia_amp_analytic_2.txt", unpack=True)
pz1 = np.exp(-0.5 * ((z1 - zmean_1) / sigz_1)**2)
pz2 = np.exp(-0.5 * ((z2 - zmean_2) / sigz_2)**2)
elif nztyp == 'histo':
# Histogram case
z1, pz1 = np.loadtxt(dirdat + "bin1_histo.txt", unpack=True)[:, 1:]
_, tmp_a1 = np.loadtxt(dirdat + "ia_amp_histo_1.txt", unpack=True)
z2, pz2 = np.loadtxt(dirdat + "bin2_histo.txt", unpack=True)[:, 1:]
_, tmp_a2 = np.loadtxt(dirdat + "ia_amp_histo_2.txt", unpack=True)
else:
raise ValueError("Wrong Nz type " + nztyp)
bz = np.ones_like(pz1)
# Renormalize the IA amplitude to be consistent with A_IA
D1 = ccl.growth_factor(cosmo, 1./(1+z1))
D2 = ccl.growth_factor(cosmo, 1./(1+z2))
rho_m = ccl.physical_constants.RHO_CRITICAL * cosmo['Omega_m']
a1 = - tmp_a1 * D1 / (5e-14 * rho_m)
a2 = - tmp_a2 * D2 / (5e-14 * rho_m)
# Initialize tracers
trc = {}
trc['g1'] = ccl.NumberCountsTracer(cosmo, False,
(z1, pz1),
(z1, bz))
trc['g2'] = ccl.NumberCountsTracer(cosmo, False,
(z2, pz2),
(z2, bz))
trc['l1'] = ccl.WeakLensingTracer(cosmo, (z1, pz1))
trc['l2'] = ccl.WeakLensingTracer(cosmo, (z2, pz2))
trc['i1'] = ccl.WeakLensingTracer(cosmo, (z1, pz1),
has_shear=False,
ia_bias=(z1, a1))
trc['i2'] = ccl.WeakLensingTracer(cosmo, (z2, pz2),
has_shear=False,
ia_bias=(z2, a2))
trc['ct'] = ccl.CMBLensingTracer(cosmo, 1100.)
# Read benchmarks
def read_bm(fname):
th, xi =
|
np.loadtxt(fname, unpack=True)
|
numpy.loadtxt
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements representations of slabs and surfaces, as well as
algorithms for generating them. If you use this module, please consider
citing the following work::
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, "Surface Energies of Elemental Crystals", Scientific Data,
2016, 3:160080, doi: 10.1038/sdata.2016.80.
as well as::
<NAME>.; <NAME>. Efficient creation and convergence of surface slabs,
Surface Science, 2013, 617, 53–59, doi:10.1016/j.susc.2013.05.016.
"""
from functools import reduce
from math import gcd
import math
import itertools
import logging
import warnings
import copy
import os
import json
import numpy as np
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage, fcluster
from monty.fractions import lcm
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.sites import PeriodicSite
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import in_coord_list
from pymatgen.analysis.structure_matcher import StructureMatcher
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright 2014, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "6/10/14"
logger = logging.getLogger(__name__)
class Slab(Structure):
"""
Subclass of Structure representing a Slab. Implements additional
attributes pertaining to slabs, but the init method does not
actually implement any algorithm that creates a slab. This is a
DUMMY class whose init method only holds information about the
slab. It also has additional methods that return other information
about a slab such as the surface area, normal, and atom adsorption.
Note that all Slabs have the surface normal oriented perpendicular to the a
and b lattice vectors. This means the lattice vectors a and b are in the
surface plane and the c vector is out of the surface plane (though not
necessarily perpendicular to the surface).
.. attribute:: miller_index
Miller index of plane parallel to surface.
.. attribute:: scale_factor
Final computed scale factor that brings the parent cell to the
surface cell.
.. attribute:: shift
The shift value in Angstrom that indicates how much this
slab has been shifted.
"""
def __init__(self, lattice, species, coords, miller_index,
oriented_unit_cell, shift, scale_factor, reorient_lattice=True,
validate_proximity=False, to_unit_cell=False,
reconstruction=None, coords_are_cartesian=False,
site_properties=None, energy=None):
"""
Makes a Slab structure, a structure object with additional information
and methods pertaining to slabs.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
miller_index ([h, k, l]): Miller index of plane parallel to
surface. Note that this is referenced to the input structure. If
you need this to be based on the conventional cell,
you should supply the conventional structure.
oriented_unit_cell (Structure): The oriented_unit_cell from which
this Slab is created (by scaling in the c-direction).
shift (float): The shift in the c-direction applied to get the
termination.
scale_factor (np.ndarray): scale_factor Final computed scale factor
that brings the parent cell to the surface cell.
reorient_lattice (bool): reorients the lattice parameters such that
the c direction is along the z axis.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
reconstruction (str): Type of reconstruction. Defaults to None if
the slab is not reconstructed.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
energy (float): A value for the energy.
"""
self.oriented_unit_cell = oriented_unit_cell
self.miller_index = tuple(miller_index)
self.shift = shift
self.reconstruction = reconstruction
self.scale_factor =
|
np.array(scale_factor)
|
numpy.array
|
import numpy as np
def rotate_pos(pos, angle):
""" Transformation the coordinate in the angle
Args:
pos (numpy.ndarray): local state, shape(data_size, 2)
angle (float): rotate angle, in radians
Returns:
rotated_pos (numpy.ndarray): shape(data_size, 2)
"""
rot_mat = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle),
|
np.cos(angle)
|
numpy.cos
|
# coding: utf-8
# 2021/4/5 @ liujiayu
import logging
import numpy as np
import pickle
from tqdm import tqdm
from collections import namedtuple
from collections import defaultdict
from EduKTM import KTM
hyper_para = namedtuple("hyperparameters", ["r", "D", "deltaT", "S", "lambda_U_1", "lambda_U", "lambda_P", "lambda_S"])
default_hyper = hyper_para(6, 2, 1, 5, 0.01, 2, 2, 0.01)
def init_parameters(stu_num, prob_num, know_num, time_window_num):
u_latent = np.random.normal(0.5, 0.01, size=(time_window_num, stu_num, know_num))
i_latent = 0.1 * np.random.uniform(0, 1, size=(prob_num, know_num)) # problems' latent vector(V)
alpha = np.random.uniform(0, 1, size=stu_num)
B = 0.01 * np.random.normal(0, 1, size=prob_num)
return u_latent, i_latent, alpha, B
def stu_curve(u_latent, alpha, r, D, deltaT, S, time_freq): # learning and forgetting curve
freq_norm = D * time_freq / (time_freq + r)
learn_factor = u_latent * freq_norm
forget_factor = u_latent * np.exp(-deltaT / S)
pred_u = learn_factor * np.expand_dims(alpha, axis=1) + forget_factor * np.expand_dims(1 - alpha, axis=1)
return pred_u, freq_norm
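# stu_curve mixes a learning term (driven by per-concept practice frequency via
# freq_norm) and a forgetting term (exponential decay exp(-deltaT / S)), weighted
# per student by alpha; both returned arrays share u_latent's shape
# (time_window_num, stu_num, know_num).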
class EKPT(KTM):
"""
EKPT model, training (MAP) and testing methods
Parameters
----------
q_m: array
Q matrix, shape = (prob_num, know_num)
stu_num: int
number of students
prob_num: int
number of problems
know_num: int
number of knowledge
time_window_num: int
number of time windows
args: namedtuple
all hyper-parameters
----------
"""
def __init__(self, q_m, stu_num, prob_num, know_num, time_window_num, args=default_hyper):
super(EKPT, self).__init__()
self.args = args
self.q_m = q_m
self.stu_num, self.prob_num, self.know_num = stu_num, prob_num, know_num
self.time_window_num = time_window_num
self.u_latent, self.i_latent, self.alpha, self.B = init_parameters(stu_num, prob_num, know_num, time_window_num)
# partial order of knowledge in each problem
self.par_mat = np.zeros(shape=(prob_num, know_num, know_num))
for i in range(prob_num):
for o1 in range(know_num):
if self.q_m[i][o1] == 0:
continue
for o2 in range(know_num):
if self.q_m[i][o2] == 0:
self.par_mat[i][o1][o2] = 1
# exercise relation
self.exer_neigh = (np.dot(self.q_m, self.q_m.transpose()) > 0).astype(int)
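# exer_neigh[i][j] == 1 iff problems i and j share at least one knowledge concept
# (the diagonal is 1 for any problem covering a concept); this neighbourhood is used
# to regularise problem latent vectors towards each other during training.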
def train(self, train_data, epoch, lr=0.001, lr_b=0.0001, epsilon=1e-3, init_method='mean') -> ...:
# train_data(list): response data, length = time_window_num, e.g.[[{'user_id':, 'item_id':, 'score':},...],...]
assert self.time_window_num == len(train_data), 'number of time windows conflicts'
u_latent, i_latent = np.copy(self.u_latent), np.copy(self.i_latent)
alpha, B = np.copy(self.alpha), np.copy(self.B)
# mean score of each student in train_data
sum_score = np.zeros(shape=self.stu_num)
sum_count = np.zeros(shape=self.stu_num)
# knowledge frequency in each time window
time_freq = np.zeros(shape=(self.time_window_num, self.stu_num, self.know_num))
for t in range(self.time_window_num):
for record in train_data[t]:
user, item, rating = record['user_id'], record['item_id'], record['score']
time_freq[t][user][np.where(self.q_m[item] == 1)[0]] += 1
sum_score[user] += rating
sum_count[user] += 1
# initialize student latent with mean score
if init_method == 'mean':
u_latent = np.random.normal(20 * np.expand_dims(sum_score / (sum_count + 1e-9), axis=1) / self.know_num,
0.01, size=(self.time_window_num, self.stu_num, self.know_num))
for iteration in range(epoch):
u_latent_tmp, i_latent_tmp = np.copy(u_latent), np.copy(i_latent)
alpha_tmp, B_tmp = np.copy(alpha), np.copy(B)
i_gradient = np.zeros(shape=(self.prob_num, self.know_num))
b_gradient = np.zeros(shape=self.prob_num)
alpha_gradient = np.zeros(shape=self.stu_num)
for t in range(self.time_window_num):
u_gradient_t = np.zeros(shape=(self.stu_num, self.know_num))
record_num_t = len(train_data[t])
users = [record['user_id'] for record in train_data[t]]
items = [record['item_id'] for record in train_data[t]]
ratings = [record['score'] for record in train_data[t]]
pred_R = [np.dot(u_latent[t][users[i]], i_latent[items[i]]) - B[items[i]] for i in range(record_num_t)]
pred_u, freq_norm = stu_curve(u_latent, alpha, self.args.r, self.args.D, self.args.deltaT, self.args.S,
time_freq) # both shape are (time_window_num, stu_num, know_num)
for i in range(record_num_t):
user, item, rating = users[i], items[i], ratings[i]
R_diff = pred_R[i] - rating
b_gradient[item] -= R_diff
u_gradient_t[user] += R_diff * i_latent[item]
i_gradient[item] += R_diff * u_latent[t][user] + self.args.lambda_S * i_latent[item]
i_gradient[item] -= self.args.lambda_S * np.sum(
np.expand_dims(self.exer_neigh[item], axis=1) * i_latent, axis=0) / sum(self.exer_neigh[item])
if t == 0:
u_gradient_t[user] += self.args.lambda_U_1 * u_latent[0][user]
else:
u_gradient_t[user] += self.args.lambda_U * (u_latent[t][user] - pred_u[t - 1][user])
alpha_gradient[user] += np.dot(pred_u[t - 1][user] - u_latent[t][user], u_latent[t][user] * (
freq_norm[t - 1][user] - np.exp(-self.args.deltaT / self.args.S)))
if t < self.time_window_num - 1:
u_gradient_t[user] += self.args.lambda_U * (pred_u[t][user] - u_latent[t + 1][user]) * (
alpha[user] * freq_norm[t][user] + (1 - alpha[user]) * np.exp(
- self.args.deltaT / self.args.S))
o1, o2 = np.where(self.par_mat[item] == 1)
for j in range(len(o1)):
i_gradient[item][o1[j]] -= self.args.lambda_P * 0.5 * (1 - np.tanh(
0.5 * (i_latent[item][o1[j]] - i_latent[item][o2[j]])))
i_gradient[item][o2[j]] += self.args.lambda_P * 0.5 * (1 - np.tanh(
0.5 * (i_latent[item][o1[j]] - i_latent[item][o2[j]])))
u_latent[t] -= lr * u_gradient_t
i_latent -= lr * i_gradient
B -= lr_b * b_gradient
alpha = np.clip(alpha - lr * alpha_gradient, 0, 1)
change = max(np.max(np.abs(u_latent - u_latent_tmp)), np.max(np.abs(i_latent - i_latent_tmp)),
np.max(np.abs(alpha - alpha_tmp)), np.max(np.abs(B - B_tmp)))
if iteration > 20 and change < epsilon:
break
self.u_latent, self.i_latent, self.alpha, self.B = u_latent, i_latent, alpha, B
def eval(self, test_data) -> tuple:
test_rmse, test_mae = [], []
for i in tqdm(test_data, "evaluating"):
stu, test_id, true_score = i['user_id'], i['item_id'], i['score']
predict_rating = np.clip(np.dot(self.u_latent[-1][stu], self.i_latent[test_id]) - self.B[test_id], 0, 1)
test_rmse.append((predict_rating - true_score) ** 2)
test_mae.append(abs(predict_rating - true_score))
return np.sqrt(
|
np.average(test_rmse)
|
numpy.average
|
# -*- coding: utf-8 -*-
u"""
Beta regression for modeling rates and proportions.
References
----------
Grün, Bettina, <NAME>, and <NAME>. Extended beta regression
in R: Shaken, stirred, mixed, and partitioned. No. 2011-22. Working Papers in
Economics and Statistics, 2011.
Smithson, Michael, and <NAME>. "A better lemon squeezer?
Maximum-likelihood regression with beta-distributed dependent variables."
Psychological methods 11.1 (2006): 54.
"""
import numpy as np
from scipy.special import gammaln as lgamma
import patsy
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.tools.decorators import cache_readonly
from statsmodels.base.model import (
GenericLikelihoodModel, GenericLikelihoodModelResults, _LLRMixin)
from statsmodels.genmod import families
_init_example = """
Beta regression with default of logit-link for exog and log-link
for precision.
>>> mod = BetaModel(endog, exog)
>>> rslt = mod.fit()
>>> print(rslt.summary())
We can also specify a formula and a specific structure and use the
identity-link for precision.
>>> from sm.families.links import identity
>>> Z = patsy.dmatrix('~ temp', dat, return_type='dataframe')
>>> mod = BetaModel.from_formula('iyield ~ C(batch, Treatment(10)) + temp',
... dat, exog_precision=Z,
... link_precision=identity())
In the case of proportion-data, we may think that the precision depends on
the number of measurements. E.g for sequence data, on the number of
sequence reads covering a site:
>>> Z = patsy.dmatrix('~ coverage', df)
>>> formula = 'methylation ~ disease + age + gender + coverage'
>>> mod = BetaModel.from_formula(formula, df, Z)
>>> rslt = mod.fit()
"""
class BetaModel(GenericLikelihoodModel):
"""Beta Regression.
The Model is parameterized by mean and precision. Both can depend on
explanatory variables through link functions.
Parameters
----------
endog : array_like
1d array of endogenous response variable.
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user (models specified using a formula
include an intercept by default). See `statsmodels.tools.add_constant`.
exog_precision : array_like
2d array of variables for the precision.
link : link
Any link in sm.families.links for mean, should have range in
interval [0, 1]. Default is logit-link.
link_precision : link
Any link in sm.families.links for precision, should have
range in positive line. Default is log-link.
**kwds : extra keywords
Keyword options that will be handled by super classes.
Not all general keywords will be supported in this class.
Notes
-----
Status: experimental, new in 0.13.
Core results are verified, but api can change and some extra results
specific to Beta regression are missing.
Examples
--------
{example}
See Also
--------
:ref:`links`
""".format(example=_init_example)
def __init__(self, endog, exog, exog_precision=None,
link=families.links.Logit(),
link_precision=families.links.Log(), **kwds):
etmp = np.array(endog)
assert np.all((0 < etmp) & (etmp < 1))
if exog_precision is None:
extra_names = ['precision']
exog_precision = np.ones((len(endog), 1), dtype='f')
else:
extra_names = ['precision-%s' % zc for zc in
(exog_precision.columns
if hasattr(exog_precision, 'columns')
else range(1, exog_precision.shape[1] + 1))]
kwds['extra_params_names'] = extra_names
super(BetaModel, self).__init__(endog, exog,
exog_precision=exog_precision,
**kwds)
self.link = link
self.link_precision = link_precision
# not needed, handled by super:
# self.exog_precision = exog_precision
# inherited df do not account for precision params
self.nobs = self.endog.shape[0]
self.df_model = self.nparams - 1
self.df_resid = self.nobs - self.nparams
assert len(self.exog_precision) == len(self.endog)
self.hess_type = "oim"
if 'exog_precision' not in self._init_keys:
self._init_keys.extend(['exog_precision'])
self._init_keys.extend(['link', 'link_precision'])
self._null_drop_keys = ['exog_precision']
self.results_class = BetaResults
self.results_class_wrapper = BetaResultsWrapper
@classmethod
def from_formula(cls, formula, data, exog_precision_formula=None,
*args, **kwargs):
if exog_precision_formula is not None:
if 'subset' in kwargs:
d = data.ix[kwargs['subset']]
Z = patsy.dmatrix(exog_precision_formula, d)
else:
Z = patsy.dmatrix(exog_precision_formula, data)
kwargs['exog_precision'] = Z
return super(BetaModel, cls).from_formula(formula, data, *args,
**kwargs)
def predict(self, params, exog=None, exog_precision=None, which="mean"):
"""Predict values for mean or precision
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
which : str
- "mean" : mean, conditional expectation E(endog | exog)
- "precision" : predicted precision
- "linpred" : linear predictor for the mean function
- "linpred_precision" : linear predictor for the precision function
Returns
-------
ndarray, predicted values
"""
k_mean = self.exog.shape[1]
if which in ["mean", "linpred"]:
if exog is None:
exog = self.exog
params_mean = params[:k_mean]
# Zparams = params[k_mean:]
linpred = np.dot(exog, params_mean)
if which == "mean":
mu = self.link.inverse(linpred)
return mu
else:
return linpred
elif which in ["precision", "linpred_precision"]:
if exog_precision is None:
exog_precision = self.exog_precision
params_prec = params[k_mean:]
linpred_prec =
|
np.dot(exog_precision, params_prec)
|
numpy.dot
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
from scipy.sparse.linalg import splu
from scipy.sparse import csr_matrix as sparse
from enum import Enum
from warnings import warn
from matplotlib import pyplot as plt
class DiffEqSolver(Enum):
EULER = 1,
RUNGE_KUTTA = 2
class DynamicModels(Enum):
NoModel = 0,
SynchronousGeneratorOrder4 = 1, # fourth order synchronous machine
SynchronousGeneratorOrder6 = 2, # sixth order synchronous machine
VoltageSourceConverter = 3, # voltage source converter
ExternalGrid = 4, # external grid
AsynchronousSingleCageMotor = 5, # single cage asynchronous motor
AsynchronousDoubleCageMotor = 6 # double cage asynchronous motor
class TransientStabilityEvents:
def __init__(self):
self.time = list()
self.event_type = list()
self.object = list()
self.params = list()
self.events_available = ['Bus short circuit', 'Bus recovery', 'Line failure', 'Line recovery']
def add(self, t, evt_type, obj, param):
"""
Add elements
:param t: time in seconds
:param evt_type: event type
:param obj: object selected
:param param: extra parameters
"""
if evt_type not in self.events_available:
raise Exception('Event not supported!')
self.time.append(t)
self.event_type.append(evt_type)
self.object.append(obj)
self.params.append(param)
def remove_at(self, i):
"""
Remove the elements at a position
:param i: index
"""
self.time.pop(i)
self.event_type.pop(i)
self.object.pop(i)
self.params.pop(i)
class TransientStabilityResults:
def __init__(self):
self.name = "Transient stability"
self.voltage = None
self.omega = None
self.time = None
self.available_results = ['Bus voltage']
def plot(self, result_type, ax=None, indices=None, names=None, LINEWIDTH=2):
"""
Plot the results
:param result_type:
:param ax:
:param indices:
:param names:
:return:
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if indices is None:
indices = np.array(range(len(names)))
if len(indices) > 0:
labels = names[indices]
y_label = ''
title = ''
if result_type == 'Bus voltage':
y = np.abs(self.voltage[:, indices])
y_label = '(p.u.)'
title = 'Bus voltage module'
else:
pass
df = pd.DataFrame(data=y, columns=labels, index=self.time)
df.fillna(0, inplace=True)
if len(df.columns) > 10:
df.plot(ax=ax, linewidth=LINEWIDTH, legend=False)
else:
df.plot(ax=ax, linewidth=LINEWIDTH, legend=True)
ax.set_title(title)
ax.set_ylabel(y_label)
ax.set_xlabel('Time')
return df
else:
return None
class SynchronousMachineOrder4:
"""
4th Order Synchronous Machine Model
https://wiki.openelectrical.org/index.php?title=Synchronous_Machine_Models#4th_Order_.28Two-Axis.29_Model
Copyright (C) 2014-2015 <NAME>. All rights reserved.
typical values:
Ra = 0.0
Xa = 0.0
Xd = 1.68
Xq = 1.61
Xdp = 0.32
Xqp = 0.32
Xdpp = 0.2
Xqpp = 0.2
Td0p = 5.5
Tq0p = 4.60375
Td0pp = 0.0575
Tq0pp = 0.0575
H = 2
"""
def __init__(self, H, Ra, Xd, Xdp, Xdpp, Xq, Xqp, Xqpp, Td0p, Tq0p, base_mva, Sbase, bus_idx, fn=50,
speed_volt=False, solver=DiffEqSolver.RUNGE_KUTTA):
"""
:param H: is the machine inertia constant (MWs/MVA)
:param Ra: armature resistance (pu)
:param Xd: d-axis reactance (p.u.)
:param Xdp: d-axis transient reactance (p.u.)
:param Xdpp: d-axis subtransient reactance (p.u.)
:param Xq: q-axis reactance (p.u.)
:param Xqp: q-axis transient reactance (p.u.)
:param Xqpp: is the q-axis subtransient reactance (pu)
:param Td0p: d-axis transient open loop time constant (s)
:param Tq0p: q-axis transient open loop time constant (s)
:param base_mva: machine base power
:param Sbase: system base power (100 MVA usually)
:param fn: frequency
:param speed_volt: include speed-voltage term option?
:param solver: DiffEqSolver
"""
self.solver = solver
self.bus_idx = bus_idx
self.Vfd = 0.0
self.Id = 0.0
self.Iq = 0.0
# stator voltage (d, q axis)
self.Vd = 0.0
self.Vq = 0.0
self.Vt = 0.0
self.P = 0.0
self.Q = 0.0
self.Pm = 0.0
self.omega = 0.0
self.delta = 0.0
self.Eqp = 0.0
self.Edp = 0.0
self.Vang = 0.0
self.Tm = 0.0
self.In = 0.0
self.Im = 0.0
self.omega_prev = 0.0
self.delta_prev = 0.0
self.Eqp_prev = 0.0
self.Edp_prev = 0.0
self.arr_Eqp = np.zeros(3)
self.arr_Edp = np.zeros(3)
self.arr_omega = np.zeros(3)
self.arr_delta = np.zeros(3)
self.Td0p = Td0p
self.Tq0p = Tq0p
# angular speed (w = 2·pi·f)
self.omega_n = 2.0 * np.ones_like(H) * np.pi * fn
# Check for speed-voltage term option
self.speed_volt = speed_volt # True / False
# Convert impedances and H to system MVA base
self.H = H * base_mva / Sbase
self.Ra = Ra * Sbase / base_mva
self.Xd = Xd * Sbase / base_mva
self.Xdp = Xdp * Sbase / base_mva
self.Xdpp = Xdpp * Sbase / base_mva
self.Xq = Xq * Sbase / base_mva
self.Xqp = Xqp * Sbase / base_mva
self.Xqpp = Xqpp * Sbase / base_mva
# Equivalent Norton impedance for Ybus
self.Yg = self.get_yg()
def get_yg(self):
"""
Get the generator admittance
:return: shunt admittance
"""
return (self.Ra - 1j * 0.5 * (self.Xdp + self.Xqp)) / (self.Ra ** 2.0 + (self.Xdp * self.Xqp))
def initialise(self, vt0, S0):
"""
Initialise machine signals and states based on load flow voltage and complex power injection
:param vt0: complex initial voltage
:param S0: complex initial power
:return:
"""
# Calculate initial armature current
Ia0 = np.conj(S0 / vt0)
phi0 = np.angle(Ia0)
# Calculate steady state machine emf (i.e. voltage behind synchronous reactance)
Eq0 = vt0 + (self.Ra + 1j * self.Xq) * Ia0
self.delta = np.angle(Eq0)
# Convert currents to rotor reference frame
self.Id =
|
np.abs(Ia0)
|
numpy.abs
|
import sys
from os.path import join, dirname
from tqdm import tqdm
import numpy as np
import scipy as sp
sys.path.append(join(dirname(__file__), "../.."))
from src.utils.factory import calc_acc, calc_loss_NTK
class LearningCurve:
def __init__(self, cfg, lr, NTK_train, train_label, f_train_0, f_test_0):
self.time_range = np.arange(0, cfg.GENERAL.EPOCH, 1)
self.n_train = int(cfg.DATA.DATA_NUM * (1 - cfg.DATA.SPLIT_RATIO))
self.n_class = cfg.DATA.CLASS
self.NTK_train = NTK_train
self.train_label = train_label
self.f_train_0 = f_train_0
self.f_test_0 = f_test_0
self.id_mat = np.eye(self.n_train)
self.lr = lr
self.diff, self.P, self.D = self.prepare()
def prepare(self):
diff = self.f_train_0 - self.train_label
mat = self.id_mat - self.lr * self.NTK_train / (self.n_train * self.n_class)
diag, P = np.linalg.eigh(mat)
D = np.diag(diag)
return diff, P, D
def basic_calc(self, epoch, split, label, NTK_prod=None):
if epoch == 0:
p_mat = self.id_mat
else:
p_mat = self.P @ (self.D**epoch) @ self.P.T
if split == 'train':
output =
|
np.dot(p_mat, self.diff)
|
numpy.dot
|
import numpy as np
from numpy.linalg import inv
from numpy.linalg import svd
from numpy.linalg import eig
from numpy.linalg import det
from scipy.optimize import leastsq,least_squares,fmin
import pandas as pd
import time
import random
import os.path
import argparse
from epipolar_geometry import EpipolarGeometry
from model import RecModel
from image_sequence import ImageSequence
class UncalibratedReconstruction:
'''
Class that contains high level methods to perform 3D reconstruction from a sequence of uncalibrated images.
'''
def __init__(self,sequence_length,width,height,triang_method=0,opt_triang=0,opt_f=1,self_foc=0):
'''
Constructor
Args:
sequence_length: number of images (views)
width: width of the images
height: height of the images
triang_method: triangulation method (0: standard, 1: polynomial)
opt_triang: optimize initial 3D point estimate
opt_f: optimize fundamental matrix estimation
self_foc: for self-calibration, type of focal length expected across views (0: fixed, 1: varying )
'''
self._eg_utils=EpipolarGeometry()
# things that are needed throughout the class
self._sequence_length=sequence_length
self._width=width;
self._height=height;
self._mm=(width+height)/2;
self._triangulation_method=triang_method
self._optimize_triangulation=opt_triang
self._optimize_f=opt_f
self._self_foc=self_foc
def two_view_geometry_computation(self,view1_feat2D,view2_feat2D):
'''
Method to compute the fundamental matrix and epipoles for two views
Args:
view1_feat2D: 2D feature coordinates in view 1
view2_feat2D: 2D feature coordinates in view 2
Returns:
F: the fundamental matrix
epipole_1: view1 epipole
epipole_2: view2 epipoles
'''
F=self._eg_utils.fundamental_matrix(view1_feat2D,view2_feat2D,self._optimize_f)
epipole_1=self._eg_utils.get_epipole(F)
epipole_2=self._eg_utils.get_epipole(np.transpose(F))
return F,epipole_1,epipole_2
def compute_reference_frame(self,epipole,F):
'''
Method to compute the reference frame of the reconstruction (i.e. plane at infinity in an affine or metric space).
Args:
epipole: the epipole
F: the fundamental matrix
Returns:
p: the reference plane
h: the homography [e]xF
'''
H=self._eg_utils.compute_homography(epipole,F) #compute the homography [e]xF
# get the reference plane
p = np.sum(np.divide(np.eye(3)-H, np.transpose(np.asarray([epipole, epipole, epipole]))),axis=0)/3
# adjust reference plane to make the first two projection matrices as equal as possible
p=fmin(self.init_plane,np.append(p,1),xtol=1e-25,ftol=1e-25,args=(H.real,epipole.real));
p=p[0:3]
return p, H
def init_plane(self,p,H,epi):
'''
Error function to make the difference between the first two projection matrices as small as possible
Note: assuming that the two views used for the initial reconstruction are not too far apart (and thus their projection matrices are almost equal) has proven to give good results
Args:
p: the reference plane (i.e. plane at infinity)
H: homography [e]x[F]
epi: the epipole
Returns:
error: difference between two projection matrices
'''
epi=np.reshape(epi,(3,1));
p=np.reshape(p,(1,4));
t=p[0,0:3]
t=np.reshape(t,(1,3))
error =sum(sum(abs(H+epi.dot(t)-p[0,3]*np.eye(3))));
return error
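# In other words, the minimisation drives H + e·p[0:3] towards p[3]·I, so that the
# left 3x3 block of P2 = [H + e·p | e] is as close as possible to a scaled identity
# and the first two projection matrices end up nearly equal.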
def estimate_initial_projection_matrices(self,H,epipole_2,p):
'''
Method to estimate the projection matrices for the two views (i.e. P1=[I | 0], P2=[H+epi1|e])
Args:
H: homography [e]x[F]
epipole_2: epipole in the 2nd view
p: the reference plane of the reconstruction (i.e. plane at infinity)
Returns:
P: projection matrices for these two views
'''
P=np.zeros((3,4,self._sequence_length))
P[:,:,0] = [[1,0,0,0],[0,1,0,0],[0,0,1,0]]; # P1=[I | 0], i.e. first frame aligned with world frame
epi_tmp=np.reshape(epipole_2,(3,1)); # P2=[H+epi1|e]
P[:,:3,1]=H+epi_tmp.dot(np.reshape(p,(1,3)))
P[:,3,1]= epipole_2
P[:,:,1]= P[:,:,1]/P[2,2,1]
return P
def get_initial_structure(self,feat_2D,P,epipole_1,epipole_2,F):
'''
Method to get an initial 3D structure (i.e. 3D point cloud), from the first two projection matrices through triangulation.
Args:
feat_2D: 2D feature coordinates for all images
P: projection matrices for all views (only the first two views are used)
epipole_1: view 1 epipole
epipole_2: view 2 epipole
F: fundamental matrix
Returns:
points3D: 3D point cloud
'''
number_of_features=feat_2D.shape[2]
points3D=np.zeros(shape=[number_of_features,4]);
for i in range(0,number_of_features):
if (self._triangulation_method==0):
x=self._eg_utils.triangulate_points(feat_2D[0,:,i],feat_2D[1,:,i],P[:,:,0],P[:,:,1]);
x=x[0:3]
elif (self._triangulation_method==1):
x=self._eg_utils.polynomial_triangulation(feat_2D[1,:,i],feat_2D[1,:,i],epipole_1,epipole_2,F,P[:,:,0],P[:,:,1])
x=x[0:3]/x[3] # normalize
if (self._optimize_triangulation==1):
#refine 3D point estimation (due to noise, lines of sight may not intersect perfectly). Minimizations should be carried out in the images
# and not in the projective 3D space, thus the reprojection error is used.
x=fmin(self.refine_3d_point,x, xtol=1e-25,ftol=1e-25, full_output=0, args=(P[:,:,0],P[:,:,1],feat_2D[0,:,i],feat_2D[1,:,i]))
points3D[i,:]=np.append(x,1)
return points3D
def refine_3d_point(self,point3D,P1,P2,view1_feat2D,view2_feat2D):
'''
Method to compute the reprojection error of a 3D point in two views
Args:
point3D: 3D point cloud
P1: projection matrix of view 1
P2: projection matrix of view 2
view1_feat2D: 2D feature coordinates in view 1
view2_feat2D: 2D feature coordinates in view 2
Returns:
error: the reprojection error
'''
point3D=np.append(point3D,1);
error=sum(self.compute_reprojection_error_point(P1,point3D,view1_feat2D)+self.compute_reprojection_error_point(P2,point3D,view2_feat2D))
return error
def projective_pose_estimation(self,feat_2D,P,points3D):
'''
Method to add views using an initial 3D structure, i.e. compute the projection matrices for all the additional views (the first two are already
estimated in previous steps)
Args:
feat_2D: 2D feature coordinates for all images
P: projection matrices
points3D: 3D point cloud
Returns:
P: projection matrices for all views
'''
number_of_features=feat_2D.shape[2]
AA=np.zeros(shape=[2*number_of_features,12]);
for i in range(2,self._sequence_length):
for j in range(0,number_of_features):
AA[2*j,0:4]=points3D[j];
AA[2*j,8:12]=-feat_2D[i,0,j]*points3D[j]
AA[2*j+1,4:8]=points3D[j];
AA[2*j+1,8:12]=-feat_2D[i,1,j]*points3D[j]
U, s, Vh = svd(AA)
V=np.transpose(Vh)
VV=V[0:12,11]
VV=VV/VV[10]
VV=np.delete(VV,10)
#refine the estimate for the i-th projection matrix
result=least_squares(self._eg_utils.refine_projection_matrix,VV, args=(points3D,feat_2D[i,:,:]))
VV=result.x
Pr=np.zeros(shape=[3,4]);
Pr[0,:]=VV[0:4]
Pr[1,:]=VV[4:8]
Pr[2,:]=np.append(np.append(VV[8:10],1),VV[10])
P[:,:,i]=Pr
return P
def bundle_adjustment(self,feat_2D,P,feat3D):
'''
Method to refine structure and motion, i.e. refine the projection matrices and 3D points using the reprojection error
Args:
feat_2D: 2D feature coordinates for all images
P: projection matrices
feat3D: 3D point cloud
Returns:
P: the refined projection matrices
feat3D: the refined 3D point cloud
error: the reprojection error
'''
number_of_features=feat_2D.shape[2]
#The vector to be optimized
X=np.reshape(P[:,:,0],(1,12));
# Append the projection matrices...
for i in range(1,self._sequence_length):
X=np.append(X,np.reshape(P[:,:,i],(1,12)))
X=np.delete(X,[10,22,(self._sequence_length-1)*12+10])
# ...and then append the 3D points
X=np.append(X,np.reshape(feat3D[:,0:3],number_of_features*self._sequence_length))
# Optimize using Levenberg-Marquardt
result=least_squares(self._eg_utils.overall_reprojection_error,X, max_nfev=1000,method='lm',args=([feat_2D]))
X=result.x
error=np.power(sum(self._eg_utils.overall_reprojection_error(X,feat_2D)),2)
#get the refined projection matrices from the optimal vector
for i in range(0,self._sequence_length):
P[:,:,i]=np.reshape(X[0+i*11:12+i*11],(3,4));
P[2,3,i]=P[2,2,i]
P[2,2,i]=1
#get the refined 3D coordinates from the optimal vector
feat3D[:,0:3]=np.reshape(X[self._sequence_length*11:self._sequence_length*11+self._sequence_length*number_of_features*3],(number_of_features,3))
Tp1= np.vstack([P[:,:,0],[0,0,0,1]]);
for i in range(0,self._sequence_length):
P[:,:,i]=P[:,:,i].dot(inv(Tp1))
feat3D=Tp1.dot(np.transpose(feat3D))
feat3D=np.transpose(feat3D/feat3D[3,:]);
return P,feat3D,error
def self_calibration(self,P):
'''
Self calibration using the procedure described in
<NAME>, <NAME> and <NAME>, "Self-Calibration and Metric Reconstruction in spite of Varying and Unknown Internal Camera Parameters", Proc. International Conference on Computer Vision, Narosa Publishing House, pp.90-95, 1998.
Args:
P: projection matrices
Returns:
Tm: transformation matrix that will transform from the projective space to metric space
K: camera intrinsic parameters for each view
error: the reprojection error
'''
# setup the system of equations
AAA=np.zeros(shape=[4*self._sequence_length-4,6]);
for i in range(0,self._sequence_length-1):
P_tmp=P[:,:,i+1]
AAA[0+4*i,:]=[(-np.power(P_tmp[1, 1],2)+np.power(P_tmp[0, 1],2)-np.power(P_tmp[1, 0],2)+np.power(P_tmp[0, 0],2)) ,(-2*P_tmp[1, 0]*P_tmp[1, 3]+2*P_tmp[0, 0]*P_tmp[0, 3]),(-2*P_tmp[1, 1]*P_tmp[1, 3]+2*P_tmp[0, 1]*P_tmp[0, 3]),(2*P_tmp[0, 2]*P_tmp[0, 3]-2*P_tmp[1, 2]*P_tmp[1, 3]),(-
|
np.power(P_tmp[1, 3],2)
|
numpy.power
|
"""
Name: Transfer Matrix
Author: <NAME>
Contact: <EMAIL>
Affiliation: Nara Institute of Science and Technology
"""
import argparse
import csv
import importlib.resources as pkg_resources
import multiprocessing
import numpy as np
import os
import sys
from ruamel.yaml import YAML
import scipy.constants as const
from scipy import interpolate
import time
from tqdm import tqdm
from pistachio.data import refractive_index_data
yaml = YAML()
class Layer:
"""
Contains information and functions associated with a single, planar layer.
Attributes
----------
material : string
Name of the material comprising this layer.
thickness : float
Thickness of the layer (in meters).
refractive_index : [...,...,...] 1d array
Real part of the complex refractive index.
extinction_coeff : [..., ..., ...] 1d array
Imaginary part of the complex refractive index.
"""
def __init__(self, material="Air", thickness=1.0e-3, num_points=100):
self.material = material
self.thickness = thickness
self.wavelengths = []
self.refractive_index = []
self.extinction_coeff = []
self.set_complex_refractive(self.refractive_index, self.extinction_coeff)
self.kx = []
self.kz = []
self.transfer_matrices = []
def set_complex_refractive(self, n_real=None, n_imag=None):
"""
Sets the complex refractive index.
Parameters
----------
n_real : [...,...,...] 1d array of floats
List of real part of the refractive index for different wavelengths.
n_imag : [...,...,...] 1d array of floats
List of imaginary part of the refractive index for different wavelengths.
Returns
-------
None
"""
n_complex = []
for ii in range(len(n_real)):
n_complex.append(n_real[ii] + 1j * n_imag[ii])
self.complex_refractive = np.array(n_complex)
def get_data_from_csv(self, refractive_filename):
"""
Extracts refractive index data associated with a layer material
from file downloaded from refractiveindex.info and sets wavelength,
real refractive index, and extinction coefficient (if present).
Parameters
----------
refractive_filename : str
Name of the .csv file in data/refractive_index_data
from which to extract refractive index data.
Returns
-------
None
Notes
----
The data from refractiveindex.info uses micrometers for wavelength units.
This function assumes the file is left as is and converts to meters.
"""
with pkg_resources.path(refractive_index_data, refractive_filename) as params:
params = os.path.abspath(params)
wavelen = []
n_real = []
n_imag = []
with open(params, 'r', encoding='utf-8') as csv_file:
csvreader = csv.reader(csv_file)
next(csvreader, None)
for row in csvreader:
# wavelen.append(float(row[0]) * 10**-6)
wavelen.append(float(row[0]))
n_real.append(float(row[1]))
try:
n_imag.append(float(row[2]))
except IndexError:
n_imag.append(0.0)
self.wavelengths = np.array(wavelen)
self.refractive_index = np.array(n_real)
self.extinction_coeff = np.array(n_imag)
self.set_complex_refractive(n_real, n_imag)
def make_datapoints(self, wavelengths):
"""
Makes a new set of data points from user-defined num_points and max/min wavelength,
and uses SciPy interpolation to match spacing and number of data points
for refractive index so that array lengths are consistent between layers.
SciPy's interpolate.interp1d uses x and y values to
generate a function whose argument uses interpolation to find the value of new points.
f(x) = y
See the documentation for details.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html#scipy.interpolate.interp1d
Parameters
----------
wavelengths: [..., ..., ...] 1d array
List of user-defined wavelengths of light (in meters)
via max, min wavelength in yaml config file.
Returns
-------
None
Notes
-----
Interpolation of the refractive index passes unit testing for values in the middle of the new
values for x (wavelengths) passed into the interpolation functions. Original refractive index
and extinction coefficients are usually given to about three decimal places. Interpolated
values sometimes preserve the original value to the full precision, but sometimes differ past
the first or second decimal place. This is something the user ought to be aware of.
The user should conduct independent tests of the accuracy of their own data.
Furthermore, data near the edges of x are unstable and deviate a few percent from the reference data.
When constructing new wavelength data using this function, DO NOT
use wavelengths outside of any reference refractive index data that you provide.
The function will extrapolate that data instead of raising an error, and the
new refractive indices may be unphysical.
"""
num_points = len(wavelengths)
if len(self.refractive_index) == 1:
self.refractive_index =
|
np.full(num_points, self.refractive_index)
|
numpy.full
|
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
def _rand_rectangle(H,W):
"""
pick a random rectangle in a (H,W) image
"""
dh = np.random.randint(int(H/16)+1, int(H/4))
dw = np.random.randint(int(W/16)+1, int(W/4))
top = np.random.randint(0, H-dh-1)
left = np.random.randint(0, W-dw-1)
return top, left, top+dh, left+dw
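# Usage sketch: top, left, bottom, right = _rand_rectangle(256, 256)
# dh and dw are drawn from [H/16 + 1, H/4) and [W/16 + 1, W/4), and top/left are
# chosen so that the rectangle always fits inside the (H, W) image.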
def _circle_mask_generator(imshape, intensity=3):
"""
Generator that yields circular masks
:imshape: 2-tuple of image dimensions
:intensity: poisson intensity for number of circles to draw
"""
xx, yy = np.meshgrid(np.arange(imshape[0]), np.arange(imshape[1]))
while True:
num_circles = np.random.poisson(intensity)
mask = np.zeros(imshape, dtype=np.float32)
for n in range(num_circles):
x0 = np.random.randint(0, imshape[1])
y0 = np.random.randint(0, imshape[0])
r = np.random.uniform(10, (imshape[0]+imshape[1])/8)
clip = (xx - x0)**2 +(yy - y0)**2 <= r**2
mask[clip] = 1
yield np.expand_dims(mask, 2)
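# Minimal usage sketch (assuming a square 64x64 image):
#   gen = _circle_mask_generator((64, 64))
#   mask = next(gen)   # float32 array of shape (64, 64, 1), 1 inside the sampled circles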
def _splotch_mask_generator(imshape):
"""
Generator that yields splotchy masks
:imshape: 2-tuple of image dimensions
"""
xx, yy = np.meshgrid(
|
np.arange(imshape[0])
|
numpy.arange
|
import random
import operator
import time
import numpy as np
import tensorflow as tf
from multiprocessing import Process, Pipe
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
def tf_sum(x, axis=None, keepdims=False):
axis = None if axis is None else [axis]
return tf.reduce_sum(x, axis=axis, keep_dims=keepdims)
class Pd(object):
"""
A particular probability distribution
"""
def flatparam(self):
raise NotImplementedError
def mode(self):
raise NotImplementedError
def neglogp(self, x):
# Usually it's easier to define the negative logprob
raise NotImplementedError
def kl(self, other):
raise NotImplementedError
def entropy(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def logp(self, x):
return - self.neglogp(x)
class PdType(object):
"""
Parametrized family of probability distributions
"""
def pdclass(self):
raise NotImplementedError
def pdfromflat(self, flat):
return self.pdclass()(flat)
def param_shape(self):
raise NotImplementedError
def sample_shape(self):
raise NotImplementedError
def sample_dtype(self):
raise NotImplementedError
def param_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name)
def sample_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name)
class DiagGaussianPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return DiagGaussianPd
def param_shape(self):
return [2*self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.float32
class DiagGaussianPd(Pd):
def __init__(self, flat):
self.flat = flat
mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat)
self.mean = mean
self.logstd = logstd
self.std = tf.exp(logstd)
def flatparam(self):
return self.flat
def mode(self):
return self.mean
def neglogp(self, x):
return 0.5 * tf_sum(tf.square((x - self.mean) / self.std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf_sum(self.logstd, axis=-1)
def kl(self, other):
assert isinstance(other, DiagGaussianPd)
return tf_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1)
def entropy(self):
return tf_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)
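# Closed forms used by neglogp and entropy above, written out:
#   -log p(x) = 0.5 * sum_i ((x_i - mu_i) / sigma_i)^2 + 0.5 * d * log(2*pi) + sum_i log(sigma_i)
#   H         = sum_i (log(sigma_i) + 0.5 * log(2*pi*e))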
def sample(self):
return self.mean + self.std * tf.random_normal(tf.shape(self.mean))
@classmethod
def fromflat(cls, flat):
return cls(flat)
class VecEnv(object):
"""
Vectorized environment base class
"""
def step(self, vac):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
raise NotImplementedError
def reset(self):
"""
Reset all environments
"""
raise NotImplementedError
def close(self):
pass
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
self.action_space = env.action_space
self.observation_space = env.observation_space
self.ts = np.zeros(len(self.envs), dtype='int')
def step(self, action_n):
results = [env.step(a) for (a,env) in zip(action_n, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.ts += 1
for (i, done) in enumerate(dones):
if done:
obs[i] = self.envs[i].reset()
self.ts[i] = 0
return np.array(obs), np.array(rews), np.array(dones), infos
def reset(self):
results = [env.reset() for env in self.envs]
return np.array(results)
def render(self):
return self.envs[0].render()
@property
def num_envs(self):
return len(self.envs)
class DummyVecTestEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
self.action_space = env.action_space
self.observation_space = env.observation_space
self.ts = np.zeros(len(self.envs), dtype='int')
def step(self, action_n):
results = [env.step(a) for (a,env) in zip(action_n, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
return np.array(obs), np.array(rews), np.array(dones), infos
def reset(self):
results = [env.reset() for env in self.envs]
return np.array(results)
def render(self):
return self.envs[0].render()
@property
def num_envs(self):
return len(self.envs)
class VecNormalize(VecEnv):
"""
Vectorized environment base class
"""
def __init__(self, venv, ob=True, ret=False, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
self.venv = venv
self._observation_space = self.venv.observation_space
self._action_space = venv.action_space
self.ob_rms = RunningMeanStd(shape=self._observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step(self, vac):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
obs, rews, news, infos = self.venv.step(vac)
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
"""
Reset all environments
"""
obs = self.venv.reset()
return self._obfilt(obs)
@property
def action_space(self):
return self._action_space
@property
def observation_space(self):
return self._observation_space
def close(self):
self.venv.close()
@property
def num_envs(self):
return self.venv.num_envs
class VecNormalizeTest(VecEnv):
def __init__(self, venv, mean, var, clipob=10., epsilon=1e-8):
self.venv = venv
self._observation_space = self.venv.observation_space
self._action_space = venv.action_space
self.mean = mean
self.var = var
self.clipob = clipob
self.epsilon = epsilon
def render(self):
return self.venv.render()
def step(self, vac):
obs, rews, dones, infos = self.venv.step(vac)
obs = self._obfilt(obs)
return obs, rews, dones, infos
def _obfilt(self, obs):
obs = np.clip((obs - self.mean) / np.sqrt(self.var + self.epsilon), -self.clipob, self.clipob)
return obs
def reset(self):
obs = self.venv.reset()
return self._obfilt(obs)
@property
def action_space(self):
return self._action_space
@property
def observation_space(self):
return self._observation_space
def close(self):
self.venv.close()
@property
def num_envs(self):
return self.venv.num_envs
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.action_space, env.observation_space))
else:
raise NotImplementedError
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns):
"""
envs: list of gym environments to run in subprocesses
"""
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
self.action_space, self.observation_space = self.remotes[0].recv()
def step(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
@property
def num_envs(self):
return len(self.remotes)
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
Parameters
---------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max)
must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows to sample indexes according to the discrete
probability efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
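# A minimal usage sketch (hypothetical values; capacity must be a power of two):
#   tree = SumSegmentTree(4)
#   for i, v in enumerate([0.1, 0.2, 0.3, 0.4]):
#       tree[i] = v
#   tree.sum(0, 3)                  # 0.1 + 0.2 + 0.3 = 0.6 (end index is exclusive here)
#   tree.find_prefixsum_idx(0.45)   # -> 2, the highest index whose prefix sum stays <= 0.45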
class ReplayBuffer(object):
def __init__(self, size):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
data = (obs_t, action, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
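# Sketch of the circular-buffer behaviour (hypothetical transitions, size=2):
#   buf = ReplayBuffer(2)
#   buf.add(o0, a0, r0, o1, False)
#   buf.add(o1, a1, r1, o2, False)
#   buf.add(o2, a2, r2, o3, True)   # wraps around and overwrites the oldest entry (index 0)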
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t),
|
np.array(actions)
|
numpy.array
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: vo_centering_iterative
:platform: Unix
:synopsis: A plugin to find the center of rotation per frame
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import math
import logging
import numpy as np
import scipy.ndimage as ndi
import scipy.ndimage.filters as filter
import pyfftw.interfaces.scipy_fftpack as fft
from scipy import signal
from savu.plugins.utils import register_plugin
from savu.data.plugin_list import CitationInformation
from savu.plugins.filters.base_filter import BaseFilter
from savu.plugins.driver.iterative_plugin import IterativePlugin
# :u*param search_area: Search area in pixels from horizontal approximate \
# centre of the image. Default: (-50, 50).
@register_plugin
class VoCenteringIterative(BaseFilter, IterativePlugin):
"""
A plugin to calculate the centre of rotation using the Vo Method
:param ratio: The ratio between the size of object and FOV of \
the camera. Default: 0.5.
:param row_drop: Drop lines around vertical center of the \
mask. Default: 20.
:param search_radius: Use for fine searching. Default: 6.
:param step: Step of fine searching. Default: 0.5.
:param expand_by: The number of pixels to expand the search region by \
on each iteration. Default: 5
:param boundary_distance: Accepted distance of minima from the boundary of\
the listshift in the coarse search. Default: 3.
:u*param preview: A slice list of required frames (sinograms) to use in \
the calulation of the centre of rotation (this will not reduce the data \
size for subsequent plugins). Default: [].
:param datasets_to_populate: A list of datasets which require this \
information. Default: [].
:param out_datasets: The default \
names. Default: ['cor_raw','cor_fit', 'reliability'].
:u*param start_pixel: The approximate centre. If value is None, take the \
value from .nxs file else set to image centre. Default: None.
"""
def __init__(self):
super(VoCenteringIterative, self).__init__("VoCenteringIterative")
self.search_area = (-20, 20)
self.peak_height_min = 50000 # arbitrary
self.min_dist = 3 # min distance deemed acceptable from boundary
self.expand_by = 5 # expand the search region by this amount
self.list_shift = None
self.warning_level = 0
self.final = False
self.at_boundary = False
self.list_metric = []
self.expand_direction = None
def _create_mask(self, Nrow, Ncol, obj_radius):
du, dv = 1.0/Ncol, (Nrow-1.0)/(Nrow*2.0*math.pi)
cen_row, cen_col = int(np.ceil(Nrow/2)-1), int(np.ceil(Ncol/2)-1)
drop = self.parameters['row_drop']
mask = np.zeros((Nrow, Ncol), dtype=np.float32)
for i in range(Nrow):
num1 = np.round(((i-cen_row)*dv/obj_radius)/du)
p1, p2 = (np.clip(np.sort((-num1+cen_col, num1+cen_col)),
0, Ncol-1)).astype(int)
mask[i, p1:p2+1] = np.ones(p2-p1+1, dtype=np.float32)
if drop < cen_row:
mask[cen_row-drop:cen_row+drop+1, :] = \
np.zeros((2*drop + 1, Ncol), dtype=np.float32)
mask[:, cen_col-1:cen_col+2] = np.zeros((Nrow, 3), dtype=np.float32)
return mask
def _get_start_shift(self, centre):
if self.parameters['start_pixel'] is not None:
shift = centre - int(self.parameters['start_pixel']/self.downlevel)
else:
in_mData = self.get_in_meta_data()[0]
shift = centre - in_mData['centre'] if 'centre' in \
in_mData.get_dictionary().keys() else 0
return int(shift)
def _coarse_search(self, sino, list_shift):
# search minsearch to maxsearch in 1 pixel steps
list_metric = np.zeros(len(list_shift), dtype=np.float32)
(Nrow, Ncol) = sino.shape
# check angles to determine if a sinogram should be chopped off.
# Copy the sinogram and flip left right, to make a full [0:2Pi] sino
sino2 = np.fliplr(sino[1:])
# This image is used for compensating the shift of sino2
compensateimage = np.zeros((Nrow-1, Ncol), dtype=np.float32)
# Start coarse search in which the shift step is 1
compensateimage[:] = np.flipud(sino)[1:]
mask = self._create_mask(2*Nrow-1, Ncol,
0.5*self.parameters['ratio']*Ncol)
count = 0
for i in list_shift:
sino2a = np.roll(sino2, i, axis=1)
if i >= 0:
sino2a[:, 0:i] = compensateimage[:, 0:i]
else:
sino2a[:, i:] = compensateimage[:, i:]
list_metric[count] = np.sum(
np.abs(fft.fftshift(fft.fft2(
|
np.vstack((sino, sino2a))
|
numpy.vstack
|
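The coarse-search loop above scores each candidate shift by summing the masked magnitude of the 2-D FFT of the stacked [0, 2pi] sinogram. Below is a self-contained sketch of that metric only, with numpy.fft standing in for pyfftw and a random boolean array standing in for the Fourier mask (both are simplifications, not the plugin's code):

import numpy as np

rng = np.random.default_rng(0)
sino = rng.random((32, 64)).astype(np.float32)  # half-circle sinogram (Nrow x Ncol)
sino2a = np.fliplr(sino[1:])                    # flipped copy, as in the plugin
mask = rng.random((2 * 32 - 1, 64)) > 0.5       # stand-in for the _create_mask output
metric = np.sum(np.abs(np.fft.fftshift(np.fft.fft2(np.vstack((sino, sino2a))))) * mask)
print(metric)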
import numpy
# import autograd.numpy as numpy # Thinly-wrapped numpy
# from autograd import value_and_grad # The only autograd function you may ever need
# from autograd import grad
#from autograd.differential_operators import jacobian
import scipy.stats
import scipy.special
import shared.statHelper as statHelper
import scipy.optimize
import shared.analyzeHelper as analyzeHelper
import time
from collections import defaultdict
import pickle
import showResultsText
import rpy2
print("rpy2 version = " + rpy2.__version__)
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
ro.r('source(\'imports.R\')')
ro.r('set.seed(8985331)') # set a fixed random seed to make results of glasso, stars etc reproducible
# import warnings
# import scipy.linalg
# from SpikeAndSlabNonContinuous_MCMC import SpikeAndSlabProposed as SpikeAndSlabProposed_nonContinuousMCMC
import samplingHelper
class SpikeAndSlabProposedModelSearch:
allDelta = [0.8, 0.5, 0.05, 0.01, 0.001, 0.0]
# hyper-parameters for response variance
ETA_SQUARE_R = 1.0
NU_R = 1.0
# hyper-parameters for the relevant regression coefficients
ETA_SQUARE_1 = 100.0
NU_1 = 1.0
def __init__(self, allObservedResponses, allObservedCovariates, delta):
self.delta = delta
self.nu1 = SpikeAndSlabProposedModelSearch.NU_1
self.etaSquare1 = SpikeAndSlabProposedModelSearch.ETA_SQUARE_1
if self.delta > 0.0:
deltaToSigmaSquare0 = pickle.load(open( "deltaToSigmaSquare0", "rb" ) )
self.sigmaSquare0 = deltaToSigmaSquare0[delta]
else:
self.sigmaSquare0 = None
self.fullp = allObservedCovariates.shape[1]
self.y = allObservedResponses
self.n = allObservedResponses.shape[0]
self.setX(allObservedCovariates)
return
@staticmethod
def getSigmaSquareR_reducedModel(allObservedResponses, allObservedCovariates, delta, selectedVars, NUMBER_OF_MCMC_SAMPLES_TOTAL):
allObservedCovariates_sub = allObservedCovariates[:, selectedVars]
subModel = SpikeAndSlabProposedModelSearch(allObservedResponses, allObservedCovariates_sub, delta)
return subModel.getSigmaSquareR_fullModel_fromCurrentModel(NUMBER_OF_MCMC_SAMPLES_TOTAL)
# checked
# calculates p(beta = delta | relevantVariable, nu1, etaSquare1)
@staticmethod
def getBetaGivenNu1EtaSquare1(delta, NUMBER_OF_SAMPLES):
allLogProbs = numpy.zeros(NUMBER_OF_SAMPLES)
for i in range(NUMBER_OF_SAMPLES):
sigmaSquare = samplingHelper.getScaledInvChiSquareSample(nu = SpikeAndSlabProposedModelSearch.NU_1, etaSquare = SpikeAndSlabProposedModelSearch.ETA_SQUARE_1, numberOfSamples = 1)[0]
allLogProbs[i] = - ((delta ** 2) / (2.0 * sigmaSquare)) - samplingHelper.exactLogNormalizationConstant_O_static(delta, sigmaSquare)
logProbEstimate = scipy.special.logsumexp(allLogProbs) - numpy.log(NUMBER_OF_SAMPLES)
return logProbEstimate
@staticmethod
def getSigmaSquare0(delta, NUMBER_OF_SAMPLES):
sigmaSquare0 = 1.0
logProbDelta_relevant = SpikeAndSlabProposedModelSearch.getBetaGivenNu1EtaSquare1(delta, NUMBER_OF_SAMPLES)
logProbAtDelta_notRelevant = samplingHelper.truncatedNormal_not_relevant_logDensity(delta, sigmaSquare0, delta)
print("logProbDelta_relevant = ", logProbDelta_relevant)
print("logProbAtDelta_notRelevant = ", logProbAtDelta_notRelevant)
ERROR_TOLERANCE = 0.001
previousDirectionDown = True
assert(logProbAtDelta_notRelevant > logProbDelta_relevant)
base = 1.0
iterations = 0
while numpy.abs(logProbDelta_relevant - logProbAtDelta_notRelevant) > ERROR_TOLERANCE:
if logProbAtDelta_notRelevant > logProbDelta_relevant:
if not previousDirectionDown:
base = base / 2.0
previousDirectionDown = True
sigmaSquare0 = sigmaSquare0 / (1.0 + base)
else:
if previousDirectionDown:
base = base / 2.0
previousDirectionDown = False
sigmaSquare0 = sigmaSquare0 * (1.0 + base)
logProbAtDelta_notRelevant = samplingHelper.truncatedNormal_not_relevant_logDensity(delta, sigmaSquare0, delta)
iterations += 1
if iterations >= 1000:
print("ERROR: DID NOT CONVERGE !")
assert(False)
print("FINAL ESTIMATE:")
print("logProbDelta_relevant = ", logProbDelta_relevant)
print("logProbAtDelta_notRelevant = ", logProbAtDelta_notRelevant)
print("sigmaSquare0 = ", sigmaSquare0)
return sigmaSquare0
def setX(self, allObservedCovariates):
self.p = allObservedCovariates.shape[1]
assert(allObservedCovariates.shape[0] == self.n)
self.X = allObservedCovariates
self.XTX = numpy.matmul(self.X.transpose(), self.X)
self.yX = numpy.matmul(self.y.transpose(), self.X)
self.invXTX_regularized = numpy.linalg.inv(self.XTX + numpy.eye(self.p))
return
# checked2
def sampleFullBeta(self, z, sigmaSquareR, slabVariance):
fullBeta = numpy.zeros(self.p)
if numpy.sum(z) == 0:
return fullBeta
fullX = self.X[:, z == 1]
mu, sigma, _ = SpikeAndSlabProposedModelSearch.getFullNormalParameters(self.y, fullX, sigmaSquareR, slabVariance)
betaNonZero = numpy.random.multivariate_normal(mu, sigma)
fullBeta[z == 1] = betaNonZero
return fullBeta
# checked2
# gets the parameters of the normal which specifies p(beta | y, fullX, sigmaSquareR, slabVariance)
@staticmethod
def getFullNormalParameters(y, fullX, sigmaSquareR, slabVariance):
invSigma = (1.0 / sigmaSquareR) * numpy.matmul(fullX.transpose(), fullX) + (1.0 / slabVariance) * numpy.eye(fullX.shape[1])
sigma = numpy.linalg.inv(invSigma)
mu = (1.0 / sigmaSquareR) * numpy.matmul( numpy.matmul(y.transpose(), fullX), sigma)
return mu, sigma, invSigma
# reading checked2
# returns the mean and variance of p(beta_{s+1} | beta_1, ..., beta_s, sigmaSquareR, slabVariance)
# j specifies the index of the new beta coefficient (i.e. corresponds to s+1)
# conditionedBeta corresponds to beta_1, ..., beta_s
def getNormalParamertersForU(self, z, j, fullBeta, sigmaSquareR, slabVariance):
zWithoutJ = numpy.copy(z)
zWithoutJ[j] = 0
conditionedBeta = fullBeta[zWithoutJ == 1]
sPlusOneIndicies = numpy.where(zWithoutJ == 1)[0]
sPlusOneIndicies = numpy.append([j], sPlusOneIndicies)
s = numpy.sum(zWithoutJ)
assert(conditionedBeta.shape[0] == s)
assert(len(sPlusOneIndicies) == s + 1)
fullX = self.X[:, sPlusOneIndicies]
assert(fullX.shape[1] == len(sPlusOneIndicies))
mu, sigma, invSigma = SpikeAndSlabProposedModelSearch.getFullNormalParameters(self.y, fullX, sigmaSquareR, slabVariance)
invSimga22 = numpy.linalg.inv( sigma[1:(s+1), 1:(s+1)] )
sigma12 = sigma[0,1:(s+1)]
tmpVec = numpy.matmul(sigma12, invSimga22)
newMean = mu[0] + numpy.dot(tmpVec, conditionedBeta - mu[1:(s+1)])
newVariance = 1.0 / invSigma[0,0]
assert(newVariance > 0.0)
return newMean, newVariance
# reading checked2
# calculates log p(beta, sigmaSquareR, sigmaSquare, y | X, S)
def getJointLogProb_forRJMCMC(self, z, fullBeta, sigmaSquareR, slabVariance):
assert(sigmaSquareR > 0.0)
restrictedX = self.X[:, z == 1]
restrictedBeta = fullBeta[z == 1]
s = restrictedBeta.shape[0]
assert(s == numpy.sum(z))
jointLogProb = - (float(self.n) / 2.0) * numpy.log(2.0 * numpy.pi)
jointLogProb -= (((SpikeAndSlabProposedModelSearch.NU_R + self.n) / 2.0) + 1.0) * numpy.log(sigmaSquareR)
jointLogProb -= (1.0 / (2.0 * sigmaSquareR)) * (SpikeAndSlabProposedModelSearch.NU_R * SpikeAndSlabProposedModelSearch.ETA_SQUARE_R + numpy.sum(numpy.square(self.y - numpy.matmul(restrictedX, restrictedBeta))))
jointLogProb -= s * numpy.log(2.0 * numpy.pi * slabVariance)
jointLogProb -= (1.0 / (2.0 * slabVariance)) * numpy.sum(numpy.square(restrictedBeta))
# add prior on z
jointLogProb += SpikeAndSlabProposedModelSearch.getLogPriorZ(s, self.fullp)
return jointLogProb
# reading checked2
# calculates log p(beta, sigmaSquareR, sigmaSquare, y | X, S)
def getJointLogProb_forSimple(self, z, restrictedBeta, sigmaSquareR, slabVariance):
assert(sigmaSquareR > 0.0)
assert(numpy.sum(z) == restrictedBeta.shape[0])
restrictedX = self.X[:, z == 1]
s = restrictedBeta.shape[0]
assert(s == numpy.sum(z))
jointLogProb = - (float(self.n) / 2.0) * numpy.log(2.0 * numpy.pi)
jointLogProb -= (((SpikeAndSlabProposedModelSearch.NU_R + self.n) / 2.0) + 1.0) * numpy.log(sigmaSquareR)
jointLogProb -= (1.0 / (2.0 * sigmaSquareR)) * (SpikeAndSlabProposedModelSearch.NU_R * SpikeAndSlabProposedModelSearch.ETA_SQUARE_R + numpy.sum(numpy.square(self.y - numpy.matmul(restrictedX, restrictedBeta))))
jointLogProb -= s * numpy.log(2.0 * numpy.pi * slabVariance)
jointLogProb -= (1.0 / (2.0 * slabVariance)) * numpy.sum(numpy.square(restrictedBeta))
# add prior on z
jointLogProb += SpikeAndSlabProposedModelSearch.getLogPriorZ(s, self.fullp)
return jointLogProb
# reading checked2
# use this if delta = 0.0
# def sampleZjConditionedOnRest_RJMCMC(self, sigmaSquareR, slabVariance, fullBeta, z, j):
#
# zWithoutJ = numpy.copy(z)
# zWithoutJ[j] = 0
# logJointProbWithoutJ = self.getJointLogProb_forRJMCMC(zWithoutJ, fullBeta, sigmaSquareR, slabVariance)
#
# uMean, uVariance = self.getNormalParamertersForU(z, j, fullBeta, sigmaSquareR, slabVariance)
#
# if z[j] == 1:
# # try to traverse to model with z[j] = 0
# logJointProbWithJ = self.getJointLogProb_forRJMCMC(z, fullBeta, sigmaSquareR, slabVariance)
# logG_toHigherDimension = scipy.stats.norm.logpdf(fullBeta[j], loc=uMean, scale=numpy.sqrt(uVariance))
#
# logRatio = (logJointProbWithoutJ + logG_toHigherDimension) - logJointProbWithJ
#
# self.totalSamplingCount_RJMCMC += 1.0
# self.totalAcceptanceRate_RJMCMC += numpy.min([1.0, numpy.exp(logRatio)])
#
# if numpy.random.uniform() < numpy.exp(logRatio):
# return 0
# else:
# return 1
#
# else:
# # try to traverse to model with z[j] = 1
# fullBeta[j] = scipy.stats.norm.rvs(loc=uMean, scale=numpy.sqrt(uVariance))
# logG_toHigherDimension = scipy.stats.norm.logpdf(fullBeta[j], loc=uMean, scale=numpy.sqrt(uVariance))
#
# zWithJ = numpy.copy(z)
# zWithJ[j] = 1
# logJointProbWithJ = self.getJointLogProb_forRJMCMC(zWithJ, fullBeta, sigmaSquareR, slabVariance)
#
# logRatio = logJointProbWithJ - (logJointProbWithoutJ + logG_toHigherDimension)
#
# self.totalSamplingCount_RJMCMC += 1.0
# self.totalAcceptanceRate_RJMCMC += numpy.min([1.0, numpy.exp(logRatio)])
#
# if numpy.random.uniform() < numpy.exp(logRatio):
# return 1
# else:
# return 0
# returns p(z, sigmaSquareR, slabVariance, y)
# def getJointLogProbZ_Sigma_y(self, sigmaSquareR, slabVariance, z):
# assert(self.delta == 0.0)
#
# # print("z = ", z)
# # print("self.X.shape = ", self.X.shape)
#
# restrictedX = self.X[:, z == 1]
# mu, _, invSigma = SpikeAndSlabProposedModelSearch.getFullNormalParameters(self.y, restrictedX, sigmaSquareR, slabVariance)
#
# jointLogProb = self.getJointLogProb_forSimple(z, mu, sigmaSquareR, slabVariance)
# posteriorLogProb = -0.5 * (invSigma.shape[0] * numpy.log(2.0 * numpy.pi) - idcHelper.getLogDet(invSigma))
#
# # posteriorLogProb = scipy.stats.multivariate_normal.logpdf(mu, mean=mu, cov=sigma)
# # print("mu = ", mu)
# # print("posteriorLogProb = ", posteriorLogProb)
# # posteriorLogProb = -0.5 * (sigma.shape[0] * numpy.log(2.0 * numpy.pi) - idcHelper.getLogDet(invSigma))
# # print("posteriorLogProb = ", posteriorLogProb)
# # assert(False)
#
# return jointLogProb - posteriorLogProb
# def sampleZjConditionedOnRest_delta0_simple(self, sigmaSquareR, slabVariance, z, j):
#
# unnormalizedLogProbZ = numpy.zeros(2)
#
# zWithoutJ = numpy.copy(z)
# zWithoutJ[j] = 0
# unnormalizedLogProbZ[0] = self.getJointLogProbZ_Sigma_y(sigmaSquareR, slabVariance, zWithoutJ)
#
# zWithJ = numpy.copy(z)
# zWithJ[j] = 1
# unnormalizedLogProbZ[1] = self.getJointLogProbZ_Sigma_y(sigmaSquareR, slabVariance, zWithJ)
#
# logNormalization = scipy.special.logsumexp(unnormalizedLogProbZ)
# zProbs = numpy.exp(unnormalizedLogProbZ - logNormalization)
# newZj = numpy.random.choice(numpy.arange(2), p=zProbs)
#
# return newZj
def sampleZ(self, NUMBER_OF_MCMC_SAMPLES_TOTAL):
invEst = numpy.linalg.inv(self.X.transpose() @ self.X + 1.0 * numpy.eye(self.p))
ridgeBetaEst = (invEst @ self.X.transpose()) @ self.y
z = numpy.zeros(self.p, dtype = int)
z[numpy.absolute(ridgeBetaEst) > self.delta] = 1
beta = ridgeBetaEst
# get a sparse initial solution in order to ensure faster convergence
maxNrInitialSelectedVars = int(self.p * 0.01)
if maxNrInitialSelectedVars > 0 and numpy.sum(z) > maxNrInitialSelectedVars:
largestIds = numpy.argsort(-numpy.absolute(ridgeBetaEst))[0:maxNrInitialSelectedVars]
z = numpy.zeros(self.p, dtype = int)
z[largestIds] = 1
beta[z == 0] = 0
sigmaSquareR = numpy.mean(numpy.square(self.y - self.X @ beta))
print("beta = ")
print(beta)
print("sigmaSquareR = ")
print(sigmaSquareR)
print("z = ")
print(z)
BURN_IN_SAMPLES = int(0.1 * NUMBER_OF_MCMC_SAMPLES_TOTAL)
assert(BURN_IN_SAMPLES >= 1)
NUMBER_OF_MCMC_SAMPLES_USED = NUMBER_OF_MCMC_SAMPLES_TOTAL - BURN_IN_SAMPLES
print("BURN_IN_SAMPLES = ", BURN_IN_SAMPLES)
print("NUMBER_OF_MCMC_SAMPLES_USED = ", NUMBER_OF_MCMC_SAMPLES_USED)
posteriorAssignments = numpy.zeros((NUMBER_OF_MCMC_SAMPLES_USED, self.p))
averagePosteriorBeta = numpy.zeros(self.p)
averageSigmaSquareR = 0.0
spikeAndSlabVar = numpy.asarray([self.sigmaSquare0, self.etaSquare1])
print("spikeAndSlabVar = ", spikeAndSlabVar)
for mcmcIt in range(NUMBER_OF_MCMC_SAMPLES_TOTAL):
print("mcmcIt = ", mcmcIt)
# if self.delta > 0:
for j in range(self.p):
# sample p(z_j | beta, z_-j, y, sigmaSquareR, X)
z[j] = self.sampleZjConditionedOnRest(sigmaSquareR, spikeAndSlabVar, beta, z, j)
# sample p(beta_j | beta_-j, z, y, sigmaSquareR, X)
meanTilde, sigmaSquareTilde, _ = self.getMeanAndVarOfBetaConditional(sigmaSquareR, spikeAndSlabVar, beta, z, j)
beta[j] = SpikeAndSlabProposedModelSearch.sampleTruncatedBeta(self.delta, meanTilde, sigmaSquareTilde, z[j] == 1)
if self.delta == 0:
# safety check for delta == 0
assert(numpy.all(beta[z == 0] == 0) and numpy.all(beta[z == 1] != 0))
# sample p(sigmaSquareR | beta, z, y, X)
etaSquareForsigmaSquareR = (SpikeAndSlabProposedModelSearch.NU_R * SpikeAndSlabProposedModelSearch.ETA_SQUARE_R + numpy.sum(numpy.square(self.y - numpy.matmul(self.X, beta)))) / (SpikeAndSlabProposedModelSearch.NU_R + self.n)
sigmaSquareR = samplingHelper.getScaledInvChiSquareSample(nu = SpikeAndSlabProposedModelSearch.NU_R + self.n, etaSquare = etaSquareForsigmaSquareR, numberOfSamples = 1)[0]
# sample p(sigmaSquare_0 | beta, z, y, X) and p(sigmaSquare_1 | beta, z, y, X)
spikeAndSlabVar[1] = self.sampleSigmaSquareConditional(True, beta, z)
print("slab variance = ", spikeAndSlabVar[1])
if mcmcIt >= BURN_IN_SAMPLES:
posteriorAssignments[mcmcIt - BURN_IN_SAMPLES] = z
averagePosteriorBeta += beta
averageSigmaSquareR += sigmaSquareR
averagePosteriorBeta = averagePosteriorBeta / float(NUMBER_OF_MCMC_SAMPLES_USED)
averageSigmaSquareR = averageSigmaSquareR / float(NUMBER_OF_MCMC_SAMPLES_USED)
# print("posteriorAssignments = ")
# print(posteriorAssignments)
# print("averagePosteriorBeta = ")
# print(averagePosteriorBeta)
countAssignments = defaultdict(lambda: 0)
for mcmcIt in range(NUMBER_OF_MCMC_SAMPLES_USED):
nonZeroPos = numpy.where(posteriorAssignments[mcmcIt] != 0)[0]
nonZeroPosAsStr = [str(num) for num in nonZeroPos]
nonZeroPosAsStr = " ".join(nonZeroPosAsStr)
countAssignments[nonZeroPosAsStr] += 1
sortedAssignmentsByFrequency = sorted(countAssignments.items(), key=lambda kv: kv[1], reverse = True)
print("sortedAssignmentsByFrequency = ")
print(sortedAssignmentsByFrequency)
mostFrequentAssignment = showResultsText.getNumpyArray(sortedAssignmentsByFrequency[0][0])
# print("mostFrequentAssignment = ", mostFrequentAssignment)
# see "Optimal predictive model selection", 2004
assignmentProbs = numpy.mean(posteriorAssignments, axis = 0)
medianProbabilityModel = numpy.where(assignmentProbs > 0.5)[0]
# print("assignmentProbs = ", assignmentProbs)
# print("medianProbabilityModel = ", medianProbabilityModel)
return mostFrequentAssignment, medianProbabilityModel, assignmentProbs, averagePosteriorBeta, averageSigmaSquareR, sortedAssignmentsByFrequency
# BRAND-NEW CHECKED
# get mean and variance of p(beta_j | beta_-j, z, y, sigmaSquareR, X)
def getMeanAndVarOfBetaConditional(self, sigmaSquareR, spikeAndSlabVar, beta, z, j):
if self.delta == 0 and z[j] == 0:
assert(spikeAndSlabVar[z[j]] is None)
return None, None, None
minusJ = numpy.delete(numpy.arange(self.p), j)
betaMinusJ = beta[minusJ]
XminusJ = self.X[:, minusJ]
yTilde = self.y - numpy.matmul(XminusJ, betaMinusJ)
xJ = self.X[:,j]
yTildeTimesXj = numpy.dot(yTilde, xJ)
sigmaSquareTilde = sigmaSquareR / (numpy.sum(numpy.square(xJ)) + (sigmaSquareR / spikeAndSlabVar[z[j]]))
meanTilde = (sigmaSquareTilde / sigmaSquareR) * yTildeTimesXj
additionalStatisticForCondZ = (meanTilde / (2.0 * sigmaSquareR)) * yTildeTimesXj
return meanTilde, sigmaSquareTilde, additionalStatisticForCondZ
# BRAND-NEW CHECKED
# sample p(z_j | beta, z_-j, y, sigmaSquareR, X)
def sampleZjConditionedOnRest(self, sigmaSquareR, spikeAndSlabVar, beta, originalZ, j):
unnormalizedLogProbZ = numpy.zeros(2)
for sspInd in [0,1]:
z = numpy.copy(originalZ)
z[j] = sspInd
if self.delta > 0.0 or sspInd == 1:
meanTilde, sigmaSquareTilde, additionalStatisticForCondZ = self.getMeanAndVarOfBetaConditional(sigmaSquareR, spikeAndSlabVar, beta, z, j)
unnormalizedLogProbZ[sspInd] += additionalStatisticForCondZ
unnormalizedLogProbZ[sspInd] += SpikeAndSlabProposedModelSearch.getTruncatedNormalLogConstant(sspInd, self.delta, sigmaSquareTilde, meanTilde)
unnormalizedLogProbZ[sspInd] -= SpikeAndSlabProposedModelSearch.getTruncatedNormalLogConstant(sspInd, self.delta, spikeAndSlabVar[sspInd], 0.0)
else:
assert(sspInd == 0 and self.delta == 0.0)
# nothing to do
# add p(z)
unnormalizedLogProbZ[sspInd] += SpikeAndSlabProposedModelSearch.getLogPriorZ(numpy.sum(z), self.fullp)
if numpy.all(unnormalizedLogProbZ == float("-inf")):
print(unnormalizedLogProbZ)
assert(False)
logNormalization = scipy.special.logsumexp(unnormalizedLogProbZ)
zProbs = numpy.exp(unnormalizedLogProbZ - logNormalization)
newZj = numpy.random.choice(numpy.arange(2), p=zProbs)
# print("unnormalizedLogProbZ = ")
# print(unnormalizedLogProbZ)
# print("zProbs = ", zProbs)
# print("numpy.arange(2) = ", numpy.arange(2))
# print("newZj = ", newZj)
return newZj
# BRAND-NEW CHECKED
@staticmethod
def getTruncatedNormalLogConstant(sspIndicator, delta, sigmaSquare, mean):
assert(sspIndicator == 0 or sspIndicator == 1)
if sspIndicator == 1:
return samplingHelper.exactLogNormalizationConstant_O_static(delta, sigmaSquare, mean)
else:
# print("sigmaSquare = ", str(sigmaSquare) + ", mean = " + str(mean))
return samplingHelper.exactLogNormalizationConstant_I_static(delta, sigmaSquare, mean)
# BRAND-NEW CHECKED
# sample p(beta_j | beta_-j, z, y, sigmaSquareR, X)
@staticmethod
def sampleTruncatedBeta(delta, mean, sigmaSquare, relevant):
if relevant:
# RELEVANT
if delta == 0.0:
return scipy.stats.norm.rvs(loc=mean, scale=numpy.sqrt(sigmaSquare))
# newBetaJ = sampleTruncatedNormalNaive(True, mean, sigmaSquareTilde)
newBetaJ = SpikeAndSlabProposedModelSearch.sampleTruncatedNormalAdvanced_outerInterval(delta, mean, sigmaSquare)
# print("j = ", j)
# print("newBetaJ = ", newBetaJ)
assert(newBetaJ <= -delta or newBetaJ >= delta)
return newBetaJ
else:
# NOT RELEVANT
if delta == 0.0:
return 0.0
ro.globalenv['sd'] = numpy.sqrt(sigmaSquare)
ro.globalenv['mean'] = mean
ro.globalenv['a'] = -delta
ro.globalenv['b'] = delta
newBetaJ = ro.r('rtruncnorm(n = 1, a=a , b=b, mean = mean, sd = sd)')[0]
assert(newBetaJ >= -delta and newBetaJ <= delta)
return newBetaJ
# must check whether this is really correct !
@staticmethod
def sampleTruncatedNormalAdvanced_outerInterval(delta, mean, sigmaSquare):
# p(beta < -delta)
lowerBoundIntegral = scipy.stats.norm.logcdf(-delta, loc=mean, scale=numpy.sqrt(sigmaSquare))
# p(beta > delta)
upperBoundIntegral = scipy.stats.norm.logsf(delta, loc=mean, scale=numpy.sqrt(sigmaSquare))
normalization = scipy.special.logsumexp([lowerBoundIntegral, upperBoundIntegral])
pLowerProb = numpy.exp(lowerBoundIntegral - normalization)
pUpperProb = numpy.exp(upperBoundIntegral - normalization)
# print("pLowerProb = ", pLowerProb)
# print("pUpperProb = ", pUpperProb)
rndUniform = scipy.stats.uniform.rvs()
if rndUniform < pLowerProb:
ro.globalenv['a'] = float("-inf")
ro.globalenv['b'] = -delta
else:
ro.globalenv['a'] = delta
ro.globalenv['b'] = float("+inf")
ro.globalenv['sd'] = numpy.sqrt(sigmaSquare)
ro.globalenv['mean'] = mean
# print("ro.globalenv['a'] = ", ro.globalenv['a'])
# print("ro.globalenv['b'] = ", ro.globalenv['b'])
# print("ro.globalenv['sd'] = ", ro.globalenv['sd'])
# print("ro.globalenv['mean'] = ", ro.globalenv['mean'])
newBetaJ = ro.r('rtruncnorm(n = 1, a=a , b=b, mean = mean, sd = sd)')[0]
return newBetaJ
# REVISED
# reading checked + experiment check
# samples from p(sigma_j^2 | beta_j, y, X, S)
# a slice sampler as in Bayesian Methods for Data Analysis, Carlin et al Third Edition, page 139
def sampleSigmaSquareConditional(self, relevantVariable, beta, z):
if relevantVariable:
# SUFFICIENTLY GOOD !!
# sigma >> 1 and delta << 1
usedBetaCount = numpy.sum(z)
betaSquareSum = numpy.sum(numpy.square(beta[z == 1]))
etaSquarePrior = self.etaSquare1
priorNu = self.nu1
assumeTruncatedNorm_NotRelevant_isConstant = None
nu = priorNu + usedBetaCount
etaSquare = (priorNu * etaSquarePrior + betaSquareSum) / nu
else:
# SUFFICIENTLY GOOD !!
# sigma << 1 and interval is I
betaSquareSum = numpy.sum(numpy.square(beta[z == 0]))
etaSquarePrior = self.etaSquare0
priorNu = self.nu0
usedBetaCount = self.p - numpy.sum(z)
if self.delta >= 0.01:
assumeTruncatedNorm_NotRelevant_isConstant = False
nu = priorNu + usedBetaCount
etaSquare = (priorNu * etaSquarePrior + betaSquareSum) / nu
else:
assumeTruncatedNorm_NotRelevant_isConstant = True
nu = priorNu
# nu = self.nu
# etaSquare = (self.nu * self.etaSquare0 + singleBeta ** 2 - self.delta ** 2) / self.nu
etaSquare = (priorNu * etaSquarePrior + betaSquareSum - usedBetaCount * ((self.delta / 2.0) ** 2)) / nu
# initialize with mode
sigmaSquare = (nu * etaSquare) / (nu + 2)
assert(sigmaSquare > 0.0)
numberOfSamples = 1
BURN_IN = 10
acceptanceCount = 0 # only for checking acceptance ratio
nrIterations = 0
acquiredSamples = []
while len(acquiredSamples) < numberOfSamples:
nrIterations += 1
u = scipy.stats.uniform.rvs(loc=0, scale=1, size=1)[0] * self.h(relevantVariable, sigmaSquare, usedBetaCount, assumeTruncatedNorm_NotRelevant_isConstant)
newSigmaSquare = samplingHelper.getScaledInvChiSquareSample(nu, etaSquare, 1)[0]
if nrIterations >= 2000:
print("WARNING QUIT WITHOUT PROPER ACCEPTANCE")
print("relevantVariable = ", relevantVariable)
print("mode = ", ( (nu * etaSquare) / (nu + 2)) )
print("usedBetaCount = ", usedBetaCount)
acquiredSamples.append(newSigmaSquare)
break
if u < self.h(relevantVariable, newSigmaSquare, usedBetaCount, assumeTruncatedNorm_NotRelevant_isConstant):
acceptanceCount += 1
sigmaSquare = newSigmaSquare
if acceptanceCount >= BURN_IN:
acquiredSamples.append(newSigmaSquare)
# assert(acceptanceRatio > 0.01) # should be larger than 1% (if numberOfSamples = 1, then by chance we might have sometimes low acceptance ratios of the first 100.)
# assert(acceptanceRatio > 0.0)
# if (acceptanceRatio <= 0.2):
# print("relevantVariable = ", relevantVariable)
# print("singleBeta = ", singleBeta)
# acceptanceRatio = (acceptanceCount / nrIterations)
# print("nrIterations = ", nrIterations)
# print("acceptance ratio = ", acceptanceRatio)
# assert(False)
# print("acquiredSamples = ", acquiredSamples)
assert(len(acquiredSamples) == 1)
return acquiredSamples[0]
# REVISED
# reading checked
def h(self, relevantVariable, sigmaSquare, usedBetaCount, assumeTruncatedNorm_NotRelevant_isConstant):
assert(sigmaSquare > 0.0)
assert(numpy.isscalar(sigmaSquare))
if relevantVariable:
# ALWAYS WINNER
# sigma >> 1 and interval is O
return numpy.exp( usedBetaCount * (numpy.log(numpy.sqrt(2.0 * numpy.pi * sigmaSquare)) - samplingHelper.exactLogNormalizationConstant_O_static(self.delta, sigmaSquare)))
else:
# sigma << 1 and interval is I
if assumeTruncatedNorm_NotRelevant_isConstant:
return numpy.exp( usedBetaCount * (- ((self.delta / 2) ** 2) / (2.0 * sigmaSquare) - samplingHelper.exactLogNormalizationConstant_I_static(self.delta, sigmaSquare)))
# assumes that Z(N, sigma) is roughly constant
# return numpy.exp( - (usedBetaCount * SpikeAndSlabProposed_nonContinuousMCMC.exactLogNormalizationConstant_I_static(self.delta, sigmaSquare)))
else:
# assumes that sqrt(2.0 * numpy.pi * sigmaSquare) / Z(N, sigma) is roughly constant
return numpy.exp( usedBetaCount * (numpy.log( numpy.sqrt(2.0 * numpy.pi * sigmaSquare) ) - samplingHelper.exactLogNormalizationConstant_I_static(self.delta, sigmaSquare)))
# *****************************************************************
# ********** METHODS FOR MARGINAL LIKELIHOOD ESTIMATION ***********
# *****************************************************************
@staticmethod
def truncateToValidBeta(delta, z, beta):
assert(z.shape[0] == beta.shape[0])
truncatedBeta = numpy.copy(beta)
for j in range(beta.shape[0]):
if z[j] == 1:
if (beta[j] > - delta) and (beta[j] < delta):
if beta[j] <= 0.0:
truncatedBeta[j] = -delta
else:
truncatedBeta[j] = delta
else:
if (beta[j] < -delta) or (beta[j] > delta):
if beta[j] <= 0.0:
truncatedBeta[j] = -delta
else:
truncatedBeta[j] = delta
return truncatedBeta
# checked
def estimateErrorInMSE(self, selectedVars, NUMBER_OF_MCMC_SAMPLES_TOTAL):
z = numpy.zeros(self.p, dtype = int)
z[selectedVars] = 1
numberOfFreeBeta = self.p
fixedBetaPart = numpy.zeros(self.p - numberOfFreeBeta)
fixedSigmaSquareR = None
fixedSlabVar = None
posteriorBeta, posteriorSigmaSquareR, posteriorSlabVar = self.posteriorParameterSamples(z, NUMBER_OF_MCMC_SAMPLES_TOTAL, fixedSlabVar, fixedSigmaSquareR, numberOfFreeBeta, fixedBetaPart)
irrelevantPositions = numpy.delete(numpy.arange(self.p), selectedVars)
irrelevantBetaSamples = posteriorBeta[:,irrelevantPositions]
irrelevantX = self.X[:,irrelevantPositions]
sampleCovX_irrelevant = numpy.cov(irrelevantX.transpose(), bias=True)
sampleCovBeta_irrelevant = numpy.cov(irrelevantBetaSamples.transpose(), bias=True)
sampleCovX_irrelevant = numpy.atleast_2d(sampleCovX_irrelevant)
sampleCovBeta_irrelevant = numpy.atleast_2d(sampleCovBeta_irrelevant)
estimatedAdditionalMSE = numpy.trace(numpy.matmul(sampleCovBeta_irrelevant, sampleCovX_irrelevant))
estimatedSigmaSquareR = numpy.mean(posteriorSigmaSquareR)
return estimatedSigmaSquareR, estimatedAdditionalMSE
def getSigmaSquareR_fullModel_fromCurrentModel(self, NUMBER_OF_MCMC_SAMPLES_TOTAL):
z = numpy.ones(self.p, dtype = int)
numberOfFreeBeta = self.p
fixedBetaPart = numpy.zeros(self.p - numberOfFreeBeta)
fixedSigmaSquareR = None
fixedSlabVar = None
posteriorBeta, posteriorSigmaSquareR, posteriorSlabVar = self.posteriorParameterSamples(z, NUMBER_OF_MCMC_SAMPLES_TOTAL, fixedSlabVar, fixedSigmaSquareR, numberOfFreeBeta, fixedBetaPart)
return numpy.mean(posteriorSigmaSquareR)
# z is always considered fixed
def posteriorParameterSamples(self, z, NUMBER_OF_MCMC_SAMPLES_TOTAL, fixedSlabVar, fixedSigmaSquareR, numberOfFreeBeta, fixedBetaPart):
assert(fixedSlabVar is None or fixedSlabVar > 0.0)
assert(fixedSigmaSquareR is None or fixedSigmaSquareR > 0.0)
assert(numberOfFreeBeta + fixedBetaPart.shape[0] == self.p)
invEst = numpy.linalg.inv(self.X.transpose() @ self.X + 1.0 * numpy.eye(self.p))
ridgeBetaEst = (invEst @ self.X.transpose()) @ self.y
beta = SpikeAndSlabProposedModelSearch.truncateToValidBeta(self.delta, z, ridgeBetaEst)
beta[numberOfFreeBeta:self.p] = fixedBetaPart
if fixedSigmaSquareR is None:
sigmaSquareR = numpy.mean(numpy.square(self.y - self.X @ ridgeBetaEst))
else:
sigmaSquareR = fixedSigmaSquareR
# print("z = ")
# print(z)
# print("beta = ")
# print(beta)
# assert(False)
# print("sigmaSquareR = ")
# print(sigmaSquareR)
BURN_IN_SAMPLES = int(0.1 * NUMBER_OF_MCMC_SAMPLES_TOTAL)
assert(BURN_IN_SAMPLES >= 1)
NUMBER_OF_MCMC_SAMPLES_USED = NUMBER_OF_MCMC_SAMPLES_TOTAL - BURN_IN_SAMPLES
# print("BURN_IN_SAMPLES = ", BURN_IN_SAMPLES)
# print("NUMBER_OF_MCMC_SAMPLES_USED = ", NUMBER_OF_MCMC_SAMPLES_USED)
posteriorBeta = numpy.zeros((NUMBER_OF_MCMC_SAMPLES_USED, self.p))
posteriorSigmaSquareR = numpy.zeros(NUMBER_OF_MCMC_SAMPLES_USED)
posteriorSlabVar = numpy.zeros(NUMBER_OF_MCMC_SAMPLES_USED)
spikeAndSlabVar = numpy.asarray([self.sigmaSquare0, self.etaSquare1])
if fixedSlabVar is not None:
spikeAndSlabVar[1] = fixedSlabVar
for mcmcIt in range(NUMBER_OF_MCMC_SAMPLES_TOTAL):
print("mcmcIt = ", mcmcIt)
for j in range(numberOfFreeBeta):
# sample p(beta_j | beta_-j, z, y, sigmaSquareR, X)
meanTilde, sigmaSquareTilde, _ = self.getMeanAndVarOfBetaConditional(sigmaSquareR, spikeAndSlabVar, beta, z, j)
beta[j] = SpikeAndSlabProposedModelSearch.sampleTruncatedBeta(self.delta, meanTilde, sigmaSquareTilde, z[j] == 1)
if fixedSigmaSquareR is None:
# sample p(sigmaSquareR | beta, z, y, X)
etaSquareForsigmaSquareR = (SpikeAndSlabProposedModelSearch.NU_R * SpikeAndSlabProposedModelSearch.ETA_SQUARE_R + numpy.sum(numpy.square(self.y - numpy.matmul(self.X, beta)))) / (SpikeAndSlabProposedModelSearch.NU_R + self.n)
sigmaSquareR = samplingHelper.getScaledInvChiSquareSample(nu = SpikeAndSlabProposedModelSearch.NU_R + self.n, etaSquare = etaSquareForsigmaSquareR, numberOfSamples = 1)[0]
if fixedSlabVar is None:
# sample p(sigmaSquare_1 | beta, z, y, X)
spikeAndSlabVar[1] = self.sampleSigmaSquareConditional(True, beta, z)
# print("spikeAndSlabVar = ", spikeAndSlabVar)
if mcmcIt >= BURN_IN_SAMPLES:
posteriorBeta[mcmcIt - BURN_IN_SAMPLES] = beta
posteriorSigmaSquareR[mcmcIt - BURN_IN_SAMPLES] = sigmaSquareR
posteriorSlabVar[mcmcIt - BURN_IN_SAMPLES] = spikeAndSlabVar[1]
return posteriorBeta, posteriorSigmaSquareR, posteriorSlabVar
# REVISED
@staticmethod
def truncatedBetaLogDensity(betaJVal, delta, mean, sigmaSquare, relevant):
if relevant:
if not (betaJVal <= -delta or betaJVal >= delta):
print("!!!! ERROR HERE !!!!")
print("delta = ", delta)
print("betaJVal = ", betaJVal)
assert(betaJVal <= -delta or betaJVal >= delta)
logProb = - samplingHelper.exactLogNormalizationConstant_O_static(delta, sigmaSquare, mean)
else:
assert(betaJVal >= -delta and betaJVal <= delta)
logProb = - samplingHelper.exactLogNormalizationConstant_I_static(delta, sigmaSquare, mean)
if isinstance(logProb, numpy.ndarray):
# ensure that it is not an array
assert(len(logProb) == 1)
logProb = logProb[0]
logProb -= 0.5 * (1.0 / sigmaSquare) * ( (betaJVal - mean)**2)
assert(logProb > float("-inf") and logProb < float("inf"))
assert(not numpy.isnan(logProb))
return logProb
def getLogProbBetaJGivenRest(self, sigmaSquareR, spikeAndSlabVar, beta, z, j):
meanTilde, sigmaSquareTilde, _ = self.getMeanAndVarOfBetaConditional(sigmaSquareR, spikeAndSlabVar, beta, z, j)
return SpikeAndSlabProposedModelSearch.truncatedBetaLogDensity(beta[j], self.delta, meanTilde, sigmaSquareTilde, z[j] == 1)
# REVISED
# calculates log p(beta, sigmaSquareR, sigmaSquare, y | X, S)
def getJointLogProb_forMarginalCalculation(self, z, beta, sigmaSquareR, spikeAndSlabVar, checkValidBeta = True):
assert(sigmaSquareR > 0.0)
jointLogProb = - (float(self.n) / 2.0) * numpy.log(2.0 * numpy.pi)
jointLogProb -= (((SpikeAndSlabProposedModelSearch.NU_R + self.n) / 2.0) + 1.0) * numpy.log(sigmaSquareR)
jointLogProb -= (1.0 / (2.0 * sigmaSquareR)) * (SpikeAndSlabProposedModelSearch.NU_R * SpikeAndSlabProposedModelSearch.ETA_SQUARE_R + numpy.sum(numpy.square(self.y - numpy.matmul(self.X, beta))))
jointLogProb -= SpikeAndSlabProposedModelSearch.getScaledInvChiSquareLogNormalizer(SpikeAndSlabProposedModelSearch.ETA_SQUARE_R, SpikeAndSlabProposedModelSearch.NU_R)
for j in range(self.p):
priorSigmaSquare = spikeAndSlabVar[z[j]]
assert(priorSigmaSquare > 0.0)
if z[j] == 1:
if checkValidBeta and ((beta[j] > -self.delta) and (beta[j] < self.delta)):
return float("-inf")
jointLogProb -= samplingHelper.exactLogNormalizationConstant_O_static(self.delta, priorSigmaSquare)
else:
if checkValidBeta and ((beta[j] < -self.delta) or (beta[j] > self.delta)):
return float("-inf")
jointLogProb -= samplingHelper.exactLogNormalizationConstant_I_static(self.delta, priorSigmaSquare)
jointLogProb -= (1.0 / (2.0 * priorSigmaSquare)) * (beta[j] ** 2)
# add prior on simgaSquareRelevant
# jointLogProb += SpikeAndSlabProposedModelSearch.getScaledInvChiSquareLogDensity(spikeAndSlabVar[1], self.etaSquare1, self.nu1)
# add prior on z
jointLogProb += SpikeAndSlabProposedModelSearch.getLogPriorZ(
|
numpy.sum(z)
|
numpy.sum
|
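sampleTruncatedBeta and sampleTruncatedNormalAdvanced_outerInterval above delegate truncated-normal draws to R's rtruncnorm through rpy2. As a hedged alternative sketch (not the author's code), the "not relevant" case, where beta_j is confined to [-delta, delta], can be drawn with scipy.stats.truncnorm, which expects its truncation bounds in standardised units:

import scipy.stats

delta, mean, sigma = 0.05, 0.01, 0.2  # illustrative values only
a = (-delta - mean) / sigma           # standardised lower bound
b = (delta - mean) / sigma            # standardised upper bound
beta_j = scipy.stats.truncnorm.rvs(a, b, loc=mean, scale=sigma)
assert -delta <= beta_j <= delta
print(beta_j)

The outer-interval ("relevant") case can be handled analogously by first choosing a tail using the log-probabilities already computed in sampleTruncatedNormalAdvanced_outerInterval and then drawing a one-sided truncated normal in that tail.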
"""Helper methods for learning examples."""
import numpy
import xarray
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import projections
from gewittergefahr.gg_utils import interp
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import error_checking
from ml4tc.io import ships_io
from ml4tc.utils import satellite_utils
from ml4tc.utils import general_utils
GRID_SPACING_METRES = satellite_utils.GRID_SPACING_METRES
STORM_INTENSITY_KEY = ships_io.STORM_INTENSITY_KEY
SATELLITE_GRID_ROW_DIM = satellite_utils.GRID_ROW_DIM
SATELLITE_GRID_COLUMN_DIM = satellite_utils.GRID_COLUMN_DIM
SATELLITE_TIME_DIM = satellite_utils.TIME_DIM
SATELLITE_PREDICTOR_UNGRIDDED_DIM = 'satellite_predictor_name_ungridded'
SATELLITE_PREDICTOR_GRIDDED_DIM = 'satellite_predictor_name_gridded'
SHIPS_FORECAST_HOUR_DIM = ships_io.FORECAST_HOUR_DIM
SHIPS_THRESHOLD_DIM = ships_io.THRESHOLD_DIM
SHIPS_LAG_TIME_DIM = ships_io.LAG_TIME_DIM
SHIPS_VALID_TIME_DIM = ships_io.VALID_TIME_DIM
SHIPS_PREDICTOR_LAGGED_DIM = 'ships_predictor_name_lagged'
SHIPS_PREDICTOR_FORECAST_DIM = 'ships_predictor_name_forecast'
SATELLITE_PREDICTORS_UNGRIDDED_KEY = 'satellite_predictors_ungridded'
SATELLITE_PREDICTORS_GRIDDED_KEY = 'satellite_predictors_gridded'
SHIPS_PREDICTORS_LAGGED_KEY = 'ships_predictors_lagged'
SHIPS_PREDICTORS_FORECAST_KEY = 'ships_predictors_forecast'
SATELLITE_METADATA_KEYS = [
satellite_utils.SATELLITE_NUMBER_KEY,
satellite_utils.BAND_NUMBER_KEY,
satellite_utils.BAND_WAVELENGTH_KEY,
satellite_utils.SATELLITE_LONGITUDE_KEY,
satellite_utils.CYCLONE_ID_KEY,
satellite_utils.STORM_TYPE_KEY,
satellite_utils.STORM_NAME_KEY,
satellite_utils.STORM_LATITUDE_KEY,
satellite_utils.STORM_LONGITUDE_KEY,
satellite_utils.STORM_INTENSITY_NUM_KEY,
satellite_utils.GRID_LATITUDE_KEY,
satellite_utils.GRID_LONGITUDE_KEY
]
SHIPS_METADATA_KEYS = [
ships_io.CYCLONE_ID_KEY,
ships_io.STORM_LATITUDE_KEY,
ships_io.STORM_LONGITUDE_KEY,
ships_io.STORM_TYPE_KEY,
ships_io.FORECAST_LATITUDE_KEY,
ships_io.FORECAST_LONGITUDE_KEY,
ships_io.VORTEX_LATITUDE_KEY,
ships_io.VORTEX_LONGITUDE_KEY,
ships_io.THRESHOLD_EXCEEDANCE_KEY
]
SHIPS_METADATA_AND_FORECAST_KEYS = [ships_io.FORECAST_LATITUDE_KEY]
def merge_data(satellite_table_xarray, ships_table_xarray):
"""Merges satellite and SHIPS data.
:param satellite_table_xarray: Table returned by `satellite_io.read_file`.
:param ships_table_xarray: Table returned by `ships_io.read_file`.
:return: example_table_xarray: Table created by merging inputs. Metadata in
table should make fields self-explanatory.
"""
# Ensure that both tables contain data for the same cyclone.
satellite_cyclone_id_strings = numpy.array(
satellite_table_xarray[satellite_utils.CYCLONE_ID_KEY].values
)
assert len(numpy.unique(satellite_cyclone_id_strings)) == 1
ships_cyclone_id_strings = numpy.array(
ships_table_xarray[ships_io.CYCLONE_ID_KEY].values
)
assert len(numpy.unique(ships_cyclone_id_strings)) == 1
try:
satellite_cyclone_id_string = (
satellite_cyclone_id_strings[0].decode('UTF-8')
)
except AttributeError:
satellite_cyclone_id_string = satellite_cyclone_id_strings[0]
try:
ships_cyclone_id_string = ships_cyclone_id_strings[0].decode('UTF-8')
except AttributeError:
ships_cyclone_id_string = ships_cyclone_id_strings[0]
assert satellite_cyclone_id_string == ships_cyclone_id_string
# Merge coordinate metadata and data variables from the two tables.
satellite_metadata_dict = satellite_table_xarray.to_dict()['coords']
example_metadata_dict = dict()
for this_key in satellite_metadata_dict:
example_metadata_dict[this_key] = (
satellite_metadata_dict[this_key]['data']
)
ships_metadata_dict = ships_table_xarray.to_dict()['coords']
del ships_metadata_dict[ships_io.STORM_OBJECT_DIM]
for this_key in ships_metadata_dict:
example_metadata_dict[this_key] = (
ships_metadata_dict[this_key]['data']
)
satellite_dict = satellite_table_xarray.to_dict()['data_vars']
example_dict = dict()
satellite_predictor_names_ungridded = []
satellite_predictor_matrix_ungridded = numpy.array([])
for this_key in satellite_dict:
if this_key in SATELLITE_METADATA_KEYS:
example_dict[this_key] = (
satellite_dict[this_key]['dims'],
satellite_dict[this_key]['data']
)
continue
if this_key == satellite_utils.BRIGHTNESS_TEMPERATURE_KEY:
these_dim = (
satellite_dict[this_key]['dims'] +
(SATELLITE_PREDICTOR_GRIDDED_DIM,)
)
example_dict[SATELLITE_PREDICTORS_GRIDDED_KEY] = (
these_dim,
|
numpy.expand_dims(satellite_dict[this_key]['data'], axis=-1)
|
numpy.expand_dims
|
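In merge_data above, the gridded brightness-temperature field receives a trailing predictor axis before it is stored under SATELLITE_PREDICTORS_GRIDDED_KEY. A toy illustration of that numpy.expand_dims step (the array shape here is assumed, not taken from real satellite data):

import numpy

brightness_temp_matrix = numpy.random.rand(5, 101, 60)  # (time, grid row, grid column)
gridded_predictor_matrix = numpy.expand_dims(brightness_temp_matrix, axis=-1)
print(gridded_predictor_matrix.shape)                   # (5, 101, 60, 1)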
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.neighbors import NearestNeighbors
from multiprocessing import Pool
import numpy as np
class NearestNeighborsFeats(BaseEstimator, ClassifierMixin):
'''
This class implements KNN feature extraction
'''
def __init__(self, n_jobs, k_list, metric, n_classes=None, n_neighbors=None, eps=1e-6):
self.n_jobs = n_jobs
self.k_list = k_list
self.metric = metric
if n_neighbors is None:
self.n_neighbors = max(k_list)
else:
self.n_neighbors = n_neighbors
self.eps = eps
self.n_classes_ = n_classes
self.n_classes = None
def fit(self, X, y):
'''
Sets up the training set and the self.NN object
'''
# Create a NearestNeighbors (NN) object. We will use it in `predict` function
self.NN = NearestNeighbors(n_neighbors=max(self.k_list),
metric=self.metric,
n_jobs=1,
algorithm='brute' if self.metric=='cosine' else 'auto')
self.NN.fit(X)
# Store labels
self.y_train = y
# Save how many classes we have
self.n_classes = np.unique(y).shape[0] if self.n_classes_ is None else self.n_classes_
def predict(self, X):
'''
Produces KNN features for every object of a dataset X
'''
if self.n_jobs == 1:
test_feats = []
for i in range(X.shape[0]):
test_feats.append(self.get_features_for_one(X[i:i+1]))
else:
'''
*Make it parallel*
Number of threads should be controlled by `self.n_jobs`
You can use whatever you want to do it
For Python 3 the simplest option would be to use
`multiprocessing.Pool` (but don't use `multiprocessing.dummy.Pool` here)
You may try to use `joblib`, but you will most likely encounter an error
that you will need to google (and eventually it will work slowly)
For Python 2 I also suggest using `multiprocessing.Pool`
You will need to use a hint from this blog
http://qingkaikong.blogspot.ru/2016/12/python-parallel-method-in-class.html
I could not get `joblib` working at all for this code
(but in general `joblib` is very convenient)
'''
# YOUR CODE GOES HERE
# test_feats = # YOUR CODE GOES HERE
# YOUR CODE GOES HERE
# test_feats = Parallel(n_jobs=self.n_jobs)(delayed(self.get_features_for_one)(x) for x in X)
p = Pool(self.n_jobs)
test_feats = p.map(self.get_features_for_one,[[x] for x in X])
p.close()
p.join()
# assert False, 'You need to implement it for n_jobs > 1'
return np.vstack(test_feats)
def get_features_for_one(self, x):
'''
Computes KNN features for a single object `x`
'''
NN_output = self.NN.kneighbors(x)
# Vector of size `n_neighbors`
# Stores indices of the neighbors
# NN_output = distances,indices
neighs = NN_output[1][0]
# Vector of size `n_neighbors`
# Stores distances to corresponding neighbors
neighs_dist = NN_output[0][0]
# Vector of size `n_neighbors`
# Stores labels of corresponding neighbors
neighs_y = self.y_train[neighs]
## ========================================== ##
## YOUR CODE BELOW
## ========================================== ##
# We will accumulate the computed features here
# Eventually it will be a list of lists or np.arrays
# and we will use np.hstack to concatenate those
return_list = []
'''
1. Fraction of objects of every class.
It is basically the KNN classifier's predictions.
Take a look at the `np.bincount` function; it can be very helpful.
Note that the values should sum up to one.
'''
# print("1. [*] Fraction of object in every class")
for k in self.k_list:
# YOUR CODE GOES HERE
feats = []
bcount = {c:v for c,v in enumerate(np.bincount(neighs_y[:k]))}
for c in range(self.n_classes):
if c in bcount:
feats.append(bcount[c])
else:
feats.append(0)
feats /= np.sum(feats)
assert len(feats) == self.n_classes
return_list += [feats]
'''
2. Same label streak: the largest number N,
such that N nearest neighbors have the same label
What can help you: `np.where`
'''
# print("2. [*] Longest streak of same label")
run_ends = np.where(np.diff(neighs_y))[0] + 1
offset =
|
np.hstack((0,run_ends,neighs_y.size))
|
numpy.hstack
|
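The snippet above ends mid-assignment at `offset =`, and its continuation only shows the np.hstack call. One common way to finish the "same label streak" feature from run_ends is sketched below; this is an assumption about the intent, not necessarily the notebook author's exact code:

import numpy as np

neighs_y = np.array([2, 2, 2, 1, 1, 2, 0])     # labels of the nearest neighbours
run_ends = np.where(np.diff(neighs_y))[0] + 1  # positions where the label changes
streak = run_ends[0] if run_ends.size else neighs_y.size
print(streak)                                  # 3: the first three neighbours share a label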
from torch.utils.data import Dataset
import numpy as np
import os
from PIL import Image
from datasets.data_io import *
import cv2
class MVSDataset(Dataset):
def __init__(self, datapath, listfile, mode, nviews, img_wh=(1600, 1200), **kwargs):
super(MVSDataset, self).__init__()
self.stages = 4
self.datapath = datapath
self.listfile = listfile
self.mode = mode
self.nviews = nviews
self.img_wh = img_wh
assert self.mode == "test"
self.metas = self.build_list()
def build_list(self):
metas = []
with open(self.listfile) as f:
scans = f.readlines()
scans = [line.rstrip() for line in scans][0:1]
for scan in scans:
pair_file = "{}/pair.txt".format(scan)
# read the pair file
with open(os.path.join(self.datapath, pair_file)) as f:
num_viewpoint = int(f.readline())
# viewpoints (49)
for view_idx in range(num_viewpoint):
ref_view = int(f.readline().rstrip())
src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]
metas.append((scan, ref_view, src_views))
print("dataset", self.mode, "metas:", len(metas))
return metas
def __len__(self):
return len(self.metas)
def read_cam_file(self, filename):
with open(filename) as f:
lines = f.readlines()
lines = [line.rstrip() for line in lines]
# extrinsics: line [1,5), 4x4 matrix
extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape((4, 4))
# intrinsics: line [7-10), 3x3 matrix
intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape((3, 3))
depth_min = float(lines[11].split()[0])
depth_max = float(lines[11].split()[1])
return intrinsics, extrinsics, depth_min, depth_max
def read_img(self, filename):
img = Image.open(filename)
# scale 0~255 to 0~1
np_img = np.array(img, dtype=np.float32) / 255.
np_img = cv2.resize(np_img, self.img_wh, interpolation=cv2.INTER_LINEAR)
h, w, _ = np_img.shape
np_img_ms = {
"stage_3": cv2.resize(np_img, (w//8, h//8), interpolation=cv2.INTER_LINEAR),
"stage_2": cv2.resize(np_img, (w//4, h//4), interpolation=cv2.INTER_LINEAR),
"stage_1": cv2.resize(np_img, (w//2, h//2), interpolation=cv2.INTER_LINEAR),
"stage_0": np_img
}
return np_img_ms
def __getitem__(self, idx):
meta = self.metas[idx]
scan, ref_view, src_views = meta
# use only the reference view and first nviews-1 source views
view_ids = [ref_view] + src_views[:self.nviews - 1]
img_w = 1600
img_h = 1200
imgs_0 = []
imgs_1 = []
imgs_2 = []
imgs_3 = []
depth_min = None
depth_max = None
proj_matrices_0 = []
proj_matrices_1 = []
proj_matrices_2 = []
proj_matrices_3 = []
for i, vid in enumerate(view_ids):
img_filename = os.path.join(self.datapath, '{}/images/{:0>8}.jpg'.format(scan, vid))
proj_mat_filename = os.path.join(self.datapath, '{}/cams_1/{:0>8}_cam.txt'.format(scan, vid))
imgs = self.read_img(img_filename)
imgs_0.append(imgs['stage_0'])
imgs_1.append(imgs['stage_1'])
imgs_2.append(imgs['stage_2'])
imgs_3.append(imgs['stage_3'])
intrinsics, extrinsics, depth_min_, depth_max_ = self.read_cam_file(proj_mat_filename)
intrinsics[0] *= self.img_wh[0]/img_w
intrinsics[1] *= self.img_wh[1]/img_h
# multiply intrinsics and extrinsics to get projection matrix
proj_mat = extrinsics.copy()
intrinsics[:2,:] *= 0.125
proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
proj_matrices_3.append(proj_mat)
proj_mat = extrinsics.copy()
intrinsics[:2,:] *= 2
proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
proj_matrices_2.append(proj_mat)
proj_mat = extrinsics.copy()
intrinsics[:2,:] *= 2
proj_mat[:3, :4] = np.matmul(intrinsics, proj_mat[:3, :4])
proj_matrices_1.append(proj_mat)
proj_mat = extrinsics.copy()
intrinsics[:2,:] *= 2
proj_mat[:3, :4] =
|
np.matmul(intrinsics, proj_mat[:3, :4])
|
numpy.matmul
|
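In __getitem__ above, each stage's projection matrix is formed by rescaling the intrinsics to that stage's resolution and multiplying them into the top 3x4 block of the extrinsics. A standalone sketch of one stage with made-up camera values (the numbers are illustrative, not from the dataset):

import numpy as np

intrinsics = np.array([[1000., 0., 800.],
[0., 1000., 600.],
[0., 0., 1.]], dtype=np.float32)
extrinsics = np.eye(4, dtype=np.float32)
intrinsics_stage3 = intrinsics.copy()
intrinsics_stage3[:2, :] *= 0.125  # stage_3 images are 1/8 resolution
proj_mat = extrinsics.copy()
proj_mat[:3, :4] = np.matmul(intrinsics_stage3, proj_mat[:3, :4])
print(proj_mat)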
from __future__ import division, absolute_import, print_function
import sys
import pytest
import numpy as np
from numpy.ctypeslib import ndpointer, load_library, as_array
from numpy.distutils.misc_util import get_shared_lib_extension
from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
try:
cdll = None
if hasattr(sys, 'gettotalrefcount'):
try:
cdll = load_library('multiarray_d', np.core.multiarray.__file__)
except OSError:
pass
if cdll is None:
cdll = load_library('multiarray', np.core.multiarray.__file__)
_HAS_CTYPE = True
except ImportError:
_HAS_CTYPE = False
@pytest.mark.skipif(not _HAS_CTYPE,
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
class TestLoadLibrary(object):
def test_basic(self):
try:
# Should succeed
load_library('multiarray', np.core.multiarray.__file__)
except ImportError as e:
msg = ("ctypes is not available on this python: skipping the test"
" (import error was: %s)" % str(e))
print(msg)
def test_basic2(self):
# Regression for #801: load_library with a full library name
# (including extension) does not work.
try:
try:
so = get_shared_lib_extension(is_python_ext=True)
# Should succeed
load_library('multiarray%s' % so, np.core.multiarray.__file__)
except ImportError:
print("No distutils available, skipping test.")
except ImportError as e:
msg = ("ctypes is not available on this python: skipping the test"
" (import error was: %s)" % str(e))
print(msg)
class TestNdpointer(object):
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
assert_(p.from_param(np.array([1], dt)))
dt = '<i4'
p = ndpointer(dtype=dt)
assert_(p.from_param(np.array([1], dt)))
dt = np.dtype('>i4')
p = ndpointer(dtype=dt)
p.from_param(np.array([1], dt))
assert_raises(TypeError, p.from_param,
np.array([1], dt.newbyteorder('swap')))
dtnames = ['x', 'y']
dtformats = [np.intc, np.float64]
dtdescr = {'names': dtnames, 'formats': dtformats}
dt = np.dtype(dtdescr)
p = ndpointer(dtype=dt)
assert_(p.from_param(np.zeros((10,), dt)))
samedt = np.dtype(dtdescr)
p = ndpointer(dtype=samedt)
assert_(p.from_param(np.zeros((10,), dt)))
dt2 = np.dtype(dtdescr, align=True)
if dt.itemsize != dt2.itemsize:
assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
else:
assert_(p.from_param(np.zeros((10,), dt2)))
def test_ndim(self):
p = ndpointer(ndim=0)
assert_(p.from_param(np.array(1)))
assert_raises(TypeError, p.from_param, np.array([1]))
p = ndpointer(ndim=1)
assert_raises(TypeError, p.from_param, np.array(1))
assert_(p.from_param(np.array([1])))
p = ndpointer(ndim=2)
assert_(p.from_param(np.array([[1]])))
def test_shape(self):
p = ndpointer(shape=(1, 2))
assert_(p.from_param(np.array([[1, 2]])))
assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
p = ndpointer(shape=())
assert_(p.from_param(np.array(1)))
def test_flags(self):
x = np.array([[1, 2], [3, 4]], order='F')
p = ndpointer(flags='FORTRAN')
assert_(p.from_param(x))
p = ndpointer(flags='CONTIGUOUS')
assert_raises(TypeError, p.from_param, x)
p = ndpointer(flags=x.flags.num)
assert_(p.from_param(x))
assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
def test_cache(self):
a1 = ndpointer(dtype=np.float64)
a2 = ndpointer(dtype=np.float64)
assert_(a1 == a2)
@pytest.mark.skipif(not _HAS_CTYPE,
reason="ctypes not available on this python installation")
class TestAsArray(object):
def test_array(self):
from ctypes import c_int
pair_t = c_int * 2
a = as_array(pair_t(1, 2))
assert_equal(a.shape, (2,))
assert_array_equal(a, np.array([1, 2]))
a = as_array((pair_t * 3)(pair_t(1, 2), pair_t(3, 4), pair_t(5, 6)))
assert_equal(a.shape, (3, 2))
assert_array_equal(a, np.array([[1, 2], [3, 4], [5, 6]]))
def test_pointer(self):
from ctypes import c_int, cast, POINTER
p = cast((c_int * 10)(*range(10)), POINTER(c_int))
a = as_array(p, shape=(10,))
assert_equal(a.shape, (10,))
assert_array_equal(a, np.arange(10))
a = as_array(p, shape=(2, 5))
assert_equal(a.shape, (2, 5))
assert_array_equal(a,
|
np.arange(10)
|
numpy.arange
|
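For quick reference, numpy.ctypeslib.as_array can also be exercised outside the test class; this standalone snippet mirrors the ctypes-array case above:

from ctypes import c_int
from numpy.ctypeslib import as_array

buf = (c_int * 6)(*range(6))
a = as_array(buf)        # ndarray view sharing memory with the ctypes buffer
print(a)                 # [0 1 2 3 4 5]
print(a.reshape(2, 3))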
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for custom tensorflow operators."""
import os
import unittest
import tempfile
import numpy as np
import skimage.color as skcolor
import tensorflow as tf
import hdrnet.hdrnet_ops as ops
class BilateralSliceTest(tf.test.TestCase):
def run_bilateral_slice(self, dev, grid_data, guide_data):
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(
grid_data, name='grid', dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(
guide_data, name='guide', dtype=tf.float32)
output_tensor = ops.bilateral_slice(grid_tensor, guide_tensor)
with self.test_session() as sess:
output_data = sess.run(output_tensor)
return output_data
def test_shape_is_correct(self):
batch_size = 3
grid_shape = [batch_size, 10, 6, 8, 12]
guide_shape = [batch_size, 101, 60]
grid_data = np.random.rand(*grid_shape).astype(np.float32)
guide_data = np.random.rand(*guide_shape).astype(np.float32)
for dev in ['/cpu:0', '/gpu:0']:
output_data = self.run_bilateral_slice(dev, grid_data, guide_data)
output_shape = list(output_data.shape)
self.assertEqual(len(output_shape), 4)
self.assertEqual(output_shape[0], guide_shape[0])
self.assertEqual(output_shape[1], guide_shape[1])
self.assertEqual(output_shape[2], guide_shape[2])
self.assertEqual(output_shape[3], grid_shape[4])
def test_interpolate(self):
for dev in ['/gpu:0']:
batch_size = 3
h = 3
w = 4
d = 3
grid_shape = [batch_size, h, w, d, 1]
grid_data = np.zeros(grid_shape).astype(np.float32)
grid_data[:, :, :, 1:] = 1.0
grid_data[:, :, :, 2:] = 2.0
guide_shape = [batch_size, 5, 9]
target_shape = [batch_size, 5, 9, 1]
for val in range(d):
target_data = val*np.ones(target_shape)
target_data = target_data.astype(np.float32)
guide_data = ((val+0.5)/(1.0*d))*np.ones(guide_shape).astype(np.float32)
output_data = self.run_bilateral_slice(dev, grid_data, guide_data)
diff = np.amax(np.abs(target_data-output_data))
self.assertEqual(target_shape, list(output_data.shape))
self.assertLess(diff, 5e-4)
def test_grid_gradient(self):
for dev in ['/gpu:0']:
batch_size = 3
h = 8
w = 5
gh = 6
gw = 3
d = 7
nchans = 4
grid_shape = [batch_size, gh, gw, d, nchans]
guide_shape = [batch_size, h, w]
output_shape = [batch_size, h, w, nchans]
grid_data = np.random.rand(*grid_shape).astype(np.float32)
guide_data = np.random.rand(*guide_shape).astype(np.float32)
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(grid_data,
name='data',
dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(guide_data,
name='data',
dtype=tf.float32)
output_tensor = ops.bilateral_slice(grid_tensor, guide_tensor)
with self.test_session():
err = tf.test.compute_gradient_error(
grid_tensor,
grid_shape,
output_tensor,
output_shape)
self.assertLess(err, 1e-4)
def test_guide_gradient(self):
for dev in ['/gpu:0']:
batch_size = 2
h = 7
w = 8
d = 5
gh = 3
gw = 4
nchans = 2
grid_shape = [batch_size, gh, gw, d, nchans]
guide_shape = [batch_size, h, w]
output_shape = [batch_size, h, w, nchans]
grid_data = np.random.randn(*grid_shape).astype(np.float32)
guide_data = np.random.rand(*guide_shape).astype(np.float32)
with tf.device(dev):
grid_tensor = tf.convert_to_tensor(grid_data,
name='data',
dtype=tf.float32)
guide_tensor = tf.convert_to_tensor(guide_data,
name='data',
dtype=tf.float32)
output_tensor = ops.bilateral_slice(grid_tensor, guide_tensor)
with self.test_session():
th, num = tf.test.compute_gradient(
guide_tensor,
guide_shape,
output_tensor,
output_shape, delta=1e-4)
print(th)
print(num)
thresh = 5e-3
diff = np.abs(th-num)
x, y = np.where(diff>thresh)
for i in range(len(x)):
in_x = x[i] % w
in_y = x[i] // w
out_c = y[i] % nchans
out_x = (y[i] // nchans) % w
out_y = (y[i] // nchans) // w
print("output ({},{},{}) - input ({},{})\n guide: {:f}\n theoretical: {:f}\n numerical: {:f}".format(
out_y, out_x, out_c, in_y, in_x, np.ravel(guide_data)[x[i]], th[x[i], y[i]], num[x[i],y[i]]))
print(len(x), 'of', len(np.ravel(diff)), 'errors')
print('gradient shape', th.shape)
print('guide shape', guide_data.shape)
print('grid shape', grid_data.shape)
print('output shape', output_shape)
self.assertLess(np.amax(diff), thresh)
def l2_optimizer(self, target, output, lr=1e-2):
loss = tf.reduce_sum(tf.square(target-output))
global_step = tf.Variable(
0, name='global_step', trainable=False,
collections=['global_step', tf.GraphKeys.GLOBAL_VARIABLES])
optimizer = tf.train.GradientDescentOptimizer(lr).minimize(
loss, global_step=global_step)
return optimizer, loss
def test_grid_optimize(self):
for dev in ['/gpu:0']:
bs= 1
h = 1
w = 32
nchans = 1
gh = 1
gw = 16
gd = 8
guide_data = np.linspace(0, 1, w).astype(np.float32)
guide_data = guide_data[np.newaxis, np.newaxis, :]
guide_data = np.tile(guide_data, [bs, h, 1])
grid_data = np.random.rand(bs, gh, gw, gd, nchans).astype(np.float32)
target_data = np.sin(np.linspace(0, 2*np.pi, w)).astype(np.float32)
target_data = target_data[np.newaxis, np.newaxis, :, np.newaxis]
target_data = np.tile(target_data, [bs, h, 1, 1])
grid = tf.Variable(grid_data)
guide = tf.convert_to_tensor(guide_data)
target = tf.convert_to_tensor(target_data)
output = ops.bilateral_slice(grid, guide)
checkpoint_dir = tempfile.mkdtemp()
opt, loss = self.l2_optimizer(target, output)
with self.test_session() as sess:
tf.global_variables_initializer().run()
for step in range(10000):
_, l_ = sess.run([opt, loss])
if step % 100 == 0:
print("Step {}, loss = {:.5f}".format(step, l_))
out_, target_ = sess.run([output, target])
out_ = np.squeeze(out_)
target_ = np.squeeze(target_)
assert np.sum(np.square(out_-target_)) < 0.0085
def test_guide_optimize(self):
for dev in ['/gpu:0']:
bs= 1
h = 1
w = 32
nchans = 1
gh = 1
gw = 8
gd = 2
guide_data = np.linspace(0.5/gd, 1-0.5/gd, w).astype(np.float32)
guide_data = guide_data[np.newaxis, np.newaxis, :]
guide_data = np.tile(guide_data, [bs, h, 1])
grid_data = np.linspace(-1, 1, gd).astype(np.float32)
grid_data = grid_data[np.newaxis, np.newaxis, np.newaxis, :, np.newaxis ]
grid_data = np.tile(grid_data, [bs, gh, gw, 1, nchans])
target_data = np.sin(np.linspace(0, 2*np.pi, w)).astype(np.float32)
target_data = target_data[np.newaxis, np.newaxis, :, np.newaxis]
target_data =
|
np.tile(target_data, [bs, h, 1, 1])
|
numpy.tile
|
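The optimisation targets above are built by broadcasting a 1-D profile up to a (batch, height, width, channels) volume with np.newaxis and np.tile. A numpy-only sketch of that tiling pattern:

import numpy as np

bs, h, w = 1, 1, 32
target = np.sin(np.linspace(0, 2 * np.pi, w)).astype(np.float32)
target = target[np.newaxis, np.newaxis, :, np.newaxis]  # shape (1, 1, 32, 1)
target = np.tile(target, [bs, h, 1, 1])
print(target.shape)                                     # (1, 1, 32, 1)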
#!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""
The GridLayoutGenerator module implements classes to generate full-custom layout on 'abstract' grid. It allows designers
to describe layout generation scripts in Python language and automate the layout process, abstracting design rules for
easier implementation and process portability. All numerical parameters are given in integer numbers and they are
converted to physical numbers internally and designers don't need to deal with complex design rules in modern CMOS
process.
Example
-------
For layout export, type below command in ipython console.
$ run laygo/labs/lab2_b_gridlayoutgenerator_layoutexercise.py
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
from .BaseLayoutGenerator import *
from .TemplateDB import *
from .GridDB import *
from . import PrimitiveUtil as ut
import numpy as np
import yaml
import logging
#TODO: support path routing
class GridLayoutGenerator(BaseLayoutGenerator):
"""
The GridLayoutGenerator class implements functions and variables for full-custom layout generations on abstract
grids.
Parameters
----------
physical_res : float
physical grid resolution
config_file : str
laygo configuration file path
templates : laygo.TemplateDB.TemplateDB
template database
grids : laygo.GridDB.GridDB
grid database
layers: dict
layer dictionary. metal, pin, text, prbnd are used as keys
"""
templates = None
"""laygo.TemplateDB.TemplateDB: template database"""
grids = None
"""laygo.GridDB.GridDB: grid database"""
use_phantom=False #phantom cell usage
"""bool: true if phantom cells are exported (not real cells)"""
layers = {'metal':[], 'pin':[], 'text':[], 'prbnd':[]}
"""dict: layer dictionary. Keys are metal, pin, text, prbnd"""
def __init__(self, physical_res=0.005, config_file=None):
"""
Constructor
"""
self.templates = TemplateDB()
self.grids = GridDB()
if not config_file==None: #config file exists
with open(config_file, 'r') as stream:
techdict = yaml.load(stream)
self.tech = techdict['tech_lib']
self.physical_res = techdict['physical_resolution']
physical_res=self.physical_res
self.layers['metal'] = techdict['metal_layers']
self.layers['pin'] = techdict['pin_layers']
self.layers['text'] = techdict['text_layer']
self.layers['prbnd'] = techdict['prboundary_layer']
print(self.tech + " loaded successfully")
BaseLayoutGenerator.__init__(self, res=physical_res)
#aux functions
def _bbox_xy(self, xy):
"""
Find a bbox of xy coordinates. ex) _bbox_xy([[4, 1], [3, 5], [2, 3]])=np.array([[2, 1], [4, 5]])
Parameters
----------
xy : np.array([[int, int], [int, int], ...]) or list
point matrix. A plain list can also be used.
Returns
-------
np.array([[int, int], [int, int]])
bbox matrix
"""
xy = np.asarray(xy)
bx = sorted(xy[:, 0].tolist())
by = sorted(xy[:, 1].tolist())
ll = np.array([bx[0], by[0]]) # lower-left
ur = np.array([bx[-1], by[-1]]) # upper-right
bnd = np.vstack([ll, ur])
return bnd
#Placement functions
def place(self, name, templatename, gridname, xy, template_libname=None, shape=np.array([1, 1]), spacing=None,
offset=np.array([0, 0]), transform='R0', annotate_text=None, libname=None):
"""
Place an instance on abstract grid. Use relplace instead
Parameters
----------
name : str
Name of the instance.
templatename : str
Name of the template for the instance.
gridname : str
Grid name for the instance placement.
xy : np.array([int, int]) or [int, int]
Placement coordinate on the grid, specified by gridname.
libname : str, optional
Template library name. If not specified, self.templates.plib is used.
shape : np.array([x0, y0]) or None, optional
array shape parameter. If None, the instance is not treated as an array. Default is np.array([1, 1]).
transform : str ('R0', 'MX', 'MY'), optional
Transform parameter
Returns
-------
laygo.layoutObject.Instance
generated instance
Other Parameters
----------------
template_libname: str, optional, deprecated
Replaced with libname
spacing : np.array([int, int]), optional
Array spacing parameter for the instance. If None, the size of the instance is used.
offset : np.array([float, float]), optional
Offset in physical coordinate.
annotate_text : str, optional
text to be annotated. Use None if no annotation is required
"""
### preprocessing starts ###
xy = np.asarray(xy) # convert to a numpy array
if shape is None:
_shape = np.array([1, 1])
else:
_shape = np.asarray(shape)
if spacing is not None: spacing = np.asarray(spacing)
offset = np.asarray(offset)
if not libname is None:
template_libname = libname
if template_libname is None:
template_libname = self.templates.plib
### preprocessing ends ###
t=self.templates.get_template(templatename, template_libname)
xy_phy=self.grids.get_phygrid_xy(gridname, xy)+offset
# instantiation
if not isinstance(spacing,np.ndarray): spacing=t.size
inst = self.add_inst(name=name, libname=template_libname, cellname=t.name, xy=xy_phy, shape=shape,
spacing=spacing, transform=transform, template=t)
if not annotate_text==None: #annotation
self.add_text(None, text=annotate_text, xy=np.vstack((xy_phy, xy_phy+0.5*np.dot(t.size*_shape,
ut.Mt(transform).T))), layer=self.layers['prbnd'])
if self.use_phantom == True: #phantom cell placement
self.add_rect(None, xy=np.vstack((xy_phy, xy_phy+np.dot(t.size*_shape, ut.Mt(transform).T))),
layer=self.layers['prbnd'])
for pinname, pin in t.pins.items(): #pin abstract
for x in range(_shape[0]):
for y in range(_shape[1]):
self.add_rect(None, xy=np.vstack((xy_phy+np.dot(pin['xy'][0]+t.size*np.array([x, y]), ut.Mt(transform).T),
xy_phy+np.dot(pin['xy'][1]+t.size*np.array([x, y]), ut.Mt(transform).T))),
layer=self.layers['prbnd'])
self.add_text(None, text=pinname+'/'+pin['netname'], xy=xy_phy, layer=self.layers['prbnd'])
self.add_text(None, text=inst.name+"/"+t.name, xy=xy_phy, layer=self.layers['prbnd'])
return inst
def relplace(self, name=None, templatename=None, gridname=None, refinstname=None, direction='right',
xy=np.array([0, 0]), offset=np.array([0, 0]), template_libname=None, shape=None,
spacing=None, transform='R0', refobj=None, libname=None, cellname=None):
"""
Place an instance on abstract grid, bound from a reference object. If reference object is not specified,
[0, 0]+offset is used as the reference point.
Equation = xy+refobj_xy+0.5*(Mt@refobj_size+Md@(refobj_size+inst_size)-Mti@inst_size).
Parameters
----------
name : str
Name of the instance.
cellname : str
Template name (cellname) of the instance.
gridname : str
Grid name for the placement.
xy : np.array([x, y]) or [int, int], optional
Placement coordinate on the grid, specified by gridname. If not specified, [0, 0] is used.
refobj : LayoutObject.Instance, optional
Reference instance handle, if None, refinstname is used. Will be extended to support other objects.
direction : str, optional
Direction of placement, bound from refobj. For example, if the instance will be place on top of refobj,
direction='top' is used
shape : np.array([x0, y0]) or None, optional
array shape parameter. If None, the instance is not considered as array. Default is None
transform : str ('R0', 'MX', 'MY')
Transform parameter. 'R0' is used by default.
libname : str, optional
Template library name. If not specified, self.templates.plib is used.
Returns
-------
laygo.layoutObject.Instance
generated instance
Other Parameters
----------------
refinstname : str, optional, deprecated
Reference instance name, if None, [0, 0] is used for the reference point.
templatename : str, deprecated
Replaced with cellname
template_libname: str, optional, deprecated
Replaced with libname
spacing : np.array([int, int]) or [int, int]
Array spacing parameter for the instance. If None, the size of the instance is used.
offset : np.array([float, float]), optional
Placement offset in physical coordinate.
See Also
--------
place : substrate function of relplace
"""
#TODO: Alignment option, bottom/top-left/right directions
# cellname handling
if not cellname is None:
templatename=cellname
# check if it's multiple placement
if isinstance(templatename, list): # multiple placement
flag_recursive=False #recursive placement flag. If True, next placement refer the current placement
# preprocessing arguments
len_inst = len(templatename) #number of instance to be placed (for multiple placements)
if name is None:
name = [None] * len_inst #extend Name list to be matched with templatename
if refinstname is None: #for backward compatibility. Use refobj instead of refinstname if possible
if refobj is None:
flag_recursive=True
_refinstname=[None for i in range(len_inst)]
else:
#check if refobj is list. If so, do a recursive placement
if isinstance(refobj, list):
_refinstname=[i.name for i in refobj]
else:
flag_recursive=True
_refinstname=[refobj.name]+[None for i in range(len_inst-1)]
else:
#check if refinstname is list. If so, do a recursive placement
if isinstance(refinstname, list):
_refinstname=refinstname
else:
flag_recursive=True
_refinstname=[refinstname]+[None for i in range(len_inst-1)]
if isinstance(xy[0], (int, np.int64)):
xy = [xy] * len_inst
if isinstance(direction, str):
direction = [direction] * len_inst
if shape is None:
shape = [shape] * len_inst
else:
if isinstance(shape[0], (int, np.int64)):
shape = [shape] * len_inst
if spacing is None:
spacing = [None] * len_inst
elif isinstance(spacing[0], (int, np.int64)):
spacing = [spacing] * len_inst
else:
if not isinstance(spacing, list): spacing = [spacing] * len_inst
if isinstance(transform, str): transform = [transform] * len_inst
return_inst_list = []
for i, nm, _refi_name, _xy, tl, dr, sh, sp, tr in zip(range(len_inst), name, _refinstname, xy, templatename, direction, shape, spacing, transform): #row placement
refi = GridLayoutGenerator.relplace(self, nm, tl, gridname, refinstname=_refi_name, direction=dr, xy=_xy,
offset=offset, template_libname=template_libname, shape=sh, spacing=sp,
transform=tr)#, refobj=refobj)
return_inst_list.append(refi)
if flag_recursive is True:
if not i == len_inst-1:
_refinstname[i+1] = refi.name
return return_inst_list
else: # single placement
### preprocessing starts ###
if shape is None:
_shape = np.array([1, 1])
else:
_shape = np.asarray(shape)
if spacing is not None: spacing = np.asarray(spacing)
xy = np.asarray(xy)
offset = np.asarray(offset)
if not libname is None:
template_libname = libname
if template_libname is None:
template_libname = self.templates.plib
### preprocessing ends ###
t_size_grid = self.get_template_xy(templatename, gridname, libname=template_libname)
t_size_grid = t_size_grid*_shape
#reference instance check
if (refobj is None) and (refinstname is None):
ir_xy_grid = np.array([0, t_size_grid[1]/2.0])
tr_size_grid = np.array([0, 0])
mtr = ut.Mt('R0')
mti = ut.Mt('R0')
else:
if not refobj is None:
if isinstance(refobj, Instance):
ir = refobj
elif isinstance(refobj, InstanceArray):
ir = refobj
elif isinstance(refobj, Pointer):
ir = refobj.master
direction = refobj.name
else:
ir = self.get_inst(refinstname)
tr = self.templates.get_template(ir.cellname, libname=ir.libname)
#get abstract grid coordinates
ir_xy_grid = self.get_absgrid_xy(gridname, ir.xy)
tr_size_grid = self.get_absgrid_xy(gridname, tr.size+(ir.shape-np.array([1,1]))*ir.spacing)
mtr = ut.Mt(ir.transform)
mti = ut.Mt(transform)
#direction
md = ut.Md(direction)
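# This implements the placement equation from the docstring:
#   i_xy = ref_xy + 0.5*(Mt@ref_size + Md@(ref_size + inst_size) - Mti@inst_size)
# where Mt/Mti are the transform matrices of the reference and new instance and Md
# encodes the relative placement direction.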
i_xy_grid = ir_xy_grid + 0.5 * (np.dot(tr_size_grid, mtr.T) + np.dot(tr_size_grid + t_size_grid, md.T)
- np.dot(t_size_grid, mti.T))
return GridLayoutGenerator.place(self, name=name, templatename=templatename, gridname=gridname, xy=i_xy_grid+xy, offset=offset,
template_libname=template_libname, shape=shape, spacing=spacing, transform=transform)
def via(self, name=None, xy=np.array([0, 0]), gridname=None, refobj=None, refobjindex=np.array([0, 0]), offset=np.array([0, 0]), refinstname=None, refinstindex=np.array([0, 0]),
refpinname=None, transform='R0', overwrite_xy_phy=None, overlay=None):
"""
Place a via on abstract grid, bound from a reference object. If reference object is not specified,
[0, 0]+offset is used as the reference point.
Parameters
----------
name : str
Name of the via
xy : np.array([int, int]) or [int, int]
xy coordinate of the via
gridname : str
Grid name of the via
refobj : LayoutObject.LayoutObject
Reference object(Instance/Pin/Rect) handle. If None, refinstiname is used.
overlay : LayoutObject.LayoutObject
Layout object for via placement at intersection (via will be placed at the overlaid point btn refobj and overlay)
Use with refobj only. Not compatible with legacy reference parameters (refinstname)
transform : str ('R0', 'MX', 'MY'), optional
Transform parameter for grid. Overwritten by transform of refinstname if not specified.
Returns
-------
laygo.layoutObject.Instance
generated via instance
Other Parameters
----------------
offset : np.array([float, float]), optional
Offset on the physical grid, bound from xy
overwrite_xy_phy : None or np.array([float, float]), optional
If specified, final xy physical coordinates are overwritten by the argument.
refobjindex : np.array([int, int]), optional, deprecated
Index of refobj if it is a mosaic instance.
refinstname : str, optional, deprecated
Reference instance name for xy. If None, origin([0,0]) is used as the reference point.
refinstindex : str, optional, deprecated
Index of refinstname if it is a mosaic instance
refpinname : str, optional, deprecated
Reference pin of refinstname for reference point of xy. If None, the origin of refinstname is used.
"""
if isinstance(refobj, np.ndarray) or isinstance(overlay, np.ndarray): #multiple placement
if isinstance(refobj, np.ndarray) and isinstance(overlay, np.ndarray): #both array
_refobj = refobj.flat
_overlay = overlay.flat
elif isinstance(refobj, np.ndarray):
_refobj = refobj.flat
_overlay = np.empty(refobj.shape, dtype=overlay.__class__)
for i, o in np.ndenumerate(_overlay):
_overlay[i] = overlay
_overlay = _overlay.flat
elif isinstance(overlay, np.ndarray):
_overlay = overlay.flat
_refobj = np.empty(overlay.shape, dtype=refobj.__class__)
for i, o in np.ndenumerate(_refobj):
_refobj[i] = refobj
_refobj = _refobj.flat
return_via_list = []
for r0, o0 in zip(_refobj, _overlay):
refv = GridLayoutGenerator.via(self, name=name, xy=xy, gridname=gridname, refobj=r0, offset=offset, transform=transform,
overwrite_xy_phy=overwrite_xy_phy, overlay=o0)
return_via_list.append(refv)
return return_via_list
else:
### preprocessing arguments starts ###
xy = np.asarray(xy)
offset = np.asarray(offset)
refinstindex = np.asarray(refinstindex)
# reading coordinate information from the reference objects
# this needs to be cleaned up
refinst = None
refrect0 = None
refrect1 = None
if not refobj is None:
if isinstance(refobj, Instance):
refinst = refobj
refinstindex=refobjindex
elif isinstance(refobj, InstanceArray):
refinst = refobj
refinstindex=refobjindex
elif isinstance(refobj, Pin):
refinst = refobj.master
refinstindex=refobjindex
refpinname=refobj.name
elif isinstance(refobj, Rect):
refrect0 = refobj
else:
if not refinstname is None:
refinst = self.get_inst(refinstname)
if not overlay is None:
if isinstance(overlay, Rect):
refrect1 = overlay
### preprocessing arguments ends ###
# get physical grid coordinates
# need to be refactored
if not refinst is None:
reftemplate = self.templates.get_template(refinst.cellname, libname=refinst.libname)
offset = offset + refinst.xy + np.dot(refinst.spacing * refinstindex, ut.Mt(refinst.transform).T)
if not refpinname == None: #if pin reference is specified
pin_xy_phy=reftemplate.pins[refpinname]['xy']
bbox=pin_xy_phy
if not refrect1 is None: #overlay
bbox0=pin_xy_phy
bbox1=np.dot(refrect1.xy - refinst.xy, ut.Mtinv(refinst.transform).T)
sx=sorted([bbox0[0][0], bbox0[1][0], bbox1[0][0], bbox1[1][0]])
sy=sorted([bbox0[0][1], bbox0[1][1], bbox1[0][1], bbox1[1][1]])
bbox=np.array([[sx[1], sy[1]], [sx[2], sy[2]]])
#pin_xy_abs=self.get_absgrid_region(gridname, pin_xy_phy[0], pin_xy_phy[1])[0,:]
pin_xy_abs=self.get_absgrid_region(gridname, bbox[0], bbox[1])[0,:]
xy=xy+pin_xy_abs
transform=refinst.transform #overwrite transform variable
if not refrect0 is None:
xy=xy+self.get_absgrid_region(gridname, refrect0.xy0, refrect0.xy1)[0,:]
if not refrect1 is None:
#TODO: implement overlay function using refrect1
pass
vianame = self.grids.get_vianame(gridname, xy)
if overwrite_xy_phy is None:
xy_phy=np.dot(self.grids.get_phygrid_xy(gridname, xy), ut.Mt(transform).T)+offset
else:
xy_phy=overwrite_xy_phy
inst=self.add_inst(name=name, libname=self.grids.plib, cellname=vianame, xy=xy_phy, transform=transform)
if self.use_phantom==True:
size=self.grids.get_route_width_xy(gridname, xy)
self.add_rect(None, xy=np.vstack((xy_phy-0.5*size, xy_phy+0.5*size)),
layer=self.layers['text'])
self.add_text(None, text=vianame, xy=xy_phy, layer=self.layers['text'])
return inst
# Route functions
def route(self, name=None, layer=None, xy0=np.array([0, 0]), xy1=np.array([0, 0]), gridname0=None, gridname1=None, direction='omni',
refobj0=None, refobj1=None, refobjindex0=np.array([0, 0]), refobjindex1=np.array([0, 0]),
refinstname0=None, refinstname1=None, refinstindex0=np.array([0, 0]), refinstindex1=np.array([0, 0]),
refpinname0=None, refpinname1=None, offset0=np.array([0,0]), offset1=None,
transform0='R0', transform1=None, endstyle0="truncate", endstyle1="truncate",
via0=None, via1=None, netname=None):
"""
Route on abstract grid, bound from reference objects. If reference objects are not specified,
[0, 0]+offset is used as reference points.
This function is a bit messy because its main arguments were originally refinst/refinstindex/refpinname,
then switched to refobj/refobjindex, and finally to refobj only. At some point this code should be rewritten.
Parameters
----------
name : str
Route name. If None, the name will be automatically assigned by genid.
layer : [str, str], optional
Routing layer [name, purpose]. If None, it figures out the layer from grid and coordinates
xy0 : np.array([int, int]) or [int, int]
xy coordinate for start point.
xy1 : np.array([int, int]) or [int, int]
xy coordinate for end point.
gridname0 : str
Grid name0
gridname1 : str, optional
Grid name1
direction : str, optional
Routing direction (omni, x, y, ...). It will be used as the input argument of PrimitiveUtil.Md.
refobj0 : LayoutObject.LayoutObject
Reference object(Instance/Pin/Rect) handle. If None, refinstiname0 is used.
refobj1 : LayoutObject.LayoutObject
Reference object(Instance/Pin/Rect) handle. If None, refinstiname1 is used.
transform0 : str, optional
Transform parameter for grid0. Overwritten by transform of refinstname0 if not specified.
transform1 : str, optional
Transform parameter for grid1. Overwritten by transform of refinstname1 if not specified.
endstyle0 : str ('extend', 'truncate'), optional
End style of xy0 (extend the edge by width/2 if endstyle=='extend')
endstyle1 : str ('extend', 'truncate'), optional
End style of xy1 (extend the edge by width/2 if endstyle=='extend')
via0 : None or np.array([x, y]) or np.array([[x0, y0], [x1, y1], [x2, y2], ...]), optional
Offset coordinates for via placements, bound from xy0
ex) if xy0 = [1, 2], xy1 = [1, 5], via0 = [0, 2] then a via will be placed at [1, 4]
via1 : None or np.array([x, y]) or np.array([[x0, y0], [x1, y1], [x2, y2], ...]), optional
Offset coordinates for via placements, bound from xy1
ex) if xy0 = [1, 2], xy1 = [1, 5], via1 = [0, 2] then a via will be placed at [1, 7]
netname : str, optional
net name of the route
Returns
-------
laygo.layoutObject.Rect
generated route
Other Parameters
----------------
offset0 : np.array([float, float]), optional
Coordinate offset from xy0, on the physical grid.
offset1 : np.array([float, float]), optional
Coordinate offset from xy1, on the physical grid.
refobjindex0 : np.array([int, int]), optional, deprecated
Index of refobj0 if it is a mosaic instance.
refobjindex1 : np.array([int, int]), optional, deprecated
Index of refobj1 if it is a mosaic instance.
refinstname0 : str, optional, deprecated
Reference instance name for start point. If None, origin([0,0]) is used as the reference point.
refinstname1 : str, optional, deprecated
Reference instance name for end point. If None, origin([0,0]) is used as the reference point.
refinstindex0 : np.array([int, int]), optional, deprecated
Index of refinstname0 if it is a mosaic instance.
refinstindex1 : np.array([int, int]), optional, deprecated
Index of refinstname1 if it is a mosaic instance.
refpinname0 : str, optional, deprecated
Reference pin of refinstname0 for reference point of xy0. If None, the origin of refinstname0 is used.
refpinname1 : str, optional, deprecated
Reference pin of refinstname1 for reference point of xy1. If None, the origin of refinstname1 is used.
"""
bool_r0 = isinstance(refobj0, np.ndarray) or isinstance(refobj0, InstanceArray)
bool_r1 = isinstance(refobj1, np.ndarray) or isinstance(refobj1, InstanceArray)
if bool_r0 or bool_r1: #multiple placement
if bool_r0 and bool_r1: #both array
_refobj0 = refobj0.flat
_refobj1 = refobj1.flat
elif bool_r0:
_refobj0 = refobj0.flat
_refobj1 = np.empty(refobj0.shape, dtype=refobj1.__class__)
for i, o in np.ndenumerate(_refobj1):
_refobj1[i] = refobj1
_refobj1 = _refobj1.flat
elif bool_r1:
_refobj1 = refobj1.flat
_refobj0 = np.empty(refobj1.shape, dtype=refobj0.__class__)
for i, o in np.ndenumerate(_refobj0):
_refobj0[i] = refobj0
_refobj0 = _refobj0.flat
return_rect_list = []
for r0, r1 in zip(_refobj0, _refobj1):
refr = GridLayoutGenerator.route(self, name=name, layer=layer, xy0=xy0, xy1=xy1, gridname0=gridname0,
gridname1=gridname1, direction=direction, refobj0=r0, refobj1=r1,
offset0=offset0, offset1=offset1, transform0=transform0, transform1=transform1,
endstyle0=endstyle0, endstyle1=endstyle1, via0=via0, via1=via1, netname=netname)
#Used GridLayoutGenerator for abstracting the function in GridLayoutGenerator2
return_rect_list.append(refr)
return return_rect_list
else:
# exception handling
if xy0 is None: raise ValueError('GridLayoutGenerator.route - specify xy0')
if xy1 is None: raise ValueError('GridLayoutGenerator.route - specify xy1')
if gridname0 is None: raise ValueError('GridLayoutGenerator.route - specify gridname0')
### preprocessing arguments starts ###
xy0 = np.asarray(xy0)
xy1 = np.asarray(xy1)
refinstindex0 = np.asarray(refinstindex0)
refinstindex1 = np.asarray(refinstindex1)
refinst0 = None
refinst1 = None
offset0 = np.asarray(offset0)
if not offset1 is None: offset1 = np.asarray(offset1)
#
# Test GeoImage
#
from unittest import TestCase, main
import tempfile
import shutil
# Numpy
import numpy as np
# GDAL
from osgeo.gdal import __version__ as gdal_version
# Project
from gimg import GeoImage
from gimg.common import get_dtype
from gimg.GeoImage import compute_geo_extent, compute_geo_transform
from .create_synthetic_images import create_synthetic_image_file, create_virt_image
from . import check_metadata
class TestGeoImage(TestCase):
def setUp(self):
self.gdal_version_major = int(gdal_version[0])
# Create local temp directory
self.local_temp_folder = tempfile.mkdtemp()
def tearDown(self):
# Delete temp directory
shutil.rmtree(self.local_temp_folder)
def test_with_synthetic_image(self):
is_complex = False
shape = (120, 100, 2)
depth = 2
filepath, data, geo_extent, metadata, geo_transform, epsg = create_synthetic_image_file(self.local_temp_folder,
shape, depth,
is_complex)
gimage = GeoImage(filepath)
if self.gdal_version_major > 1:
self.assertTrue(check_metadata(metadata, gimage.metadata),
"{} vs {}".format(metadata, gimage.metadata))
self.assertLess(np.sum(np.abs(geo_extent - gimage.geo_extent)), 1e-10)
self.assertEqual(epsg, gimage.get_epsg())
gimage_data = gimage.get_data()
self.assertEqual(shape, gimage_data.shape)
self.assertEqual(get_dtype(depth, is_complex), gimage_data.dtype)
# verify data
self.assertLess(np.sum(np.abs(data - gimage_data)), 1e-10)
def test_with_synthetic_image_with_select_bands(self):
is_complex = False
shape = (120, 100, 5)
depth = 2
filepath, data, geo_extent, metadata, geo_transform, epsg = create_synthetic_image_file(self.local_temp_folder,
shape, depth,
is_complex)
gimage = GeoImage(filepath)
if self.gdal_version_major > 1:
self.assertTrue(check_metadata(metadata, gimage.metadata),
"{} vs {}".format(metadata, gimage.metadata))
self.assertLess(np.sum(np.abs(geo_extent - gimage.geo_extent)), 1e-10)
self.assertEqual(epsg, gimage.get_epsg())
select_bands = [0, 2, 4]
gimage_data = gimage.get_data(select_bands=select_bands)
self.assertEqual(shape[:2], gimage_data.shape[:2])
self.assertEqual(len(select_bands), gimage_data.shape[2])
self.assertEqual(get_dtype(depth, is_complex), gimage_data.dtype)
# verify data
self.assertLess(np.sum(np.abs(data[:, :, select_bands] - gimage_data)), 1e-10)
def test_with_virtual_image(self):
dataset, data = create_virt_image(100, 120, 2, np.uint16)
gimage = GeoImage.from_dataset(dataset)
gimage_data = gimage.get_data(nodata_value=0)
# verify shape and dtype:
self.assertEqual(data.shape, gimage_data.shape)
self.assertEqual(data.dtype, gimage_data.dtype)
# verify data
self.assertLess(np.sum(np.abs(data - gimage_data)), 1e-10)
def test_with_virtual_image2(self):
dataset, data = create_virt_image(100, 120, 2, np.float32)
gimage = GeoImage.from_dataset(dataset)
gimage_data = gimage.get_data(nodata_value=-123)
# verify shape and dtype:
self.assertEqual(data.shape, gimage_data.shape)
self.assertEqual(data.dtype, gimage_data.dtype)
# verify data
self.assertLess(np.sum(np.abs(data - gimage_data)), 1e-10)
def test_from_dataset_with_select_bands(self):
dataset, data = create_virt_image(100, 120, 5, np.float32)
gimage = GeoImage.from_dataset(dataset)
select_bands = [0, 2, 4]
gimage_data = gimage.get_data(nodata_value=-123, select_bands=select_bands)
# verify shape and dtype:
self.assertEqual(data.shape[:2], gimage_data.shape[:2])
self.assertEqual(len(select_bands), gimage_data.shape[2])
self.assertEqual(data.dtype, gimage_data.dtype)
# verify data
self.assertLess(np.sum(np.abs(data[:, :, select_bands] - gimage_data)), 1e-10)
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05_cli.ipynb (unless otherwise specified).
__all__ = ['logger', 'URLs', 'download', 'VWORDS', 'N_IMGS', 'N_FRAMES_TO_KEEP', 'FPS', 'BEST_DL_MODELS',
'BEST_IR_MODELS', 'BEST_MODEL_CONFIGS', 'reproduce', 'tango']
# Cell
import logging
import io
import pickle
import pprint
import random
import requests
import subprocess
import time
import zipfile
import numpy as np
import pandas as pd
from fastcore.script import call_parse, Param
from pathlib import Path
from .prep import *
from .features import *
from .eval import *
from .model import *
from .approach import *
from .combo import *
from tqdm.auto import tqdm
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Cell
URLs = {
"tango_reproduction_package": "https://zenodo.org/record/4453765/files/tango_reproduction_package.zip",
}
# Cell
# @call_parse
def _download(
out_path
):
"""Function for downloading all data and results related to this tool's paper"""
out_path = Path(out_path)
out_path.mkdir(parents=True, exist_ok=True)
logging.info(f"Downloading and extracting datasets and models to {str(out_path)}.")
r = requests.get(URLs["tango_reproduction_package"])
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(out_path)
# Cell
@call_parse
def download(
out_path: Param("The output path to save and unzip all files.", str)
):
_download(out_path)
# Cell
# all hyperparameters used
VWORDS = [1_000, 5_000, 10_000]
N_IMGS = 15_000
N_FRAMES_TO_KEEP = [1, 5]
FPS = 30
# Fix naming issue and number of models reported...
BEST_DL_MODELS= [
"SIFT-10000vw-1ftk-bovw_weighted_lcs",
"SimCLR-1000vw-5ftk-bovw", "SimCLR-5000vw-5ftk-bovw_lcs",
"SimCLR-5000vw-5ftk-bovw_weighted_lcs", "SimCLR-1000vw-5ftk-bovw_weighted_lcs"
]
BEST_IR_MODELS = [
"ocr+ir-1ftk-all_text", "ocr+ir-5ftk-all_text",
"ocr+ir-5ftk-unique_frames", "ocr+ir-5ftk-unique_words"
]
BEST_MODEL_CONFIGS = {
"SimCLR": "SimCLR-1000vw-5ftk-bovw",
"SIFT": "SIFT-10000vw-1ftk-bovw_weighted_lcs",
"OCR+IR": "ocr+ir-5ftk-all_text"
}
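# Naming convention for the model configs above (as far as it can be inferred from the
# hyperparameters): "<features>-<codebook size>vw-<frames to keep>ftk-<ranking scheme>",
# e.g. "SimCLR-1000vw-5ftk-bovw" uses SimCLR features, a 1000-visual-word codebook,
# ftk=5 kept frames during feature extraction, and bag-of-visual-words similarity for ranking.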
# Cell
def _generate_vis_results(vid_ds, out_path, art_path, vis_model):
if vis_model == "SimCLR":
simclr = SimCLRModel.load_from_checkpoint(
checkpoint_path = str(
art_path/"models"/"SimCLR"/"checkpointepoch=98.ckpt"
)
).eval()
model = SimCLRExtractor(simclr)
sim_func = simclr_frame_sim
else:
model = SIFTExtractor(cv2.xfeatures2d.SIFT_create(nfeatures = 10))
sim_func = sift_frame_sim
logging.info(f"Computing rankings and calculating metrics for {vis_model} visual model.")
for vw in tqdm(VWORDS):
for ftk in tqdm(N_FRAMES_TO_KEEP):
evaluation_metrics = {}
cb_path = art_path/"models"/vis_model/f"cookbook_{vis_model}_{vw}vw.model"
codebook = pickle.load(open(cb_path, "rb"))
start = time.time()
vid_ds_features = gen_extracted_features(vid_ds, model, FPS, ftk)
end = time.time()
feature_gen_time = end - start
df, bovw_vid_ds_sims = gen_bovw_similarity(
vid_ds, vid_ds_features, model, codebook, vw, ftk
)
lcs_vid_ds_sims = gen_lcs_similarity(
vid_ds, vid_ds_features, sim_func, model, codebook, df, vw, ftk
)
rankings = approach(
vid_ds, vid_ds_features, bovw_vid_ds_sims, lcs_vid_ds_sims, model, sim_func,
codebook, df, vw, fps = FPS, ftk = ftk,
)
for k, v in rankings.items():
evaluation_metrics[k] = evaluate(rankings[k])
id_name = f"user_{N_IMGS}n_{vw}vw_{FPS}fps_{ftk}ftk"
results_path = out_path/"results"/vis_model
results_path.mkdir(parents=True, exist_ok=True)
logging.info(f"Saving rankings and metrics to {str(results_path)}.")
with open(results_path/f"rankings_{id_name}.pkl", "wb") as f:
pickle.dump(rankings, f, protocol=pickle.HIGHEST_PROTOCOL)
with open(results_path/f"evaluation_metrics_{id_name}.pkl", 'wb') as f:
pickle.dump(evaluation_metrics, f, protocol=pickle.HIGHEST_PROTOCOL)
# Cell
def _generate_txt_results(vid_ds, out_path, art_path, vis_model):
logging.info("Computing rankings and calculating metrics for textual model.")
csv_file_path = art_path/"user_assignment.csv"
settings_path = out_path/"evaluation_settings"
settings_path.mkdir(parents=True, exist_ok=True)
video_data = read_video_data(csv_file_path)
generate_setting2(video_data, settings_path)
convert_results_format(out_path/"results", settings_path, out_path, [vis_model])
# Check if files already exist and skip if they do because it takes a long time
txt_out_path = out_path/"extracted_text"
for ftk in N_FRAMES_TO_KEEP:
if not (txt_out_path/f"text_{ftk}").exists():
get_all_texts(vid_ds, txt_out_path, fps = ftk)
txt_path = art_path/"models"/"OCR+IR"
subprocess.check_output(
["sh", "build_run.sh", str(txt_out_path), str(settings_path)],
cwd=str(txt_path),
)
# Cell
def _get_single_model_performance(pkl_path, technique):
evals = pickle.load(open(pkl_path, 'rb'))
mRRs = []
mAPs = []
mean_Rs = []
for app in evals[technique]:
mRRs.append(evals[technique][app]["App mRR"])
mAPs.append(evals[technique][app]["App mAP"])
mean_Rs.append(evals[technique][app]["App mean rank"])
return np.mean(mRRs), np.mean(mAPs), np.mean(mean_Rs)
# Cell
def _print_performance(model, mRR, mAP, mean_R):
print(
f"""\
Model: {model}
Overall mRR: {mRR}
Overall mAP: {mAP}
Overall Mean Rank: {mean_R}
"""
)
def _output_performance(out_path, dl_model_config, ir_model_config):
all_results = pd.read_csv(
out_path/"combined"/"tango_comb_results"/"all_results.csv", sep=";"
)
# Print the comb model results
comb_results = all_results[
(all_results["model_config"] == f"({dl_model_config},{ir_model_config})")
& (all_results["weight"] == "0.2-0")
]
_print_performance(
f"{dl_model_config},{ir_model_config},weight=0.2-0", np.mean(comb_results['recip_rank'].values),
np.mean(comb_results['avg_precision'].values), np.mean(comb_results['first_rank'].values)
)
# Print the vis model results
vis_singl_results = all_results[
(all_results["model_config"] == f"({dl_model_config},{ir_model_config})")
& (all_results["weight"] == "0.0")
]
_print_performance(
f"{dl_model_config}", np.mean(vis_singl_results['recip_rank'].values),
np.mean(vis_singl_results['avg_precision'].values),
np.mean(vis_singl_results['first_rank'].values)
)
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
plt.style.use('seaborn')
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument('--cpg_file', help='Path to the cpg file', required=True)
parser.add_argument('--output_folder', help='Path of the output folder', required=False)
args = parser.parse_args()
return args
def global_cpg_info(df):
# Give some statistics on the data
num_of_pmd = df.groupby(["chromosome", "pmd_index"]).ngroups
num_of_unique_seq = len(df["sequence"].unique())
print("We have a total of %s CpG" % df.shape[0])
print("We have a total of %s PMDs" % num_of_pmd)
print("We have %s unique sequences" % num_of_unique_seq)
solo = [seq for seq in df["sequence"] if seq.count("CG") == 1]
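# NOTE: indices 73 and 76 below are the bases immediately flanking the central CpG
# (this assumes the CpG itself sits at positions 74-75 of each sequence window).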
weak_solo = [seq for seq in solo if seq[73] in ["A", "T"] and seq[76] in ["A", "T"]]
strong_solo = [seq for seq in solo if seq[73] in ["C", "G"] and seq[76] in ["C", "G"]]
print("We have %s solo CpG which is %s" % (len(solo), len(solo) / df.shape[0] * 100))
print("Include %s WCGW solo and %s SCGS solo" % (len(weak_solo), len(strong_solo)))
def histogram_of_coverage_across_patients(df):
# Create histogram of the coverage of covariance between different patients
cov_columns = df[["cov01", "cov11", "cov13"]]
values = pd.notnull(cov_columns).sum(axis=1).values
_ = plt.hist(values, bins='auto')
plt.style.use('ggplot')
plt.title("Covariance coverage across samples")
plt.xlabel("Covered by X patients")
plt.ylabel("Amount of CpG")
plt.savefig("hist_of_covariance_cov_across_samples.png")
plt.close()
def histogram_of_num_of_cg(df):
# Histogram of the number of CG in the data
num_of_cg = [seq.count("CG") for seq in df["sequence"]]
_ = plt.hist(num_of_cg)
plt.style.use('ggplot')
plt.title("Number of CpG in seq include middle. Median (%s)" % np.median(num_of_cg))
plt.xlabel("Number of CpG")
plt.ylabel("Amount")
plt.savefig("num_of_cg_in_seq_include_middle_all.png")
plt.close()
num_of_cg = [i for i in num_of_cg if i < 5]
_ = plt.hist(num_of_cg)
plt.style.use('ggplot')
plt.title("Number of CpG in seq include middle less than 5. Median (%s)" % np.median(num_of_cg))
plt.xlabel("Number of CpG")
plt.ylabel("Amount")
plt.savefig("num_of_cg_in_seq_include_middle_lower_than_5.png")
plt.close()
def histogram_on_numb_of_cpg_per_pmd(df):
# Histogram of the number of CpG in PMD
values = df.groupby(["chromosome", "pmd_index"]).count()["sequence"].values
values.sort()
_ = plt.hist(values)
plt.style.use('ggplot')
plt.title("Hist of valid CpG per PMD. Median(%s)" % np.median(values))
plt.xlabel("Number of CpG")
plt.ylabel("Amount")
plt.savefig("num_of_valid_cpg_per_pmd_all.png")
plt.close()
lower_values = [i for i in values if i <= 5000]
_ = plt.hist(lower_values)
plt.style.use('ggplot')
plt.title("Hist of valid CpG per PMD less than 5000 seq. Median (%s)" % np.median(lower_values))
plt.xlabel("Number of CpG")
plt.ylabel("Amount")
plt.savefig("num_of_valid_cpg_per_pmd_less_than_5000_seq.png")
plt.close()
def plot_methylation_vs_covariance(df):
# Plot 2d histogram, scatter plot and density plot
meth = df["meth_mean"]
cov = df["cov_mean"]
outliers_ind = np.abs(stats.zscore(cov)) < 3
df_i = df[outliers_ind]
meth_i = meth[outliers_ind]
cov_i = cov[outliers_ind]
# Draw 2d plot
h = plt.hist2d(meth_i, cov_i)
plt.colorbar(h[3])
plt.savefig("2dhist_methylation_vs_covariance_global.png")
plt.close()
# Draw only PMD
ind = df_i["pmd_index"] > 0
meth_ip = meth_i[ind]
cov_ip = cov_i[ind]
plt.plot(meth_ip, cov_ip, linestyle='', marker='o', markersize=0.5)
plt.title("Methylation level vs Covariance in PMD")
plt.xlabel("Avg methylation level")
plt.ylabel("Covariance in window")
plt.savefig("methylation_vs_covariance_pmds.png")
plt.close()
v = np.vstack((meth_ip.values, cov_ip.values))
dfs = pd.DataFrame(v.T, columns=["meth", "cov"])
sns_plot = sns.jointplot(x="meth", y="cov", data=dfs, kind="kde")
sns_plot.savefig("methylation_vs_covariance_pmds_cluster.png")
plt.close()
def plot_methylation_vs_covariance_solo(df, patient):
cov_label = "cov%s" % patient
meth_label = "meth%s" % patient
solo_rows = df[df["sequence"].str.count("CG") == 1]
solo_rows = solo_rows[~solo_rows[meth_label].isnull()]
solo_rows = solo_rows[solo_rows["pmd_index"] <= 100]
meth_mean = solo_rows[meth_label]
cov_mean = solo_rows[cov_label]
# meth_mean = np.round(meth_mean * 500).astype(np.int) / 500
# z_cov = np.abs(stats.zscore(cov_mean.values))
# outliers_ind = z_cov > 0
# plt.plot(meth_mean[outliers_ind], cov_mean[outliers_ind], linestyle='', marker='o', markersize=0.5)
plt.plot(meth_mean, cov_mean, linestyle='', marker='.', markersize=0.2)
plt.title("Methylation level vs Covariance in solo CpG for patient %s" % patient, fontsize=20)
plt.xlabel("Methylation level", fontsize=16)
plt.ylabel("Covariance", fontsize=16)
plt.savefig("solo_meth_vs_cov_scatter_patient%s.png" % patient)
plt.close()
#
# v = np.vstack((meth_mean[outliers_ind].values, cov_mean[outliers_ind].values))
# dfs = pd.DataFrame(v.T, columns=["meth", "cov"])
# sns_plot = sns.jointplot(x="meth", y="cov", data=dfs, kind="kde")
# plt.title("Methylation level vs Covariance in solo CpG for patient %s" %patient)
# sns_plot.savefig("solo_meth_vs_cov_patient%s.png" %patient)
# plt.close()
def check_flip_seq(df):
# Some code to create the reverse compl to all the sequence and check how much data it's add
sequences = df["sequence"]
seq_uniq = set(sequences)
print("We have %s uniq seq to beging with" % len(seq_uniq))
translation_table = {84: 65, 65: 84, 67: 71, 71: 67}
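# The table is keyed by ASCII codes: T(84)<->A(65) and C(67)<->G(71), so str.translate
# swaps every base for its complement before the sequence is reversed below.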
flipped = []
for s in seq_uniq:
seq_translated = s.translate(translation_table)
seq_flipped = seq_translated[::-1]
flipped.append(seq_flipped)
flipped_uniq = set(flipped)
comb = flipped_uniq | seq_uniq
print("Combined: %s" % len(comb))
def plot_methylation_vs_covariance_solo_vs_non_solo(df):
pmd_df = df[df["pmd_index"] <= 10]
solo_rows = pmd_df[pmd_df["sequence"].str.count("CG") == 1]
non_solo_rows = pmd_df[pmd_df["sequence"].str.count("CG") > 1]
solo_meth = solo_rows["meth_mean"]
solo_cov = solo_rows["cov_mean"]
nsolo_meth = non_solo_rows["meth_mean"]
nsolo_cov = non_solo_rows["cov_mean"]
solo_z_cov = np.abs(stats.zscore(solo_cov.values))
solo_outliers_ind = solo_z_cov < 3
nsolo_z_cov = np.abs(stats.zscore(nsolo_cov.values))
nsolo_outliers_ind = nsolo_z_cov < 3
solo = plt.scatter(solo_meth[solo_outliers_ind], solo_cov[solo_outliers_ind], marker='x', color='r')
nsolo = plt.scatter(nsolo_meth[nsolo_outliers_ind], nsolo_cov[nsolo_outliers_ind], marker='o',
color='b', alpha=0.05)
plt.title("Methylation level vs Covariance in solo(red) vs non-solo (blue)")
plt.xlabel("Avg methylation level")
plt.ylabel("Covariance in window")
plt.savefig("methylation_vs_covariance_solo_vs_non_solo.png")
plt.close()
def plot_methylation_vs_covariance_solo_weak_vs_strong(df):
pmd_df = df[df["pmd_index"] <= 10]
solo_rows = pmd_df[pmd_df["sequence"].str.count("CG") == 1]
ou = np.abs(stats.zscore(solo_rows["cov_mean"].values)) < 3
solo_rows = solo_rows[ou]
strong_rows = solo_rows[solo_rows["sequence"].str.contains("[CG]CG[CG]", regex=True)]
weak_rows = solo_rows[solo_rows["sequence"].str.contains("[AT]CG[AT]", regex=True)]
weak_meth = weak_rows["meth_mean"]
weak_cov = weak_rows["cov_mean"]
weak_z_cov = np.abs(stats.zscore(weak_cov.values))
weak_outliers_ind = weak_z_cov < 3
weak_meth_i = weak_meth
weak_cov_i = weak_cov
strong_meth = strong_rows["meth_mean"]
strong_cov = strong_rows["cov_mean"]
strong_z_cov = np.abs(stats.zscore(strong_cov.values))
strong_outliers_ind = strong_z_cov < 3
strong_meth_i = strong_meth
strong_cov_i = strong_cov
solo = plt.scatter(weak_meth, weak_cov, marker='x', color='r')
nsolo = plt.scatter(strong_meth, strong_cov, marker='o', color='b', alpha=0.05)
plt.title("Methylation level vs Covariance in WSCW(red) vs SCGS(blue)")
plt.xlabel("Avg methylation level")
plt.ylabel("Covariance in window")
plt.savefig("methylation_vs_covariance_strong_vs_weak.png")
plt.close()
def plot_cov_density(df):
pmd_df = df[np.logical_and(df["meth01"] >= 0.4, df["meth01"] <= 0.6)] # not really, just middle lines
# pmd_df = pmd_df [~pmd_df ["cov01"].isnull()]
solo_rows = pmd_df[pmd_df["sequence"].str.count("CG") == 1]
cov_solo = solo_rows["cov01"]
nsolo_rows = pmd_df[pmd_df["sequence"].str.count("CG") > 1]
cov_nsolo = nsolo_rows["cov01"]
weak_rows = solo_rows[solo_rows["sequence"].str.contains("[AT]CG[AT]", regex=True)]
strong_rows = solo_rows[solo_rows["sequence"].str.contains("[CG]CG[CG]", regex=True)]
cov_weak = weak_rows["cov01"]
cov_strong = strong_rows["cov01"]
sns.distplot(cov_nsolo, hist=False, kde=True, kde_kws={'linewidth': 3}, label="not solo")
sns.distplot(cov_solo, hist=False, kde=True, kde_kws={'linewidth': 3}, label="solo")
sns.distplot(cov_strong, hist=False, kde=True, kde_kws={'linewidth': 3}, label="SCGS")
sns.distplot(cov_weak, hist=False, kde=True, kde_kws={'linewidth': 3}, label="WSCW")
plt.show()
def plot_meth_density(df, meth_v):
# pmd_df = df[np.logical_and(df["meth01"] >=0.4, df["meth01"] <= 0.6)]# not really, just middle lines
pmd_df = df[~df[meth_v].isnull()]
x = []
solo_rows = pmd_df[pmd_df["sequence"].str.count("CG") == 1]
cov_solo = solo_rows[meth_v]
nsolo_rows = pmd_df[pmd_df["sequence"].str.count("CG") > 1]
cov_nsolo = nsolo_rows[meth_v]
weak_rows = solo_rows[solo_rows["sequence"].str.contains("[AT]CG[AT]", regex=True)]
strong_rows = solo_rows[solo_rows["sequence"].str.contains("[CG]CG[CG]", regex=True)]
cov_weak = weak_rows[meth_v]
cov_strong = strong_rows[meth_v]
sns.distplot(cov_nsolo, hist=False, kde=True, kde_kws={'linewidth': 3}, label="not solo")
sns.distplot(cov_solo, hist=False, kde=True, kde_kws={'linewidth': 3}, label="solo")
sns.distplot(cov_strong, hist=False, kde=True, kde_kws={'linewidth': 3}, label="SCGS")
sns.distplot(cov_weak, hist=False, kde=True, kde_kws={'linewidth': 3}, label="WSCW")
if meth_v != "meth_mean":
plt.title("Methylation density for CRC%s" % meth_v[-2:])
else:
plt.title("Methylation density - avg")
plt.savefig("methylation_dens_for_%s.png" % (meth_v))
plt.close()
def plot_patients_meth(df):
solo_rows = df[df["sequence"].str.count("CG") == 1]
cov1 = solo_rows["meth13"]
cov11 = solo_rows["meth11"]
v = np.vstack((cov1.values, cov11.values))
dfs = pd.DataFrame(v.T, columns=["meth13", "meth11"])
sample = dfs.sample(frac=0.005)
plt.scatter(sample["meth13"], sample["meth11"])
plt.xlabel("meth13")
plt.ylabel("meth11")
plt.title("sample of meth01 and meth11 methylation")
plt.show()
plt.close()
def get_num_of_seq_in_extreme_meth(df):
# Only create solo
solo_rows = df[df["sequence"].str.count("CG") == 1]
methylation_columns = solo_rows[["meth01", "meth11", "meth13"]]
solo_rows["min_meth"] = np.min(methylation_columns, axis=1)
solo_rows["max_meth"] = np.max(methylation_columns, axis=1)
extreme_index = np.logical_or(solo_rows["max_meth"] <= 0.2, solo_rows["min_meth"] >= 0.8)
extreme_rows = solo_rows[extreme_index]
print("We will be using %s/%s of seq" % (extreme_rows.shape[0], solo_rows.shape[0]))
def plot_densitiy_met_cov_for_cpg_numb(df, patient):
# df = df[df["chromosome"] =="1"]
for density in range(1, 7):
pmd_df = df[~df["meth%s" % patient].isnull()]
rows = pmd_df[pmd_df["sequence"].str.count("CG") == density]
meth_values = rows["meth%s" % patient]
weak_rows = rows[rows["small_seq"].str.contains("[AT]CG[AT]", regex=True)]
strong_rows = rows[rows["small_seq"].str.contains("[CG]CG[CG]", regex=True)]
meth_weak = weak_rows["meth%s" % patient]
meth_strong = strong_rows["meth%s" % patient]
sns.distplot(meth_values, hist=False, kde=True, kde_kws={'linewidth': 3}, label="any")
sns.distplot(meth_strong, hist=False, kde=True, kde_kws={'linewidth': 3}, label="SCGS")
sns.distplot(meth_weak, hist=False, kde=True, kde_kws={'linewidth': 3}, label="WSCW")
plt.title("Methylation density for CRC%swith cpg=%s" % (patient, density))
plt.savefig("methylation_dens_for_%s_cpg%s.png" % (patient, density))
plt.close()
pmd_df = df[~df["cov%s" % patient].isnull()]
rows = pmd_df[pmd_df["sequence"].str.count("CG") == density]
cov_values = rows["cov%s" % patient]
weak_rows = rows[rows["small_seq"].str.contains("[AT]CG[AT]", regex=True)]
strong_rows = rows[rows["small_seq"].str.contains("[CG]CG[CG]", regex=True)]
cov_weak = weak_rows["cov%s" % patient]
cov_strong = strong_rows["cov%s" % patient]
sns.distplot(cov_values, hist=False, kde=True, kde_kws={'linewidth': 3}, label="any")
sns.distplot(cov_strong, hist=False, kde=True, kde_kws={'linewidth': 3}, label="SCGS")
sns.distplot(cov_weak, hist=False, kde=True, kde_kws={'linewidth': 3}, label="WSCW")
plt.title("Cov density for CRC%swith cpg=%s" % (patient, density))
plt.savefig("cov_dens_for_%s_cpg%s.png" % (patient, density))
plt.close()
def plot_densitiy_cov_between_02_to_06(df):
meth_v = "meth01"
pmd_df = df[np.logical_and(df["meth01"] >= 0.2, df["meth01"] <= 0.6)] # not really, just middle lines
solo_rows = pmd_df[pmd_df["sequence"].str.count("CG") == 1]
cov_solo = solo_rows["cov01"]
nsolo_rows = pmd_df[pmd_df["sequence"].str.count("CG") > 1]
cov_nsolo = nsolo_rows["cov01"]
weak_rows = solo_rows[solo_rows["small_seq"].str.contains("[AT]CG[AT]", regex=True)]
strong_rows = solo_rows[solo_rows["small_seq"].str.contains("[CG]CG[CG]", regex=True)]
cov_weak = weak_rows["cov01"]
cov_strong = strong_rows["cov01"]
sns.distplot(cov_nsolo[np.abs(stats.zscore(cov_nsolo.values)) < 3], hist=False, kde=True,
kde_kws={'linewidth': 3}, label="not solo")
sns.distplot(cov_solo[np.abs(stats.zscore(cov_solo.values)) < 3], hist=False, kde=True,
kde_kws={'linewidth': 3}, label="solo")
sns.distplot(cov_strong[np.abs(stats.zscore(cov_strong.values)) < 3], hist=False, kde=True,
kde_kws={'linewidth': 3}, label="SCGS")
sns.distplot(cov_weak[np.abs(stats.zscore(cov_weak.values)) < 3], hist=False, kde=True,
kde_kws={'linewidth': 3}, label="WSCW")
if meth_v != "meth_mean":
plt.title("Methylation density for CRC%s" % meth_v[-2:])
else:
plt.title("Methylation density - avg")
plt.savefig("methylation_dens_for_%s_02_to_06.png" % (meth_v))
plt.close()
def plot_densitiy_met_cov_01_to_03_for_cpg(df, patient):
for density in range(1, 6):
pmd_df = df[~df["cov%s" % patient].isnull()]
pmd_df = pmd_df[np.logical_and(pmd_df["cov%s" % patient] >= -0.01, pmd_df["cov%s" % patient] <= 0.03)]
#!/usr/bin/env python3
import sys
import numpy as np
from interfaceBuilder import utils as ut
def len2mat(vec, ang):
"""
Transforms cell lengths and angles to cell vectors.
vec in order [a, b, c]
ang in order [alpha, beta, gamma], (conventionally defined)
"""
"""Round it to have e.g. cos(90) be 0 and not XE-17"""
prec = 10
"""M = [A, B, C]"""
mat = np.zeros((3, 3))
"""A = [ax; 0; 0]"""
mat[0, 0] = vec[0]
"""B = [bx; by; 0]"""
mat[0, 1] = vec[1] * np.round(np.cos(np.deg2rad(ang[2])), prec)
mat[1, 1] = vec[1] * np.round(np.sin(np.deg2rad(ang[2])), prec)
"""C = [cx; cy; cz]"""
mat[0, 2] = vec[2] * np.round(np.cos(np.deg2rad(ang[1])), prec)
mat[1, 2] = vec[2] * np.round((np.cos(np.deg2rad(ang[0])) - \
np.cos(np.deg2rad(ang[2])) * \
np.cos(np.deg2rad(ang[1]))) / \
np.sin(np.deg2rad(ang[2])), prec)
mat[2, 2] = np.round(np.sqrt(vec[2]**2 - mat[0, 2]**2 - mat[1, 2]**2), prec)
return mat
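# Quick sanity check (not part of the original module): a cubic cell with
# a = b = c = 2 and all angles 90 degrees maps to a diagonal matrix,
#   len2mat([2, 2, 2], [90, 90, 90]) -> [[2, 0, 0], [0, 2, 0], [0, 0, 2]]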
def mat2LammpsBox(mat):
"""Function for transforming a set of basis vectors to
a lammps simulation box"""
lx = mat[0, 0]
ly = mat[1, 1]
lz = mat[2, 2]
xy = mat[0, 1]
xz = mat[0, 2]
yz = mat[1, 2]
x_lo_b = np.min([0, lx]) + np.min([0, xy, xz, xy + xz])
x_hi_b = np.max([0, lx]) + np.max([0, xy, xz, xy + xz])
y_lo_b = np.min([0, ly]) + np.min([0, yz])
y_hi_b = np.max([0, ly]) + np.max([0, yz])
z_lo_b = np.min([0, lz])
z_hi_b = np.max([0, lz])
box = [x_lo_b, x_hi_b, y_lo_b, y_hi_b, z_lo_b, z_hi_b]
return box
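# Example (illustrative): for an orthogonal cell with basis vectors of length 2, 3 and 4,
# mat2LammpsBox(np.diag([2.0, 3.0, 4.0])) returns [0, 2, 0, 3, 0, 4],
# i.e. the box bounds [x_lo, x_hi, y_lo, y_hi, z_lo, z_hi] with no tilt.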
def writeKPTS(cell, density = 2, N1 = None, N2 = None, N3 = None, version = "Gamma",\
S1 = 0, S2 = 0, S3 = 0, verbose = 1):
"""Function to write a vasp KPOINTS file based on cell parameters"""
"""Calculate the x-y area of the cell"""
area = np.linalg.norm(np.cross(cell[:, 0], cell[:, 1]))
"""Forms a vasp reciprical cell, without 2*pi and not Transposed
vs. the deafult cartesian cell used in the structure class"""
r_cell = np.linalg.inv(cell)
r_area = np.linalg.norm(np.cross(r_cell[0, :], r_cell[1, :]))
total_kpts = np.ceil(r_area * density)
"""The rows make up the lattice vectors"""
r_norm = np.linalg.norm(r_cell, axis = 1)
if version.lower()[0] == "g":
cmp = 0
elif version.lower()[0] == "m":
cmp = 1
if N1 is None:
N1 = np.ceil(r_norm[0] * density)
if N1 % 2 == cmp:
N1 += 1
if N2 is None:
N2 = np.ceil(r_norm[1] * density)
if N2 % 2 == cmp:
N2 += 1
if N3 is None:
N3 = np.ceil(r_norm[2] * density)
#!/usr/bin/env python3
"""Regressions against experimental/reference values.
This also tests the high-level application programming interface."""
import numpy as np
import pytest
from scipy import stats
import overreact as rx
from overreact import _constants as constants
from overreact import _datasets as datasets
# TODO(schneiderfelipe): transfer all comparisons with experimental/reference
# values to this file.
def test_basic_example_for_solvation_equilibria():
"""Reproduce literature data for AcOH(g) <=> AcOH(aq).
Data is as cited in DOI:10.1021/jp810292n and DOI:10.1063/1.1416902, and
is experimental except when otherwise indicated in the comments.
"""
model = rx.parse_model("data/acetate/Orca4/model.k")
temperature = 298.15
pK = 4.756 # DOI:10.1063/1.1416902
acid_energy = -constants.R * temperature * np.log(10 ** -pK) / constants.kcal
solv_energy = (
-229.04018997
- -229.075245654407
+ -228.764256345282
- (-229.02825429 - -229.064152538732 + -228.749485597775)
) * (constants.hartree * constants.N_A / constants.kcal)
charged_solv_energy = (
-228.59481510
- -228.617274320359
+ -228.292486796947
- (-228.47794098 - -228.500117698893 + -228.169992151890)
) * (constants.hartree * constants.N_A / constants.kcal)
delta_freeenergies_ref = [
acid_energy,
-acid_energy,
solv_energy,
-solv_energy,
charged_solv_energy,
-charged_solv_energy,
]
concentration_correction = -temperature * rx.change_reference_state(
temperature=temperature
)
for qrrho in [False, (False, True), True]:
# TODO(schneiderfelipe): log the contribution of reaction symmetry
delta_freeenergies = rx.get_delta(
model.scheme.A,
rx.get_freeenergies(model.compounds, temperature=temperature, qrrho=qrrho),
) - temperature * rx.get_reaction_entropies(
model.scheme.A, temperature=temperature
)
assert delta_freeenergies / constants.kcal == pytest.approx(
delta_freeenergies_ref, 7e-3
)
# the following tests the solvation free energy from DOI:10.1021/jp810292n
assert delta_freeenergies[2] / constants.kcal == pytest.approx(
-6.70 + concentration_correction / constants.kcal, 1.5e-1
)
# the following tests the reaction free energy from DOI:10.1063/1.1416902
assert delta_freeenergies[0] == pytest.approx(27.147 * constants.kilo, 7e-3)
assert delta_freeenergies[0] == pytest.approx(
-constants.R * temperature * np.log(10 ** -pK), 7e-3
)
k = rx.get_k(
model.scheme, model.compounds, temperature=temperature, qrrho=qrrho
)
assert -np.log10(k[0] / k[1]) == pytest.approx(pK, 7e-3)
def test_basic_example_for_solvation_phase_kinetics():
"""Reproduce literature data for NH3(w) + OH·(w) -> NH2·(w) + H2O(w).
This uses raw data from from DOI:10.1002/qua.25686 and no calls from
overreact.api.
"""
temperatures = np.array([298.15, 300, 310, 320, 330, 340, 350])
delta_freeenergies = np.array([10.5, 10.5, 10.8, 11.1, 11.4, 11.7, 11.9])
# 3-fold symmetry TS
sym_correction = (
-temperatures
* rx.change_reference_state(3, 1, temperature=temperatures)
/ constants.kcal
)
assert delta_freeenergies + sym_correction == pytest.approx(
[9.8, 9.9, 10.1, 10.4, 10.6, 10.9, 11.2], 8e-3
)
delta_freeenergies -= (
temperatures
* rx.change_reference_state(temperature=temperatures)
/ constants.kcal
) # 1 atm to 1 M
assert delta_freeenergies == pytest.approx(
[8.6, 8.6, 8.8, 9.0, 9.2, 9.4, 9.6], 6e-3
)
# only concentration correction, no symmetry and no tunneling
k = rx.rates.eyring(delta_freeenergies * constants.kcal, temperature=temperatures)
assert k == pytest.approx([3.3e6, 3.4e6, 4.0e6, 4.7e6, 5.5e6, 6.4e6, 7.3e6], 8e-2)
assert np.log10(k) == pytest.approx(
np.log10([3.3e6, 3.4e6, 4.0e6, 4.7e6, 5.5e6, 6.4e6, 7.3e6]), 6e-3
)
# only concentration correction and symmetry, no tunneling
delta_freeenergies += sym_correction
assert delta_freeenergies == pytest.approx(
[7.9, 7.9, 8.1, 8.3, 8.5, 8.7, 8.8], 7e-3
)
k = rx.rates.eyring(delta_freeenergies * constants.kcal, temperature=temperatures)
assert k == pytest.approx([9.8e6, 1.0e7, 1.2e7, 1.4e7, 1.7e7, 1.9e7, 2.2e7], 8e-2)
assert np.log10(k) == pytest.approx(
np.log10([9.8e6, 1.0e7, 1.2e7, 1.4e7, 1.7e7, 1.9e7, 2.2e7]), 5e-3
)
# concentration correction, symmetry and tunneling included
kappa = rx.tunnel.eckart(
986.79, 3.3 * constants.kcal, 16.4 * constants.kcal, temperature=temperatures
)
assert kappa == pytest.approx([2.3, 2.3, 2.2, 2.1, 2.0, 1.9, 1.9], 9e-2)
k *= kappa
assert k == pytest.approx([2.3e7, 2.4e7, 2.7e7, 3.0e7, 3.3e7, 3.7e7, 4.1e7], 1.1e-1)
assert np.log10(k) == pytest.approx(
np.log10([2.3e7, 2.4e7, 2.7e7, 3.0e7, 3.3e7, 3.7e7, 4.1e7]), 6e-3
)
def test_basic_example_for_gas_phase_kinetics():
"""Reproduce literature data for CH4 + Cl⋅ -> CH3· + HCl.
This uses raw data from from DOI:10.1002/qua.25686 and no calls from
overreact.api.
"""
temperatures = np.array([200, 298.15, 300, 400])
delta_freeenergies = np.array([8.0, 10.3, 10.3, 12.6])
# 4-fold symmetry TS
sym_correction = (
-temperatures
* rx.change_reference_state(4, 1, temperature=temperatures)
/ constants.kcal
)
assert delta_freeenergies + sym_correction == pytest.approx(
[7.4, 9.4, 9.5, 11.5], 9e-3
)
delta_freeenergies -= (
temperatures
* rx.change_reference_state(temperature=temperatures)
/ constants.kcal
) # 1 atm to 1 M
assert delta_freeenergies == pytest.approx([6.9, 8.4, 8.4, 9.9], 8e-3)
# only concentration correction, no symmetry and no tunneling
k = rx.rates.eyring(delta_freeenergies * constants.kcal, temperature=temperatures)
k = rx.rates.convert_rate_constant(k, "cm3 particle-1 s-1", molecularity=2)
assert 1e16 * k == pytest.approx(
1e16 * np.array([2.2e-16, 7.5e-15, 7.9e-15, 5.6e-14]), 7e-2
)
assert np.log10(k) == pytest.approx(
np.log10([2.2e-16, 7.5e-15, 7.9e-15, 5.6e-14]), 2e-3
)
# only concentration correction and symmetry, no tunneling
delta_freeenergies += sym_correction
assert delta_freeenergies == pytest.approx([6.3, 7.6, 7.6, 8.8], 9e-3)
k = rx.rates.eyring(delta_freeenergies * constants.kcal, temperature=temperatures)
k = rx.rates.convert_rate_constant(k, "cm3 particle-1 s-1", molecularity=2)
assert 1e16 * k == pytest.approx(
1e16 * np.array([8.8e-16, 3.0e-14, 3.1e-14, 2.2e-13]), 8e-2
)
assert np.log10(k) == pytest.approx(
np.log10([8.8e-16, 3.0e-14, 3.1e-14, 2.2e-13]), 3e-3
)
# concentration correction, symmetry and tunneling included
kappa = rx.tunnel.wigner(1218, temperature=temperatures)
assert kappa[0] == pytest.approx(4.2, 3e-4)
assert kappa[2] == pytest.approx(2.4, 1e-2)
kappa = rx.tunnel.eckart(
1218, 4.1 * constants.kcal, 3.4 * constants.kcal, temperature=temperatures
)
assert kappa == pytest.approx([17.1, 4.0, 3.9, 2.3], 2.1e-2)
k *= kappa
assert 1e16 * k == pytest.approx(
1e16 * np.array([1.5e-14, 1.2e-13, 1.2e-13, 5.1e-13]), 7e-2
)
assert np.log10(k) == pytest.approx(
np.log10([1.5e-14, 1.2e-13, 1.2e-13, 5.1e-13]), 3e-3
)
def test_rate_constants_for_hickel1992():
"""Reproduce literature data for NH3(w) + OH·(w) -> NH2·(w) + H2O(w).
Data is as cited in DOI:10.1002/qua.25686 and is experimental except when
otherwise indicated in the comments.
Those tests check for consistency with the literature in terms of
reaction rate constants.
"""
theory = "UM06-2X"
basisset = "6-311++G(d,p)"
model = rx.parse_model(f"data/hickel1992/{theory}/{basisset}/model.k")
temperatures = np.array([298.15, 300, 310, 320, 330, 340, 350])
k_cla_ref = np.array([9.8e6, 1.0e7, 1.2e7, 1.4e7, 1.7e7, 1.9e7, 2.2e7])
k_eck_ref = np.array([2.3e7, 2.4e7, 2.7e7, 3.0e7, 3.3e7, 3.7e7, 4.1e7])
k_cla = []
k_eck = []
for temperature in temperatures:
k_cla.append(
rx.get_k(
model.scheme,
model.compounds,
tunneling=None,
qrrho=(False, True),
scale="M-1 s-1",
temperature=temperature,
)[0]
)
k_eck.append(
rx.get_k(
model.scheme,
model.compounds,
# tunneling="eckart", # this is default
qrrho=(False, True),
scale="M-1 s-1",
temperature=temperature,
)[0]
)
k_cla = np.asarray(k_cla).flatten()
k_eck = np.asarray(k_eck).flatten()
assert k_eck / k_cla == pytest.approx([2.3, 2.3, 2.2, 2.1, 2.0, 1.9, 1.9], 7e-2)
assert k_cla == pytest.approx(k_cla_ref, 1.2e-1)
assert k_eck == pytest.approx(k_eck_ref, 9e-2)
assert np.log10(k_cla) == pytest.approx(np.log10(k_cla_ref), 8e-3)
assert np.log10(k_eck) == pytest.approx(np.log10(k_eck_ref), 5e-3)
for k, k_ref, tols in zip(
[k_cla, k_eck],
[k_cla_ref, k_eck_ref],
[(1.0e-1, 0.62, 2e-3, 5e-8, 3e-2), (1.1e-1, 0.75, 2e-3, 3e-8, 2e-2)],
):
linregress = stats.linregress(np.log10(k), np.log10(k_ref))
assert linregress.slope == pytest.approx(1.0, tols[0])
assert linregress.intercept == pytest.approx(0.0, abs=tols[1])
assert linregress.rvalue ** 2 == pytest.approx(1.0, tols[2])
assert linregress.pvalue == pytest.approx(0.0, abs=tols[3])
assert linregress.pvalue < 0.01
assert linregress.stderr == pytest.approx(0.0, abs=tols[4])
def test_rate_constants_for_tanaka1996():
"""Reproduce literature data for CH4 + Cl⋅ -> CH3· + HCl.
Data is as cited in DOI:10.1007/BF00058703 and DOI:10.1002/qua.25686 and
is experimental except when otherwise indicated in the comments.
Those tests check for consistency with the literature in terms of
reaction rate constants.
"""
theory = "UMP2"
basisset = "cc-pVTZ" # not the basis used in the ref., but close enough
model = rx.parse_model(f"data/tanaka1996/{theory}/{basisset}/model.k")
temperatures = np.array(
[
200.0,
# 210.0,
# 220.0,
# 230.0,
# 240.0,
# 250.0,
# 260.0,
# 270.0,
# 280.0,
# 290.0,
298.15,
300.0,
400.0,
]
)
k_cla_ref = np.array([8.8e-16, 3.0e-14, 3.1e-14, 2.2e-13])
k_eck_ref = np.array([1.5e-14, 1.2e-13, 1.2e-13, 5.1e-13])
k_exp = np.array(
[
1.0e-14,
# 1.4e-14,
# 1.9e-14,
# 2.5e-14,
# 3.22e-14,
# 4.07e-14,
# 5.05e-14,
# 6.16e-14,
# 7.41e-14,
# 8.81e-14,
10.0e-14,
10.3e-14,
# no data for 400K?
]
)
k_cla = []
k_wig = []
k_eck = []
for temperature in temperatures:
k_cla.append(
rx.get_k(
model.scheme,
model.compounds,
tunneling=None,
qrrho=True,
scale="cm3 particle-1 s-1",
temperature=temperature,
)[0]
)
k_wig.append(
rx.get_k(
model.scheme,
model.compounds,
tunneling="wigner",
qrrho=True,
scale="cm3 particle-1 s-1",
temperature=temperature,
)[0]
)
k_eck.append(
rx.get_k(
model.scheme,
model.compounds,
# tunneling="eckart", # this is default
qrrho=True,
scale="cm3 particle-1 s-1",
temperature=temperature,
)[0]
)
k_cla = np.asarray(k_cla).flatten()
k_wig = np.asarray(k_wig).flatten()
k_eck = np.asarray(k_eck).flatten()
assert k_eck / k_cla == pytest.approx([17.1, 4.0, 3.9, 2.3], 1.7e-1)
assert 1e16 * k_cla == pytest.approx(1e16 * k_cla_ref, 1.9e-1)
assert 1e16 * k_eck == pytest.approx(1e16 * k_eck_ref, 3.2e-1)
assert 1e16 * k_eck[:-1] == pytest.approx(1e16 * k_exp, 8e-2)
assert np.log10(k_cla) == pytest.approx(np.log10(k_cla_ref), 6e-3)
assert np.log10(k_eck) == pytest.approx(np.log10(k_eck_ref), 2e-2)
assert np.log10(k_eck[:-1]) == pytest.approx(np.log10(k_exp), 3e-3)
for k, k_ref, tols in zip(
[k_cla, k_eck, k_eck[:-1]],
[k_cla_ref, k_eck_ref, k_exp],
[
(2e-2, 0.08, 9e-6, 5e-6, 3e-3),
(5e-2, 0.52, 4e-4, 2e-4, 2e-2),
(5e-2, 0.60, 3e-6, 2e-3, 2e-3),
],
):
linregress = stats.linregress(np.log10(k), np.log10(k_ref))
assert linregress.slope == pytest.approx(1.0, tols[0])
assert linregress.intercept == pytest.approx(0.0, abs=tols[1])
assert linregress.rvalue ** 2 == pytest.approx(1.0, tols[2])
assert linregress.pvalue == pytest.approx(0.0, abs=tols[3])
assert linregress.pvalue < 0.01
assert linregress.stderr == pytest.approx(0.0, abs=tols[4])
def test_delta_energies_for_hickel1992():
"""Reproduce literature data for NH3(w) + OH·(w) -> NH2·(w) + H2O(w).
Data is as cited in DOI:10.1002/qua.25686 and is experimental except when
otherwise indicated in the comments.
Those tests check for consistency with the literature in terms of
chemical kinetics and thermochemistry.
"""
theory = "UM06-2X"
basisset = "6-311++G(d,p)"
model = rx.parse_model(f"data/hickel1992/{theory}/{basisset}/model.k")
temperatures = np.array([298.15, 300, 310, 320, 330, 340, 350])
delta_freeenergies_ref = [9.8, 9.9, 10.1, 10.4, 10.6, 10.9, 11.2]
delta_freeenergies = []
for temperature in temperatures:
freeenergies = rx.get_freeenergies(
model.compounds, temperature=temperature, qrrho=(False, True)
)
delta_freeenergy = (
rx.get_delta(model.scheme.B, freeenergies)
- temperature
* rx.get_reaction_entropies(model.scheme.B, temperature=temperature)
)[0]
delta_freeenergies.append(delta_freeenergy)
delta_freeenergies = np.asarray(delta_freeenergies)
assert delta_freeenergies / constants.kcal == pytest.approx(
delta_freeenergies_ref
- temperatures
* rx.change_reference_state(temperature=temperatures)
/ constants.kcal,
2e-2,
) # M06-2X/6-311++G(d,p) from DOI:10.1002/qua.25686
# extra symmetry is required for this reaction since the transition state
# is nonsymmetric
assert model.compounds["NH3·OH#(w)"].symmetry == 3
delta_freeenergies_ref = [7.9, 7.9, 8.1, 8.3, 8.5, 8.7, 8.8]
assert delta_freeenergies / constants.kcal == pytest.approx(
delta_freeenergies_ref, 2e-2
) # M06-2X/6-311++G(d,p) from DOI:10.1002/qua.25686
def test_delta_energies_for_tanaka1996():
"""Reproduce literature data for CH4 + Cl⋅ -> CH3· + HCl.
Data is as cited in DOI:10.1007/BF00058703 and DOI:10.1002/qua.25686 and
is experimental except when otherwise indicated in the comments.
Those tests check for consistency with the literature in terms of
chemical kinetics and thermochemistry.
"""
theory = "UMP2"
basisset = "6-311G(2d,p)"
model = rx.parse_model(f"data/tanaka1996/{theory}/{basisset}/model.k")
temperatures = [0.0]
delta_freeenergies_ref = [5.98]
delta_freeenergies = []
for temperature in temperatures:
freeenergies = rx.get_freeenergies(model.compounds, temperature=temperature)
delta_freeenergy = (
rx.get_delta(model.scheme.B, freeenergies)
- temperature
* rx.get_reaction_entropies(model.scheme.B, temperature=temperature)[0]
)[0]
delta_freeenergies.append(delta_freeenergy)
delta_freeenergies = np.asarray(delta_freeenergies)
assert delta_freeenergies / constants.kcal == pytest.approx(
delta_freeenergies_ref, 4e-2
) # UMP2/6-311G(2d,p) DOI:10.1007/BF00058703
# testing now another level of theory!
basisset = "cc-pVTZ" # not the basis used in the ref., but close enough
model = rx.parse_model(f"data/tanaka1996/{theory}/{basisset}/model.k")
# no extra symmetry required for this reaction since the transition state
# is symmetric
assert model.compounds["H3CHCl‡"].symmetry is None
temperatures =
|
np.array([200, 298.15, 300, 400])
|
numpy.array
|
"""Define the API for geneparse."""
# This file is part of geneparse.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Pharmacogenomics Centre
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import io
import numpy as np
from .exceptions import InvalidChromosome
VALID_CHROMOSOMES = set(
[str(i + 1) for i in range(22)] + ["X", "Y", "XY", "MT"]
)
_NUCLEOTIDE_COMPLEMENT = {"A": "T", "T": "A", "C": "G", "G": "C"}
class Chromosome(object):
__slots__ = ("name")
def __init__(self, name):
self.name = str(name)
def __repr__(self):
return "{}".format(self.name)
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
other_name = getattr(other, "name", str(other))
if self.name == "?" or other_name == "?":
return False
return self.name == other_name
VALID_CHROMOSOMES = {k: Chromosome(k) for k in VALID_CHROMOSOMES}
UNKNOWN_CHROMOSOME = Chromosome("?")
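# Example (illustrative) of the comparison semantics above: chromosome names
# compare by their string value, but the unknown chromosome "?" never compares
# equal, not even to itself.
# >>> Chromosome("1") == "1"
# True
# >>> UNKNOWN_CHROMOSOME == UNKNOWN_CHROMOSOME
# False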
class Variant(object):
# Subclasses should declare a __slots__ containing only the additional
# slots.
__slots__ = ("name", "chrom", "pos", "alleles")
def __init__(self, name, chrom, pos, alleles):
self.name = str(name) if name is not None else None
self.chrom = Variant._encode_chr(chrom)
self.pos = int(pos)
self.alleles = self._encode_alleles(alleles)
@staticmethod
def _encode_chr(chrom):
# Accept instances of Chromosome as is (useful for contigs or non
# default chromosomes).
if isinstance(chrom, Chromosome):
return chrom
# We have a special value for unknown chromosome.
elif chrom is None:
return UNKNOWN_CHROMOSOME
# See if the Chromosome is already known.
chrom = str(chrom).upper()
if chrom.startswith("CHR"):
chrom = chrom[3:]
if chrom not in VALID_CHROMOSOMES:
raise InvalidChromosome(chrom)
return VALID_CHROMOSOMES[chrom]
@staticmethod
def _encode_alleles(iterable):
if iterable is None:
return None
return tuple(sorted(str(s).upper() for s in iterable))
def copy(self):
return Variant(self.name, self.chrom, self.pos, self.alleles)
def complementary_strand_copy(self):
alleles = [complement_alleles(i) for i in self.alleles]
return Variant(self.name, self.chrom, self.pos, alleles)
def __hash__(self):
# Two variants will have the same hash if they have the same
# chromosome and position and **exactly the same alleles**.
# Is this the behaviour we want?
return hash((self.chrom, self.pos, self.alleles))
def alleles_ambiguous(self):
return self.alleles == ("C", "G") or self.alleles == ("A", "T")
@property
def alleles_set(self):
if self.alleles is None:
return None
return set(self.alleles)
def primitive_locus_eq(self, chrom, pos):
return self.chrom == chrom and self.pos == pos
def locus_eq(self, other):
return self.primitive_locus_eq(other.chrom, other.pos)
def iterable_alleles_eq(self, alleles):
return self.alleles == self._encode_alleles(alleles)
def alleles_eq(self, other):
return self.iterable_alleles_eq(other.alleles)
def complement_alleles(self):
"""Complement the alleles of this variant.
This will call this module's `complement_alleles` function.
Note that this will not create a new object, but modify the state of
the current instance.
"""
self.alleles = self._encode_alleles(
[complement_alleles(i) for i in self.alleles]
)
def __eq__(self, other):
"""Tests for the equality between two variants.
If any variant has undefined alleles, we return the locus equality.
Else, we return True if at least two alleles are the same in both
variants.
"""
locus_match = self.locus_eq(other)
if self.alleles is None or other.alleles is None:
return locus_match
overlap = len(self.alleles_set & other.alleles_set) >= 2
return locus_match and overlap
def __repr__(self):
return "<{} chr{}:{}_{}>".format(self.__class__.__name__, self.chrom,
self.pos, self.alleles)
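# Example (illustrative) of the Variant equality rules documented above:
# >>> a = Variant("rs1", 1, 12345, ["A", "G"])
# >>> b = Variant(None, "chr1", 12345, ["G", "A", "T"])
# >>> a == b  # same locus and at least two shared alleles
# True
# >>> a == Variant(None, 1, 12345, None)  # undefined alleles: locus equality only
# True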
class ImputedVariant(Variant):
__slots__ = ("quality", )
def __init__(self, name, chrom, pos, alleles, quality):
super().__init__(name, chrom, pos, alleles)
self.quality = float(quality)
if not 0 <= self.quality <= 1:
raise ValueError(
"The 'quality' field for ImputedVariant instances is expected "
"to be a float value between 0 and 1."
)
class Genotypes(object):
__slots__ = ("variant", "genotypes", "reference", "coded", "multiallelic")
def __init__(self, variant, genotypes, reference, coded, multiallelic):
"""Class holding information on a variant as well as a vector of
genotypes.
The "reference" allele corresponds to 0 and the "coded" allele
corresponds to 1.
"""
self.variant = variant
self.genotypes = genotypes
self.reference = str(reference).upper()
self.multiallelic = multiallelic
if variant.alleles and (self.reference not in variant.alleles):
raise ValueError(
"reference allele not in the known alleles for the variant "
"({} not in {}).".format(self.reference, variant.alleles)
)
self.coded = str(coded).upper()
if variant.alleles and (self.coded not in variant.alleles):
raise ValueError(
"coded allele not in the known alleles for the variant "
"({} not in {}).".format(self.coded, variant.alleles)
)
def copy(self):
return Genotypes(
self.variant, np.copy(self.genotypes), self.reference, self.coded,
self.multiallelic
)
def flip(self):
"""Flips the reference and coded alleles of this instance."""
self.flip_coded()
def flip_coded(self):
"""Flips the coding of the alleles."""
self.genotypes = 2 - self.genotypes
self.reference, self.coded = self.coded, self.reference
def flip_strand(self):
"""Flips the strand of the alleles."""
self.reference = complement_alleles(self.reference)
self.coded = complement_alleles(self.coded)
self.variant.complement_alleles()
def maf(self):
freq = self.coded_freq()
if freq > 0.5:
return 1 - freq
else:
return freq
def coded_freq(self):
"""Gets the frequency of the coded allele."""
return np.nanmean(self.genotypes) / 2
def code_minor(self):
"""Encode the genotypes with respect to the minor allele.
This confirms that "reference" is the major allele and that "coded" is
the minor allele.
In other words, this function can be used to make sure that the
genotype value is the number of minor alleles for an individual.
"""
coded_freq = self.coded_freq()
if coded_freq > 0.5:
self.flip_coded()
def __eq__(self, other):
# If not the same locus, not equals.
if not self.variant.locus_eq(other.variant):
return False
# If same alleles, return the genotype comparison as-is.
alleles_eq = (self.reference == other.reference and
self.coded == other.coded)
if alleles_eq:
return _np_eq(self.genotypes, other.genotypes)
# Check if it's the same alleles but they are flipped.
if self.reference == other.coded and self.coded == other.reference:
return _np_eq(self.genotypes, (2 - other.genotypes))
raise RuntimeError("Failed equality check between genotypes.")
def __repr__(self):
return (
"<Genotypes for {} Reference:{} Coded:{}, {}>"
"".format(self.variant, self.reference, self.coded, self.genotypes)
)
def __setstate__(self, state):
for field in self.__slots__:
if field != "genotypes":
setattr(self, field, state[field])
self.genotypes = np.load(io.BytesIO(state["genotypes_data"]))["arr_0"]
def __getstate__(self):
state = {}
for field in self.__slots__:
if field != "genotypes":
state[field] = getattr(self, field)
genotypes_file = io.BytesIO()
np.savez_compressed(genotypes_file, self.genotypes)
state["genotypes_data"] = genotypes_file.getvalue()
return state
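# Example (illustrative) of the coding convention above: with reference "A" and
# coded "G", each genotype value counts copies of "G"; flip_coded() swaps the
# roles and recodes the vector as 2 - genotypes, and code_minor() only flips
# when the coded-allele frequency exceeds 0.5.
# >>> v = Variant("rs1", 1, 12345, ["A", "G"])
# >>> g = Genotypes(v, np.array([0., 1., 2.]), reference="A", coded="G", multiallelic=False)
# >>> g.coded_freq()  # mean(genotypes) / 2
# 0.5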
class SplitChromosomeReader(object):
def __init__(self, chrom_to_reader):
"""Reader to handle genotype access using files split by chromosome.
A dict mapping chromosomes to instances of GenotypesReader should be
passed.
"""
self.chrom_to_reader = chrom_to_reader
samples = None
self.n_vars = 0
for chrom, reader in self.chrom_to_reader.items():
# Keep track of the total number of variants.
self.n_vars += reader.get_number_variants()
# Check that the sample order is the same.
cur_samples = reader.get_samples()
if samples is None:
samples = cur_samples
else:
if samples != cur_samples:
raise ValueError(
"Not all sub-readers have the same sample order."
)
samples = cur_samples
self.samples = samples
@staticmethod
def _unknown_chrom_message(chrom):
return (
"Unable to find a reader instance for chromosome '{}'."
"".format(chrom)
)
def iter_variants(self):
for chrom, reader in self.chrom_to_reader.items():
for v in reader.iter_variants():
yield v
def iter_genotypes(self):
for chrom, reader in self.chrom_to_reader.items():
for g in reader.iter_genotypes():
yield g
def get_variant_genotypes(self, variant):
try:
return self.chrom_to_reader[
variant.chrom
].get_variant_genotypes(variant)
except KeyError:
raise ValueError(self._unknown_chrom_message(variant.chrom))
def get_variant_by_name(self, name):
out = []
for chrom, reader in self.chrom_to_reader.items():
out.extend(reader.get_variant_by_name(name))
return out
def get_variants_in_region(self, chrom, start, end):
try:
return self.chrom_to_reader[
chrom
].get_variants_in_region(chrom, start, end)
except KeyError:
raise ValueError(self._unknown_chrom_message(chrom))
def get_samples(self):
return self.samples
def get_number_samples(self):
return len(self.samples)
def get_number_variants(self):
return self.n_vars
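# Example (illustrative): combining per-chromosome readers into one view. The
# reader names below are hypothetical placeholders for GenotypesReader
# subclasses opened on per-chromosome files.
# >>> combined = SplitChromosomeReader({"1": reader_chr1, "2": reader_chr2})
# >>> genotypes = combined.get_variants_in_region("1", 1_000_000, 2_000_000)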
class GenotypesReader(object):
def __init__(self):
"""Abstract class to read genotypes data."""
raise NotImplementedError()
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
pass
def __repr__(self):
return "<{} {:,d} samples; {:,d} variants>".format(
self.__class__.__name__,
self.get_number_samples(),
self.get_number_variants(),
)
# API methods
def iter_variants(self):
"""Iterate over variants without reading the actual genotypes.
This is a generator of Variant instances. Also note that subclasses
can define their own Variant subclasses to represent additional
fields.
To improve consistency, the ImputedVariant class is provided. It
defines a single additional field ("quality") containing a float
between 0 and 1.
"""
raise NotImplementedError()
def iter_genotypes(self):
"""Iterate over variants and read the genotypes.
This method yields instances of Genotypes.
"""
raise NotImplementedError()
def get_variant_genotypes(self, variant):
"""Get the genotypes for a given variant.
Args:
variant (Variant): A variant for which to retrieve genotypes.
Returns:
list: A list of Genotypes. This is a list because for
multi-allelics the representation needs multiple entries.
"""
raise NotImplementedError()
def get_variant_by_name(self, name):
"""Get the genotypes for a given variant (by name).
Args:
name (str): The name of the variant to retrieve the genotypes.
Returns:
list: A list of Genotypes. This is a list in order to keep the same
behaviour as the other functions.
"""
raise NotImplementedError()
def iter_variants_by_names(self, names):
"""Iterates over the genotypes for variants using a list of names.
Args:
names (list): The list of names for variant extraction.
"""
for name in names:
for result in self.get_variant_by_name(name):
yield result
def get_variants_in_region(self, chrom, start, end):
"""Get the variants in a region.
Args:
chrom (str): The chromosome (e.g. 'X' or '3').
start (int): The start position for the region.
end (int): The end position for the region.
"""
raise NotImplementedError()
def get_samples(self):
"""Get an ordered collection of the samples in the genotype container.
"""
raise NotImplementedError()
def get_number_samples(self):
"""Return the number of samples."""
raise NotImplementedError()
def get_number_variants(self):
"""Return the number of variants in the file."""
raise NotImplementedError()
def complement_alleles(s):
"""Complement an allele string.
This will apply the following translation table to the alleles:
A -> T
G -> C
and vice versa.
Other characters will be left as-is.
"""
trans = str.maketrans("ATGCatgc", "TACGtacg")
return s.translate(trans)[::-1]
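# Example (illustrative): the translation table is applied and the string is
# reversed, so the result is the reverse complement.
# >>> complement_alleles("ATG")
# 'CAT'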
def _np_eq(a, b):
nan_a =
|
np.isnan(a)
|
numpy.isnan
|
from collections import OrderedDict
import numpy as np
from robosuite.models.arenas.bins_arena import BinArena
from robosuite.utils.transform_utils import convert_quat
from robosuite.utils.mjcf_utils import CustomMaterial
from robosuite.environments.manipulation.single_arm_env import SingleArmEnv
from robosuite.models.arenas import TableArena
from robosuite.models.objects import BoxObject
from robosuite.models.tasks import ManipulationTask
from robosuite.utils.placement_samplers import UniformRandomSampler
from robosuite.utils.observables import Observable, sensor
class BinDividerPick(SingleArmEnv):
"""
This class corresponds to the lifting task for a single robot arm.
Args:
robots (str or list of str): Specification for specific robot arm(s) to be instantiated within this env
(e.g: "Sawyer" would generate one arm; ["Panda", "Panda", "Sawyer"] would generate three robot arms)
Note: Must be a single single-arm robot!
env_configuration (str): Specifies how to position the robots within the environment (default is "default").
For most single arm environments, this argument has no impact on the robot setup.
controller_configs (str or list of dict): If set, contains relevant controller parameters for creating a
custom controller. Else, uses the default controller for this specific task. Should either be single
dict if same controller is to be used for all robots or else it should be a list of the same length as
"robots" param
gripper_types (str or list of str): type of gripper, used to instantiate
gripper models from gripper factory. Default is "default", which is the default gripper(s) associated
with the robot(s) from the 'robots' specification. None removes the gripper, and any other (valid) model
overrides the default gripper. Should either be single str if same gripper type is to be used for all
robots or else it should be a list of the same length as "robots" param
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
:`'magnitude'`: The scale factor of uni-variate random noise applied to each of a robot's given initial
joint positions. Setting this value to `None` or 0.0 results in no noise being applied.
If "gaussian" type of noise is applied then this magnitude scales the standard deviation applied,
If "uniform" type of noise is applied then this magnitude sets the bounds of the sampling range
:`'type'`: Type of noise to apply. Can either specify "gaussian" or "uniform"
Should either be single dict if same noise value is to be used for all robots or else it should be a
list of the same length as "robots" param
:Note: Specifying "default" will automatically use the default noise settings.
Specifying None will automatically create the required dict with "magnitude" set to 0.0.
table_full_size (3-tuple): x, y, and z dimensions of the table.
table_friction (3-tuple): the three mujoco friction parameters for
the table.
use_camera_obs (bool): if True, every observation includes rendered image(s)
use_object_obs (bool): if True, include object (cube) information in
the observation.
reward_scale (None or float): Scales the normalized reward function by the amount specified.
If None, environment reward remains unnormalized
reward_shaping (bool): if True, use dense rewards.
placement_initializer (ObjectPositionSampler): if provided, will
be used to place objects on every reset, else a UniformRandomSampler
is used by default.
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering
render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
will result in the default angle being applied, which is useful as it can be dragged / panned by
the user using the mouse
render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise.
render_gpu_device_id (int): corresponds to the GPU device id to use for offscreen rendering.
Defaults to -1, in which case the device will be inferred from environment variables
(GPUS or CUDA_VISIBLE_DEVICES).
control_freq (float): how many control signals to receive in every second. This sets the amount of
simulation time that passes between every action input.
horizon (int): Every episode lasts for exactly @horizon timesteps.
ignore_done (bool): True if never terminating the environment (ignore @horizon).
hard_reset (bool): If True, re-loads model, sim, and render object upon a reset call, else,
only calls sim.reset and resets all robosuite-internal variables
camera_names (str or list of str): name of camera to be rendered. Should either be single str if
same name is to be used for all cameras' rendering or else it should be a list of cameras to render.
:Note: At least one camera must be specified if @use_camera_obs is True.
:Note: To render all robots' cameras of a certain type (e.g.: "robotview" or "eye_in_hand"), use the
convention "all-{name}" (e.g.: "all-robotview") to automatically render all camera images from each
robot's camera list).
camera_heights (int or list of int): height of camera frame. Should either be single int if
same height is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_widths (int or list of int): width of camera frame. Should either be single int if
same width is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_depths (bool or list of bool): True if rendering RGB-D, and RGB otherwise. Should either be single
bool if same depth setting is to be used for all cameras or else it should be a list of the same length as
"camera names" param.
Raises:
AssertionError: [Invalid number of robots specified]
"""
def __init__(
self,
robots,
env_configuration="default",
controller_configs=None,
gripper_types="default",
initialization_noise="default",
bin1_pos=(0, 0.0, 0.8),
table_full_size=(0.8, 0.8, 0.05),
table_friction=(1., 5e-3, 1e-4),
use_camera_obs=True,
use_object_obs=True,
reward_scale=1.0,
reward_shaping=False,
placement_initializer=None,
placement_initializer_kwargs=None,
has_renderer=False,
has_offscreen_renderer=True,
render_camera="frontview",
render_collision_mesh=False,
render_visual_mesh=True,
render_gpu_device_id=-1,
control_freq=20,
horizon=1000,
ignore_done=False,
hard_reset=True,
camera_names="agentview",
camera_heights=256,
camera_widths=256,
camera_depths=False,
use_cube_shift_left_reward=False,
use_reaching_reward=False,
use_grasping_reward=False,
):
self.use_cube_shift_left_reward = use_cube_shift_left_reward
self.use_reaching_reward = use_reaching_reward
self.use_grasping_reward = use_grasping_reward
# settings for table top
self.table_full_size = table_full_size
self.table_friction = table_friction
self.table_offset = np.array((0, 0, 0.8))
# settings for bin position
self.bin1_pos = np.array(bin1_pos)
# reward configuration
self.reward_scale = reward_scale
self.reward_shaping = reward_shaping
# whether to use ground-truth object states
self.use_object_obs = use_object_obs
# object placement initializer
self.placement_initializer = placement_initializer
self.placement_initializer_kwargs = placement_initializer_kwargs
super().__init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
mount_types="default",
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_camera=render_camera,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
render_gpu_device_id=render_gpu_device_id,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
hard_reset=hard_reset,
camera_names=camera_names,
camera_heights=camera_heights,
camera_widths=camera_widths,
camera_depths=camera_depths,
)
def reward(self, action=None):
"""
Reward function for the task.
Sparse un-normalized reward:
- a discrete reward of 2.25 is provided if the cube is lifted
Un-normalized summed components if using reward shaping:
- Reaching: in [0, 1], to encourage the arm to reach the cube
- Grasping: in {0, 0.25}, non-zero if arm is grasping the cube
- Lifting: in {0, 1}, non-zero if arm has lifted the cube
The sparse reward only consists of the lifting component.
Note that the final reward is normalized and scaled by
reward_scale / 2.25 as well so that the max score is equal to reward_scale
Args:
action (np array): [NOT USED]
Returns:
float: reward value
"""
reward = 0.
# sparse completion reward
if self._check_success():
reward = 2.25
# use a shaping reward
elif self.reward_shaping:
# reaching reward
cube_pos = self.sim.data.body_xpos[self.cube_body_id]
gripper_site_pos = self.sim.data.site_xpos[self.robots[0].eef_site_id]
dist = np.linalg.norm(gripper_site_pos - cube_pos)
reaching_reward = 1 - np.tanh(10.0 * dist)
if self.use_reaching_reward:
reward += reaching_reward*0.5
if self.use_cube_shift_left_reward:
if cube_pos[1] > -.04:
reward += (1 - np.tanh(10.0 * cube_pos[1]))*.5
# grasping reward
if self.use_grasping_reward and self._check_grasp(gripper=self.robots[0].gripper, object_geoms=self.cube):
reward += 0.5
# Scale reward if requested
if self.reward_scale is not None:
reward *= self.reward_scale / 2.25
return reward
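# Worked example (illustrative): with reward_scale=1.0 a successful lift gives
# reward = 2.25 * (1.0 / 2.25) = 1.0, so the maximum per-step reward equals
# reward_scale, as stated in the docstring above.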
def _load_model(self):
"""
Loads an xml model, puts it in self.model
"""
super()._load_model()
# Adjust base pose accordingly
xpos = self.robots[0].robot_model.base_xpos_offset["table"](self.table_full_size[0])
self.robots[0].robot_model.set_base_xpos(xpos)
self.robots[0].init_qpos = np.array([ 0.107, 0.426 , 0.076 ,-2.055 ,-0.1 , 2.501, 0.999])
# load model for table top workspace
# mujoco_arena = TableArena(
# table_full_size=self.table_full_size,
# table_friction=self.table_friction,
# table_offset=self.table_offset,
# )
mujoco_arena = BinArena(
bin1_pos=self.bin1_pos,
table_offset=self.table_offset,
table_full_size=self.table_full_size,
table_friction=self.table_friction
)
# Arena always gets set to zero origin
mujoco_arena.set_origin([0, 0, 0])
# initialize objects of interest
tex_attrib = {
"type": "cube",
}
mat_attrib = {
"texrepeat": "1 1",
"specular": "0.4",
"shininess": "0.1",
}
redwood = CustomMaterial(
texture="WoodRed",
tex_name="redwood",
mat_name="redwood_mat",
tex_attrib=tex_attrib,
mat_attrib=mat_attrib,
)
self.cube = BoxObject(
name="cube",
size_min=[0.025, 0.025, 0.025], # [0.015, 0.015, 0.015],
size_max=[0.025, 0.025, 0.025], # [0.018, 0.018, 0.018])
rgba=[168/255, 127/255, 214/255, 1],
# material=redwood,
)
# Create placement initializer
if self.placement_initializer is not None:
self.placement_initializer.reset()
self.placement_initializer.add_objects(self.cube)
elif self.placement_initializer_kwargs is not None:
self.placement_initializer = UniformRandomSampler(**self.placement_initializer_kwargs)
self.placement_initializer.reset()
self.placement_initializer.add_objects(self.cube)
else:
self.placement_initializer = UniformRandomSampler(
name="ObjectSampler",
mujoco_objects=self.cube,
x_range=[-0.165, .165],
y_range=[0.035, 0.165],
rotation=0,
ensure_object_boundary_in_range=False,
ensure_valid_placement=True,
reference_pos=self.table_offset,
z_offset=0.12,
)
# task includes arena, robot, and objects of interest
self.model = ManipulationTask(
mujoco_arena=mujoco_arena,
mujoco_robots=[robot.robot_model for robot in self.robots],
mujoco_objects=self.cube,
)
def _setup_references(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
"""
super()._setup_references()
# Additional object references from this env
self.cube_body_id = self.sim.model.body_name2id(self.cube.root_body)
def _setup_observables(self):
"""
Sets up observables to be used for this environment. Creates object-based observables if enabled
Returns:
OrderedDict: Dictionary mapping observable names to its corresponding Observable object
"""
observables = super()._setup_observables()
# low-level object information
if self.use_object_obs:
# Get robot prefix and define observables modality
pf = self.robots[0].robot_model.naming_prefix
modality = "object"
# cube-related observables
@sensor(modality=modality)
def cube_pos(obs_cache):
return
|
np.array(self.sim.data.body_xpos[self.cube_body_id])
|
numpy.array
|
#! /usr/bin/env python3
# This Python analysis script is part of the code Hipace++
#
# It compares the transverse field By with the theoretical value, plots both
# the simulation result and the theory on the same plot, and asserts that the
# difference is small.
#
# To use it, run the simulation and execute this script with
# > ../../build/bin/hipace inputs_SI
# > python analysis.py
# Note: the simulation may take some time, as the box size must be high to have
# decent agreement
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import scipy.constants as scc
import argparse
import sys
from openpmd_viewer import OpenPMDTimeSeries
parser = argparse.ArgumentParser(description='Script to analyze the correctness of the beam in vacuum')
parser.add_argument('--normalized-units',
dest='norm_units',
action='store_true',
default=False,
help='Run the analysis in normalized units')
parser.add_argument('--do-plot',
dest='do_plot',
action='store_true',
default=False,
help='Plot figures and save them to file')
parser.add_argument('--output-dir',
dest='output_dir',
default='diags/hdf5',
help='Path to the directory containing output files')
args = parser.parse_args()
ts = OpenPMDTimeSeries(args.output_dir)
if args.norm_units:
c = 1.
jz0 = -1.
rho0 = -1.
mu_0 = 1.
eps_0 = 1.
R = 1.
else:
# Density of the can beam
dens = 2.8239587008591567e23 # at this density, 1/kp = 10um, allowing for an easy comparison with normalized units
# Define array for transverse coordinate and theory for By and Bx
jz0 = - scc.e * scc.c * dens
rho0 = - scc.e * dens
c = scc.c
mu_0 = scc.mu_0
eps_0 = scc.epsilon_0
# Radius of the can beam
R = 10.e-6
# Load Hipace++ data for By in SI units
Bx_sim, Bx_meta = ts.get_field(field='Bx', iteration=0, slice_across=['x','z'], slice_relative_position=[0,0])
By_sim, By_meta = ts.get_field(field='By', iteration=0, slice_across=['y','z'], slice_relative_position=[0,0])
jz_sim = ts.get_field(field='jz_beam', iteration=0, slice_across=['y','z'], slice_relative_position=[0,0])[0]
rho_sim = ts.get_field(field='rho', iteration=0, slice_across=['y','z'], slice_relative_position=[0,0])[0]
Ex_sim = ts.get_field(field='ExmBy', iteration=0, slice_across=['y','z'], slice_relative_position=[0,0])[0] + c*By_sim
Ey_sim = ts.get_field(field='EypBx', iteration=0, slice_across=['x','z'], slice_relative_position=[0,0])[0] - c*Bx_sim
y = Bx_meta.y
x = By_meta.x
By_th = mu_0 * jz0 * x / 2.
By_th[abs(x)>=R] = mu_0 * jz0 * R**2/(2*x[abs(x)>R])
Ex_th = rho0 / eps_0 * x / 2.
Ex_th[abs(x)>=R] = rho0 / eps_0 * R**2/(2*x[abs(x)>R])
Bx_th = -mu_0 * jz0 * y / 2.
Bx_th[abs(y)>=R] = -mu_0 * jz0 * R**2/(2*y[abs(y)>R])
Ey_th = rho0 / eps_0 * y / 2.
Ey_th[abs(y)>=R] = rho0 / eps_0 * R**2/(2*y[abs(y)>R])
jz_th = np.ones_like(x) * jz0
jz_th[abs(x)>=R] = 0.
rho_th = np.ones_like(x) * rho0
rho_th[abs(x)>=R] = 0.
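# (Illustrative note) The piecewise expressions above are the textbook fields of
# an infinitely long, uniform cylindrical charge/current column of radius R: they
# grow linearly with the transverse coordinate inside the beam and fall off as
# R**2 / (2*r) outside, while the current and charge densities are flat-top
# profiles that vanish beyond R.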
# Plot simulation result and theory
if args.do_plot:
matplotlib.rcParams.update({'font.size': 14})
plt.figure(figsize=(12,4))
if not args.norm_units:
plt.subplot(131)
plt.plot(1.e6*y, Bx_sim, '+-', label='Hipace++')
plt.plot(1.e6*y, Bx_th, 'k--', label='theory')
plt.grid()
plt.legend()
plt.xlim(-50., 50.)
plt.xlabel('y (um)')
plt.ylabel('Bx (T)')
plt.subplot(132)
plt.plot(1.e6*x, By_sim, '+-', label='Hipace++')
plt.plot(1.e6*x, By_th, 'k--', label='theory')
plt.grid()
plt.legend()
plt.xlim(-50., 50.)
plt.xlabel('x (um)')
plt.ylabel('By (T)')
plt.subplot(133)
plt.plot(1.e6*x, jz_sim, '+-', label='Hipace++')
plt.plot(1.e6*x, jz_th, 'k--', label='theory')
plt.grid()
plt.legend()
plt.xlim(-50., 50.)
plt.xlabel('x (um)')
plt.ylabel('jz (A/m2)')
else:
plt.subplot(131)
plt.plot(y, Bx_sim, '+-', label='Hipace++')
plt.plot(y, Bx_th, 'k--', label='theory')
plt.grid()
plt.legend()
plt.xlim(-5., 5.)
plt.xlabel('kp y')
plt.ylabel('c Bx / E0')
plt.subplot(132)
plt.plot(x, By_sim, '+-', label='Hipace++')
plt.plot(x, By_th, 'k--', label='theory')
plt.grid()
plt.legend()
plt.xlim(-5., 5.)
plt.xlabel('kp x')
plt.ylabel('c By / E0')
plt.subplot(133)
plt.plot(x, jz_sim, '+-', label='Hipace++')
plt.plot(x, jz_th, 'k--', label='theory')
plt.grid()
plt.legend()
plt.xlim(-5., 5.)
plt.xlabel('kp x')
plt.ylabel('jz /IA')
plt.tight_layout()
plt.savefig("beam_in_vacuum.png", bbox_inches="tight")
# Assert that the simulation result is close enough to theory
error_jz = np.sum((jz_sim-jz_th)**2) / np.sum((jz_th)**2)
print("total relative error jz: " + str(error_jz) + " (tolerance = 0.1)")
error_Bx =
|
np.sum((Bx_sim-Bx_th)**2)
|
numpy.sum
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph algorithm generators.
Currently implements the following:
- Depth-first search (Moore, 1959)
- Breadth-first search (Moore, 1959)
- Topological sorting (Knuth, 1973)
- Articulation points
- Bridges
- Kosaraju's strongly-connected components (Aho et al., 1974)
- Kruskal's minimum spanning tree (Kruskal, 1956)
- Prim's minimum spanning tree (Prim, 1957)
- Bellman-Ford's single-source shortest path (Bellman, 1958)
- Dijkstra's single-source shortest path (Dijkstra, 1959)
- DAG shortest path
- Floyd-Warshall's all-pairs shortest paths (Floyd, 1962)
- Edmonds-Karp bipartite matching (Edmonds & Karp, 1972)
See "Introduction to Algorithms" 3ed (CLRS3) for more information.
"""
# pylint: disable=invalid-name
from typing import Tuple
import chex
from clrs._src import probing
from clrs._src import specs
import numpy as np
_Array = np.ndarray
_Out = Tuple[_Array, probing.ProbesDict]
_OutputClass = specs.OutputClass
def dfs(A: _Array) -> _Out:
"""Depth-first search (Moore, 1959)."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['dfs'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
color = np.zeros(A.shape[0], dtype=np.int32)
pi = np.arange(A.shape[0])
d = np.zeros(A.shape[0])
f = np.zeros(A.shape[0])
s_prev = np.arange(A.shape[0])
time = 0
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
while True:
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
pi[v] = u
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
break
if s_last == u:
color[u] = 2
time += 0.01
f[u] = time
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)})
probing.finalize(probes)
return pi, probes
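# Example (illustrative) usage of the generator above: each algorithm returns
# its output together with the recorded probes.
# >>> A = np.array([[0., 1.], [0., 0.]])
# >>> pi, probes = dfs(A)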
def bfs(A: _Array, s: int) -> _Out:
"""Breadth-first search (Moore, 1959)."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['bfs'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
's': probing.mask_one(s, A.shape[0]),
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
reach = np.zeros(A.shape[0])
pi = np.arange(A.shape[0])
reach[s] = 1
while True:
prev_reach = np.copy(reach)
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'reach_h': np.copy(prev_reach),
'pi_h': np.copy(pi)
})
for i in range(A.shape[0]):
for j in range(A.shape[0]):
if A[i, j] > 0 and prev_reach[i] == 1:
if pi[j] == j and j != s:
pi[j] = i
reach[j] = 1
if np.all(reach == prev_reach):
break
probing.push(probes, specs.Stage.OUTPUT, next_probe={'pi': np.copy(pi)})
probing.finalize(probes)
return pi, probes
def topological_sort(A: _Array) -> _Out:
"""Topological sorting (Knuth, 1973)."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['topological_sort'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
color = np.zeros(A.shape[0], dtype=np.int32)
topo = np.arange(A.shape[0])
s_prev = np.arange(A.shape[0])
topo_head = 0
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'topo_h': np.copy(topo),
'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
'color': probing.array_cat(color, 3),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0])
})
while True:
if color[u] == 0:
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'topo_h': np.copy(topo),
'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
'color': probing.array_cat(color, 3),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0])
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'topo_h': np.copy(topo),
'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
'color': probing.array_cat(color, 3),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0])
})
break
if s_last == u:
color[u] = 2
if color[topo_head] == 2:
topo[u] = topo_head
topo_head = u
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'topo_h': np.copy(topo),
'topo_head_h': probing.mask_one(topo_head, A.shape[0]),
'color': probing.array_cat(color, 3),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0])
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(
probes,
specs.Stage.OUTPUT,
next_probe={
'topo': np.copy(topo),
'topo_head': probing.mask_one(topo_head, A.shape[0])
})
probing.finalize(probes)
return topo, probes
def articulation_points(A: _Array) -> _Out:
"""Articulation points."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['articulation_points'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
color = np.zeros(A.shape[0], dtype=np.int32)
pi = np.arange(A.shape[0])
d = np.zeros(A.shape[0])
f = np.zeros(A.shape[0])
s_prev = np.arange(A.shape[0])
time = 0
low = np.zeros(A.shape[0])
child_cnt = np.zeros(A.shape[0])
is_cut = np.zeros(A.shape[0])
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
while True:
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
low[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
pi[v] = u
color[v] = 1
s_prev[v] = s_last
s_last = v
child_cnt[u] += 0.01
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
break
elif v != pi[u]:
low[u] = min(low[u], d[v])
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_last == u:
color[u] = 2
time += 0.01
f[u] = time
for v in range(A.shape[0]):
if pi[v] == u:
low[u] = min(low[u], low[v])
if pi[u] != u and low[v] >= d[u]:
is_cut[u] = 1
if pi[u] == u and child_cnt[u] > 0.01:
is_cut[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_cut_h': np.copy(is_cut),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
'child_cnt': np.copy(child_cnt),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(
probes,
specs.Stage.OUTPUT,
next_probe={'is_cut': np.copy(is_cut)},
)
probing.finalize(probes)
return is_cut, probes
def bridges(A: _Array) -> _Out:
"""Bridges."""
chex.assert_rank(A, 2)
probes = probing.initialize(specs.SPECS['bridges'])
A_pos = np.arange(A.shape[0])
adj = probing.graph(np.copy(A))
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': adj
})
color = np.zeros(A.shape[0], dtype=np.int32)
pi = np.arange(A.shape[0])
d = np.zeros(A.shape[0])
f = np.zeros(A.shape[0])
s_prev = np.arange(A.shape[0])
time = 0
low = np.zeros(A.shape[0])
is_bridge = (
np.zeros((A.shape[0], A.shape[0])) + _OutputClass.MASKED.value + adj)
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
while True:
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
low[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
pi[v] = u
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
break
elif v != pi[u]:
low[u] = min(low[u], d[v])
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_last == u:
color[u] = 2
time += 0.01
f[u] = time
for v in range(A.shape[0]):
if pi[v] == u:
low[u] = min(low[u], low[v])
if low[v] > d[u]:
is_bridge[u, v] = 1
is_bridge[v, u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'is_bridge_h': np.copy(is_bridge),
'pi_h': np.copy(pi),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
'low': np.copy(low),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time
})
if s_prev[u] == u:
assert s_prev[s_last] == s_last
break
pr = s_prev[s_last]
s_prev[s_last] = s_last
s_last = pr
u = s_last
probing.push(
probes,
specs.Stage.OUTPUT,
next_probe={'is_bridge': np.copy(is_bridge)},
)
probing.finalize(probes)
return is_bridge, probes
def strongly_connected_components(A: _Array) -> _Out:
"""Kosaraju's strongly-connected components (Aho et al., 1974)."""
chex.assert_rank(A, 2)
probes = probing.initialize(
specs.SPECS['strongly_connected_components'])
A_pos = np.arange(A.shape[0])
probing.push(
probes,
specs.Stage.INPUT,
next_probe={
'pos': np.copy(A_pos) * 1.0 / A.shape[0],
'A': np.copy(A),
'adj': probing.graph(np.copy(A))
})
scc_id = np.arange(A.shape[0])
color = np.zeros(A.shape[0], dtype=np.int32)
d = np.zeros(A.shape[0])
f = np.zeros(A.shape[0])
s_prev = np.arange(A.shape[0])
time = 0
A_t = np.transpose(A)
for s in range(A.shape[0]):
if color[s] == 0:
s_last = s
u = s
v = s
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 0
})
while True:
if color[u] == 0 or d[u] == 0.0:
time += 0.01
d[u] = time
color[u] = 1
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev': np.copy(s_prev),
's': probing.mask_one(s, A.shape[0]),
'u': probing.mask_one(u, A.shape[0]),
'v': probing.mask_one(v, A.shape[0]),
's_last': probing.mask_one(s_last, A.shape[0]),
'time': time,
'phase': 0
})
for v in range(A.shape[0]):
if A[u, v] != 0:
if color[v] == 0:
color[v] = 1
s_prev[v] = s_last
s_last = v
probing.push(
probes,
specs.Stage.HINT,
next_probe={
'scc_id_h': np.copy(scc_id),
'A_t': probing.graph(np.copy(A_t)),
'color': probing.array_cat(color, 3),
'd': np.copy(d),
'f': np.copy(f),
's_prev':
|
np.copy(s_prev)
|
numpy.copy
|
"""
Main Data Structure of the topoGenesis
"""
import numpy as np
import pyvista as pv
import itertools
import concurrent.futures
import warnings
import os
__author__ = "<NAME>, and <NAME>"
__copyright__ = "???"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "???"
__version__ = "0.0.2"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Dev"
file_directory = os.path.dirname(os.path.abspath(__file__))
class lattice(np.ndarray):
def __new__(subtype, bounds, unit=1, dtype=float, buffer=None, offset=0,
strides=None, order=None, default_value=None):
# extracting min and max from bounds and discretizing them
bounds = np.array(bounds)
minbound = np.rint(bounds[0] / unit).astype(int)
maxbound = np.rint(bounds[1] / unit).astype(int)
bounds = np.array([minbound, maxbound])*unit
# unit np array
unit = np.array(unit)
# raise a ValueError if the size of unit is neither 1 nor the length of the minimum
if unit.size != 1 and unit.size != minbound.size:
raise ValueError(
'the length of unit array needs to be either 1 or equal to the min/max arrays')
# calculating shape based on bounds and unit
shape = 1 + maxbound - minbound
# set default value
if default_value is not None:
buffer = np.tile(
default_value, shape)
#obj = obj * 0 + default_value
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to lattice.__array_finalize__
obj = super(lattice, subtype).__new__(subtype, shape, dtype,
buffer, offset, strides,
order)
# set the 'bounds' attribute
obj.bounds = bounds
# set the attribute 'unit' to itself if it has the same size as the minimum,
# if the size is 1, tile it with the size of minimum vector
obj.unit = unit if unit.size == minbound.size else np.tile(
unit, minbound.size)
# init an empty connectivity
obj.connectivity = None
# Finally, we must return the newly created object:
return obj
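# Example (illustrative): a lattice spanning [0, 0, 0] to [3, 3, 3] with unit=1
# has shape (4, 4, 4), since shape = 1 + maxbound - minbound.
# >>> l = lattice([[0, 0, 0], [3, 3, 3]], unit=1, default_value=0.0)
# >>> l.shape
# (4, 4, 4)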
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(lattice, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. lattice():
# obj is None
# (we're in the middle of the lattice.__new__
# constructor, and self.bounds will be set when we return to
# lattice.__new__)
if obj is None:
return
# From view casting - e.g arr.view(lattice):
# obj is arr
# (type(obj) can be lattice)
# From new-from-template - e.g lattice[:3]
# type(obj) is lattice
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'bounds', because this
# method sees all creation of default objects - with the
# lattice.__new__ constructor, but also with
# arr.view(lattice).
self.bounds = getattr(obj, 'bounds', None)
self.unit = getattr(obj, 'unit', None)
self.connectivity = getattr(obj, 'connectivity', None)
# We do not need to return anything
@property
def minbound(self):
return self.bounds[0]
@property
def maxbound(self):
return self.bounds[1]
@property
def centroids(self):
# extract the indices of the True values (with a sparse matrix we would not need to search)
point_array = np.argwhere(self == True)
# convert to float
point_array = point_array.astype(float)
# multiply by unit
point_array *= self.unit
# move to minimum
point_array += self.minbound
# return as a point cloud
return cloud(point_array, dtype=float)
def fast_vis(self, plot, show_outline=True, show_centroids=True):
# Set the grid dimensions: shape + 1 because we want to inject our values on the CELL data
grid = pv.UniformGrid()
grid.dimensions = np.array(self.shape) + 1
# The bottom left corner of the data set
grid.origin = self.minbound - self.unit * 0.5
grid.spacing = self.unit # These are the cell sizes along each axis
# Add the data values to the cell data
grid.cell_arrays["values"] = self.flatten(
order="F").astype(float) # Flatten the array!
# filtering the voxels
threshed = grid.threshold([0.9, 1.1])
# adding the voxels: light red
plot.add_mesh(threshed, show_edges=True, color="#ff8fa3",
opacity=0.3, label="Cells")
if show_outline:
# adding the boundingbox wireframe
plot.add_mesh(grid.outline(), color="grey", label="Domain")
if show_centroids:
            # adding the voxel centroids: red
            plot.add_mesh(pv.PolyData(self.centroids), color='#ff244c', point_size=5,
                          render_points_as_spheres=True, label="Cell Centroids")
return plot
def fast_notebook_vis(self, plot, show_outline=True, show_centroids=True):
# Set the grid dimensions: shape + 1 because we want to inject our values on the CELL data
grid = pv.UniformGrid()
grid.dimensions = np.array(self.shape) + 1
# The bottom left corner of the data set
grid.origin = self.minbound - self.unit * 0.5
grid.spacing = self.unit # These are the cell sizes along each axis
# Add the data values to the cell data
grid.cell_arrays["values"] = self.flatten(
order="F").astype(float) # Flatten the array!
# filtering the voxels
threshed = grid.threshold([0.9, 1.1])
# adding the voxels: light red
plot.add_mesh(threshed, color="#ff8fa3", opacity=0.3)
# plot.add_mesh(threshed, show_edges=True, color="#ff8fa3", opacity=0.3, label="Cells")
if show_outline:
# adding the boundingbox wireframe
plot.add_mesh(grid.outline(), color="grey")
# plot.add_mesh(grid.outline(), color="grey", label="Domain")
if show_centroids:
            # adding the voxel centroids: red
plot.add_points(pv.PolyData(self.centroids), color='#ff244c')
# plot.add_mesh(pv.PolyData(self.centroids), color='#ff244c', point_size=5, render_points_as_spheres=True, label="Cell Centroidss")
return plot
def boolean_marching_cubes(self):
# construct the boolean_marching_cubes stencil
mc_stencil = create_stencil("boolean_marching_cube", 1)
# getting shifts by expanding the stencil in the Fortran Order
shifts = mc_stencil.expand('F')
# pad the volume with zero in every direction
# TODO make this an option instead of default
volume = np.pad(self, (1, 1), mode='constant', constant_values=(0, 0))
# the id of voxels (0,1,2, ... n)
volume_inds = np.arange(volume.size).reshape(volume.shape)
        # gathering all the replacements in the columns
replaced_columns = [
np.roll(volume_inds, shift, np.arange(3)).ravel() for shift in shifts]
# stacking the columns
cell_corners = np.stack(replaced_columns, axis=-1)
# converting volume value (TODO: this needs to become a method of its own)
volume_flat = volume.ravel()
volume_flat[volume_flat > 0.5] = 1
volume_flat[volume_flat < 0.5] = 0
# replace neighbours by their value in volume
neighbor_values = volume_flat[cell_corners]
# computing the cell tile id
# the powers of 2 in an array
legend = 2**np.arange(8)
# multiply the corner with the power of two, sum them, and reshape to the original volume shape
tile_id = np.sum(legend * neighbor_values,
axis=1).reshape(volume.shape)
# drop the last column, row and page (since cube-grid is 1 less than the voxel grid in every dimension)
# TODO consider that removing padding would eliminate the need for this line
cube_grid = tile_id[:-1, :-1, :-1]
# initializing the lattice
cube_lattice = lattice([self.minbound, self.maxbound + self.unit],
unit=self.unit, dtype=np.uint8, buffer=cube_grid, default_value=False)
# set the values that are bigger than 0 (transfering values)
cube_lattice[cube_grid > 0] = cube_grid[cube_grid > 0]
return cube_lattice
def find_connectivity(self, stencil):
raise NotImplementedError
class cloud(np.ndarray):
def __new__(subtype, point_array, dtype=float, buffer=None, offset=0,
strides=None, order=None):
# extracting the shape from point_array
shape = point_array.shape
# using the point_array as the buffer
buffer = point_array.flatten(order="C")
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to cloud.__array_finalize__
obj = super(cloud, subtype).__new__(subtype, shape, dtype,
buffer, offset, strides,
order)
# set the 'bounds' attribute
obj.bounds = np.array([obj.min(axis=0), obj.max(axis=0)])
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(cloud, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. cloud():
# obj is None
# (we're in the middle of the cloud.__new__
# constructor, and self.bounds will be set when we return to
# cloud.__new__)
if obj is None:
return
# From view casting - e.g arr.view(cloud):
# obj is arr
# (type(obj) can be cloud)
# From new-from-template - e.g cloud[:3]
# type(obj) is cloud
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'bounds', because this
# method sees all creation of default objects - with the
# cloud.__new__ constructor, but also with
# arr.view(cloud).
self.bounds = getattr(obj, 'bounds', None)
# We do not need to return anything
@property
def minbound(self):
return self.bounds[0]
@property
def maxbound(self):
return self.bounds[1]
def regularize(self, unit, **kwargs):
"""[summary]
Arguments:
unit {[float or array of floats]} -- [the unit separation between cells of lattice]
Keyword Arguments:
closed {[Boolean]} -- [False by default. If the cell intervals are closed intervals or not.]
Raises:
            ValueError: [unit needs to be either a float or an array of floats that has the same dimension as the points in the point cloud]
Returns:
            [lattice] -- [a boolean lattice representing the rasterization of the point cloud]
"""
####################################################
# INPUTS
####################################################
unit = np.array(unit)
if unit.size != 1 and unit.size != self.bounds.shape[1]:
raise ValueError(
'the length of unit array needs to be either 1 or equal to the dimension of point cloud')
elif unit.size == 1:
            unit = np.tile(unit, self.bounds.shape[1])  # flat (n,) unit vector, as lattice.__new__ expects
closed = kwargs.get('closed', False)
####################################################
# PROCEDURE
####################################################
if closed:
# retrieve the identity matrix as a list of main axes
axes = np.identity(unit.size).astype(int)
# R3 to Z3 : finding the closest voxel to each point
point_scaled = self / unit
            # shift the scaled points backward and forward (s in [-1, 1]) along the directions orthogonal to each axis (n in 1-axes) and round all the possibilities
vox_ind = [np.rint(point_scaled + unit * n * s * 0.001)
for n in (1-axes) for s in [-1, 1]]
print([unit * n * s for n in (1-axes) for s in [-1, 1]])
vox_ind = np.vstack(vox_ind)
print("close")
else:
vox_ind = np.rint(self / unit).astype(int)
print("here")
# removing repetitions
unique_vox_ind = np.unique(vox_ind, axis=0).astype(int)
# mapping the voxel indices to real space
reg_pnt = unique_vox_ind * unit
# initializing the volume
l = lattice([self.minbound, self.maxbound], unit=unit,
dtype=bool, default_value=False)
# map the indices to start from zero
mapped_ind = unique_vox_ind - np.rint(l.bounds[0]/l.unit).astype(int)
# setting the occupied voxels to True
l[mapped_ind[:, 0], mapped_ind[:, 1], mapped_ind[:, 2]] = True
####################################################
# OUTPUTS
####################################################
return l
def fast_vis(self, plot, color='#2499ff'):
# adding the original point cloud: blue
plot.add_mesh(pv.PolyData(self), color=color, point_size=3,
render_points_as_spheres=True, label="Point Cloud")
return plot
def fast_notebook_vis(self, plot, color='#2499ff'):
# adding the original point cloud: blue
plot.add_points(pv.PolyData(self), color=color)
return plot
class stencil(np.ndarray):
def __new__(subtype, point_array, ntype="Custom", origin=np.array([0, 0, 0]), dtype=int, buffer=None, offset=0,
strides=None, order=None):
# extracting the shape from point_array
shape = point_array.shape
# using the point_array as the buffer
buffer = point_array.flatten(order="C")
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to stencil.__array_finalize__
obj = super(stencil, subtype).__new__(subtype, shape, dtype,
buffer, offset, strides,
order)
# set the neighbourhood type
obj.ntype = ntype
# set the origin
obj.origin = origin
# set the 'bounds' attribute
shape_arr = np.array(shape)
obj.bounds = np.array([shape_arr * 0, shape_arr - 1]) - origin
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(stencil, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. stencil():
# obj is None
# (we're in the middle of the stencil.__new__
# constructor, and self.bounds will be set when we return to
# stencil.__new__)
if obj is None:
return
# From view casting - e.g arr.view(stencil):
# obj is arr
# (type(obj) can be stencil)
# From new-from-template - e.g stencil[:3]
# type(obj) is stencil
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'properties', because this
# method sees all creation of default objects - with the
# stencil.__new__ constructor, but also with
# arr.view(stencil).
self.bounds = getattr(obj, 'bounds', None)
self.ntype = getattr(obj, 'ntype', None)
self.origin = getattr(obj, 'origin', None)
# We do not need to return anything
def __array_wrap__(self, array, context=None):
temp = np.array(array)
# checking if the array has any value other than 0, and 1
np.place(temp, temp > 0.5, [1])
np.place(temp, temp < 0.5, [0])
return stencil(temp, ntype="custom", origin=self.origin)
@property
def minbound(self):
return self.bounds[0]
@property
def maxbound(self):
return self.bounds[1]
def expand(self, sort="dist"):
# list the locations
locations = self.origin - np.argwhere(self)
# check the sorting method
if sort == "dist": # Sorted Based on the distance from origin
# calculating the distance of each neighbour
sums = np.abs(locations).sum(axis=1)
# sorting to identify the main cell
order = np.argsort(sums)
elif sort == "F": # Fortran Sort, used for Boolean Marching Cubes
order = np.arange(self.size).reshape(self.shape).flatten('F')
# sort and return
return locations[order].astype(int)
def set_index(self, index, value):
ind = np.array(index) + self.origin
if ind.size != 3:
raise ValueError(" the index needs to have three components")
self[ind[0], ind[1], ind[2]] = value
def create_stencil(type_str, steps, clip=None):
# check if clip is specified. if it is not, set it to the steps
    if clip is None:
clip = steps
# von neumann neighborhood
if type_str == "von_neumann":
# https://en.wikipedia.org/wiki/Von_Neumann_neighborhood
        # calculating all the possible shifts to apply to the array
shifts = np.array(list(itertools.product(
list(range(-clip, clip+1)), repeat=3)))
        # the number of steps that the neighbour is apart from the cell (step=1: 6 neighbours, step=2: 18 neighbours, step=3: 26 neighbours)
shift_steps = np.sum(np.absolute(shifts), axis=1)
# check the number of steps
chosen_shift_ind = np.argwhere(shift_steps <= steps).ravel()
        # select the valid indices from the shifts variable, transpose them to get separate indices in rows, add the number of steps to make this an index
locs = np.transpose(shifts[chosen_shift_ind]) + clip
        # initialize the stencil
s = np.zeros((clip*2+1, clip*2+1, clip*2+1)).astype(int)
# fill in the stencil
s[locs[0], locs[1], locs[2]] = 1
return stencil(s, ntype=type_str, origin=np.array([clip, clip, clip]))
elif type_str == "moore":
# https://en.wikipedia.org/wiki/Moore_neighborhood
        # calculating all the possible shifts to apply to the array
shifts = np.array(list(itertools.product(
list(range(-clip, clip+1)), repeat=3)))
        # the number of steps that the neighbour is apart from the origin cell
shift_steps = np.max(np.absolute(shifts), axis=1)
# check the number of steps
chosen_shift_ind = np.argwhere(shift_steps <= steps).ravel()
        # select the valid indices from the shifts variable, transpose them to get separate indices in rows, add the number of steps to make this an index
locs = np.transpose(shifts[chosen_shift_ind]) + clip
        # initialize the stencil
s = np.zeros((clip*2+1, clip*2+1, clip*2+1)).astype(int)
# fill in the stencil
s[locs[0], locs[1], locs[2]] = 1
return stencil(s, ntype=type_str, origin=np.array([clip, clip, clip]))
elif type_str == "boolean_marching_cube":
# # shifts to check 8 corner of cube (multiply by -1 since shift goes backward)
# shifts = np.array([
# [0, 0, 0], # 1
# [1, 0, 0], # 2
# [0, 1, 0], # 4
# [1, 1, 0], # 8
# [0, 0, 1], # 16
# [1, 0, 1], # 32
# [0, 1, 1], # 64
# [1, 1, 1] # 128
# ])*-1
# # the number of steps that the neighbour is appart from the origin cell
# shift_steps = np.max(np.absolute(shifts), axis=1)
# # check the number of steps
# chosen_shift_ind = np.argwhere(shift_steps <= steps).ravel()
# # select the valid indices from shifts variable, transpose them to get separate indicies in rows, add the number of steps to make this an index
# locs = np.transpose(shifts[chosen_shift_ind]) + clip
        # initialize the stencil
s = np.ones((2, 2, 2)).astype(int)
# # fill in the stencil
# s[locs[0], locs[1], locs[2]] = 1
return stencil(s, ntype=type_str, origin=np.array([0, 0, 0]))
else:
raise ValueError(
'non-valid neighborhood type for stencil creation')
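# Illustrative sketch (not part of the original module): build a step-1 von Neumann
# stencil with create_stencil and expand it into its list of index shifts. Assumes
# numpy and itertools are imported at the top of this module, as create_stencil itself
# requires; the function name below is hypothetical.
def _demo_von_neumann_stencil():
    # the centre cell plus its 6 face neighbours
    s = create_stencil("von_neumann", 1)
    # expand() returns the index offsets sorted by distance from the origin,
    # so the first entry is the centre cell itself
    shifts = s.expand()
    return s, shifts  # s.shape == (3, 3, 3), len(shifts) == 7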
def scatter(bounds, count):
"""[summary]
Arguments:
bounds {[2d array]} -- [array of two vectors, indicating the bounding box of the scattering envelope with a minimum and maximum of the bounding box]
count {[int]} -- [number of the points to scatter within the bounding box]
Returns:
        [cloud] -- [returns a cloud object containing the coordinates of the scattered points]
"""
point_array = np.random.uniform(
bounds[0], bounds[1], (count, bounds.shape[1]))
return cloud(point_array)
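# Illustrative sketch (not part of the original module): scatter random points in a
# bounding box and rasterize them into a boolean lattice with cloud.regularize. A
# per-axis unit vector is passed explicitly; the bounds, point count and function name
# are arbitrary.
def _demo_scatter_and_regularize():
    bounds = np.array([[0.0, 0.0, 0.0], [10.0, 10.0, 10.0]])
    # scatter 100 uniformly random points inside the bounding box
    pc = scatter(bounds, 100)
    # voxelate the point cloud with a cell size of 1.0 along every axis
    l = pc.regularize(np.array([1.0, 1.0, 1.0]))
    # occupied cells are True; centroids maps them back to world coordinates
    return l, l.centroids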
def cloud_from_csv(file_path, delimiter=','):
point_array = np.genfromtxt(file_path, delimiter=delimiter)
return cloud(point_array)
def lattice_from_csv(file_path):
# read the voxel 3-dimensional indices
ind_flat = np.genfromtxt(file_path, delimiter=',',
skip_header=8, usecols=(0, 1, 2)).astype(int)
# read the voxel values
vol_flat = np.genfromtxt(
file_path, delimiter=',', skip_header=8, usecols=(3)).astype(int)
# read volume meta data
meta = np.genfromtxt(
file_path, delimiter='-', skip_header=1, max_rows=3, usecols=(1, 2, 3))
unit = meta[0]
min_bound = meta[1]
volume_shape = meta[2].astype(int)
max_bound = min_bound + unit * volume_shape
# reshape the 1d array to get 3d array
vol = vol_flat.reshape(volume_shape)
# initializing the lattice
l = lattice([min_bound, max_bound], unit=unit,
dtype=bool, default_value=False)
    # setting the lattice equal to volume
l[ind_flat[:, 0], ind_flat[:, 1], ind_flat[:, 2]
] = vol[ind_flat[:, 0], ind_flat[:, 1], ind_flat[:, 2]]
return l
def find_neighbours(lattice, stencil):
# flatten the lattice
lattice_flat = lattice.ravel()
# the id of voxels (0,1,2, ... n)
lattice_inds = np.arange(lattice.size).reshape(lattice.shape)
    # removing the indices that are not filled in the volume
lattice_inds = ((lattice_inds + 1) * lattice) - 1
    # pad the volume of voxel indices with the value -1 in every direction
lattice_inds_paded = np.pad(lattice_inds, (1, 1), mode='constant',
constant_values=(-1, -1))
# flatten
lattice_inds_paded_flat = lattice_inds_paded.ravel()
# index of padded cells in flatten
origin_flat_ind = np.argwhere(lattice_inds_paded_flat != -1).ravel()
    # retrieving all the possible shifts corresponding to the neighbours defined in the stencil
    shifts = stencil.expand()
    # gathering all the replacements in the columns
replaced_columns = [
np.roll(lattice_inds_paded, shift, np.arange(3)).ravel() for shift in shifts]
# stacking the columns and removing the pads (and also removing the neighbours of the empty voxels since we have tagged them -1 like paddings)
cell_neighbors = np.stack(replaced_columns, axis=-1)[origin_flat_ind]
return cell_neighbors
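# Illustrative sketch (not part of the original module): query, for every occupied cell
# of a small fully-filled lattice, the flat indices of its von Neumann neighbours
# (-1 marks neighbours that fall on the padding outside the lattice). The function
# name is hypothetical.
def _demo_find_neighbours():
    bounds = np.array([[0, 0, 0], [2, 2, 2]])
    l = lattice(bounds, unit=np.array([1, 1, 1]), dtype=bool, default_value=False)
    l[...] = True
    s = create_stencil("von_neumann", 1)
    # one row per occupied cell, one column per stencil neighbour
    return find_neighbours(l, s)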
def marching_cube_vis(p, cube_lattice, style_str):
    # extract cube indices
cube_ind = np.transpose(np.indices(cube_lattice.shape),
(1, 2, 3, 0)).reshape(-1, 3)
# extract cube positions
cube_pos = (cube_ind - 0.5) * cube_lattice.unit + cube_lattice.minbound
# extract cube tid
cube_tid = cube_lattice.ravel()
# remove the cube position and tid where tid is 0
filled_cube_pos = cube_pos[cube_tid > 0]
filled_cube_tid = cube_tid[cube_tid > 0]
if style_str != "chamfer":
raise ValueError(
"Meshing style is not valid. Valid styles are: ['chamfer']")
# load tiles
tiles = [0]
for i in range(1, 256):
tile_path = os.path.join(
file_directory, "resources/mc_tiles", style_str, "Tile_{0:03d}.obj".format(i))
tile = pv.read(tile_path)
tile.points *= cube_lattice.unit
tiles.append(tile)
new_points = tiles[filled_cube_tid[0]].points + filled_cube_pos[0]
new_faces = tiles[filled_cube_tid[0]].faces.reshape(-1, 4)
# merge tiles
for i in range(1, filled_cube_tid.size):
tile = tiles[filled_cube_tid[i]]
# add the faces list, changing the point numbers
new_faces = np.concatenate(
(new_faces, tile.faces.reshape(-1, 4) + np.array([0, 1, 1, 1])*new_points.shape[0]), axis=0)
# add the new points, change the position based on the location
new_points = np.concatenate(
(new_points, tile.points + filled_cube_pos[i]), axis=0)
# construct the new mesh and add it to plot
new_tile = pv.PolyData(new_points, new_faces)
p.add_mesh(new_tile, color='#abd8ff')
return p
def mesh_sampling(mesh, unit, tol=1e-06, **kwargs):
"""This algorithm samples a mesh based on unit size
Args:
        mesh ([COMPAS Mesh]): [description]
unit ([numpy array]): [Unit represents the unit size in the sampling grid. It needs to be one float value or an array-like with three elements. In case that a scalar is given it will used for all three dimensions]
tol ([type], optional): [description]. Defaults to 1e-06.
Returns:
[type]: [description]
"""
####################################################
# INPUTS
####################################################
unit =
|
np.array(unit)
|
numpy.array
|
import numpy as np
import numexpr as ne
def score(Y):
'''
Returns the score function evaluated for each sample
'''
return ne.evaluate('tanh(Y / 2)')
def score_der(psiY):
'''
Returns the derivative of the score
'''
return ne.evaluate('(- psiY ** 2 + 1.) / 2.')
def loss(Y, W):
'''
Computes the loss function for (Y, W)
'''
T = Y.shape[1]
log_det = np.linalg.slogdet(W)[1]
logcoshY = np.sum(ne.evaluate('abs(Y) + 2. * log1p(exp(-abs(Y)))'))
return - log_det + logcoshY / float(T)
def gradient(Y, psiY):
'''
Returns the gradient at Y, using the score psiY
'''
N, T = Y.shape
return np.inner(psiY, Y) / float(T) - np.eye(N)
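# Illustrative sketch (not part of the original module): evaluate the loss and the
# relative gradient on a toy set of independent Laplace-distributed sources. For
# independent sources the off-diagonal entries of the gradient should be close to
# zero. The function name and the toy dimensions are arbitrary.
def _demo_loss_and_gradient(n_sources=4, n_samples=10000, seed=0):
    rng = np.random.RandomState(seed)
    Y = rng.laplace(size=(n_sources, n_samples))
    W = np.eye(n_sources)
    psiY = score(Y)
    return loss(Y, W), gradient(Y, psiY)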
def compute_h(Y, psidY, precon=2):
'''
    Returns the diagonal coefficients of H1 / H2 in an N x N matrix
'''
N, T = Y.shape
if precon == 2:
return np.inner(psidY, Y ** 2) / float(T)
else:
Y_squared = Y ** 2
sigma2 = np.mean(Y_squared, axis=1)
psidY_mean = np.mean(psidY, axis=1)
h1 = psidY_mean[:, None] * sigma2[None, :]
diagonal_term = np.mean(Y_squared * psidY)
h1[np.diag_indices_from(h1)] = diagonal_term
return h1
def regularize_h(h, lambda_min, mode=0):
'''
Regularizes the hessian approximation h using the constant lambda_min.
Mode selects the regularization algorithm
0 -> Shift each eigenvalue below lambda_min to lambda_min
1 -> add lambda_min x Id to h
'''
if mode == 0:
# Compute the eigenvalues of the Hessian
eigenvalues = 0.5 * (h + h.T - np.sqrt((h-h.T) ** 2 + 4.))
# Regularize
problematic_locs = eigenvalues < lambda_min
|
np.fill_diagonal(problematic_locs, False)
|
numpy.fill_diagonal
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 17:26:25 2019
@author: ben
"""
import numpy as np
import pointCollection as pc
import os
import h5py
class data(pc.data):
def __init__(self, bin_W=[1.e4, 1.e4], **kwargs):
self.bin_W=bin_W
super().__init__(**kwargs)
def to_file(self, D, out_file, time_field='time', append=True, ind_fields=['x','y','time']):
y_bin_function=np.round(D.y/self.bin_W[0])
x_bin_function=np.round(D.x/self.bin_W[1])
# 6/23/21: added +1 to the scales (so that max(x_bin_fn)-min(x_bin_fn)<x_scale)
x_scale=np.nanmax(x_bin_function)-np.nanmin(x_bin_function)+1
t=getattr(D, time_field)
t_scale=np.nanmax(t)-np.nanmin(t)+1
xy_bin_function=(y_bin_function-np.nanmin(y_bin_function))*x_scale+(x_bin_function-np.nanmin(x_bin_function))
xyt_bin_function= xy_bin_function + (t-np.nanmin(t))/t_scale
ind=np.argsort(xyt_bin_function)
bin_dict={}
xy_bin_fn_sort=xy_bin_function[ind]
fn_delta=np.concatenate([[-1], np.flatnonzero(np.diff(xy_bin_fn_sort)), [xy_bin_fn_sort.size]])
for ii in range(len(fn_delta)-1):
this_ind=ind[(fn_delta[ii]+1):(fn_delta[ii+1]+1)]
bin_dict[(x_bin_function[this_ind[0]], y_bin_function[this_ind[0]])]=this_ind
key_arr=np.array([key for key in bin_dict.keys()])
key_order=np.argsort(key_arr[:,1]-
|
np.min(key_arr[:,1])
|
numpy.min
|
from os.path import join, dirname
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Slider, TextInput
from bokeh.plotting import figure
import os
#os.chdir(r'C:\Users\user\Desktop\POSMOT\DISSERTACAO\ARTIGO_PERIODIC_BEAM')
def beam(lC1, lC2, wi, hC1, hC2):
Ia = wi*hC1**3/12
Ib = wi*hC2**3/12
Aa = wi*hC1
Ab = wi*hC2
E = 69e9
La = lC1/5
Lb = lC2/5
rho = 2700
N = 1000
omega = np.linspace(0,50000,N)
#freq = omega/(2*np.pi)
def periodic_beam(w, E, I, L, A, rho):
K = (E*I/L**3)* np.array([[12,6*L,-12,6*L],[6*L,4*L**2,-6*L,2*L**2],[-12,-6*L,12,-6*L],[6*L,2*L**2,-6*L,4*L**2]])
M = (rho*A*L/420)*np.array([[156,22*L,54,-13*L],[22*L,4*L**2,13*L,-3*L**2],[54,13*L,156,-22*L],[-13*L,-3*L**2,-22*L,4*L**2]])
D = K - w**2*M
Dlr = np.array([[ D[0][2], D[0][3]], [D[1][2], D[1][3]]])
Drl = np.array([[ D[2][0], D[2][1]], [D[3][0], D[3][1]]])
Dll = np.array([[ D[0][0], D[0][1]], [D[1][0], D[1][1]]])
Drr = np.array([[ D[2][2], D[2][3]], [D[3][2], D[3][3]]])
T1 = np.matmul(-np.linalg.inv(Dlr) ,Dll)
T2 = np.linalg.inv(Dlr)
T3 = np.matmul(np.matmul(Drr ,np.linalg.inv(Dlr)) , Dll) - Drl
T4 = np.matmul(-Drr , np.linalg.inv(Dlr))
T = np.zeros(shape=(4,4), dtype=complex)
T[0:2, 0:2] = T1
T[0:2, 2:4] = T2
T[2:4, 0:2] = T3
T[2:4, 2:4] = T4
return T
final = np.zeros(shape=(N,4), dtype=complex)
i = 0
for w in omega:
eq1= periodic_beam(w, E, Ia, La, Aa, rho)
eq2 = periodic_beam(w, E ,Ib, Lb, Ab,rho)
Tfinal = eq1@eq1@ eq1 @ eq1 @ eq1@ eq2 @ eq2@eq2@eq2@eq2
eig_final = np.linalg.eigvals(Tfinal)
eig_final =
|
np.sort(eig_final)
|
numpy.sort
|
# Runs various registration algorithms
# start with the setup
import importlib.util
import os
import sys
sys.path.insert(0,os.path.abspath('..'))
sys.path.insert(0,os.path.abspath('../mermaid'))
sys.path.insert(0,os.path.abspath('../mermaid/libraries'))
import unittest
import torch
from mermaid.data_wrapper import AdaptVal
import numpy as np
import numpy.testing as npt
import random
import mermaid.example_generation as eg
import mermaid.module_parameters as pars
import mermaid.multiscale_optimizer as MO
import mermaid.smoother_factory as SF
try:
importlib.util.find_spec('HtmlTestRunner')
foundHTMLTestRunner = True
import HtmlTestRunner
except ImportError:
foundHTMLTestRunner = False
# test it
class Test_registration_algorithms(unittest.TestCase):
def createImage(self,ex_len=64):
example_img_len = ex_len
dim = 2
szEx = np.tile(example_img_len, dim) # size of the desired images: (sz)^dim
I0, I1, self.spacing = eg.CreateSquares(dim).create_image_pair(szEx,self.params) # create a default image size with two sample squares
self.sz = np.array(I0.shape)
# create the source and target image as pyTorch variables
self.ISource = AdaptVal(torch.from_numpy(I0.copy()))
self.ITarget = AdaptVal(torch.from_numpy(I1))
# smooth both a little bit
self.params[('image_smoothing', {}, 'image smoothing settings')]
self.params['image_smoothing'][('smooth_images', True, '[True|False]; smoothes the images before registration')]
self.params['image_smoothing'][('smoother', {}, 'settings for the image smoothing')]
self.params['image_smoothing']['smoother'][('gaussian_std', 0.05, 'how much smoothing is done')]
self.params['image_smoothing']['smoother'][('type', 'gaussian', "['gaussianSpatial'|'gaussian'|'diffusion']")]
cparams = self.params['image_smoothing']
s = SF.SmootherFactory(self.sz[2::], self.spacing).create_smoother(cparams)
self.ISource = s.smooth(self.ISource)
self.ITarget = s.smooth(self.ITarget)
def setUp(self):
torch.manual_seed(2019)
torch.cuda.manual_seed(2019)
np.random.seed(2019)
random.seed(2019)
def tearDown(self):
pass
def test_svf_image_single_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_svf_image_single_scale_config.json')
self.createImage( 32 )
so = MO.SimpleSingleScaleRegistration( self.ISource, self.ITarget, self.spacing, self.sz, self.params)
so.get_optimizer().set_visualization(False)
so.register()
# E=[ 1.80229616], similarityE=[ 0.71648604], regE=[ 1.08581007], relF=[ 0.0083105]
energy = so.get_energy()
npt.assert_almost_equal( energy[0], 1.7919, decimal=1 )
npt.assert_almost_equal( energy[1], 0.5309, decimal=1 )
npt.assert_almost_equal( energy[2], 1.2610, decimal=1 )
def test_lddmm_shooting_scalar_momentum_image_single_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_lddmm_shooting_scalar_momentum_image_single_scale_config.json')
self.createImage()
so = MO.SimpleSingleScaleRegistration(self.ISource, self.ITarget, self.spacing, self.sz, self.params)
so.get_optimizer().set_visualization(False)
so.register()
# E = [0.03198373], similarityE = [0.0210261], regE = [0.01095762], relF = [0.]
energy = so.get_energy()
npt.assert_almost_equal(energy[0], 0.0319, decimal=4 )
npt.assert_almost_equal(energy[1], 0.0210, decimal=4 )
npt.assert_almost_equal(energy[2], 0.0110, decimal=4 )
def test_lddmm_shooting_image_single_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_lddmm_shooting_image_single_scale_config.json')
self.createImage()
so = MO.SimpleSingleScaleRegistration(self.ISource, self.ITarget, self.spacing, self.sz, self.params)
so.get_optimizer().set_visualization(False)
so.register()
# E=[ 0.02896098], similarityE=[ 0.0170299], regE=[ 0.01193108], relF=[ 0.00193194]
energy = so.get_energy()
npt.assert_almost_equal(energy[0], 0.0308, decimal=2 )
npt.assert_almost_equal(energy[1], 0.0187, decimal=2 )
npt.assert_almost_equal(energy[2], 0.0121, decimal=2 )
def test_lddmm_shooting_scalar_momentum_image_multi_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_lddmm_shooting_scalar_momentum_image_multi_scale_config.json')
self.createImage()
mo = MO.SimpleMultiScaleRegistration(self.ISource, self.ITarget, self.spacing, self.sz, self.params)
mo.get_optimizer().set_visualization(False)
mo.register()
# E=[ 0.03197587], similarityE=[ 0.02087387], regE=[ 0.01110199], relF=[ 0.00138645]
energy = mo.get_energy()
npt.assert_almost_equal(energy[0], 0.0318, decimal=4 )
npt.assert_almost_equal(energy[1], 0.0207, decimal=4 )
npt.assert_almost_equal(energy[2], 0.0111, decimal=4 )
def test_lddmm_shooting_image_multi_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_lddmm_shooting_image_multi_scale_config.json')
self.createImage()
mo = MO.SimpleMultiScaleRegistration(self.ISource, self.ITarget, self.spacing, self.sz, self.params)
mo.get_optimizer().set_visualization(False)
mo.register()
# E = [0.04338037], similarityE = [0.03070126], regE = [0.01267911], relF = [0.01936091]
energy = mo.get_energy()
npt.assert_almost_equal(energy[0], 0.0432, decimal=4 )
npt.assert_almost_equal(energy[1], 0.0306, decimal=4 )
npt.assert_almost_equal(energy[2], 0.0127, decimal=4 )
def test_lddmm_shooting_scalar_momentum_map_multi_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_lddmm_shooting_scalar_momentum_map_multi_scale_config.json')
self.createImage()
mo = MO.SimpleMultiScaleRegistration(self.ISource, self.ITarget, self.spacing, self.sz, self.params)
mo.get_optimizer().set_visualization(False)
mo.register()
# E = [0.08930502], simE = [0.08034889], regE = [0.00895613], optParE = [0.], relF = [0.03883468]
energy = mo.get_energy()
npt.assert_almost_equal(energy[0], 0.0434, decimal=4 )
npt.assert_almost_equal(energy[1], 0.0324, decimal=4 )
npt.assert_almost_equal(energy[2], 0.0110, decimal=4 )
def test_lddmm_shooting_map_multi_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_lddmm_shooting_map_multi_scale_config.json')
self.createImage()
mo = MO.SimpleMultiScaleRegistration(self.ISource, self.ITarget, self.spacing, self.sz, self.params)
mo.get_optimizer().set_visualization(False)
mo.register()
# E = [0.07970674], simE = [0.06657108], regE = [0.01313565], optParE = [0.], relF = [0.02088663]
energy = mo.get_energy()
npt.assert_almost_equal(energy[0], 0.0721, decimal=4 )
npt.assert_almost_equal(energy[1], 0.0580, decimal=4 )
npt.assert_almost_equal(energy[2], 0.0141, decimal=4 )
def test_svf_map_single_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_svf_map_single_scale_config.json')
self.createImage( 32 )
so = MO.SimpleSingleScaleRegistration( self.ISource, self.ITarget, self.spacing, self.sz, self.params)
so.get_optimizer().set_visualization(False)
so.register()
# E = [36.42594528], similarityE = [16.22630882], regE = [20.19963646], relF = [0.0422723]
energy = so.get_energy()
npt.assert_almost_equal( energy[0], 16.9574, decimal=0 )
npt.assert_almost_equal( energy[1], 6.7187, decimal=0 )
npt.assert_almost_equal( energy[2], 10.2387, decimal=0 )
def test_lddmm_shooting_scalar_momentum_map_single_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_lddmm_shooting_scalar_momentum_map_single_scale_config.json')
self.createImage()
so = MO.SimpleSingleScaleRegistration( self.ISource, self.ITarget, self.spacing, self.sz, self.params)
so.get_optimizer().set_visualization(False)
so.register()
# E=[ 0.04196917], similarityE=[ 0.03112457], regE=[ 0.0108446], relF=[ 5.37358646e-05]
energy = so.get_energy()
npt.assert_almost_equal( energy[0], 0.0419, decimal=4 )
npt.assert_almost_equal( energy[1], 0.0311, decimal=4 )
npt.assert_almost_equal( energy[2], 0.0108, decimal=4 )
def test_lddmm_shooting_map_single_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/test_lddmm_shooting_map_single_scale_config.json')
self.createImage()
so = MO.SimpleSingleScaleRegistration( self.ISource, self.ITarget, self.spacing, self.sz, self.params)
so.get_optimizer().set_visualization(False)
so.register()
# E = [0.05674197], similarityE = [0.04364978], regE = [0.01309219], relF = [0.01391943]
energy = so.get_energy()
npt.assert_almost_equal( energy[0], 0.0549, decimal=3)
npt.assert_almost_equal( energy[1], 0.0415, decimal=3)
npt.assert_almost_equal( energy[2], 0.0133, decimal=3)
def test_svf_scalar_momentum_image_single_scale(self):
self.params = pars.ParameterDict()
self.params.load_JSON('./json/svf_momentum_base_config.json')
self.params['model']['deformation']['use_map'] = False
self.params['model']['registration_model']['type'] = 'svf_scalar_momentum_image'
self.createImage()
so = MO.SimpleSingleScaleRegistration(self.ISource, self.ITarget, self.spacing, self.sz, self.params)
so.get_optimizer().set_visualization(False)
so.register()
# E=[0.12413108], simE=[0.11151054], regE=0.012620546855032444
energy = so.get_energy()
npt.assert_almost_equal(energy[0], 0.1242, decimal=4)
|
npt.assert_almost_equal(energy[1], 0.1116, decimal=4)
|
numpy.testing.assert_almost_equal
|
## load packages
import numpy as np
import scipy
import pickle
import os, sys, glob, time
from scipy.ndimage.interpolation import shift, map_coordinates
## Load other sub-packages
from .. import visual_tools, get_img_info, corrections, alignment_tools
from .. import _image_dtype
## Load shared parameters
from . import _distance_zxy, _image_size, _allowed_colors, _corr_channels, _correction_folder
from . import _num_buffer_frames, _num_empty_frames
from .crop import decide_starting_frames, translate_crop_by_drift
def get_num_frame(dax_filename, frame_per_color=_image_size[0], buffer_frame=10, verbose=False):
"""Function to extract image size and number of colors"""
## check input
if '.dax' not in dax_filename:
raise ValueError(
f"Wrong input type, .dax file expected for {dax_filename}")
if not os.path.isfile(dax_filename):
raise IOError(f"input file:{dax_filename} doesn't exist!")
_info_filename = dax_filename.replace('.dax', '.inf')
with open(_info_filename, 'r') as _info_hd:
_infos = _info_hd.readlines()
# get frame number and color information
_num_frame, _num_color = 0, 0
_dx, _dy = 0, 0
for _line in _infos:
_line = _line.rstrip()
if "number of frames" in _line:
_num_frame = int(_line.split('=')[1])
_num_color = (_num_frame - 2*buffer_frame) / frame_per_color
if _num_color != int(_num_color):
raise ValueError("Wrong num_color, should be integer!")
_num_color = int(_num_color)
if "frame dimensions" in _line:
_dx = int(_line.split('=')[1].split('x')[0])
_dy = int(_line.split('=')[1].split('x')[1])
_im_shape = [_num_frame, _dx, _dy]
return _im_shape, _num_color
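# Illustrative sketch (not part of the original module): write a minimal .dax/.inf pair
# in a temporary folder and parse it with get_num_frame. The .inf content below only
# contains the two fields that get_num_frame looks for; real files carry many more
# lines. The function name is hypothetical.
def _demo_get_num_frame():
    import tempfile
    _tmp_dir = tempfile.mkdtemp()
    _dax_path = os.path.join(_tmp_dir, 'demo_fov.dax')
    with open(_dax_path, 'w') as _hd:
        _hd.write('')
    with open(_dax_path.replace('.dax', '.inf'), 'w') as _hd:
        _hd.write('number of frames = 170\n')
        _hd.write('frame dimensions = 2048 x 2048\n')
    # 170 frames, 10 buffer frames on each side, 30 frames per color -> 5 colors
    return get_num_frame(_dax_path, frame_per_color=30, buffer_frame=10, verbose=True)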
# used by the old pipeline
def multi_crop_image_fov(filename, channels, crop_limit_list,
all_channels=_allowed_colors, single_im_size=_image_size,
num_buffer_frames=10, num_empty_frames=0,
drift=np.array([0,0,0]), shift_order=1,
return_limits=False, verbose=False):
"""Function to load images for multiple cells in a fov
Inputs:
filename: .dax filename for given image, string of filename
channels: color_channels for the specific data, list of int or str
crop_limit_list: list of 2x2 or 3x2 array specifying where to crop, list of np.ndarray
all_channels: all allowed colors in given data, list (default: _allowed_colors)
single_im_size: image size for single color full image, list/array of 3 (default:[30,2048,2048])
        num_buffer_frames: number of frames before z-scan starts, int (default:10)
num_empty_frames: number of empty frames at beginning of zscan, int (default: 0)
drift: drift to ref-frame of this image, np.array of 3 (default:[0,0,0])
return_limits: whether return drifted limits for cropping, bool (default: False)
verbose: say something!, bool (default:False)
Outputs:
_cropped_im_list: cropped image list by crop_limit_list x channels
list of len(crop_limit_list) x size of channels
(optional) _drifted_limits: drifted list of crop limits
"""
# load
if 'DaxReader' not in locals():
from ..visual_tools import DaxReader
if 'get_num_frame' not in locals():
from ..get_img_info import get_num_frame
## 0. Check inputs
# filename
if not os.path.isfile(filename):
raise ValueError(f"file {filename} doesn't exist!")
# channels
if isinstance(channels, list):
_channels = [str(ch) for ch in channels]
elif isinstance(channels, int) or isinstance(channels, str):
_channels = [str(channels)]
else:
raise TypeError(f"Wrong input type for channels:{type(channels)}, should be list/str/int")
# check channel values in all_channels
for _ch in _channels:
if _ch not in all_channels:
raise ValueError(f"Wrong input for channel:{_ch}, should be among {all_channels}")
# check num_buffer_frames and num_empty_frames
num_buffer_frames = int(num_buffer_frames)
num_empty_frames = int(num_empty_frames)
## 1. Load image
if verbose:
print(f"-- crop {len(crop_limit_list)} images with channels:{_channels}")
# extract image info
_full_im_shape, _num_channels = get_num_frame(filename,
frame_per_color=single_im_size[0],
buffer_frame=num_buffer_frames)
# load the whole image
if verbose:
print(f"--- load image from file:{filename}", end=', ')
_load_start = time.time()
_full_im = DaxReader(filename, verbose=verbose).loadAll()
    # determine the starting frame of each channel and split the stack, dropping the trailing buffer frames
_start_frames = decide_starting_frames(_channels, _num_channels, all_channels=all_channels,
num_buffer_frames=num_buffer_frames, num_empty_frames=num_empty_frames,
verbose=verbose)
_splitted_ims = [_full_im[_sf:-num_buffer_frames:_num_channels] for _sf in _start_frames]
if verbose:
print(f"in {time.time()-_load_start}s")
## 2. Prepare crops
if verbose:
print(f"-- start cropping: ", end='')
_start_time = time.time()
_old_crop_list = []
_drift_crop_list = []
for _crop in crop_limit_list:
if len(_crop) == 2:
_n_crop = np.array([
|
np.array([0, single_im_size[0]])
|
numpy.array
|
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
import random
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt, path_and_nodules, mode):
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess_mode == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess_mode == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
elif opt.preprocess_mode == 'scale_shortside_and_crop':
ss, ls = min(w, h), max(w, h) # shortside and longside
width_is_shorter = w == ss
ls = int(opt.load_size * ls / ss)
new_w, new_h = (ss, ls) if width_is_shorter else (ls, ss)
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0,
|
np.maximum(0, new_h - opt.crop_size)
|
numpy.maximum
|
""" Module to create plots from automated analysis of motion on the centerlines
Reads Excel output per patient
Created November 2017
Copyright 2017-2019, <NAME>
"""
from stentseg.utils import PointSet
import openpyxl
from stentseg.utils.datahandling import select_dir
import sys, os
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import itertools
from matplotlib import gridspec
from lspeas.analysis.utils_analysis import _initaxis
from lspeas.utils.normality_statistics import normality_check, independent_samples_ttest
class ExcelAnalysisNellix():
""" Create graphs from excel data
"""
# exceldir = select_dir(r'F:\Nellix_chevas\CT_SSDF\SSDF')
exceldir = select_dir(r'F:\Nellix_chevas\CT_SSDF\SSDF_automated',
r'D:\Nellix_chevas_BACKUP\CT_SSDF\SSDF_automated')
dirsaveIm = select_dir(r'C:\Users\Maaike\Desktop','D:\Profiles\koenradesma\Desktop')
def __init__(self):
self.exceldir = ExcelAnalysisNellix.exceldir
self.dirsaveIm = ExcelAnalysisNellix.dirsaveIm
self.workbook_analysis = 'ChevasStoreOutput'
self.patients =['chevas_01',
'chevas_02',
'chevas_03',
'chevas_04',
'chevas_05',
'chevas_06',
'chevas_07',
'chevas_08',
'chevas_10',
'chevas_09', # reintervention of pt 1
'chevas_11' # reintervention of pt 7
]
self.distsAll = [] # distances between all stents that were analyzed
self.distsRelAll = [] # relative from avgreg distance
self.posAll = []
self.relPosAll = []
self.fontsize1 = 16 # 14
self.fontsize2 = 16 # 15
self.fontsize3 = 10.3
def get_angle_change(self, patients=None, analysis='ChimNel', chimneys=['LRA', 'RRA', 'SMA'], angletype='pointdeflection'):
""" Read angle change for the chimneys or for the angle between prox chimney and nellix
or for angle between dist chimney and vessel (end-stent angle)
Analysis: 'ChimNel' or 'Chim' or 'ChimVessel'
chimneys: ['LRA', 'RRA', 'SMA'] or ['LRA'] get single chimney
angletype: 'pointdeflection' --> point with greatest angle change
'peakangle' --> max diff between peak angle over phases
"""
if patients == None:
patients = self.patients
self.angleChange = []
self.angleMin = []
self.angleMax = []
self.locationOnChimney = [] # location on chimney as percentage distance from prox / length chimney
self.locationChange = [] # location change of peak angle
self.lengthchimneys = []
# read workbooks
for patient in patients:
workbook_stent = os.path.join(self.exceldir,patient, self.workbook_analysis+patient[7:]+'.xlsx')
# read sheet
wb = openpyxl.load_workbook(workbook_stent, data_only=True)
sheetnames = wb.get_sheet_names()
if analysis == 'Chim': # chimney angle
for a in chimneys:
# set row for type of angle for chim angle analysis
if angletype == 'pointdeflection':
row1 = 3
# see which sheetname, not known if NelL or NelR
for sheetname in sheetnames:
if sheetname.startswith('Ang_'+a):
sheet = wb.get_sheet_by_name(sheetname)
# read change
angchange = readMaxChange(sheet, row=row1, colStart=1)
self.angleChange.append(angchange)
# where was point of max deflection on ccl?
pointlocation, distfromproxendchimney, lengthchimney = readLocationPointDeflection(sheet, row=9, colStart=1)
self.locationOnChimney.append(pointlocation) # percentage of chimney length
self.lengthchimneys.append(lengthchimney)
break # next a
elif angletype == 'peakangle':
row1 = 16
# see which sheetname, not known if NelL or NelR
for sheetname in sheetnames:
if sheetname.startswith('Ang_'+a):
sheet = wb.get_sheet_by_name(sheetname)
# read change
angchange = readMaxChange(sheet, row=row1, colStart=1)
self.angleChange.append(angchange)
angmin, angmax = readMinMax(sheet, row=row1+1, colStart=1, correctorientation=True)
self.angleMin.append(angmin)
self.angleMax.append(angmax)
# how did location of peakangle change during cycle?
locationchange = readLocationChange(sheet, row=19, colStart=1, nphases=10)
self.locationChange.append(locationchange)
# where was point of peak angle at mid cycle?
pointlocation, distfromproxendchimney, lengthchimney = readLocationPointDeflection(sheet, row=24, colStart=1)
self.locationOnChimney.append(pointlocation) # percentage of chimney length
self.lengthchimneys.append(lengthchimney)
break # next a
elif analysis == 'ChimNel': # chimney-to-Nellix angle
for a in chimneys:
# see which sheetname, not known if NelL or NelR
for sheetname in sheetnames:
if sheetname.startswith('Ang_'+a+'_Nel'):
sheet = wb.get_sheet_by_name(sheetname)
# read change
angchange = readMaxChange(sheet, row=4, colStart=1)
self.angleChange.append(angchange)
angmin, angmax = readMinMax(sheet, row=5, colStart=1, correctorientation=True)
self.angleMin.append(angmin)
self.angleMax.append(angmax)
break # next a
            elif analysis == 'ChimVessel': # chimney-to-vessel angle
for a in chimneys:
# see which sheetname, not known if NelL or NelR
for sheetname in sheetnames:
if sheetname.startswith('Ang_'+a+'_Vessel'):
sheet = wb.get_sheet_by_name(sheetname)
# read change
angchange = readMaxChange(sheet, row=4, colStart=1)
self.angleChange.append(angchange)
angmin, angmax = readMinMax(sheet, row=5, colStart=1, correctorientation=True)
self.angleMin.append(angmin)
self.angleMax.append(angmax)
break # next a
# check normality anglechange
W, pValue, normality = normality_check(self.angleChange, alpha=0.05, showhist=False)
print('')
print('AngleChange distribution normal:{} (pValue of {:.3f})'.format(normality, pValue))
print('')
print('Average maximum angle change: {:.1f} ± {:.1f} ({:.1f}-{:.1f})'.format(
np.mean(self.angleChange),
np.std(self.angleChange),
np.min(self.angleChange),
np.max(self.angleChange)
))
# location and min max peak angle
if analysis == 'Chim':
print('')
print('Average location on chimney as percentage: {:.1f} ± {:.1f} ({:.1f}-{:.1f})'.format(
np.mean(self.locationOnChimney),
np.std(self.locationOnChimney),
np.min(self.locationOnChimney),
np.max(self.locationOnChimney)
)) # for peakangle this is at mid cardiac cycle
print('Average length of chimney stents (mm): {:.1f} ± {:.1f} ({:.1f}-{:.1f})'.format(
np.mean(self.lengthchimneys),
np.std(self.lengthchimneys),
np.min(self.lengthchimneys),
np.max(self.lengthchimneys)
))
if angletype == 'peakangle':
print('Average location change of peak angle: {:.1f} ± {:.1f} ({:.1f}-{:.1f})'.format(
np.mean(self.locationChange),
np.std(self.locationChange),
np.min(self.locationChange),
np.max(self.locationChange)
))
print('')
print('Average minimum Peak angle cycle: {:.1f} ± {:.1f} ({:.1f}-{:.1f})'.format(
np.mean(self.angleMin),
np.std(self.angleMin),
np.min(self.angleMin),
np.max(self.angleMin)
))
print('')
print('Average maximum Peak angle cycle: {:.1f} ± {:.1f} ({:.1f}-{:.1f})'.format(
np.mean(self.angleMax),
|
np.std(self.angleMax)
|
numpy.std
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Email: <EMAIL>
# @Date: 2020-08-05 17:42:12
# @Last Modified by: <NAME>
# @Last Modified time: 2020-08-05 20:31:11
''' Create Cm lookup table. '''
import os
import logging
import numpy as np
from PySONIC.utils import logger, isIterable, alert
from PySONIC.core import BilayerSonophore, Batch, Lookup, AcousticDrive
from PySONIC.parsers import MechSimParser
@alert
def computeCmLookup(bls, fref, Aref, mpi=False, loglevel=logging.INFO):
descs = {'f': 'US frequencies', 'A': 'US amplitudes'}
# Populate reference vectors dictionary
refs = {
'f': fref, # Hz
'A': Aref # Pa
}
# Check validity of all reference vectors
for key, values in refs.items():
if not isIterable(values):
raise TypeError(f'Invalid {descs[key]} (must be provided as list or numpy array)')
if not all(isinstance(x, float) for x in values):
raise TypeError(f'Invalid {descs[key]} (must all be float typed)')
if len(values) == 0:
raise ValueError(f'Empty {key} array')
if key == 'f' and min(values) <= 0:
raise ValueError(f'Invalid {descs[key]} (must all be strictly positive)')
if key == 'A' and min(values) < 0:
raise ValueError(f'Invalid {descs[key]} (must all be positive or null)')
# Get references dimensions
dims = np.array([x.size for x in refs.values()])
# Create simulation queue
drives = AcousticDrive.createQueue(fref, Aref)
queue = [[drive, 0.] for drive in drives]
# Run simulations and populate outputs
logger.info(f'Starting Cm simulation batch for {bls}')
batch = Batch(bls.getRelCmCycle, queue)
rel_Cm_cycles = batch(mpi=mpi, loglevel=loglevel)
# Make sure outputs size matches inputs dimensions product
nout, nsamples = len(rel_Cm_cycles), rel_Cm_cycles[0].size
assert nout == dims.prod(), 'Number of outputs does not match number of combinations'
dims = np.hstack([dims, nsamples])
refs['t'] = np.linspace(0., 1., nsamples)
# Reshape effvars into nD arrays and add them to lookups dictionary
logger.info('Reshaping output into lookup table')
rel_Cm_cycles = np.array(rel_Cm_cycles).reshape(dims)
# Construct and return lookup object
return Lookup(refs, {'Cm_rel': rel_Cm_cycles})
def main():
parser = MechSimParser(outputdir='.')
parser.addTest()
parser.defaults['radius'] = 32.0 # nm
parser.defaults['freq'] = np.array([20., 100., 500., 1e3, 2e3, 3e3, 4e3]) # kHz
parser.defaults['amp'] = np.insert(
np.logspace(np.log10(0.1),
|
np.log10(600)
|
numpy.log10
|
# Workaround to disable Intel Fortran Control+C console event handler installed by scipy
from os import environ as os_env
os_env['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = 'T'
import numpy as np
from scipy.signal import convolve2d as conv2
import torch
import torch.nn as nn
from models.modules.architectures.CEM.imresize_CEM import calc_strides, ImRes
import collections
class CEMnet:
def __init__(self, conf, upscale_kernel=None):
self.conf = conf
self.ds_factor = np.array(conf.scale_factor, dtype=np.int32)
assert np.round(self.ds_factor)==self.ds_factor,'Currently only supporting integer scale factors'
assert upscale_kernel is None or isinstance(upscale_kernel, (str, np.ndarray)),'To support given kernels, change the Return_Invalid_Margin_Size_in_LR function and make sure everything else works'
self.imres = ImRes(None, [self.ds_factor, self.ds_factor], kernel=upscale_kernel, alg=self.conf.default_kernel_alg)
self.ds_kernel = self.imres.return_upscale_kernel(self.ds_factor)
self.ds_kernel_invalidity_half_size_LR = self.Return_Invalid_Margin_Size_in_LR('ds_kernel', self.conf.filter_pertubation_limit)
self.compute_inv_hTh()
self.invalidity_margins_LR = 2 * self.ds_kernel_invalidity_half_size_LR + self.inv_hTh_invalidity_half_size
self.invalidity_margins_HR = self.ds_factor * self.invalidity_margins_LR
def Return_Invalid_Margin_Size_in_LR(self, filter, max_allowed_perturbation):
TEST_IM_SIZE = 100
assert filter in ['ds_kernel','inv_hTh']
if filter=='ds_kernel':
output_im = self.imres.resize(np.ones([self.ds_factor*TEST_IM_SIZE, self.ds_factor*TEST_IM_SIZE]), [1/self.ds_factor], use_zero_padding=True)
elif filter=='inv_hTh':
output_im = conv2(np.ones([TEST_IM_SIZE, TEST_IM_SIZE]), self.inv_hTh, mode='same')
output_im /= output_im[int(TEST_IM_SIZE/2), int(TEST_IM_SIZE/2)]
output_im[output_im <= 0] = max_allowed_perturbation/2 # Negative output_im are invalid and would not be identified as such without this line since I'm taking their log.
invalidity_mask = np.exp(-np.abs(np.log(output_im))) < max_allowed_perturbation
        # Finding the invalid shoulder size by searching for the index of the deepest invalid pixel, to accommodate cases of non-continuous invalidity:
margin_sizes = [np.argwhere(invalidity_mask[:int(TEST_IM_SIZE/2), int(TEST_IM_SIZE/2)])[-1][0]+1,
np.argwhere(invalidity_mask[int(TEST_IM_SIZE/2), :int(TEST_IM_SIZE/2)])[-1][0]+1]
margin_sizes = np.max(margin_sizes)*np.ones([2]).astype(margin_sizes[0].dtype)
return np.max(margin_sizes)
def Pad_LR_Batch(self, batch, num_recursion=1):
for i in range(num_recursion):
batch = 1.0*np.pad(batch, pad_width=((0, 0), (self.invalidity_margins_LR, self.invalidity_margins_LR), (self.invalidity_margins_LR, self.invalidity_margins_LR), (0, 0)), mode='edge')
return batch
def Unpad_HR_Batch(self, batch, num_recursion=1):
margins_2_remove = (self.ds_factor**(num_recursion))*self.invalidity_margins_LR*num_recursion
return batch[:, margins_2_remove:-margins_2_remove, margins_2_remove:-margins_2_remove, :]
def DT_Satisfying_Upscale(self, LR_image):
margin_size = 2*self.inv_hTh_invalidity_half_size+self.ds_kernel_invalidity_half_size_LR
LR_image = Pad_Image(LR_image,margin_size)
HR_image = self.imres.resize(np.stack([conv2(LR_image[:,:,channel_num], self.inv_hTh, mode='same') for channel_num in range(LR_image.shape[-1])], -1), scale_factor=[self.ds_factor])
return Unpad_Image(HR_image,self.ds_factor*margin_size)
def WrapArchitecture(self, model=None, training_patch_size=None, only_padders=False):
invalidity_margins_4_test_LR = self.invalidity_margins_LR
invalidity_margins_4_test_HR = self.ds_factor*invalidity_margins_4_test_LR
self.LR_padder = torch.nn.ReplicationPad2d((invalidity_margins_4_test_LR, invalidity_margins_4_test_LR,invalidity_margins_4_test_LR, invalidity_margins_4_test_LR))
self.HR_padder = torch.nn.ReplicationPad2d((invalidity_margins_4_test_HR, invalidity_margins_4_test_HR,invalidity_margins_4_test_HR, invalidity_margins_4_test_HR))
self.HR_unpadder = lambda x: x[:, :, invalidity_margins_4_test_HR:-invalidity_margins_4_test_HR, invalidity_margins_4_test_HR:-invalidity_margins_4_test_HR]
self.LR_unpadder = lambda x: x[:, :, invalidity_margins_4_test_LR:-invalidity_margins_4_test_LR, invalidity_margins_4_test_LR:-invalidity_margins_4_test_LR] # Debugging tool
self.loss_mask = None
if training_patch_size is not None:
self.loss_mask = np.zeros([1, 1, training_patch_size, training_patch_size])
invalidity_margins = self.invalidity_margins_HR
self.loss_mask[:, :, invalidity_margins:-invalidity_margins, invalidity_margins:-invalidity_margins] = 1
assert np.mean(self.loss_mask) > 0, 'Loss mask completely nullifies image.'
print('Using only only %.3f of patch area for learning. The rest is considered to have boundary effects' % (np.mean(self.loss_mask)))
# TODO: while training will normally be on CUDA, pass device here instead of hardcoded cuda tensor
self.loss_mask = torch.from_numpy(self.loss_mask).type(torch.cuda.FloatTensor)
if only_padders:
return
else:
returnable = CEM(self, model)
self.OP_names = [m[0] for m in returnable.named_modules() if 'Filter_OP' in m[0]]
return returnable
def Mask_Invalid_Regions(self, im1, im2):
        assert self.loss_mask is not None, "Mask not defined, probably didn't pass patch size"
return self.loss_mask*im1, self.loss_mask*im2
def Enforce_DT_on_Image_Pair(self, LR_source, HR_input):
same_scale_dimensions = [LR_source.shape[i] == HR_input.shape[i] for i in range(LR_source.ndim)]
LR_scale_dimensions = [self.ds_factor * LR_source.shape[i] == HR_input.shape[i] for i in range(LR_source.ndim)]
assert np.all(np.logical_or(same_scale_dimensions, LR_scale_dimensions))
LR_source = self.DT_Satisfying_Upscale(LR_source) if np.any(LR_scale_dimensions) else self.Project_2_ortho_2_NS(LR_source)
HR_projected_2_h_subspace = self.Project_2_ortho_2_NS(HR_input)
return HR_input - HR_projected_2_h_subspace + LR_source
def Project_2_ortho_2_NS(self, HR_input):
        downscaled_input = self.imres.resize(HR_input, scale_factor=[1/self.ds_factor])
if downscaled_input.ndim < HR_input.ndim: # In case input was of size self.ds_factor in at least one of its axes:
downscaled_input = np.reshape(downscaled_input, list(HR_input.shape[:2]//self.ds_factor) + ([HR_input.shape[2]] if HR_input.ndim > 2 else []))
return self.DT_Satisfying_Upscale(downscaled_input)
# def Supplement_Pseudo_CEM(self, input_t):
# return self.Learnable_Upscale_OP(self.Conv_LR_with_Learnable_OP(self.Learnable_DownscaleOP(input_t)))
def compute_inv_hTh(self, NFFT_add=36):
hTh = conv2(self.ds_kernel, np.rot90(self.ds_kernel, 2)) * self.ds_factor**2
hTh = Aliased_Down_Sampling(hTh, self.ds_factor)
pad_pre = pad_post = np.array(NFFT_add/2, dtype=np.int32)
hTh_fft = np.fft.fft2(np.pad(hTh, ((pad_pre, pad_post), (pad_pre, pad_post)), mode='constant', constant_values=0))
# When ds_kernel is wide, some frequencies get completely wiped out, which causes instability when hTh is inverted.
# Therefore this filter's magnitude is bounded from below in the Fourier domain:
magnitude_increasing_map = np.maximum(1, self.conf.lower_magnitude_bound/np.abs(hTh_fft))
hTh_fft = hTh_fft * magnitude_increasing_map
# Now inverting the filter (calculating (HH^T)^-1):
# Note: the ringing happens in this step when using estimated kernels (KernelGAN).
        # the estimated kernels can zero out some low frequency content. Inverting
# these zero-valued parts of the kernel's FFT when calculating (HH^T)^-1 results
# in high values in self.inv_hTh (max > 2, ~30 ; min < -1, ~-20), which in turn cause
# these ringing artifacts.
# Cropping the estimated kernels to smaller sizes and increasing lower_magnitude_bound
# reduce the magnitude of these high values
self.inv_hTh = np.real(np.fft.ifft2(1/hTh_fft))
# Making sure the filter's maximal value sits in its middle:
max_row = np.argmax(self.inv_hTh)//self.inv_hTh.shape[0]
max_col = np.mod(np.argmax(self.inv_hTh), self.inv_hTh.shape[0])
if not np.all(np.equal(np.ceil(np.array(self.inv_hTh.shape)/2), np.array([max_row,max_col])-1)):
half_filter_size = np.min([self.inv_hTh.shape[0]-max_row-1, self.inv_hTh.shape[0]-max_col-1, max_row, max_col])
self.inv_hTh = self.inv_hTh[max_row-half_filter_size:max_row+half_filter_size+1, max_col-half_filter_size:max_col+half_filter_size+1]
self.inv_hTh_invalidity_half_size = self.Return_Invalid_Margin_Size_in_LR('inv_hTh', self.conf.filter_pertubation_limit)
margins_2_drop = self.inv_hTh.shape[0]//2-self.Return_Invalid_Margin_Size_in_LR('inv_hTh', self.conf.desired_inv_hTh_energy_portion)
if margins_2_drop > 0:
self.inv_hTh = self.inv_hTh[margins_2_drop:-margins_2_drop, margins_2_drop:-margins_2_drop]
class Filter_Layer(nn.Module):
def __init__(self, filter, pre_filter_func, post_filter_func=None):
super(Filter_Layer, self).__init__()
self.Filter_OP = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=filter.shape, bias=False, groups=3)
# TODO: while training will normally be on CUDA, pass device here instead of hardcoded cuda tensor
self.Filter_OP.weight = nn.Parameter(data=torch.from_numpy(np.tile(np.expand_dims(np.expand_dims(filter, 0), 0), reps=[3, 1, 1, 1])).type(torch.cuda.FloatTensor), requires_grad=False)
self.Filter_OP.filter_layer = True
self.pre_filter_func = pre_filter_func
self.post_filter_func = (lambda x:x) if post_filter_func is None else post_filter_func
def forward(self, x):
return self.post_filter_func(self.Filter_OP(self.pre_filter_func(x)))
class CEM(nn.Module):
def __init__(self, CEMnet, model):
super(CEM, self).__init__()
self.ds_factor = CEMnet.ds_factor
self.conf = CEMnet.conf
self.generated_image_model = model
inv_hTh_padding = np.floor(np.array(CEMnet.inv_hTh.shape)/2).astype(np.int32)
Replication_Padder = nn.ReplicationPad2d((inv_hTh_padding[1], inv_hTh_padding[1], inv_hTh_padding[0], inv_hTh_padding[0]))
self.Conv_LR_with_Inv_hTh_OP = Filter_Layer(CEMnet.inv_hTh, pre_filter_func=Replication_Padder)
downscale_antialiasing =
|
np.rot90(CEMnet.ds_kernel, 2)
|
numpy.rot90
|
import numpy as np
import matplotlib.pyplot as plt
from random import randrange
import copy
import csv
from sklearn.linear_model import Ridge, Lasso
from sklearn.model_selection import GridSearchCV
import warnings
import pickle
# Column Transformations
mapSex = lambda x : 0 if x == "I" else (1 if x == "M" else -1)
scaleElement = lambda x, min, max : (x-min)/(max-min)
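# Worked examples: mapSex("M") -> 1, mapSex("F") -> -1, mapSex("I") -> 0;
# scaleElement(5, 0, 10) -> 0.5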
# Function to Apply column transformations
def applyTransformation(arr, column, transformation):
arr[:,column] = [transformation(x) for x in arr[:,column]]
# Read data from file
def readData(filename):
file = open(filename, "r")
rawData = []
for line in file:
rawData.append(line.split())
return np.array(rawData)
# Normalize data
def minMaxScale(X, m, n):
for j in range(n-1):
max = -float('inf')
min = float('inf')
for i in range(m):
curE = X[i][j]
if(curE > max):
max = curE
if(curE < min):
min = curE
if(min == max):
X[:,j] = [0] * m
else:
X[:,j] = [scaleElement(e, min, max) for e in X[:,j]]
return X
# Generate KFold splits
def generateKFolds(dataset, folds):
datasetCopy = list(dataset)
datasetSplit = []
foldSize = len(dataset) // folds
for i in range(folds):
fold = []
while foldSize > len(fold):
j = randrange(len(datasetCopy))
fold.append(datasetCopy.pop(j))
datasetSplit.append(fold)
return np.array(datasetSplit)
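# Illustrative call: generateKFolds(np.arange(10).reshape(10, 1), 5) returns an array
# of shape (5, 2, 1): five folds of two rows each, drawn without replacement.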
# Hypothesis
def hypothesis(theta, X):
return np.matmul(X, theta)
def getRmseCost(predict, actual):
m = predict.shape[0]
return (sum(np.square(np.subtract(predict, actual))) / m) ** 0.5
def signBits(X):
signArr = np.zeros((X.shape[0], 1))
for i in range(X.shape[0]):
e = X[i]
val = 0
if(e > 0):
val = 1
elif(e < 0):
val = -1
signArr[i] = val
return signArr
def gradientDescent(X, y, X_test, y_test, n, alpha, nIterations, l1=None, l2=None):
cost = np.zeros(nIterations)
cost_test = np.zeros(nIterations)
m = X.shape[0]
m_test = X_test.shape[0]
theta = np.zeros((n, 1))
h = hypothesis(theta, X)
h_test = hypothesis(theta, X_test)
for i in range(nIterations):
oldTheta = copy.deepcopy(theta)
oldTheta[0] = 0
if(l1):
theta = theta - (alpha/m) * np.transpose(np.matmul(np.transpose(h - y), X)) - (l1 * alpha / m) / 2 * signBits(oldTheta)
elif(l2):
theta = theta - (alpha/m) * np.transpose(np.matmul(np.transpose(h - y), X)) - (l2 * alpha / m) * oldTheta
else:
theta = theta - (alpha/m) * np.transpose(np.matmul(np.transpose(h - y), X))
h = hypothesis(theta, X)
h_test = hypothesis(theta, X_test)
cost[i] = getRmseCost(h, y)
cost_test[i] = getRmseCost(h_test, y_test)
return theta, cost, cost_test
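# Per-iteration update rules implemented above (the bias term is excluded from
# regularisation by zeroing oldTheta[0]):
#   plain: theta <- theta - (alpha/m) * X^T (h - y)
#   L2   : theta <- theta - (alpha/m) * X^T (h - y) - (l2*alpha/m)    * theta
#   L1   : theta <- theta - (alpha/m) * X^T (h - y) - (l1*alpha/(2m)) * sign(theta)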
# Train and Test cost applying linear regression
def linearRegression(X_train, y_train, X_test, y_test, n, alpha, nIterations):
return gradientDescent(X_train, y_train, X_test, y_test, n, alpha, nIterations)
def normalEquation(X, y):
X_t = np.transpose(X)
A = np.matmul(X_t, X)
B = np.matmul(X_t, y)
return np.matmul(np.linalg.inv(A), B)
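# Numerically steadier variant (a sketch, not part of the assignment code): solve the
# normal equations directly instead of forming the explicit inverse.
def normalEquationSolve(X, y):
    X_t = np.transpose(X)
    return np.linalg.solve(np.matmul(X_t, X), np.matmul(X_t, y))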
def linearRegressionLoader(useSavedModels):
print("-" * 10 + "Part A" + "-" * 10)
print("Collecting data...")
rawData = readData("./q1dataset/abalone.data")
m, n = rawData.shape
# Clean Data
applyTransformation(rawData, 0, mapSex)
rawData = rawData.astype('float64')
normalData = minMaxScale(rawData, m, n-1)
# Create KFolds
nIterations = 500
alpha = 0.1
kFold = 5
print("Generating splits...")
splitData = generateKFolds(normalData, kFold)
listSplits = list(splitData)
# Apply Linear Regression on KFolds
rmseLinear_train = []
rmseLinear_test = []
rmseNormal_train = []
rmseNormal_test = []
normalThetas = []
if(not useSavedModels):
print("Running Model...")
for i in range(kFold):
testData = np.array(listSplits[i])
trainSplits = listSplits[:i] + listSplits[i+1:]
trainData = []
for split in trainSplits:
for element in split:
trainData.append(element)
trainData = np.array(trainData)
# Split output & input
X_train, y_train = trainData[:,:n-1], trainData[:,n-1]
X_test, y_test = testData[:,:n-1], testData[:,n-1]
# Append 1s in start
oneCol = np.ones((X_train.shape[0], 1))
X_train = np.concatenate((oneCol, X_train), axis=1)
oneCol = np.ones((X_test.shape[0], 1))
X_test = np.concatenate((oneCol, X_test), axis=1)
y_train = y_train.reshape((X_train.shape[0], 1))
y_test = y_test.reshape((X_test.shape[0], 1))
theta, rmse_train, rmse_test = linearRegression(X_train, y_train, X_test, y_test, n, alpha, nIterations)
rmseLinear_train.append(rmse_train)
rmseLinear_test.append(rmse_test)
thetaNormal = normalEquation(X_train, y_train)
normalThetas.append(thetaNormal)
hNormal_train = hypothesis(thetaNormal, X_train)
hNormal_test = hypothesis(thetaNormal, X_test)
rmseNormal_train.append(getRmseCost(hNormal_train, y_train))
rmseNormal_test.append(getRmseCost(hNormal_test, y_test))
pickle.dump(rmseLinear_train, open('./q1amodels/rmseLinear_train.sav', 'wb'))
pickle.dump(rmseLinear_test, open('./q1amodels/rmseLinear_test.sav', 'wb'))
pickle.dump(rmseNormal_train, open('./q1amodels/rmseNormal_train.sav', 'wb'))
pickle.dump(rmseNormal_test, open('./q1amodels/rmseNormal_test.sav', 'wb'))
pickle.dump(normalThetas, open('./q1amodels/normalThetas.sav', 'wb'))
else:
print("Loading Saved Models...")
rmseLinear_train = pickle.load(open('./q1amodels/rmseLinear_train.sav', 'rb'))
rmseLinear_test = pickle.load(open('./q1amodels/rmseLinear_test.sav', 'rb'))
rmseNormal_train = pickle.load(open('./q1amodels/rmseNormal_train.sav', 'rb'))
rmseNormal_test = pickle.load(open('./q1amodels/rmseNormal_test.sav', 'rb'))
normalThetas = pickle.load(open('./q1amodels/normalThetas.sav', 'rb'))
meanRmse_train = np.mean(rmseLinear_train, axis = 0)
meanRmse_test = np.mean(rmseLinear_test, axis = 0)
print("Plotting curves...")
xIteraitons = [x for x in range(1, int(nIterations + 1))]
plt.plot(xIteraitons, list(meanRmse_train), 'b')
plt.plot(xIteraitons, list(meanRmse_test), 'r')
plt.xlabel('No. of iterations')
plt.ylabel('Mean RMSE Values')
plt.gca().legend(("Training Set", "Testing Set"))
plt.title("Gradient Descent Error vs Iterations")
fig = plt.gcf()
fig.canvas.set_window_title('Gradient Descent Error vs Iterations')
plt.show()
print("Press Enter", end="")
input()
print("-" * 10 + "Part B" + "-" * 10)
print("Normal Equation RMSE")
for i in range(len(normalThetas)):
print("Train: " + str(rmseNormal_train[i]) + " Test: " + str(rmseNormal_test[i]))
print("Press Enter", end="")
input()
print("-" * 10 + "Part C" + "-" * 10)
print("Comparision of both RMSE Values")
print(" " * 10,"Gradient Descent".center(25), "Normal Equation".center(20))
print("Train".center(10), str(round(meanRmse_train[-1], 7)).center(25), str(round(np.mean(rmseNormal_train), 7)).center(20))
print("Test".center(10), str(round(meanRmse_test[-1], 7)).center(25), str(round(np.mean(rmseNormal_test), 7)).center(20))
def regularisationLoader(useSavedModels):
print("-" * 10 + "" + "-" * 10)
print("Collecting data...")
rawData = readData("./q1dataset/abalone.data")
m, n = rawData.shape
# Clean Data
applyTransformation(rawData, 0, mapSex)
rawData = rawData.astype('float64')
normalData = minMaxScale(rawData, m, n-1)
# Create KFolds
nIterations = 500
alpha = 0.1
kFold = 5
print("Generating splits...")
splitData = generateKFolds(normalData, kFold)
listSplits = list(splitData)
# Apply Linear Regression on KFolds
rmseLinear_train = []
rmseLinear_test = []
rmseNormal_train = []
rmseNormal_test = []
normalThetas = []
if(not useSavedModels):
print("Running Model...")
for i in range(kFold):
testData = np.array(listSplits[i])
trainSplits = listSplits[:i] + listSplits[i+1:]
trainData = []
for split in trainSplits:
for element in split:
trainData.append(element)
trainData = np.array(trainData)
# Split output & input
X_train, y_train = trainData[:,:n-1], trainData[:,n-1]
X_test, y_test = testData[:,:n-1], testData[:,n-1]
# Append 1s in start
oneCol = np.ones((X_train.shape[0], 1))
X_train = np.concatenate((oneCol, X_train), axis=1)
oneCol = np.ones((X_test.shape[0], 1))
X_test = np.concatenate((oneCol, X_test), axis=1)
y_train = y_train.reshape((X_train.shape[0], 1))
y_test = y_test.reshape((X_test.shape[0], 1))
theta, rmse_train, rmse_test = linearRegression(X_train, y_train, X_test, y_test, n, alpha, nIterations)
rmseLinear_train.append(rmse_train)
rmseLinear_test.append(rmse_test)
thetaNormal = normalEquation(X_train, y_train)
normalThetas.append(thetaNormal)
hNormal_train = hypothesis(thetaNormal, X_train)
hNormal_test = hypothesis(thetaNormal, X_test)
rmseNormal_train.append(getRmseCost(hNormal_train, y_train))
rmseNormal_test.append(getRmseCost(hNormal_test, y_test))
pickle.dump(rmseLinear_train, open('./q1bmodels/rmseLinear_train.sav', 'wb'))
pickle.dump(rmseLinear_test, open('./q1bmodels/rmseLinear_test.sav', 'wb'))
pickle.dump(rmseNormal_train, open('./q1bmodels/rmseNormal_train.sav', 'wb'))
pickle.dump(rmseNormal_test, open('./q1bmodels/rmseNormal_test.sav', 'wb'))
pickle.dump(normalThetas, open('./q1bmodels/normalThetas.sav', 'wb'))
else:
print("Loading Saved Models...")
rmseLinear_train = pickle.load(open('./q1bmodels/rmseLinear_train.sav', 'rb'))
rmseLinear_test = pickle.load(open('./q1bmodels/rmseLinear_test.sav', 'rb'))
rmseNormal_train = pickle.load(open('./q1bmodels/rmseNormal_train.sav', 'rb'))
rmseNormal_test = pickle.load(open('./q1bmodels/rmseNormal_test.sav', 'rb'))
normalThetas = pickle.load(open('./q1bmodels/normalThetas.sav', 'rb'))
print("Getting RMSE...")
xIteraitons = [x for x in range(1, int(nIterations + 1))]
rmseLinear_train = np.array(rmseLinear_train)
minSplitIndex = np.argmin(rmseLinear_train[:,-1])
print("Splitting 80%...")
splittedDataset = listSplits[:minSplitIndex] + listSplits[minSplitIndex+1:]
sparceDataset = []
for split in splittedDataset:
for element in split:
sparceDataset.append(element)
sparceDataset = np.array(sparceDataset)
X, y = sparceDataset[:,:n-1], sparceDataset[:,n-1]
oneCol = np.ones((X.shape[0], 1))
X = np.concatenate((oneCol, X), axis=1)
y = y.reshape((X.shape[0], 1))
L2 = None
rmseL2 = None
rmseL2_test = None
if(not useSavedModels):
print("Running L2, Ridge...")
print("-" * 10 + "Part A" + "-" * 10)
params = {'alpha': np.linspace(0.1, 1.0, num=200)}
rdg_reg = Ridge()
clf = GridSearchCV(rdg_reg, params, cv=5, scoring = 'neg_mean_squared_error')
clf.fit(X, y)
L2 = round(clf.best_params_['alpha'], 5)
print("L2, Ridge Param: " + str(L2))
else:
L2 = pickle.load(open('./q1bmodels/L2.sav', 'rb'))
rmseL2 = pickle.load(open('./q1bmodels/rmseL2.sav', 'rb'))
rmseL2_test = pickle.load(open('./q1bmodels/rmseL2_test.sav', 'rb'))
print("-" * 10 + "Part A" + "-" * 10)
print("L2, Ridge Param: " + str(L2))
L1 = None
rmseL1 = None
rmseL1_test = None
if(not useSavedModels):
print("Running L1, Lasso...")
print("-" * 10 + "Part B" + "-" * 10)
params = {'alpha': np.linspace(0.0001, 0.005, num=100)}
rdg_reg = Lasso()
clf = GridSearchCV(rdg_reg, params, cv=5, scoring = 'neg_mean_squared_error')
clf.fit(X, y)
L1 = round(clf.best_params_['alpha'], 5)
print("L1, Lasso Param: " + str(L1))
else:
L1 = pickle.load(open('./q1bmodels/L1.sav', 'rb'))
rmseL1 = pickle.load(open('./q1bmodels/rmseL1.sav', 'rb'))
rmseL1_test = pickle.load(open('./q1bmodels/rmseL1_test.sav', 'rb'))
print("-" * 10 + "Part B" + "-" * 10)
print("L1, Lasso Param: " + str(L2))
print("-" * 10 + "" + "-" * 10)
testDataset = listSplits[minSplitIndex]
X_test, y_test = testDataset[:,:n-1], testDataset[:,n-1]
oneCol = np.ones((X_test.shape[0], 1))
X_test = np.concatenate((oneCol, X_test), axis=1)
y_test = y_test.reshape((X_test.shape[0], 1))
if(not useSavedModels):
print("Getting L1 Theta...")
theta, rmseL1, rmseL1_test = gradientDescent(X, y, X_test, y_test, n, alpha, nIterations, l1=L1)
pickle.dump(L1, open('./q1bmodels/L1.sav', 'wb'))
pickle.dump(rmseL1, open('./q1bmodels/rmseL1.sav', 'wb'))
pickle.dump(rmseL1_test, open('./q1bmodels/rmseL1_test.sav', 'wb'))
print("Test RMSE with L1: " + str(round(rmseL1_test[-1], 7)))
print("Plotting L1 Curve...")
plt.plot(xIteraitons, list(rmseL1), 'b')
plt.plot(xIteraitons, list(rmseL1_test), 'r')
plt.xlabel('No. of iterations')
plt.ylabel('L1 Regularisation RMSE Values')
plt.gca().legend(("Training Set", "Testing Set"))
plt.title("L1 Regularisation Error vs Iterations")
fig = plt.gcf()
fig.canvas.set_window_title('L1 Regularisation Error vs Iterations')
plt.show()
print("-" * 10 + "" + "-" * 10)
if(not useSavedModels):
print("Getting L2 Theta...")
theta, rmseL2, rmseL2_test = gradientDescent(X, y, X_test, y_test, n, alpha, nIterations, l2=L2)
pickle.dump(L2, open('./q1bmodels/L2.sav', 'wb'))
pickle.dump(rmseL2, open('./q1bmodels/rmseL2.sav', 'wb'))
pickle.dump(rmseL2_test, open('./q1bmodels/rmseL2_test.sav', 'wb'))
print("Test RMSE with L2: " + str(round(rmseL2_test[-1], 7)))
print("Plotting L2 Curve...")
plt.plot(xIteraitons, list(rmseL2), 'b')
plt.plot(xIteraitons, list(rmseL2_test), 'r')
plt.xlabel('No. of iterations')
plt.ylabel('L2 Regularisation RMSE Values')
plt.gca().legend(("Training Set", "Testing Set"))
plt.title("L2 Regularisation Error vs Iterations")
fig = plt.gcf()
fig.canvas.set_window_title('L2 Regularisation Error vs Iterations')
plt.show()
def readCSV(filename):
raw = []
with open(filename, 'rt') as f:
data = csv.reader(f)
for row in data:
raw.append(row)
return raw
def linearRegressionLine(X, Y, n, alpha, nIterations):
theta = np.zeros((n, 1))
h = hypothesis(theta, X)
return gradientDescentLine(theta, h, X, Y, n, alpha, nIterations)
def gradientDescentRegularizedLine(X, y, n, alpha, nIterations, l1=None, l2=None):
m = X.shape[0]
theta = np.zeros((n, 1))  # api: numpy.zeros
import pandas as pd
import numpy as np
from .preference import ProbitPreferenceGP
from .validations import check_x_m
class ProbitBayesianOptimization(ProbitPreferenceGP):
"""
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data consisting of numeric real positive values.
M : array-like, shape = (n_samples, n_preferences)
Target choices. A preference is an array of positive
integers of shape = (2,). preference[0], r, is an index
of X preferred over preference[1], c, which is an
index of X.
"""
def __init__(self, X, M, GP_params={}):
super().__init__(**GP_params)
self.X = X
self.M = M
def interactive_optimization(self, bounds, method="L-BFGS-B",
n_init=1, n_solve=1, f_prior=None,
max_iter=1e4, print_suggestion=True):
"""Bayesian optimization via preferences inputs.
Parameters
----------
bounds: dictionary
Bounds of the search space for the acquisition function.
method: str or callable, optional
Type of solver.
n_init: integer, optional
Number of initialization points for the solver. Obtained
by randomly sampling the acquisition function.
n_solve: integer, optional
The solver will be run n_solve times.
Cannot be superior to n_init.
f_prior: array-like, shape = (n_samples, 1), optional (default: None)
Flat prior with mean zero is applied by default.
max_iter: integer, optional (default: 1e4)
Maximum number of iterations to be performed
for the bayesian optimization.
print_suggestion: Boolean, optional (default: True)
If set to false, max_iter must be equal to 1.
Returns
-------
optimal_values : array-like, shape = (n_features, )
suggestion : array-like, shape = (n_features, )
X : array-like, shape = (n_samples, n_features)
Feature values in training data.
M : array-like, shape = (n_samples - 1, 2)
Target choices. A preference is an array of positive
integers of shape = (2,). preference[0], r, is an index
of X preferred over preference[1], c, which is an
index of X.
f_posterior : array-like, shape = (n_samples, 1)
Posterior distribution of the Gaussian Process.
Examples
--------
>>> from GPro.kernels import Matern
>>> from GPro.posterior import Laplace
>>> from GPro.acquisitions import UCB
>>> from GPro.optimization import ProbitBayesianOptimization
>>> import numpy as np
>>> GP_params = {'kernel': Matern(length_scale=1, nu=2.5),
... 'post_approx': Laplace(s_eval=1e-5, max_iter=1000,
... eta=0.01, tol=1e-3),
... 'acquisition': UCB(kappa=2.576),
... 'random_state': None}
>>> X = np.random.sample(size=(2, 3)) * 10
>>> M = np.array([0, 1]).reshape(-1, 2)
>>> gpr_opt = ProbitBayesianOptimization(X, M, GP_params)
>>> bounds = {'x0': (0, 10)}
>>> console_opt = gpr_opt.interactive_optimization(bounds=bounds, n_solve=1,
... n_init=100)
>>> optimal_values, suggestion, X_post, M_post, f_post = console_opt
>>> print('optimal values: ', optimal_values)
>>> # Use posterior as prior
>>> gpr_opt = ProbitBayesianOptimization(X_post, M_post, GP_params)
>>> console_opt = gpr_opt.interactive_optimization(bounds=bounds, n_solve=1,
... n_init=100,
... f_prior=f_post)
>>> optimal_values, suggestion, X_post, M_post, f_post = console_opt
>>> print('optimal values: ', optimal_values)
"""
if not max_iter:
raise ValueError('max_iter must be superior to 0.')
if not print_suggestion and max_iter > 1:
raise ValueError('When print_suggestion is set to False, '
'max_iter must be set to 1.')
X, M = check_x_m(self.X, self.M)
features = list(bounds.keys())
M_ind_cpt = M.shape[0] - 1
pd.set_option('display.max_columns', None)
iteration = 0
while iteration < max_iter:
self.fit(X, M, f_prior)
x_optim = self.bayesopt(bounds, method, n_init, n_solve)
f_optim = self.predict(x_optim)
f_prior = np.concatenate((self.posterior, f_optim))
X = np.concatenate((X, x_optim))
# current preference index in X.
M_ind_current = M[M.shape[0] - 1][0]
# suggestion index in X.
M_ind_proposal = M_ind_cpt + 2
# current preference vs suggestion.
df = pd.DataFrame(data=np.concatenate((X[[M_ind_current]],
X[[M_ind_proposal]])),
columns=features,
index=['preference', 'suggestion'])
if print_suggestion:
print(df)
input_msg = "Iteration %d, preference (p) or suggestion (s)? " \
"(Q to quit): " % M_ind_cpt
preference_input = input(input_msg)
if preference_input == 'Q':
break
# left index is preferred over right index as a convention.
elif preference_input == 'p':
new_pair = np.array([M_ind_current, M_ind_proposal])
elif preference_input == 's':
new_pair = np.array([M_ind_proposal, M_ind_current])
else:
break
M = np.vstack((M, new_pair))
M_ind_cpt += 1
iteration += 1
else:
break
pd.set_option('display.max_columns', 0)
optimal_values = df.loc['preference'].values
suggestion = df.loc['suggestion'].values
f_posterior = f_prior
return optimal_values, suggestion, X, M, f_posterior
def function_optimization(self, f, bounds, max_iter=1,
method="L-BFGS-B", n_init=100, n_solve=1,
f_prior=None):
"""Bayesian optimization via function evaluation.
Parameters
----------
f: function object
A function to be optimized.
bounds: dictionary
Bounds of the search space for the acquisition function.
max_iter: integer, optional
Maximum number of iterations to be performed
for the bayesian optimization.
method: str or callable, optional
Type of solver.
n_init: integer, optional
Number of initialization points for the solver. Obtained
by randomly sampling the acquisition function.
n_solve: integer, optional
The solver will be run n_solve times.
Cannot be superior to n_init.
f_prior : array-like, shape = (n_samples, 1), optional (default: None)
Flat prior with mean zero is applied by default.
Returns
-------
optimal_values : array-like, shape = (n_features, )
X : array-like, shape = (n_samples, n_features)
Feature values in training data.
M : array-like, shape = (n_samples - 1, 2)
Target choices. A preference is an array of positive
integers of shape = (2,). preference[0], r, is an index
of X preferred over preference[1], c, which is an
index of X.
f_posterior : array-like, shape = (n_samples, 1)
Posterior distribution of the Gaussian Process.
Examples
--------
>>> from GPro.kernels import Matern
>>> from GPro.posterior import Laplace
>>> from GPro.acquisitions import UCB
>>> from GPro.optimization import ProbitBayesianOptimization
>>> from scipy.stats import multivariate_normal
>>> import numpy as np
>>> from sklearn import datasets
>>> import matplotlib.cm as cm
>>> import matplotlib.pyplot as plt
>>> # function optimization example.
>>> def random_sample(n, d, bounds, random_state=None):
>>> # Uniform sampling given bounds.
>>> if random_state is None:
>>> random_state = np.random.randint(1e6)
>>> random_state = np.random.RandomState(random_state)
>>> sample = random_state.uniform(bounds[:, 0], bounds[:, 1],
... size=(n, d))
>>> return sample
>>> def sample_normal_params(n, d, bounds, scale_sigma=1, random_state=None):
>>> # Sample parameters of a multivariate normal distribution
>>> # sample centroids.
>>> mu = random_sample(n=n, d=d, bounds=np.array(list(bounds.values())),
... random_state=random_state)
>>> # sample covariance matrices.
>>> sigma = datasets.make_spd_matrix(d, random_state) * scale_sigma
>>> theta = {'mu': mu, 'sigma': sigma}
>>> return theta
>>> d = 2
>>> bounds = {'x' + str(i): (0, 10) for i in range(0, d)}
>>> theta = sample_normal_params(n=1, d=d, bounds=bounds, scale_sigma=10, random_state=12)
>>> f = lambda x: multivariate_normal.pdf(x, mean=theta['mu'][0], cov=theta['sigma'])
>>> # X, M, init
>>> X = random_sample(n=2, d=d, bounds=np.array(list(bounds.values())))
>>> X = np.asarray(X, dtype='float64')
>>> M = sorted(range(len(f(X))), key=lambda k: f(X)[k], reverse=True)
>>> M = np.asarray([M], dtype='int8')
>>> GP_params = {'kernel': Matern(length_scale=1, nu=2.5),
... 'post_approx': Laplace(s_eval=1e-5, max_iter=1000,
... eta=0.01, tol=1e-3),
... 'acquisition': UCB(kappa=2.576),
... 'alpha': 1e-5,
... 'random_state': 2020}
>>> gpr_opt = ProbitBayesianOptimization(X, M, GP_params)
>>> function_opt = gpr_opt.function_optimization(f=f, bounds=bounds, max_iter=d*10,
... n_init=1000, n_solve=1)
>>> optimal_values, X_post, M_post, f_post = function_opt
>>> print('optimal values: ', optimal_values)
>>> # rmse
>>> print('rmse: ', .5 * sum(np.sqrt((optimal_values - theta['mu'][0]) ** 2)))
>>> # 2d plot
>>> if d == 2:
>>> resolution = 10
>>> x_min, x_max = bounds['x0'][0], bounds['x0'][1]
>>> y_min, y_max = bounds['x1'][0], bounds['x1'][1]
>>> x = np.linspace(x_min, x_max, resolution)
>>> y = np.linspace(y_min, y_max, resolution)
>>> X, Y = np.meshgrid(x, y)
>>> grid = np.empty((resolution ** 2, 2))
>>> grid[:, 0] = X.flat
>>> grid[:, 1] = Y.flat
>>> Z = f(grid)
>>> plt.imshow(Z.reshape(-1, resolution), interpolation="bicubic",
... origin="lower", cmap=cm.rainbow, extent=[x_min, x_max, y_min, y_max])
>>> plt.scatter(optimal_values[0], optimal_values[1], color='black', s=10)
>>> plt.title('Target function')
>>> plt.colorbar()
>>> plt.show()
"""
X, M = check_x_m(self.X, self.M)
new_pair = M[M.shape[0] - 1]
for M_ind_cpt in range((M.shape[0] - 1), max_iter + (M.shape[0] - 1)):
self.fit(X, M, f_prior)
x_optim = self.bayesopt(bounds, method, n_init, n_solve)
f_optim = self.predict(x_optim)
f_prior = np.concatenate((self.posterior, f_optim))  # api: numpy.concatenate
"""
Module defining halo model components for halo exclusion.
"""
import numpy as np
from hmf import Component
from cached_property import cached_property
from scipy import integrate as intg
import warnings
from hmf._internals import pluggable
try:
from numba import jit
USE_NUMBA = True
except ImportError: # pragma: no cover
USE_NUMBA = False
warnings.warn(
"Warning: Some Halo-Exclusion models have significant speedup when using Numba"
)
# ===============================================================================
# UTILITIES
# ===============================================================================
def outer(a, b):
r"""
Calculate the outer product of two vectors.
"""
return np.outer(a, b).reshape(a.shape + b.shape)
def dbltrapz(X, dx, dy=None):
"""
Double-integral over the last two dimensions of X using trapezoidal rule.
"""
dy = dy or dx
out = X.copy()
out[..., 1:-1, :] *= 2
out[..., :, 1:-1] *= 2
return dx * dy * np.sum(out, axis=(-2, -1)) / 4.0
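# Illustrative sanity check (not part of hmf): on a uniform grid, the corner/edge
# weighting used in dbltrapz matches applying np.trapz along each of the two axes.
def _dbltrapz_example():
    xs = np.linspace(0.0, 1.0, 11)
    F = np.outer(xs, xs)
    assert np.isclose(dbltrapz(F, xs[1] - xs[0]), np.trapz(np.trapz(F, xs), xs))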
def makeW(nx, ny):
r"""
Return a window matrix of composite-Simpson weights for a double integral.
"""
W = np.ones((nx, ny))
W[1 : nx - 1 : 2, :] *= 4
W[:, 1 : ny - 1 : 2] *= 4
W[2 : nx - 1 : 2, :] *= 2
W[:, 2 : ny - 1 : 2] *= 2
return W
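# Worked example: for a 5x5 grid the row/column weights are the 1-D Simpson weights
# [1, 4, 2, 4, 1], so makeW(5, 5) equals np.outer([1, 4, 2, 4, 1], [1, 4, 2, 4, 1]).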
if USE_NUMBA:
@jit(nopython=True)
def dblsimps_(X, dx, dy): # pragma: no cover
"""
Double-integral of X **FOR SYMMETRIC FUNCTIONS**.
"""
nx = X.shape[-2]
ny = X.shape[-1]
W = makeW_(nx, ny) # only upper
tot = np.zeros_like(X[..., 0, 0])
for ix in range(nx):
tot += W[ix, ix] * X[..., ix, ix]
for iy in range(ix + 1, ny):
tot += 2 * W[ix, iy] * X[..., ix, iy]
return dx * dy * tot / 9.0
@jit(nopython=True)
def makeW_(nx, ny): # pragma: no cover
r"""
Return a window matrix of composite-Simpson weights for a symmetric double integral.
"""
W = np.ones((nx, ny))
if nx % 2 == 0:
for ix in range(1, nx - 2, 2):
W[ix, -1] *= 4
W[-1, ix] *= 4
for iy in range(ny - 1):
W[ix, iy] *= 4
W[iy, ix] *= 4
for ix in range(2, nx - 2, 2):
W[ix, -1] *= 2
W[-1, ix] *= 2
for iy in range(ny - 1):
W[ix, iy] *= 2
W[iy, ix] *= 2
for ix in range(nx):
W[ix, -2] *= 2.5
W[ix, -1] *= 1.5
W[-2, ix] *= 2.5
W[-1, ix] *= 1.5
else:
for ix in range(1, nx - 1, 2):
for iy in range(ny):
W[ix, iy] *= 4
W[iy, ix] *= 4
for ix in range(2, nx - 1, 2):
for iy in range(ny):
W[ix, iy] *= 2
W[iy, ix] *= 2
return W
@jit(nopython=True)
def makeH_(nx, ny): # pragma: no cover
"""Return the window matrix for trapezoidal intergral."""
H = np.ones((nx, ny))
for ix in range(1, nx - 1):
for iy in range(ny):
H[ix, iy] *= 2
H[iy, ix] *= 2
return H
@jit(nopython=True)
def dbltrapz_(X, dx, dy): # pragma: no cover
"""Double-integral of X for the trapezoidal method."""
nx = X.shape[-2]
ny = X.shape[-1]
H = makeH_(nx, ny)
tot = np.zeros_like(X[..., 0, 0])
for ix in range(nx):
tot += H[ix, ix] * X[ix, ix]
for iy in range(ix + 1, ny):
tot += 2 * H[ix, iy] * X[ix, iy]
return dx * dy * tot / 4.0
# ===============================================================================
# Halo-Exclusion Models
# ===============================================================================
@pluggable
class Exclusion(Component):
"""
Base class for exclusion models.
All models will need to perform single or double integrals over
arrays that may have an extra two dimensions. The maximum possible
size is k*r*m*m, which for normal values of the vectors equates to
~ 1000*50*500*500 = 12,500,000,000 values, which in 64-bit reals is
1e11 bytes = 100GB. We thus limit this to a maximum of either k*r*m
or r*m*m, both of which should be less than a GB of memory.
It is possibly better to limit it to k*r or m*m, which should be quite
memory efficient, but then without accelerators (i.e. Numba), these
will be very slow.
"""
def __init__(self, m, density, Ifunc, bias, r, delta_halo, mean_density):
self.density = density # 1d, (m)
self.m = m # 1d, (m)
self.Ifunc = Ifunc # 2d, (k,m)
self.bias = bias # 1d (m) or 2d (r,m)
self.r = r # 1d (r)
self.mean_density = mean_density
self.delta_halo = delta_halo
self.dlnx = np.log(m[1] / m[0])
def raw_integrand(self) -> np.ndarray:
"""
Return either a 2d (k,m) or 3d (r,k,m) array with the general integrand.
"""
if self.bias.ndim == 1:
return self.Ifunc * self.bias * self.m # *m since integrating in logspace
else:
return np.einsum("ij,kj->kij", self.Ifunc * self.m, self.bias)
def integrate(self):
"""
Integrate the :meth:`raw_integrand` over mass.
This should pass back whatever is multiplied by P_m(k) to get the two-halo
term. Often this will be a square of an integral, sometimes a Double-integral.
"""
pass
class NoExclusion(Exclusion):
r"""A model where there's no halo exclusion."""
def integrate(self):
"""Integrate the :meth:`raw_integrand` over mass."""
return intg.simps(self.raw_integrand(), dx=self.dlnx) ** 2
class Sphere(Exclusion):
r"""Spherical halo exclusion model.
Only halo pairs where the virial radius of
either halo is smaller than half of the separation, i.e.:
.. math:: R_{\rm vir} \le r/2
will be accounted for.
"""
def raw_integrand(self):
"""
Return either a 2d (k,m) or 3d (r,k,m) array with the general integrand.
"""
if self.bias.ndim == 1:
# *m since integrating in logspace
return outer(np.ones_like(self.r), self.Ifunc * self.bias * self.m)
else:
return np.einsum("ij,kj->kij", self.Ifunc * self.m, self.bias)
@cached_property
def density_mod(self):
"""The modified density, under new limits."""
density = np.outer(np.ones_like(self.r), self.density * self.m)
density[self.mask] = 0
return intg.simps(density, dx=self.dlnx, even="first")
@cached_property
def mask(self):
"""Elements that should be set to zero."""
return (np.outer(self.m, np.ones_like(self.r)) > self.mlim).T
@property
def mlim(self):
"""The mass threshold for the mask."""
return 4 * np.pi * (self.r / 2) ** 3 * self.mean_density * self.delta_halo / 3
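# Derivation note: a halo of mass M has R_vir = (3 M / (4 pi delta_halo rho_mean))^(1/3),
# so requiring R_vir <= r/2 is equivalent to M <= (4 pi / 3) (r/2)^3 rho_mean delta_halo,
# which is exactly the `mlim` returned above.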
def integrate(self):
"""
Integrate the :meth:`raw_integrand` over mass.
"""
integ = self.raw_integrand() # r,k,m
integ.transpose((1, 0, 2))[:, self.mask] = 0
return intg.simps(integ, dx=self.dlnx, even="first") ** 2
class DblSphere(Sphere):
r"""Double Sphere model of halo exclusion.
Only halo pairs for which the sum of virial radii
is smaller than the separation, i.e.:
.. math:: R_{\rm vir,1}+R_{\rm vir,2} \le r
will be accounted for.
"""
@property
def r_halo(self):
"""The virial radius of the halo"""
return (3 * self.m / (4 * np.pi * self.delta_halo * self.mean_density)) ** (
1.0 / 3.0
)
@cached_property
def mask(self):
"""Elements that should be set to zero (r,m,m)."""
rvir = self.r_halo
return (outer(np.add.outer(rvir, rvir), np.ones_like(self.r)) > self.r).T
@cached_property
def density_mod(self):
"""The modified density, under new limits."""
out = np.zeros_like(self.r)
for i, r in enumerate(self.r):
integrand = np.outer(self.density * self.m, np.ones_like(self.density))
integrand[self.mask[i]] = 0
out[i] = intg.simps(
intg.simps(integrand, dx=self.dlnx, even="first"),
dx=self.dlnx,
even="first",
)
return np.sqrt(out)
def integrate(self):
"""
Integrate the :meth:`raw_integrand` over mass.
"""
integ = self.raw_integrand() # (r,k,m)
return integrate_dblsphere(integ, self.mask, self.dlnx)
def integrate_dblsphere(integ, mask, dx):
"""
Integration function for double sphere model.
"""
out = np.zeros_like(integ[:, :, 0])
integrand = np.zeros_like(mask, dtype=float)
for ik in range(integ.shape[1]):
for ir in range(mask.shape[0]):
integrand[ir] = np.outer(integ[ir, ik, :], integ[ir, ik, :])
integrand[mask] = 0
out[:, ik] = intg.simps(
intg.simps(integrand, dx=dx, even="first"), dx=dx, even="first"
)
return out
if USE_NUMBA:
@jit(nopython=True)
def integrate_dblsphere_(integ, mask, dx): # pragma: no cover
r"""
The same as :func:`integrate_dblsphere`, but uses NUMBA to speed up.
"""
nr = integ.shape[0]
nk = integ.shape[1]
nm = mask.shape[1]
out = np.zeros((nr, nk))
integrand = np.zeros((nm, nm))
for ir in range(nr):
for ik in range(nk):
for im in range(nm):
for jm in range(im, nm):
if mask[ir, im, jm]:
integrand[im, jm] = 0
else:
integrand[im, jm] = integ[ir, ik, im] * integ[ir, ik, jm]
out[ir, ik] = dblsimps_(integrand, dx, dx)
return out
class DblSphere_(DblSphere): # pragma: no cover
r"""
The same as :class:`DblSphere`, but uses Numba to speed up the integration.
"""
def integrate(self):
"""Integrate the :meth:`raw_integrand` over mass."""
integ = self.raw_integrand() # (r,k,m)
return integrate_dblsphere_(integ, self.mask, self.dlnx)
class DblEllipsoid(DblSphere):
r"""
Double Ellipsoid model of halo exclusion.
Assuming a lognormal distribution
of ellipticities for halos, the probability of halo pairs **not** excluded
is:
.. math:: P(y) = 3 y^2 - 2 y^3 ,\; y = (x-0.8)/0.29,\; x = r/(R_{\rm vir,1}+R_{\rm vir,2})
taken from [1]_.
References
----------
.. [1] <NAME> al., " On the Mass-to-Light Ratio of Large-Scale Structure",
https://ui.adsabs.harvard.edu/abs/2005ApJ...631...41T.
"""
@cached_property
def mask(self):
"Unecessary for this approach."
return None
@cached_property
def prob(self):
"""
The probability distribution used in calculating the double integral.
"""
rvir = self.r_halo
x = outer(self.r, 1 / np.add.outer(rvir, rvir))
x = (x - 0.8) / 0.29 # this is y but we re-use the memory
np.clip(x, 0, 1, x)
return 3 * x ** 2 - 2 * x ** 3
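# Consequence of the clip above: pairs with r <= 0.8 (R_vir,1 + R_vir,2) are fully
# excluded (P = 0), pairs with r >= 1.09 (R_vir,1 + R_vir,2) are fully included
# (P = 1), and in between P rises smoothly as 3y^2 - 2y^3.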
@cached_property
def density_mod(self):
"""The modified density, under new limits."""
integrand = self.prob * outer(
np.ones_like(self.r),
np.outer(self.density * self.m, self.density * self.m),
)  # api: numpy.outer
# -*- coding: utf-8 -*-
""" Imaginary part of Phase Locking Value
Imaginary Phase Locking Value (*IPLV*) was proposed to resolve PLV's sensitivity to
volume conduction and common reference effects.
IPLV is computed similarly as PLV, but taking the imaginary part of the summation:
.. math::
ImPLV = \\frac{1}{N} \\left | Im \\left ( \\sum_{t=1}^{N} e^{i (\phi_{j1}(t) - \phi_{j2}(t))} \\right ) \\right |
References
----------
.. [Sadaghiani2012] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Alpha-band phase synchrony is related to activity in the fronto-parietal adaptive control network. The Journal of Neuroscience, 32(41), 14305-14310.
"""
# Author: <NAME> <<EMAIL>>
from .estimator import Estimator
from ..analytic_signal import analytic_signal
import numpy as np
def iplv_fast(data, pairs=None):
""" Imaginary part of Phase Locking Value
"""
_, n_samples = np.shape(data)
_, u_phases = analytic_signal(data)
Q = np.exp(1j * u_phases)
Q = np.matrix(Q)  # api: numpy.matrix
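# Hedged continuation sketch (assumed from the docstring formula, not necessarily the
# library's verbatim code): sum_t e^{i(phi_j1 - phi_j2)} is the (j1, j2) entry of the
# Gram matrix Q Q^H, so IPLV is its absolute imaginary part scaled by 1/N.
iplv = np.abs(np.imag(Q * Q.conj().T)) / n_samples
return iplv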