code (string, 31 to 1.05M chars) | apis (list) | extract_api (string, 97 to 1.91M chars) |
---|---|---|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import numpy as np
import sklearn.metrics as skm
import fairlearn.metrics as metrics
# ======================================================
a = "a"
b = "b"
c = "c"
Y_true = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_pred = [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_true_ternary = [a, b, c, c, c, b, b, b, c, c, a, a, a, a, a, b, c, c]
Y_pred_ternary = [b, c, c, c, b, b, b, b, b, c, a, a, c, a, a, b, c, c]
groups = [3, 4, 1, 0, 0, 0, 3, 2, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
weight = [1, 2, 3, 1, 2, 3, 4, 2, 3, 3, 2, 1, 2, 3, 1, 2, 3, 4]
group2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# =======================================================
# Define the metric pairs as a list of (sklearn metric, fairlearn group metric) tuples
# so that the actual names can be seen when pytest builds the tests
supported_metrics_weighted = [(skm.accuracy_score, metrics.group_accuracy_score),
(skm.confusion_matrix, metrics.group_confusion_matrix),
(skm.zero_one_loss, metrics.group_zero_one_loss)]
# The following only work with binary data when called with their default arguments
supported_metrics_weighted_binary = [(skm.precision_score, metrics.group_precision_score),
(skm.recall_score, metrics.group_recall_score),
(skm.roc_auc_score, metrics.group_roc_auc_score),
(skm.mean_squared_error, metrics.group_mean_squared_error),
(skm.r2_score, metrics.group_r2_score)]
supported_metrics_weighted_binary = supported_metrics_weighted_binary + supported_metrics_weighted
metrics_no_sample_weights = [(skm.max_error, metrics.group_max_error),
(skm.mean_absolute_error, metrics.group_mean_absolute_error),
(skm.mean_squared_log_error, metrics.group_mean_squared_log_error),
(skm.median_absolute_error, metrics.group_median_absolute_error)]
supported_metrics_unweighted = metrics_no_sample_weights + supported_metrics_weighted_binary
# =======================================================
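# Shape of the objects under test (inferred from the assertions below): each fairlearn
# group_* metric returns a result with an `overall` field (metric over the full data)
# and a `by_group` field keyed by the values in `groups`. Illustrative sketch:
#   result = metrics.group_accuracy_score(Y_true, Y_pred, groups)
#   result.overall          # accuracy over all 18 samples
#   result.by_group[3]      # accuracy restricted to samples whose group label is 3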
@pytest.mark.parametrize("func_tuple", supported_metrics_unweighted)
def test_metric_unweighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups)
# We don't really care about the numbers (sklearn is responsible)
# We just want to make sure we got a result
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted_binary)
def test_metric_weighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted)
def test_metric_weighted_ternary(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true_ternary, Y_pred_ternary, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true_ternary, Y_pred_ternary, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
# ======================================================================================
def test_group_accuracy_score_unnormalized():
result = metrics.group_accuracy_score(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.accuracy_score(Y_true, Y_pred, False)
assert result.overall == expected_overall
# ======================================================================================
def test_group_confusion_matrix_labels():
labels = [0, 4]
result = metrics.group_confusion_matrix(Y_true, Y_pred, groups, labels=labels)
expected_overall = skm.confusion_matrix(Y_true, Y_pred, labels=labels)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_precision_score_ternary():
result = metrics.group_precision_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.precision_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_precision_score_pos_label():
result = metrics.group_precision_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.precision_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_recall_score_ternary():
result = metrics.group_recall_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.recall_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_recall_score_pos_label():
result = metrics.group_recall_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.recall_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_roc_auc_score_average():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, average='samples')
expected_overall = skm.roc_auc_score(Y_true, Y_pred, average='samples')
assert expected_overall == result.overall
def test_group_roc_auc_score_max_fpr():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, max_fpr=0.5)
expected_overall = skm.roc_auc_score(Y_true, Y_pred, max_fpr=0.5)
assert expected_overall == result.overall
# ======================================================================================
def test_group_zero_one_loss_unnormalized():
result = metrics.group_zero_one_loss(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.zero_one_loss(Y_true, Y_pred, False)
assert result.overall == expected_overall
# =============================================================================================
def test_group_mean_squared_error_multioutput_single_ndarray():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
# =============================================================================================
def test_group_r2_score_multioutput():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_r2_score(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.r2_score(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
mask = np.asarray(groups) == target_group
expected = skm.r2_score(y_t[mask], y_p[mask], multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
# =============================================================================================
def test_group_mean_squared_error_multioutput_list_ndarray():
y_t = [np.random.rand(2) for x in groups]
y_p = [np.random.rand(2) for x in groups]
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
y_true = []
y_pred = []
for i in range(len(groups)):
if groups[i] == target_group:
y_true.append(y_t[i])
y_pred.append(y_p[i])
expected = skm.mean_squared_error(y_true, y_pred, multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
|
[
"numpy.random.rand",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.r2_score",
"sklearn.metrics.zero_one_loss",
"fairlearn.metrics.group_accuracy_score",
"numpy.asarray",
"fairlearn.metrics.group_roc_auc_score",
"fairlearn.metrics.group_precision_score",
"fairlearn.metrics.group_mean_squared_error",
"sklearn.metrics.confusion_matrix",
"fairlearn.metrics.group_confusion_matrix",
"fairlearn.metrics.group_zero_one_loss",
"sklearn.metrics.mean_squared_error",
"fairlearn.metrics.group_r2_score",
"sklearn.metrics.accuracy_score",
"numpy.unique",
"pytest.mark.parametrize",
"numpy.array_equal",
"fairlearn.metrics.group_recall_score"
] |
[((2279, 2346), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func_tuple"""', 'supported_metrics_unweighted'], {}), "('func_tuple', supported_metrics_unweighted)\n", (2302, 2346), False, 'import pytest\n'), ((2896, 2968), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func_tuple"""', 'supported_metrics_weighted_binary'], {}), "('func_tuple', supported_metrics_weighted_binary)\n", (2919, 2968), False, 'import pytest\n'), ((3442, 3507), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func_tuple"""', 'supported_metrics_weighted'], {}), "('func_tuple', supported_metrics_weighted)\n", (3465, 3507), False, 'import pytest\n'), ((4169, 4238), 'fairlearn.metrics.group_accuracy_score', 'metrics.group_accuracy_score', (['Y_true', 'Y_pred', 'groups'], {'normalize': '(False)'}), '(Y_true, Y_pred, groups, normalize=False)\n', (4197, 4238), True, 'import fairlearn.metrics as metrics\n'), ((4263, 4304), 'sklearn.metrics.accuracy_score', 'skm.accuracy_score', (['Y_true', 'Y_pred', '(False)'], {}), '(Y_true, Y_pred, False)\n', (4281, 4304), True, 'import sklearn.metrics as skm\n'), ((4520, 4589), 'fairlearn.metrics.group_confusion_matrix', 'metrics.group_confusion_matrix', (['Y_true', 'Y_pred', 'groups'], {'labels': 'labels'}), '(Y_true, Y_pred, groups, labels=labels)\n', (4550, 4589), True, 'import fairlearn.metrics as metrics\n'), ((4613, 4664), 'sklearn.metrics.confusion_matrix', 'skm.confusion_matrix', (['Y_true', 'Y_pred'], {'labels': 'labels'}), '(Y_true, Y_pred, labels=labels)\n', (4633, 4664), True, 'import sklearn.metrics as skm\n'), ((4677, 4725), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (4691, 4725), True, 'import numpy as np\n'), ((4873, 4960), 'fairlearn.metrics.group_precision_score', 'metrics.group_precision_score', (['Y_true_ternary', 'Y_pred_ternary', 'group2'], {'average': 'None'}), '(Y_true_ternary, Y_pred_ternary, group2,\n average=None)\n', (4902, 4960), True, 'import fairlearn.metrics as metrics\n'), ((4980, 5045), 'sklearn.metrics.precision_score', 'skm.precision_score', (['Y_true_ternary', 'Y_pred_ternary'], {'average': 'None'}), '(Y_true_ternary, Y_pred_ternary, average=None)\n', (4999, 5045), True, 'import sklearn.metrics as skm\n'), ((5058, 5106), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (5072, 5106), True, 'import numpy as np\n'), ((5166, 5232), 'fairlearn.metrics.group_precision_score', 'metrics.group_precision_score', (['Y_true', 'Y_pred', 'groups'], {'pos_label': '(0)'}), '(Y_true, Y_pred, groups, pos_label=0)\n', (5195, 5232), True, 'import fairlearn.metrics as metrics\n'), ((5256, 5304), 'sklearn.metrics.precision_score', 'skm.precision_score', (['Y_true', 'Y_pred'], {'pos_label': '(0)'}), '(Y_true, Y_pred, pos_label=0)\n', (5275, 5304), True, 'import sklearn.metrics as skm\n'), ((5317, 5365), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (5331, 5365), True, 'import numpy as np\n'), ((5510, 5595), 'fairlearn.metrics.group_recall_score', 'metrics.group_recall_score', (['Y_true_ternary', 'Y_pred_ternary', 'group2'], {'average': 'None'}), '(Y_true_ternary, Y_pred_ternary, group2, average=None\n )\n', (5536, 5595), True, 'import fairlearn.metrics as metrics\n'), ((5614, 5676), 'sklearn.metrics.recall_score', 'skm.recall_score', (['Y_true_ternary', 'Y_pred_ternary'], {'average': 'None'}), 
'(Y_true_ternary, Y_pred_ternary, average=None)\n', (5630, 5676), True, 'import sklearn.metrics as skm\n'), ((5689, 5737), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (5703, 5737), True, 'import numpy as np\n'), ((5794, 5857), 'fairlearn.metrics.group_recall_score', 'metrics.group_recall_score', (['Y_true', 'Y_pred', 'groups'], {'pos_label': '(0)'}), '(Y_true, Y_pred, groups, pos_label=0)\n', (5820, 5857), True, 'import fairlearn.metrics as metrics\n'), ((5881, 5926), 'sklearn.metrics.recall_score', 'skm.recall_score', (['Y_true', 'Y_pred'], {'pos_label': '(0)'}), '(Y_true, Y_pred, pos_label=0)\n', (5897, 5926), True, 'import sklearn.metrics as skm\n'), ((5939, 5987), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (5953, 5987), True, 'import numpy as np\n'), ((6133, 6203), 'fairlearn.metrics.group_roc_auc_score', 'metrics.group_roc_auc_score', (['Y_true', 'Y_pred', 'groups'], {'average': '"""samples"""'}), "(Y_true, Y_pred, groups, average='samples')\n", (6160, 6203), True, 'import fairlearn.metrics as metrics\n'), ((6227, 6279), 'sklearn.metrics.roc_auc_score', 'skm.roc_auc_score', (['Y_true', 'Y_pred'], {'average': '"""samples"""'}), "(Y_true, Y_pred, average='samples')\n", (6244, 6279), True, 'import sklearn.metrics as skm\n'), ((6382, 6446), 'fairlearn.metrics.group_roc_auc_score', 'metrics.group_roc_auc_score', (['Y_true', 'Y_pred', 'groups'], {'max_fpr': '(0.5)'}), '(Y_true, Y_pred, groups, max_fpr=0.5)\n', (6409, 6446), True, 'import fairlearn.metrics as metrics\n'), ((6470, 6516), 'sklearn.metrics.roc_auc_score', 'skm.roc_auc_score', (['Y_true', 'Y_pred'], {'max_fpr': '(0.5)'}), '(Y_true, Y_pred, max_fpr=0.5)\n', (6487, 6516), True, 'import sklearn.metrics as skm\n'), ((6714, 6782), 'fairlearn.metrics.group_zero_one_loss', 'metrics.group_zero_one_loss', (['Y_true', 'Y_pred', 'groups'], {'normalize': '(False)'}), '(Y_true, Y_pred, groups, normalize=False)\n', (6741, 6782), True, 'import fairlearn.metrics as metrics\n'), ((6807, 6847), 'sklearn.metrics.zero_one_loss', 'skm.zero_one_loss', (['Y_true', 'Y_pred', '(False)'], {}), '(Y_true, Y_pred, False)\n', (6824, 6847), True, 'import sklearn.metrics as skm\n'), ((7153, 7229), 'fairlearn.metrics.group_mean_squared_error', 'metrics.group_mean_squared_error', (['y_t', 'y_p', 'groups'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, groups, multioutput='raw_values')\n", (7185, 7229), True, 'import fairlearn.metrics as metrics\n'), ((7254, 7312), 'sklearn.metrics.mean_squared_error', 'skm.mean_squared_error', (['y_t', 'y_p'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, multioutput='raw_values')\n", (7276, 7312), True, 'import sklearn.metrics as skm\n'), ((7325, 7373), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (7339, 7373), True, 'import numpy as np\n'), ((7607, 7673), 'fairlearn.metrics.group_r2_score', 'metrics.group_r2_score', (['y_t', 'y_p', 'groups'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, groups, multioutput='raw_values')\n", (7629, 7673), True, 'import fairlearn.metrics as metrics\n'), ((7698, 7746), 'sklearn.metrics.r2_score', 'skm.r2_score', (['y_t', 'y_p'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, multioutput='raw_values')\n", (7710, 7746), True, 'import sklearn.metrics as skm\n'), ((7759, 7807), 'numpy.array_equal', 'np.array_equal', (['result.overall', 
'expected_overall'], {}), '(result.overall, expected_overall)\n', (7773, 7807), True, 'import numpy as np\n'), ((7832, 7849), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (7841, 7849), True, 'import numpy as np\n'), ((8318, 8394), 'fairlearn.metrics.group_mean_squared_error', 'metrics.group_mean_squared_error', (['y_t', 'y_p', 'groups'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, groups, multioutput='raw_values')\n", (8350, 8394), True, 'import fairlearn.metrics as metrics\n'), ((8419, 8477), 'sklearn.metrics.mean_squared_error', 'skm.mean_squared_error', (['y_t', 'y_p'], {'multioutput': '"""raw_values"""'}), "(y_t, y_p, multioutput='raw_values')\n", (8441, 8477), True, 'import sklearn.metrics as skm\n'), ((8490, 8538), 'numpy.array_equal', 'np.array_equal', (['result.overall', 'expected_overall'], {}), '(result.overall, expected_overall)\n', (8504, 8538), True, 'import numpy as np\n'), ((8564, 8581), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (8573, 8581), True, 'import numpy as np\n'), ((2784, 2832), 'numpy.array_equal', 'np.array_equal', (['expected_overall', 'result.overall'], {}), '(expected_overall, result.overall)\n', (2798, 2832), True, 'import numpy as np\n'), ((3330, 3378), 'numpy.array_equal', 'np.array_equal', (['expected_overall', 'result.overall'], {}), '(expected_overall, result.overall)\n', (3344, 3378), True, 'import numpy as np\n'), ((3909, 3957), 'numpy.array_equal', 'np.array_equal', (['expected_overall', 'result.overall'], {}), '(expected_overall, result.overall)\n', (3923, 3957), True, 'import numpy as np\n'), ((7920, 7980), 'sklearn.metrics.r2_score', 'skm.r2_score', (['y_t[mask]', 'y_p[mask]'], {'multioutput': '"""raw_values"""'}), "(y_t[mask], y_p[mask], multioutput='raw_values')\n", (7932, 7980), True, 'import sklearn.metrics as skm\n'), ((7996, 8051), 'numpy.array_equal', 'np.array_equal', (['result.by_group[target_group]', 'expected'], {}), '(result.by_group[target_group], expected)\n', (8010, 8051), True, 'import numpy as np\n'), ((8224, 8241), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8238, 8241), True, 'import numpy as np\n'), ((8270, 8287), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (8284, 8287), True, 'import numpy as np\n'), ((8797, 8861), 'sklearn.metrics.mean_squared_error', 'skm.mean_squared_error', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""'}), "(y_true, y_pred, multioutput='raw_values')\n", (8819, 8861), True, 'import sklearn.metrics as skm\n'), ((8877, 8932), 'numpy.array_equal', 'np.array_equal', (['result.by_group[target_group]', 'expected'], {}), '(result.by_group[target_group], expected)\n', (8891, 8932), True, 'import numpy as np\n'), ((7866, 7884), 'numpy.asarray', 'np.asarray', (['groups'], {}), '(groups)\n', (7876, 7884), True, 'import numpy as np\n')]
|
"""
Adapted from OpenAI Baselines.
"""
import numpy as np
import tensorflow as tf # pylint: ignore-module
import random
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
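# A minimal usage sketch for switch() above (tensor names are illustrative):
#   q_values = switch(use_target, then_expression=target_q, else_expression=online_q)
# The selection happens inside the graph via tf.cond, so both branches must share the
# same shape, as noted in the docstring.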
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
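# Note: f1 * x + f2 * abs(x) equals x for x >= 0 and leak * x for x < 0, i.e. a leaky
# ReLU written without a conditional.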
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
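# The tf.where expression above implements the standard piecewise Huber loss:
#   0.5 * x**2                      for |x| <  delta
#   delta * (|x| - 0.5 * delta)     for |x| >= delta
# e.g. with delta=1.0 this gives 0.125 at x=0.5 and 1.5 at x=2.0.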
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
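# A minimal usage sketch for the decorator above (function name is illustrative):
#   @in_session
#   def run_training(num_steps):
#       initialize()
#       ...  # ops here see the tf.Session opened by the wrapper as the default session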
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Saving variables and setting up experiment directories
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
def load(fname):
import cloudpickle
with open(fname, 'rb') as f:
return cloudpickle.load(f)
def save(fname, obj):
import cloudpickle
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'wb') as fh:
cloudpickle.dump(obj, fh)
class Experiment(object):
def __init__(self, logdir):
self.logdir = logdir
os.makedirs(os.path.join(logdir, 'checkpoints'), exist_ok=True)
def load(self, timestep=None):
if timestep is None:
# get latest ckpt
import glob
fs = glob.glob(os.path.join(self.logdir, 'checkpoints/*'))
timesteps = []
for f in fs:
try: timesteps.append(int(os.path.basename(f)))
except ValueError: pass  # skip entries whose names are not integer timesteps
if len(timesteps) == 0:
return 0
timestep = max(timesteps)
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
load_state(fname)
return timestep
def save(self, timestep):
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
save_state(fname)
def load_model_fn(self):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
assert os.path.exists(fname), "No model file saved."
return load(fname)
def save_model_fn(self, model_fn):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
save(fname, model_fn)
# ================================================================
# Model components
# ================================================================
def batch_to_seq(h, nbatch, nsteps, flat=False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
h = x.reshape([-1, *x.shape[2:]])
"""
if flat:
h = tf.reshape(h, [nsteps, nbatch])
else:
h = tf.reshape(h, [nsteps, nbatch, -1])
return [tf.squeeze(v, [0]) for v in tf.split(axis=0, num_or_size_splits=nsteps, value=h)]
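# batch_to_seq above unpacks a time-major tensor of shape [nsteps, nbatch, ...] into a
# Python list of nsteps tensors, each of shape [nbatch, ...]; seq_to_batch below is the
# inverse operation, concatenating such a list back into a single batch tensor.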
def seq_to_batch(h, flat = False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
x = output.reshape(nsteps, nbatch, *obs_shape), where output is the output of this function.
"""
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=0, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=0), [-1])
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
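# The lstm helper below runs a single-layer LSTM step-by-step over the list of
# per-timestep inputs xs. ms holds per-timestep masks: multiplying c and h by (1 - m)
# zeroes the recurrent state wherever m is 1 (typically at episode boundaries), and
# s carries the concatenated [cell state, hidden state] between calls.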
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
nsteps = len(xs)
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
# for inpt in inputs:
# if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
# assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
if value is not None:
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
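# A minimal usage sketch for the two helpers above (variable names are illustrative):
#   var_list = tf.trainable_variables()
#   get_flat = GetFlat(var_list)           # reads all variables as one flat vector
#   set_from_flat = SetFromFlat(var_list)  # writes such a vector back into the variables
#   theta = get_flat()
#   set_from_flat(theta)                   # round-trips the current parameters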
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
def reset():
global ALREADY_INITIALIZED
ALREADY_INITIALIZED = set()
tf.reset_default_graph()
"""
Random Seeds
"""
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
|
[
"numpy.prod",
"tensorflow.tanh",
"tensorflow.split",
"tensorflow.get_default_session",
"multiprocessing.cpu_count",
"tensorflow.gradients",
"tensorflow.group",
"tensorflow.cast",
"tensorflow.variables_initializer",
"tensorflow.set_random_seed",
"tensorflow.clip_by_global_norm",
"cloudpickle.load",
"os.path.exists",
"tensorflow.Session",
"tensorflow.placeholder",
"functools.wraps",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"numpy.random.seed",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.zeros_like",
"tensorflow.ConfigProto",
"tensorflow.stack",
"numpy.random.normal",
"tensorflow.InteractiveSession",
"tensorflow.variable_scope",
"tensorflow.global_variables",
"numpy.square",
"os.path.dirname",
"cloudpickle.dump",
"tensorflow.reshape",
"numpy.linalg.svd",
"numpy.random.randn",
"tensorflow.reset_default_graph",
"tensorflow.train.Saver",
"os.path.join",
"random.seed",
"tensorflow.constant",
"tensorflow.constant_initializer",
"os.path.basename",
"tensorflow.squeeze",
"tensorflow.abs"
] |
[((1837, 1931), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'inter_op_parallelism_threads': 'num_cpu', 'intra_op_parallelism_threads': 'num_cpu'}), '(inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n', (1851, 1931), True, 'import tensorflow as tf\n'), ((2277, 2295), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (2292, 2295), False, 'import functools\n'), ((2946, 2962), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2960, 2962), True, 'import tensorflow as tf\n'), ((3105, 3121), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3119, 3121), True, 'import tensorflow as tf\n'), ((7065, 7112), 'tensorflow.split', 'tf.split', ([], {'axis': '(1)', 'num_or_size_splits': '(2)', 'value': 's'}), '(axis=1, num_or_size_splits=2, value=s)\n', (7073, 7112), True, 'import tensorflow as tf\n'), ((7506, 7538), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[c, h]'}), '(axis=1, values=[c, h])\n', (7515, 7538), True, 'import tensorflow as tf\n'), ((11241, 11269), 'tensorflow.gradients', 'tf.gradients', (['loss', 'var_list'], {}), '(loss, var_list)\n', (11253, 11269), True, 'import tensorflow as tf\n'), ((12602, 12626), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (12624, 12626), True, 'import tensorflow as tf\n'), ((12796, 12813), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (12810, 12813), True, 'import numpy as np\n'), ((12818, 12832), 'random.seed', 'random.seed', (['i'], {}), '(i)\n', (12829, 12832), False, 'import random\n'), ((676, 702), 'tensorflow.cast', 'tf.cast', (['condition', '"""bool"""'], {}), "(condition, 'bool')\n", (683, 702), True, 'import tensorflow as tf\n'), ((2030, 2069), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (2051, 2069), True, 'import tensorflow as tf\n'), ((2095, 2123), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (2105, 2123), True, 'import tensorflow as tf\n'), ((2631, 2670), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['new_variables'], {}), '(new_variables)\n', (2655, 2670), True, 'import tensorflow as tf\n'), ((2981, 3005), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (3003, 3005), True, 'import tensorflow as tf\n'), ((3054, 3076), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (3069, 3076), False, 'import os\n'), ((3137, 3161), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (3159, 3161), True, 'import tensorflow as tf\n'), ((3259, 3278), 'cloudpickle.load', 'cloudpickle.load', (['f'], {}), '(f)\n', (3275, 3278), False, 'import cloudpickle\n'), ((3341, 3363), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (3356, 3363), False, 'import os\n'), ((3422, 3447), 'cloudpickle.dump', 'cloudpickle.dump', (['obj', 'fh'], {}), '(obj, fh)\n', (3438, 3447), False, 'import cloudpickle\n'), ((4357, 4410), 'os.path.join', 'os.path.join', (['self.logdir', '"""checkpoints/model_fn.pkl"""'], {}), "(self.logdir, 'checkpoints/model_fn.pkl')\n", (4369, 4410), False, 'import os\n'), ((4426, 4447), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (4440, 4447), False, 'import os\n'), ((4555, 4608), 'os.path.join', 'os.path.join', (['self.logdir', '"""checkpoints/model_fn.pkl"""'], {}), "(self.logdir, 'checkpoints/model_fn.pkl')\n", (4567, 4608), False, 'import os\n'), ((4997, 5028), 
'tensorflow.reshape', 'tf.reshape', (['h', '[nsteps, nbatch]'], {}), '(h, [nsteps, nbatch])\n', (5007, 5028), True, 'import tensorflow as tf\n'), ((5051, 5086), 'tensorflow.reshape', 'tf.reshape', (['h', '[nsteps, nbatch, -1]'], {}), '(h, [nsteps, nbatch, -1])\n', (5061, 5086), True, 'import tensorflow as tf\n'), ((5099, 5117), 'tensorflow.squeeze', 'tf.squeeze', (['v', '[0]'], {}), '(v, [0])\n', (5109, 5117), True, 'import tensorflow as tf\n'), ((6039, 6077), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', 'flat_shape'], {}), '(0.0, 1.0, flat_shape)\n', (6055, 6077), True, 'import numpy as np\n'), ((6096, 6133), 'numpy.linalg.svd', 'np.linalg.svd', (['a'], {'full_matrices': '(False)'}), '(a, full_matrices=False)\n', (6109, 6133), True, 'import numpy as np\n'), ((6602, 6618), 'tensorflow.constant', 'tf.constant', (['out'], {}), '(out)\n', (6613, 6618), True, 'import tensorflow as tf\n'), ((6777, 6801), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (6794, 6801), True, 'import tensorflow as tf\n'), ((7273, 7320), 'tensorflow.split', 'tf.split', ([], {'axis': '(1)', 'num_or_size_splits': '(4)', 'value': 'z'}), '(axis=1, num_or_size_splits=4, value=z)\n', (7281, 7320), True, 'import tensorflow as tf\n'), ((7333, 7349), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['i'], {}), '(i)\n', (7346, 7349), True, 'import tensorflow as tf\n'), ((7362, 7378), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['f'], {}), '(f)\n', (7375, 7378), True, 'import tensorflow as tf\n'), ((7391, 7407), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['o'], {}), '(o)\n', (7404, 7407), True, 'import tensorflow as tf\n'), ((7420, 7430), 'tensorflow.tanh', 'tf.tanh', (['u'], {}), '(u)\n', (7427, 7430), True, 'import tensorflow as tf\n'), ((9861, 9879), 'tensorflow.group', 'tf.group', (['*updates'], {}), '(*updates)\n', (9869, 9879), True, 'import tensorflow as tf\n'), ((11170, 11180), 'numpy.prod', 'np.prod', (['x'], {}), '(x)\n', (11177, 11180), True, 'import numpy as np\n'), ((11319, 11369), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads'], {'clip_norm': 'clip_norm'}), '(grads, clip_norm=clip_norm)\n', (11341, 11369), True, 'import tensorflow as tf\n'), ((11785, 11820), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[total_size]'], {}), '(dtype, [total_size])\n', (11799, 11820), True, 'import tensorflow as tf\n'), ((12074, 12092), 'tensorflow.group', 'tf.group', (['*assigns'], {}), '(*assigns)\n', (12082, 12092), True, 'import tensorflow as tf\n'), ((12770, 12791), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['i'], {}), '(i)\n', (12788, 12791), True, 'import tensorflow as tf\n'), ((1355, 1364), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (1361, 1364), True, 'import tensorflow as tf\n'), ((1382, 1394), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (1391, 1394), True, 'import tensorflow as tf\n'), ((2343, 2355), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2353, 2355), True, 'import tensorflow as tf\n'), ((2553, 2574), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2572, 2574), True, 'import tensorflow as tf\n'), ((2602, 2626), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2624, 2626), True, 'import tensorflow as tf\n'), ((3556, 3591), 'os.path.join', 'os.path.join', (['logdir', '"""checkpoints"""'], {}), "(logdir, 'checkpoints')\n", (3568, 3591), False, 'import os\n'), ((5127, 5179), 'tensorflow.split', 'tf.split', ([], {'axis': '(0)', 'num_or_size_splits': 
'nsteps', 'value': 'h'}), '(axis=0, num_or_size_splits=nsteps, value=h)\n', (5135, 5179), True, 'import tensorflow as tf\n'), ((5556, 5583), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'h'}), '(axis=0, values=h)\n', (5565, 5583), True, 'import tensorflow as tf\n'), ((5631, 5657), 'tensorflow.stack', 'tf.stack', ([], {'values': 'h', 'axis': '(0)'}), '(values=h, axis=0)\n', (5639, 5657), True, 'import tensorflow as tf\n'), ((7467, 7477), 'tensorflow.tanh', 'tf.tanh', (['c'], {}), '(c)\n', (7474, 7477), True, 'import tensorflow as tf\n'), ((1419, 1428), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (1425, 1428), True, 'import tensorflow as tf\n'), ((1791, 1818), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1816, 1818), False, 'import multiprocessing\n'), ((3754, 3796), 'os.path.join', 'os.path.join', (['self.logdir', '"""checkpoints/*"""'], {}), "(self.logdir, 'checkpoints/*')\n", (3766, 3796), False, 'import os\n'), ((6472, 6495), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (6487, 6495), True, 'import numpy as np\n'), ((7023, 7051), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (7046, 7051), True, 'import tensorflow as tf\n'), ((7212, 7228), 'tensorflow.matmul', 'tf.matmul', (['x', 'wx'], {}), '(x, wx)\n', (7221, 7228), True, 'import tensorflow as tf\n'), ((7231, 7247), 'tensorflow.matmul', 'tf.matmul', (['h', 'wh'], {}), '(h, wh)\n', (7240, 7247), True, 'import tensorflow as tf\n'), ((12133, 12157), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (12155, 12157), True, 'import tensorflow as tf\n'), ((12390, 12414), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (12412, 12414), True, 'import tensorflow as tf\n'), ((5943, 5962), 'numpy.prod', 'np.prod', (['shape[:-1]'], {}), '(shape[:-1])\n', (5950, 5962), True, 'import numpy as np\n'), ((10663, 10687), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (10685, 10687), True, 'import tensorflow as tf\n'), ((11983, 12027), 'tensorflow.reshape', 'tf.reshape', (['theta[start:start + size]', 'shape'], {}), '(theta[start:start + size], shape)\n', (11993, 12027), True, 'import tensorflow as tf\n'), ((6544, 6558), 'numpy.square', 'np.square', (['out'], {}), '(out)\n', (6553, 6558), True, 'import numpy as np\n'), ((11457, 11473), 'tensorflow.zeros_like', 'tf.zeros_like', (['v'], {}), '(v)\n', (11470, 11473), True, 'import tensorflow as tf\n'), ((3892, 3911), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (3908, 3911), False, 'import os\n')]
|
import numpy as np
import math
import matplotlib.pyplot as plt
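# Overview: I1, Vd and Vl hold power-series coefficients for the circuit current,
# the diode voltage and the voltage across a constant-power element (P = Vl * I1).
# The nonlinear relations are handled with the Cauchy-product convolutions convVd
# and convVlI below, and the operating point is the sum of each series.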
U = 5 # equivalent to E
R = 2 # equivalent to R1
R2 = 3
P = 1.2
Vt = 0.026
Is = 0.000005
n = 200 # depth (number of series terms)
Vd = np.zeros(n) # series coefficients
Vl = np.zeros(n)
I1 = np.zeros(n)
I1[0] = U / R # initialization of the series
Vd[0] = Vt * math.log(1 + I1[0] / Is)
Vl[0] = P / I1[0]
def convVd(Vd, I, i): # convolution for computing Vd[i]
suma = 0
for k in range(1, i):
suma += k * Vd[k] * I[i - k]
return suma
def convVlI(Vl, I1, i): # convolution for computing Vl[i]
suma = 0
for k in range(i):
suma = suma + Vl[k] * I1[i - k]
return suma
for i in range(1, n): # computation of the series coefficients
I1[i] = (1 / R + 1 / R2) * (-Vd[i - 1] - Vl[i - 1])
Vd[i] = (i * Vt * I1[i] - convVd(Vd, I1, i)) / (i * (Is + I1[0]))
Vl[i] = -convVlI(Vl, I1, i) / I1[0]
If = sum(I1)
Vdf = sum(Vd)
Vlf = sum(Vl)
print('I1: ' + str(If))
print('Vd: ' + str(Vdf))
print('Vl: ' + str(Vlf))
print('P: ' + str(Vlf * If))
Vdfinal = np.zeros(n) # to see how the diode voltage evolves (partial sums of the series)
for j in range(n):
Vdfinal[j] = np.sum([Vd[:(j+1)]])
print(Vdfinal)
|
[
"numpy.sum",
"numpy.zeros",
"math.log"
] |
[((179, 190), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (187, 190), True, 'import numpy as np\n'), ((206, 217), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (214, 217), True, 'import numpy as np\n'), ((223, 234), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (231, 234), True, 'import numpy as np\n'), ((1015, 1026), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1023, 1026), True, 'import numpy as np\n'), ((296, 320), 'math.log', 'math.log', (['(1 + I1[0] / Is)'], {}), '(1 + I1[0] / Is)\n', (304, 320), False, 'import math\n'), ((1118, 1138), 'numpy.sum', 'np.sum', (['[Vd[:j + 1]]'], {}), '([Vd[:j + 1]])\n', (1124, 1138), True, 'import numpy as np\n')]
|
"""
Autonomous dataset collection for the Jetson Nano
<NAME> - <EMAIL>
"""
import datasets
import json
from datasets import Board, ChessPiece, PieceColor, PieceType
from realsense_utils import RealSenseCamera  # used below to capture RGB images
import preprocessing as pr
import cv2
import pandas as pd
import os
from os.path import isfile, join
import uuid
import numpy as np
from PIL import Image
from PIL.ExifTags import TAGS
RUN_CALIBRATION = False # Run calibration sequence or use preexisting board four corners data from config/setup.txt
BOARD_SAVE_DEST= r"board_metadata.jpeg" # Where the debug metadata board visualization image is saved (to ensure we properly setup the metadata)
TMP_DEST = "/home/spark/cv-chess/core/vision/tmp/" # Where images are temporarily saved before being uploaded to drive in a batch
LOCAL_MD_FILENAME = "local_meta.json"
LOCAL_METADATA_JSON_PATH = TMP_DEST + LOCAL_MD_FILENAME
TL = [250, 115]
BL = [250, 687]
TR = [825, 115]
BR = [825, 687]
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def fen_to_dict(string):
name_to_num = {
'p' : 1,
'b' : 2,
'n' : 3,
'r' : 4,
'q' : 5,
'k' : 6,
}
out = {}
letters = "ABCDEFGH"
for i in range(8):
for j in range(1,9):
out[letters[i] + str(j)] = 0
string = string.split('/')
new_string = []
for s in string:
    row = ""
    for d in s:
        if d.isnumeric():
            # expand each digit into that many empty-square markers;
            # any numeric character maps to 0 (empty) in the loop below
            row += '1' * int(d)
        else:
            row += d
    new_string.append(row)
for i in range(8, 0, -1):
for j in range(8):
if new_string[8-i][j].isnumeric():
out[letters[j] + str(i)] = 0
else:
out[letters[j] + str(i)] = name_to_num[new_string[8-i][j].lower()]
return out
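# Illustrative example of the mapping above:
#   fen_to_dict("8/8/8/8/8/8/8/R7")
# yields a dict with A1 -> 4 (rook, per name_to_num) and every other square -> 0.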
def get_sorted_time_saved(images):
"""
Given a list of image filenames, return (image filename, time written to disk) pairs sorted by time.
Purpose: for debugging dataset
Args:
images (list): List of image filenames
Returns:
list: (image filename, DateTime string) tuples in ascending time order
"""
image_dat = []
for image in images:
imtmp = Image.open(image)
tmp = imtmp.getexif()
image_dat.append(tmp)
dt = {}
for exifdata in image_dat:
idx = image_dat.index(exifdata)
# iterating over all EXIF data fields
for tag_id in exifdata:
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
# decode bytes
if isinstance(data, bytes):
data = data.decode()
# Add datetime field
if tag == "DateTime":
dt[images[idx]] = data
print(f"{tag:25}: {data}")
output = sorted(dt.items(), key=lambda eta: eta[1], reverse=False)
print(output)
dt = {}
for item in output:
dt[item[0]] = item[1]
with open(TMP_DEST + "datetimes.json", "w") as wr: # dump to json
json.dump(output, wr)
return output
def del_batch_from_text_file(file):
filenames = []
with open(file, "r") as rd:
for line in rd.readlines():
# parse each line for file to delete:
commaIndex = line.index(",")
filename = line[:commaIndex]
os.remove(TMP_DEST + filename)
if __name__ == "__main__":
# Initialize camera
realsense = RealSenseCamera()
"""
# Check if calibration sequence must be run
if RUN_CALIBRATION:
realsense.calibrate_board_pos()
if realsense.get_board_corners() is None:
print("Failed to run calibration. Exiting...")
exit()
"""
"""
board_meta = Board()
# Add pieces to metadata csv
board_meta.add_pieces({
"A1":ChessPiece(PieceType.KNIGHT, PieceColor.BLUE), "A2":ChessPiece(PieceType.PAWN, PieceColor.BLUE), "A3":ChessPiece(PieceType.PAWN, PieceColor.ORANGE)
})
board_meta.display_board(dest=BOARD_SAVE_DEST)
print(f"Verify board is correct output dest={BOARD_SAVE_DEST}.\nContine [Y] or Exit [E]?")
validate = input()
if validate.upper() == "E" or validate.upper() == "N":
print("Exiting...")
realsense.stop_pipeline()
exit()
files = []
files = [f for f in os.listdir(TMP_DEST) if isfile(os.path.join(TMP_DEST, f))]
# Check to see if there is pre-existing .csv metadata to add to
if LOCAL_MD_FILENAME in files:
try:
total_metadata = pd.read_csv(LOCAL_METADATA_JSON_PATH)
except:
total_metadata = pd.DataFrame()
else:
total_metadata = pd.DataFrame()
# Loop through input
while input() != "exit":
img = realsense.capture_rgb_image() # Capture the image
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
files = pr.board_to_64_files(img, base_directory=TMP_DEST) # Break image up into 64 files
piece_types, piece_colors = [], []
batch_id = uuid.uuid1()
for tile in sorted(files.keys()):
temp = board_meta.get_chess_piece(tile)
if temp is None:
piece_types.append(None)
piece_colors.append(None)
else:
piece_types.append(temp.piece_type.name)
piece_colors.append(temp.piece_color.name)
tmp_meta = pd.DataFrame({
"File" : [files[file] for file in files.keys()],
"Position" : [file for file in files.keys()],
"Piece Type" : piece_types,
"Piece Color" : piece_colors,
"Batch ID" : [batch_id for i in range(len(files.keys()))]
})
frames = [total_metadata, tmp_meta]
total_metadata = pd.concat(frames) # Concatenate dataframes
print(total_metadata)
total_metadata.to_csv(path_or_buf=LOCAL_METADATA_JSON_PATH)
"""
#pr.delete_board2_64_output(base_directory=TMP_DEST)
FEN = "5P1R/1Q1RP1P1/3R1P2/QQPPK1R1/1B1K1N2/B1R2N1B/1N2B3R/2B1BN2".upper()
last_input = None
df = pd.DataFrame()
while input() != "end":
resp = input("[new] for new FEN, [any other key] to take an image >")
if resp == "new":
FEN = input("Enter a FEN:").upper()
img = realsense.capture_rgb_image() # Capture the image
print("Captured image")
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
cv2.imwrite("original.jpg", img)
# Get dict of positions
temp_dict = fen_to_dict(FEN)
tiles = pr.board_to_64_files(img, temp_dict, base_directory=TMP_DEST) # Break image up into 64 files
data_frame = pd.DataFrame(tiles)
data_frame = data_frame.transpose()
frames = [df, data_frame]
df = pd.concat(frames) # Concatenate dataframe
csv_file = df.to_csv(TMP_DEST + 'my_csv.csv', header=False, index=False)
# Close streams and end pipeline
realsense.stop_pipeline()
|
[
"cv2.imwrite",
"cv2.warpAffine",
"PIL.Image.open",
"PIL.ExifTags.TAGS.get",
"numpy.array",
"preprocessing.board_to_64_files",
"pandas.DataFrame",
"cv2.getRotationMatrix2D",
"pandas.concat",
"json.dump",
"os.remove"
] |
[((1065, 1114), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (1088, 1114), False, 'import cv2\n'), ((1126, 1200), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (1140, 1200), False, 'import cv2\n'), ((6265, 6279), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6277, 6279), True, 'import pandas as pd\n'), ((2290, 2307), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (2300, 2307), False, 'from PIL import Image\n'), ((3113, 3134), 'json.dump', 'json.dump', (['output', 'wr'], {}), '(output, wr)\n', (3122, 3134), False, 'import json\n'), ((6657, 6689), 'cv2.imwrite', 'cv2.imwrite', (['"""original.jpg"""', 'img'], {}), "('original.jpg', img)\n", (6668, 6689), False, 'import cv2\n'), ((6784, 6845), 'preprocessing.board_to_64_files', 'pr.board_to_64_files', (['img', 'temp_dict'], {'base_directory': 'TMP_DEST'}), '(img, temp_dict, base_directory=TMP_DEST)\n', (6804, 6845), True, 'import preprocessing as pr\n'), ((6907, 6926), 'pandas.DataFrame', 'pd.DataFrame', (['tiles'], {}), '(tiles)\n', (6919, 6926), True, 'import pandas as pd\n'), ((7027, 7044), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (7036, 7044), True, 'import pandas as pd\n'), ((1019, 1047), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (1027, 1047), True, 'import numpy as np\n'), ((2547, 2571), 'PIL.ExifTags.TAGS.get', 'TAGS.get', (['tag_id', 'tag_id'], {}), '(tag_id, tag_id)\n', (2555, 2571), False, 'from PIL.ExifTags import TAGS\n'), ((3421, 3451), 'os.remove', 'os.remove', (['(TMP_DEST + filename)'], {}), '(TMP_DEST + filename)\n', (3430, 3451), False, 'import os\n')]
|
import numpy as np
import sys
sys.path.append('/homes/rlreed/workspace/unotran/src')
from coarseBounds import computeBounds, Grouping
import pickle
from makeDLPbasis import makeBasis as makeDLP
from makeKLTbasis import makeBasis as makeKLT
import sph
import sph_dgm
import pydgm
def buildGEO(ass_map):
fine_map = [1]
coarse_map = [1.26]
material_map = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]
npins = len(ass_map)
cm = [0.0]
fm = []
mm = []
for i, ass in enumerate(ass_map):
mm += material_map[ass]
cm += coarse_map
fm += fine_map
cm = np.cumsum(cm)
return npins, fm, cm, mm
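# buildGEO above assembles a 1-D geometry from the list of pin indices: fm gives the
# fine-cell count per coarse region, cm the cumulative coarse-mesh edges (np.cumsum of
# the 1.26 pitch per pin), and mm the material index assigned to each region.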
def makeDGMXS(G, refXS, dgmstructure, basisType):
if 'klt' in basisType:
makeKLT(basisType, dgmstructure)
else:
makeDLP(dgmstructure)
dgmstructure.fname = '{}_{}'.format(basisType, dgmstructure.fname)
fname = '_homo.'.join(xs_name.split('.'))
refXS.write_homogenized_XS(fname)
nPin, fm, cm, mm = buildGEO(pin_map)
dgm = sph_dgm.DGMSOLVER(G, fname, fm, cm, mm, nPin, dgmstructure, solveFlag=False)
pydgm.dgmsolver.initialize_dgmsolver()
dgm.extractInfo()
pydgm.dgmsolver.finalize_dgmsolver()
pydgm.control.finalize_control()
nCellPerPin = dgm.phi.shape[2] // dgm.npin
return sph_dgm.XS(G, nCellPerPin, dgm.sig_t, dgm.vsig_f, dgm.chi, dgm.sig_s)
if __name__ == '__main__':
np.set_printoptions(precision=6)
G = 44
dgmstructure = computeBounds(G, 'full', 1, 0.0, 1.3, 60)
fname = dgmstructure.fname
xs_name = 'XS/{}gXS.anlxs'.format(G)
pin_map = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
data_path = 'data2'
# Get the homogenized cross sections
refXS = pickle.load(open('{}/refXS_sph_space_{}.p'.format(data_path, G), 'rb'))
for basis in ['dlp', 'klt_full', 'klt_combine', 'klt_pins_full']:
dgmstructure.fname = fname
XS = makeDGMXS(G, refXS, dgmstructure, basis)
pickle.dump(XS, open('{}/refXS_dgm_{}_{}_h{}.p'.format(data_path, dgmstructure.fname, 'fine_mu', 0), 'wb'))
|
[
"sph_dgm.DGMSOLVER",
"coarseBounds.computeBounds",
"makeDLPbasis.makeBasis",
"pydgm.dgmsolver.initialize_dgmsolver",
"pydgm.control.finalize_control",
"numpy.cumsum",
"sph_dgm.XS",
"sys.path.append",
"makeKLTbasis.makeBasis",
"pydgm.dgmsolver.finalize_dgmsolver",
"numpy.set_printoptions"
] |
[((30, 84), 'sys.path.append', 'sys.path.append', (['"""/homes/rlreed/workspace/unotran/src"""'], {}), "('/homes/rlreed/workspace/unotran/src')\n", (45, 84), False, 'import sys\n'), ((612, 625), 'numpy.cumsum', 'np.cumsum', (['cm'], {}), '(cm)\n', (621, 625), True, 'import numpy as np\n'), ((1025, 1101), 'sph_dgm.DGMSOLVER', 'sph_dgm.DGMSOLVER', (['G', 'fname', 'fm', 'cm', 'mm', 'nPin', 'dgmstructure'], {'solveFlag': '(False)'}), '(G, fname, fm, cm, mm, nPin, dgmstructure, solveFlag=False)\n', (1042, 1101), False, 'import sph_dgm\n'), ((1106, 1144), 'pydgm.dgmsolver.initialize_dgmsolver', 'pydgm.dgmsolver.initialize_dgmsolver', ([], {}), '()\n', (1142, 1144), False, 'import pydgm\n'), ((1172, 1208), 'pydgm.dgmsolver.finalize_dgmsolver', 'pydgm.dgmsolver.finalize_dgmsolver', ([], {}), '()\n', (1206, 1208), False, 'import pydgm\n'), ((1213, 1245), 'pydgm.control.finalize_control', 'pydgm.control.finalize_control', ([], {}), '()\n', (1243, 1245), False, 'import pydgm\n'), ((1306, 1375), 'sph_dgm.XS', 'sph_dgm.XS', (['G', 'nCellPerPin', 'dgm.sig_t', 'dgm.vsig_f', 'dgm.chi', 'dgm.sig_s'], {}), '(G, nCellPerPin, dgm.sig_t, dgm.vsig_f, dgm.chi, dgm.sig_s)\n', (1316, 1375), False, 'import sph_dgm\n'), ((1408, 1440), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(6)'}), '(precision=6)\n', (1427, 1440), True, 'import numpy as np\n'), ((1473, 1514), 'coarseBounds.computeBounds', 'computeBounds', (['G', '"""full"""', '(1)', '(0.0)', '(1.3)', '(60)'], {}), "(G, 'full', 1, 0.0, 1.3, 60)\n", (1486, 1514), False, 'from coarseBounds import computeBounds, Grouping\n'), ((742, 774), 'makeKLTbasis.makeBasis', 'makeKLT', (['basisType', 'dgmstructure'], {}), '(basisType, dgmstructure)\n', (749, 774), True, 'from makeKLTbasis import makeBasis as makeKLT\n'), ((793, 814), 'makeDLPbasis.makeBasis', 'makeDLP', (['dgmstructure'], {}), '(dgmstructure)\n', (800, 814), True, 'from makeDLPbasis import makeBasis as makeDLP\n')]
|
import numpy as np
from PIL import Image
from keras.models import load_model
img_gray = Image.open('1002.png')
number = np.array(img_gray)
print(number.shape)
print('shape of the prepared image:', number.flatten().shape)
print('original number array:', number)
number = number.astype('float32')
number = number/255 # normalize to [0, 1]
number = number.flatten()
print('number.shape after processing:', number.shape)
model = load_model('mnist-dnn.h5')
# model.load_weights('mnist.model.best.hdf5')
# def recognize(photo_data):
# return clf.predict(photo_data)
print(model.predict_classes(np.array([number])))
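# Note: Sequential.predict_classes() is only available in older Keras releases; on newer
# versions the equivalent is np.argmax(model.predict(np.array([number])), axis=-1).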
#print('test label is:', test_target[8000])
|
[
"numpy.array",
"PIL.Image.open",
"keras.models.load_model"
] |
[((89, 111), 'PIL.Image.open', 'Image.open', (['"""1002.png"""'], {}), "('1002.png')\n", (99, 111), False, 'from PIL import Image\n'), ((121, 139), 'numpy.array', 'np.array', (['img_gray'], {}), '(img_gray)\n', (129, 139), True, 'import numpy as np\n'), ((367, 393), 'keras.models.load_model', 'load_model', (['"""mnist-dnn.h5"""'], {}), "('mnist-dnn.h5')\n", (377, 393), False, 'from keras.models import load_model\n'), ((535, 553), 'numpy.array', 'np.array', (['[number]'], {}), '([number])\n', (543, 553), True, 'import numpy as np\n')]
|
from dataclasses import dataclass, field
from typing import Mapping, List, Any
from datetime import datetime
import logging
import pandas as pd
import glob
import numpy as np
import logging
import os
from collections import OrderedDict
import nrrd
import vtk
import vedo
from vtk.util.numpy_support import numpy_to_vtk
from iblviewer.collection import Collection
import iblviewer.objects as obj
import iblviewer.utils as utils
@dataclass
class VolumeModel:
RAW = 'raw'
SEGMENTED = 'segmented'
NORMALIZED_SUFFIX = '_norm'
DATA_TYPE = {RAW:0, SEGMENTED:1}
PREFIX = 'Volume'
__count = 0
def unique_name():
VolumeModel.__count += 1
return f'{VolumeModel.PREFIX}_{VolumeModel.__count}'
name: str = field(default_factory=unique_name)
file_path: str = None
scalars: Collection = field(default_factory=Collection)
axes: List = field(default_factory=lambda: [1, 1, 1])
data_min: float = None
data_max: float = None
data_map_step: float = 1.0
data: np.ndarray = None
data_type: str = RAW
resolution: int = 1
# Default units are microns.
units: float = 1e-06
base_color_map: Any = None
# At IBL, volume mappings are used from ibllib: ibllib.atlas.regions.mappings
mapping_name: str = None
lateralized: bool = False
# Mapping function. If None, the volume will be given as it is.
mapping: Any = None
luts: Collection = field(default_factory=Collection)
slicers: Collection = field(default_factory=Collection)
isosurfaces: Collection = field(default_factory=Collection)
interactive_subsampling: bool = True
volume_visible: bool = True
slices_visible: bool = True
transpose_shape: Any = None
dimensions: np.ndarray = np.zeros(3).astype(float)
center: np.ndarray = np.zeros(3).astype(float)
def compute_size(self):
"""
Compute volume size
"""
if self.data is None:
return
self.dimensions = np.array(self.data.shape)[:3]
if self.resolution is None:
return
self.resolution = int(self.resolution) # TODO: move this to constructor or init
self.dimensions *= self.resolution
self.center = np.ones(3) * self.resolution / 2 + self.dimensions / 2
def compute_range(self, force=False):
"""
Compute min and max range in the volume
:return: Min and max values
"""
if self.data_min is not None and self.data_max is not None and not force:
return self.data_min, self.data_max
self.data_min = np.min(self.data)
self.data_max = np.max(self.data)
#print('Volume min-max', self.data_min, self.data_max)
return self.data_min, self.data_max
def guess_volume_type(self):
"""
Infer the volume type when it was not specified by the user.
We assume here that typical values between -1 and 1 are raw volumes.
"""
if self.data_type is None:
if self.data_min is None or self.data_max is None:
self.compute_range()
if self.data_min >= -1 and self.data_max <= 1:
guess = VolumeModel.RAW
else:
guess = VolumeModel.SEGMENTED
self.data_type = guess
def is_segmented(self, auto_guess=True):
"""
Get whether current volume/image is segmented
:return: Boolean
"""
if self.data_type is None and auto_guess:
self.guess_volume_type()
return self.data_type == VolumeModel.SEGMENTED
def read_volume(self, file_path):
"""
Read local volume. Downloads the file first if it's remote.
:param file_path: Volume path
:return: 3D array
"""
if file_path.startswith('http') or file_path.startswith('ftp'):
downloaded_temp_file_path = vedo.download(file_path, verbose=False)
if file_path.endswith('nrrd'):
data, header = nrrd.read(downloaded_temp_file_path)
else:
data = vedo.loadImageData(downloaded_temp_file_path)
else:
if file_path.endswith('nrrd'):
data, header = nrrd.read(file_path, index_order='C')
else:
data = vedo.loadImageData(file_path)
return data
def load_volume(self, file_path, remap_scalars=False, mapping=None, make_current=True):
"""
Load a volume data file. Supports NRRD and many other formats thanks to vedo/VTK
:param file_path: Volume file path. Could support other file types easily.
:param remap_scalars: Whether scalar values in the volume are replaced by
their row id from the given mapping. This is necessary in the case of segmented
volumes with regions that have a discontinuous id.
:param mapping: Pandas Series or a Dictionary
:param make_current: Set the volume data as the current one
:return: 3D array
"""
data = None
if not remap_scalars or mapping is None:
data = self.import_volume(file_path)
else:
time = datetime.now()
new_file_path = utils.change_file_name(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX)
if os.path.exists(new_file_path):
data = self.import_volume(new_file_path)
else:
data = self.import_volume(file_path)
data, mapping = self.remap_slow(data, mapping, new_file_path)
logging.info('Remapped scalar values in: ' + str(utils.time_diff(time)) + 's')
'''
if volume is not None:
logging.info('Opened atlas ' + new_file_path + ' in ' + str(utils.time_diff(time)) + 's')
min_value, max_value = np.amin(data), np.amax(data)
logging.info('Min max scalar values in volume ' + str(min_value) + ' -> ' + str(max_value))
else:
logging.error('Failed to open atlas ' + new_file_path)
'''
if make_current and data is not None:
self.data = data
return data, mapping
def transpose(self, shape=None):
"""
Transpose the volume for visualization in VTK
:param shape: The new shape. If None, will default to self.transpose_shape
"""
if shape is None:
shape = self.transpose_shape
if shape is None:
return
self.data = np.transpose(self.data, shape)
def remap_slow(self, data, mapping=None, write_path=None):
"""
Reassign volume values (slow on large volumes!) so that they're continuous
:param data: Volume ndarray
:param write_path: Where the modified volume will be stored
(to spare going through this method next time)
:param mapping: Pandas Series or a Dictionary that maps raw volume scalars to new ones
:return: Modified volume data
"""
logging.info('\nBuilding appropriate volume from Allen data source...')
#volume = np.vectorize(self.f)(data)
labels = np.sort(np.unique(data))
num_labels = len(labels)
if mapping is None:
mapping = pd.Series(labels)
logging.info('Num regions labeled in volume ' + str(num_labels) + ' from ' + str(mapping.size) + ' in atlas')
logging.info('Reassigning ' + str(num_labels) + ' scalar values...')
for iter_id in range(num_labels):
label = labels[iter_id]
ids = mapping.index[mapping == label].to_list()
if len(ids) < 1:
continue
# On a large volume, this takes a long time
data[data == label] = ids[0]
if num_labels > 10000 and iter_id % 10 == 0:
logging.info(' Progress: ' + str(int(iter_id/num_labels)*100) + '%')
if write_path is not None:
logging.info('Saving volume data under ' + write_path)
nrrd.write(write_path, data, index_order='C')
return data, mapping
def build_lut(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None, make_active=True):
"""
Build a look-up table (LUT, sometimes known as transfer function) for the volume
:param scalar_map: A 2D list with values in first column from the volume itself and values from
the second column being your scalar values that correspond to such region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
:param noise_amount: Whether a noise value is applied on the colors
:param nan_rgba: Color and transparency (RGBA) to assign to invalid (out of range or None) scalar values
:param make_active: Whether this one is made active (you still have to update the views after that)
:return: LUTModel
"""
lut_model = LUTModel()
lut_model.build(scalar_map, scalar_range, color_map, alpha_map,
zero_is_transparent, noise_amount, nan_rgba)
self.luts.store(lut_model, set_current=make_active)
return lut_model
def blend_maps(map1, map2, time, total_time):
"""
Blend color maps
"""
weight1 = max(0.0, total_time - time)
weight2 = max(0.0, time)
return map1 * weight1 + map2 * weight2
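# Illustrative, hedged usage sketch (names below are only examples): linear blending of
# two same-shaped color maps over an animation. weight1 = total_time - time is the
# remaining time and weight2 = time is the elapsed time; the weights are not normalized,
# so `time` is expected to stay within `total_time`.
def _example_blend_maps():
    map_a = np.zeros((256, 3))   # e.g. a fully black RGB map
    map_b = np.ones((256, 3))    # e.g. a fully white RGB map
    # Resolve the function whether it is defined on VolumeModel or at module level
    blend = getattr(VolumeModel, 'blend_maps', None) or blend_maps
    # Halfway through a 1.0-long animation, both maps contribute 0.5 each
    return blend(map_a, map_b, 0.5, 1.0)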
class Volume(vedo.Volume):
"""
Overwriting of vedo.Volume constructor that is ill-designed as
it transposes the given numpy array without us knowing about it,
not giving us the option to choose about that.
"""
def __init__(self,
inputobj=None,
c='RdBu_r',
alpha=(0.0, 0.0, 0.2, 0.4, 0.8, 1.0),
alphaGradient=None,
alphaUnit=1,
mode=0,
shade=False,
spacing=None,
dims=None,
origin=None,
mapper='smart'):
vtk.vtkVolume.__init__(self)
vedo.BaseGrid.__init__(self)
self.axes = [1, 1, 1]
###################
if isinstance(inputobj, str):
if "https://" in inputobj:
from vedo.io import download
inputobj = download(inputobj, verbose=False) # fpath
elif os.path.isfile(inputobj):
pass
else:
inputobj = sorted(glob.glob(inputobj))
###################
if 'gpu' in mapper:
self._mapper = vtk.vtkGPUVolumeRayCastMapper()
elif 'opengl_gpu' in mapper:
self._mapper = vtk.vtkOpenGLGPUVolumeRayCastMapper()
elif 'smart' in mapper:
self._mapper = vtk.vtkSmartVolumeMapper()
elif 'fixed' in mapper:
self._mapper = vtk.vtkFixedPointVolumeRayCastMapper()
elif isinstance(mapper, vtk.vtkMapper):
self._mapper = mapper
else:
print("Error unknown mapper type", [mapper])
raise RuntimeError()
self.SetMapper(self._mapper)
###################
inputtype = str(type(inputobj))
#colors.printc('Volume inputtype', inputtype)
if inputobj is None:
img = vtk.vtkImageData()
elif vedo.utils.isSequence(inputobj):
if isinstance(inputobj[0], str): # scan sequence of BMP files
ima = vtk.vtkImageAppend()
ima.SetAppendAxis(2)
pb = vedo.utils.ProgressBar(0, len(inputobj))
for i in pb.range():
f = inputobj[i]
picr = vtk.vtkBMPReader()
picr.SetFileName(f)
picr.Update()
mgf = vtk.vtkImageMagnitude()
mgf.SetInputData(picr.GetOutput())
mgf.Update()
ima.AddInputData(mgf.GetOutput())
pb.print('loading...')
ima.Update()
img = ima.GetOutput()
else:
if "ndarray" not in inputtype:
inputobj = np.array(inputobj)
if len(inputobj.shape)==1:
varr = vedo.numpy2vtk(inputobj, dtype=np.float)
else:
# ------------------------------ Nasty lines commented here
#if len(inputobj.shape)>2:
#inputobj = np.transpose(inputobj, axes=[2, 1, 0])
varr = vedo.numpy2vtk(inputobj.ravel(order='F'), dtype=np.float)
varr.SetName('input_scalars')
img = vtk.vtkImageData()
if dims is not None:
img.SetDimensions(dims)
else:
if len(inputobj.shape)==1:
vedo.colors.printc("Error: must set dimensions (dims keyword) in Volume.", c='r')
raise RuntimeError()
img.SetDimensions(inputobj.shape)
img.GetPointData().SetScalars(varr)
#to convert rgb to numpy
# img_scalar = data.GetPointData().GetScalars()
# dims = data.GetDimensions()
# n_comp = img_scalar.GetNumberOfComponents()
# temp = utils.vtk2numpy(img_scalar)
# numpy_data = temp.reshape(dims[1],dims[0],n_comp)
# numpy_data = numpy_data.transpose(0,1,2)
# numpy_data = np.flipud(numpy_data)
elif "ImageData" in inputtype:
img = inputobj
elif isinstance(inputobj, vedo.Volume):
img = inputobj.GetMapper().GetInput()
elif "UniformGrid" in inputtype:
img = inputobj
elif hasattr(inputobj, "GetOutput"): # passing vtk object, try extract imagdedata
if hasattr(inputobj, "Update"):
inputobj.Update()
img = inputobj.GetOutput()
elif isinstance(inputobj, str):
from vedo.io import loadImageData, download
if "https://" in inputobj:
inputobj = download(inputobj, verbose=False)
img = loadImageData(inputobj)
else:
vedo.colors.printc("Volume(): cannot understand input type:\n", inputtype, c='r')
return
if dims is not None:
img.SetDimensions(dims)
if origin is not None:
img.SetOrigin(origin) ### DIFFERENT from volume.origin()!
if spacing is not None:
img.SetSpacing(spacing)
self._data = img
self._mapper.SetInputData(img)
self.mode(mode).color(c).alpha(alpha).alphaGradient(alphaGradient)
self.GetProperty().SetShade(True)
self.GetProperty().SetInterpolationType(1)
self.GetProperty().SetScalarOpacityUnitDistance(alphaUnit)
# remember stuff:
self._mode = mode
self._color = c
self._alpha = alpha
self._alphaGrad = alphaGradient
self._alphaUnit = alphaUnit
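# Illustrative, hedged usage sketch: constructing the custom Volume above from a raw
# 3D numpy array. Unlike the stock vedo.Volume, the array is handed to VTK in Fortran
# ravel order without the implicit transpose, so the original axes are preserved.
def _example_build_volume():
    data = np.random.rand(64, 64, 64).astype(np.float32)  # stand-in for real imaging data
    return Volume(data, spacing=[1, 1, 1], mapper='smart')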
@dataclass
class LUTModel:
"""
This class might look slightly convoluted but it's actually simple.
We use double mapping here in order to enable live/interactive visualization
of volumetric data. Instead of replacing values in a 3D volume, we only replace
the colors in the 1D LUT list.
The point is that it's too slow to update the volume data itself, like a segmented
volume with custom values. Instead, we map such custom values to a 1D
array (our LUT) that maps colors to raw volume values.
This is much faster in terms of rendering and it enables interactive visualization.
The scalar_lut is the original LUT for the given scalars (custom values)
and the mapped_lut is the LUT assigned to the surfaces (like slices)
that have copied data from the volume. The volume is given color_map
and alpha_map through vedo methods.
You might say "ok for double mapping, it's the only way for interactive
rendering of a volume, but what about color_map and mapped_lut? Aren't
they the same?". The answer is: they're the same but VTK does not accept
a vtkLookupTable for a volume. Instead, it wants a vtkColorTransferFunction
and a vtkPiecewiseFunction for alpha. There's no way around it.
The color_map will be computed as a vtkColorTransferFunction and
the alpha_map as the vtkPiecewiseFunction.
"""
name: str = NotImplementedError
color_map_function: Any = None
scalar_map: np.ndarray = None
scalar_min: float = 0.0
scalar_max: float = 1.0
scalar_lut: vtk.vtkLookupTable = None
mapped_lut: vtk.vtkLookupTable = None
color_map: np.ndarray = None
alpha_map: np.ndarray = None
base_color_map: np.ndarray = None
def build(self, scalar_map=None, scalar_range=None, color_map=None,
alpha_map=None, zero_is_transparent=True,
noise_amount=0.0, nan_rgba=None):
"""
Build several look-up tables (LUT, sometimes known as transfer function) for the volume.
This is where double-mapping occurs for segmented volumes that have values from 0 to n where
each value defines a sub-volume or region. If we want to assign values (say from another model)
to these regions, we'd have to change the volume values and it would be too slow iterating over
each voxel in 3D. Instead we define colors that represent these values and assign them to
segmented regions in a 1D list.
:param scalar_map: A 2D list with values in first column from the volume itself and values from
the second column being your scalar values that correspond to such region
:param scalar_range: Min and max values in a list
:param color_map: Color map name to apply
:param alpha_map: Alpha map, either None or a list of values the same length as scalar_map, that
says how transparent a scalar value should be
:param zero_is_transparent: Whether zero values are made transparent, True by default
:param noise_amount: Whether a noise value is applied on the colors
:param nan_rgba: Color and alpha values to assign to invalid (out of range or None) scalar values
:return: LUTModel
"""
if color_map is None:
return
if nan_rgba is None:
nan_rgba = [0.0, 0.0, 0.0, 0.0]
if self.base_color_map is None:
self.base_color_map = color_map
colors = []
alphas = []
lut = vtk.vtkLookupTable()
scalar_lut = vtk.vtkLookupTable()
# Use the number of values in the volume
num_steps = len(self.base_color_map) if self.base_color_map is not None else len(color_map)
num_steps = 2655  # NB: hard-coded value that overrides the length computed just above
s_min = 0
s_max = num_steps
if scalar_map is None:
if color_map is None and self.base_color_map is not None:
color_map = self.base_color_map
loop = range(num_steps)
noise = None
if isinstance(noise_amount, float) and noise_amount > 0:
noise = np.random.rand(num_steps) * noise_amount - noise_amount / 2
# Vedo works with nested lists:
# [region_id, [r, g, b]] for color, and [region_id, a] for alpha
if scalar_map is None:
# Standard volume that is not segmented
lut.SetRange(s_min, s_max)
lut.SetNumberOfTableValues(num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(num_steps)
for r_id in loop:
color = vedo.colors.getColor(color_map[r_id])
color = np.array(color)
if noise is not None:
color = color + noise[r_id]
color = np.maximum(color, 0.0)
color = np.minimum(color, 1.0)
colors.append([r_id, color])
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if r_id == 0 and zero_is_transparent:
alpha = 0.0
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
scalar_lut.SetTableValue(r_id, *color, alpha)
#scalar_map[r_id] = color_map[r_id]
else:
# Segmented volume
s_min, s_max = scalar_range
lut.SetRange(0, num_steps)
lut.SetNumberOfTableValues(num_steps)
color = None
for r_id in range(num_steps):
try:
value = scalar_map[r_id]
except Exception:
value = None
if value is None:# or s_min > value or s_max < value:
color = nan_rgba[:3]
alpha = nan_rgba[3]
else:
color = vedo.colorMap(value, color_map, s_min, s_max)
alpha = 1.0 if alpha_map is None else alpha_map[r_id]
if value == 0 and zero_is_transparent:
alpha = 0.0
colors.append([r_id, color])
alphas.append([r_id, alpha])
lut.SetTableValue(r_id, *color, alpha)
# Real scalar LUT, mainly as a reference for the user
# Here the colors resulting from the given scalar min to max
# are assigned to segmented values in the volume
mock_values = np.linspace(s_min, s_max, num_steps)
scalar_lut.SetRange(s_min, s_max)
scalar_lut.SetNumberOfTableValues(len(mock_values))
for r_id in range(len(mock_values)):
color = list(vedo.colorMap(mock_values[r_id], color_map, s_min, s_max))
alpha = 0.0 if mock_values[r_id] == 0 and zero_is_transparent else 1.0
scalar_lut.SetTableValue(r_id, *color, 1.0)
lut.Build()
scalar_lut.Build()
# Just to avoid confusion: the user can give a string as a color map, like 'viridis'
# but the real color map object is stored in self.color_map. The name 'viridis'
# is stored under self.color_map_function (if needed later on)
self.color_map_function = color_map
self.color_map = colors
self.alpha_map = alphas
self.scalar_map = scalar_map
self.mapped_lut = lut
self.scalar_lut = scalar_lut
def get_sorted_scalars(self):
"""
Get a numpy 2D array of key-value pairs sorted by value
:return: 2D array
"""
sorted_scalars = np.zeros((len(self.scalar_map), 2))
values = list(self.scalar_map.values())
keys = list(self.scalar_map.keys())
sorted_scalars[:, 0] = keys
sorted_scalars[:, 1] = values
sorted_mask = sorted_scalars[:, 1].argsort()
sorted_scalars = sorted_scalars[sorted_mask]
return sorted_scalars
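# Illustrative, hedged usage sketch of the double mapping described in the LUTModel
# docstring: region ids (raw voxel values) are mapped to custom scalars and only the
# 1D look-up tables are rebuilt, so the 3D volume data never has to be rewritten.
def _example_build_lut():
    region_scalars = {10: 0.2, 42: 0.9, 101: 0.5}   # hypothetical region id -> scalar value
    lut_model = LUTModel()
    lut_model.build(scalar_map=region_scalars,
                    scalar_range=[0.0, 1.0],
                    color_map='viridis',
                    zero_is_transparent=True)
    # mapped_lut is applied to slices; scalar_lut documents the scalar-to-color mapping
    return lut_model.mapped_lut, lut_model.scalar_lut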
class VolumeController():
"""
Wrapper class that handles both the volume and its slices
"""
def __init__(self, plot, model, initialize=True, clipping=True, slicer_box=True,
center_on_edges=False, alpha_unit_upper_offset=0.0, add_to_scene=True):
"""
Constructor
:param plot: Plot instance
:param model: VolumeModel instance
:param initialize: Whether the initialization runs in this constructor
:param clipping: Whether clipping is enabled at init time
:param slicer_box: Whether the slicer box is enabled at init
:param center_on_edges: Whether the volume is offset by half a voxel or not
:param alpha_unit_upper_offset: The offset to apply to alpha unit computation.
If greater than 0, the volume will be less opaque
:param add_to_scene: Whether the volume is added to scene after init
"""
self.plot = plot
self.model = model
self.actor = None
self.picker = None
self.scalars = None
self.mask = None
self.bounding_mesh = None
self.alpha_unit_upper_offset = alpha_unit_upper_offset
self.alpha_factor = 0.001 # * self.model.resolution
self.clipping_planes = None
self.enable_volume_clipping = True
self.clipping_axes = []
self.slicers = OrderedDict()
self.slicers_selectable = False
self.scalar_bar = None
if initialize:
self.initialize(clipping, slicer_box, center_on_edges, add_to_scene)
#msg = 'Volume abs center', self.volume_center, 'position', np.array(self.volume_actor.pos())
#logging.info(msg)
def get_related_actors(self):
"""
Get all 3D actors related to this view (for registering it in the application)
:return: List of VTK objects
"""
actors = []
for slicer_id in self.slicers:
actor = self.slicers[slicer_id].actor
if actor is not None:
actors.append(actor)
for iso_id in self.model.isosurfaces:
actors.append(self.model.isosurfaces[iso_id])
actors.append(self.actor)
return actors
def initialize(self, clipping=True, slicer_box=True, center_on_edges=False, add_to_scene=True):
"""
Set the volume actor for visualization in VTK
:param clipping: Whether clipping is enabled
:param slicer_box: Whether the slicer box mode is enabled (6 clipping planes)
:param center_on_edges: Whether the volume's center is aligned to its edges
rather than the voxel center
:param add_to_scene: Whether the object is added to the scene
"""
self.build_actor(center_on_edges, add_to_scene)
self.initialize_picker()
if slicer_box:
self.initialize_slicer_box()
self.initialize_clipping_planes()
self.set_volume_clipping(clipping)
self.set_color_map()
'''
if use_mask:
self.mask = self.actor.clone()
self.mask.threshold(1, replace=1, replaceOut=0)
self.actor.mapper().SetMaskTypeToBinary()
self.actor.mapper().SetMaskInput(self.mask)
'''
def set_volume_visibility(self, on=True):
"""
Set volume visibility
:param on: Visibility boolean
"""
if self.actor is not None:
self.actor.SetVisibility(on)
def set_slices_visibility(self, on=True):
"""
Set the visibility of slices
:param on: Visibility boolean
"""
for slicer_id in self.slicers:
slicer_view = self.slicers.get(slicer_id)
slicer_view.actor.SetVisibility(on)
def get_slices_opacity(self):
"""
Get the opacity of slices (should be the same value for all slices)
The mean of all slice alpha values is computed, in case they differ
:return: Alpha value
"""
value = 0
num_values = 0
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slice_alpha = slicer.actor.GetProperty().GetOpacity()
if slice_alpha is None:
continue
value += slice_alpha
num_values += 1
if num_values == 0 or value == 0:
return None
return value / num_values
def set_slices_opacity(self, value):
"""
Set the opacity of slices
:param value: Alpha value
"""
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.actor is not None:
slicer.actor.alpha(value)
def get_opacity(self):
"""
Get the relative opacity unit
:return: Float
"""
return self.get_relative_opacity_unit()
def get_relative_opacity_unit(self):
"""
Get the alpha unit relative value
:return: Float
"""
alpha_unit = self.actor.alphaUnit()
r = self.model.resolution
# Inverse function of set_opacity_unit()
value = 1.1 - (alpha_unit / r)**0.5
return value
def set_opacity(self, value):
"""
Set the opacity of the volume like in set_opacity_unit()
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
self.set_opacity_unit(value)
def set_opacity_unit(self, value):
"""
Set the opacity of the volume by modifying its alpha unit (a VTK thing).
The alpha unit defines how much a voxel is transparent to an incoming ray.
This method normalizes the range between 0.0 and 1.0 as it depends
on the resolution of the volume
:param value: Opacity value between 0.0 and 1.0
:return: Resulting alpha unit
"""
r = self.model.resolution
# 1 is chosen and not 1.0 because when value == 1.0, that would
# mean that the volume is fully opaque and this yields artifacts with VTK
alpha_unit = (1 + self.alpha_unit_upper_offset - value)**2 * r
# vedo calls it "alpha" unit, vtk "opacity" unit. same-same!
self.actor.alphaUnit(alpha_unit)
return alpha_unit
def get_spacing(self):
"""
Get the spacing/resolution of the volume
"""
res = self.model.resolution
spacing = None
if isinstance(res, int) or isinstance(res, float):
spacing = np.array([res]*3)
elif len(res) == 3:
spacing = res
else:
raise ValueError(f'Given volume resolution {self.model.resolution} is invalid')
return spacing
def build_actor(self, center_on_edges=False, add_to_scene=True): #[1, 2]
"""
Set the volume actor for visualization in VTK
:param center_on_edges: Whether alignment by one voxel is applied
:param add_to_scene: Whether the object is added to the scene
"""
spacing = self.get_spacing()
self.actor = Volume(self.model.data, spacing=spacing, mapper='smart')
self.scalars = self.actor._data.GetPointData().GetScalars()
self.actor.name = self.model.name
self.actor.shade(False)
self.actor.mode(0)
self.actor.pickable(True)
self.set_interactive_subsampling(False)
if center_on_edges:
# Moving the volume by one voxel. This is possibly due to the use of custom spacing.
self.actor.pos(self.actor.pos() + spacing)
center = np.array(self.actor.pos()) + self.actor.center()
if np.linalg.norm(center - self.model.center) > 0:
#print('Adjusting volume center from', self.model.center, 'to', center)
self.model.center = center
self.set_opacity_unit(0.9)
self.actor.jittering(True)
#self.actor._mapper.AutoAdjustSampleDistancesOn()
#self.actor._mapper.SetBlendModeToAverageIntensity()
#self.actor._mapper.SetSampleDistance(100)
if add_to_scene:
self.plot.add(self.actor, render=False)
def set_position(self, position):
"""
Set the position of the volume
"""
self.actor.pos(position)
# TODO: we're entering in unstable things when we move the volume
# because there is not yet a guaranteed support for updating the slices
# with the correct position
self.reset_clipping_planes()
def mirror_volume(self, axes):
"""
Mirror the volume on given axes
:param mirror_axes: A list of axes (either 0, 1, 2 or 'x', 'y', 'z') on which
the volume will be mirrored. Optional
"""
if axes is None or self.actor is None:
return
axes_str = ['x', 'y', 'z']
for axis in axes:
if isinstance(axis, int) and 0 <= axis <= 2:
axis = axes_str[axis]
if isinstance(axis, str) and len(axis) == 1:
self.actor.mirror(axis=axis.lower())
def initialize_picker(self, opacity_iso_value=0.0001):
"""
Initialize the volume picker
:param opacity_iso_value: Threshold that defines at what accumulated
opacity the picker hits the volume. In the case of a segmented volume,
you want to keep this value very low, like the default one.
"""
# As per C++ doc https://vtk.org/Wiki/VTK/Examples/Cxx/VTKConcepts/Scalars
# https://stackoverflow.com/questions/35378796/vtk-value-at-x-y-z-point
picker = vtk.vtkVolumePicker()
picker.PickCroppingPlanesOn()
picker.UseVolumeGradientOpacityOff()
picker.SetTolerance(opacity_iso_value)
# A low OpacityIsoValue is necessary in the case of segmented volumes
picker.SetVolumeOpacityIsovalue(opacity_iso_value)
picker.AddPickList(self.actor)
picker.PickFromListOn()
self.picker = picker
def initialize_slicer_box(self):
"""
Initialize 6 slicing planes as a box.
"""
for axis_id in range(6):
slicer_model = SlicerModel(axis=axis_id)
slicer_model.align_to_axis(axis_id, self.model.dimensions)
self.model.slicers.store(slicer_model)
# It's important in this case to have standalone=False
self.slicers[axis_id] = SlicerView(self.plot, self, slicer_model, standalone=False)
def update_slicer(self, slicer_id, value=None, normal=None):
"""
Update a given slicer with the given value
:param slicer_id: SlicerView id
:param value: Value or 3D point
:param normal: Normal
"""
slicer_view = self.slicers.get(slicer_id)
if slicer_view is None:
return
# This is an important part where the slicing plane is itself sliced by other planes
slicer_model = slicer_view.model
slicer_model.clipping_planes = self.get_clipping_planes(slicer_model.axis)
# Use given value (or point) and normal to guide the below code
result = slicer_model.update(value, normal)
if not result:
return
# Update slicing image
slicer_view.update()
def initialize_clipping_planes(self):
"""
Initialize X, Y and Z clipping planes with two planes per axis
for positive and negative slicing
"""
self.clipping_planes = vtk.vtkPlaneCollection()
slicer_models = self.model.slicers
for slicer_id in slicer_models:
self.clipping_planes.AddItem(vtk.vtkPlane())
self.reset_clipping_planes()
return
def get_clipping_planes(self, except_axis=None):
"""
Get the current clipping planes except the ones on the given axis
:param except_axis: Axis id to ignore. If None, all clipping planes will be returned
:return: vtkPlaneCollection
"""
if not isinstance(except_axis, int):
return self.clipping_planes
exceptions = [except_axis * 2, except_axis * 2 + 1]
planes = vtk.vtkPlaneCollection()
for plane_id in range(self.clipping_planes.GetNumberOfItems()):
if plane_id in exceptions:
continue
plane = self.clipping_planes.GetItem(plane_id)
planes.AddItem(plane)
return planes
def reset_clipping_planes(self):
"""
Reset clipping planes
"""
slicer_models = self.model.slicers
for slicer_id in slicer_models:
slicer_model = slicer_models[slicer_id]
plane_id = slicer_model.get_box_plane_id()
plane = self.clipping_planes.GetItem(plane_id)
plane.SetOrigin(slicer_model.origin + self.actor.pos())
plane.SetNormal(slicer_model.normal)
def clip_on_axis(self, position=None, axis=None, normal=None):
"""
Apply clipping on a single axis
:param position: Position
:param axis: Clipping axis, defaults to 0 (X axis)
:param thickness: Whether a thickness (so two clipping planes) are applied
"""
axis_offset = 0
# This should already be sorted in the model but in case it isn't, we double check here
if normal is not None and normal[axis] < 0:
# This means that the given axis has two
# clipping planes and we take the negative one
axis_offset += 1
#position = self.model.dimensions - position
axis_storage_id = axis * 2 + axis_offset
plane = self.clipping_planes.GetItem(axis_storage_id)
plane.SetOrigin(position)
plane.SetNormal(normal)
def set_volume_clipping(self, on=None):
"""
Set volume clipping on or off.
:param on: Whether clipping is enabled or disabled. If None, then
the state is toggled.
"""
if on is None:
self.enable_volume_clipping = not self.enable_volume_clipping
else:
self.enable_volume_clipping = on
if self.enable_volume_clipping:
self.actor.mapper().SetClippingPlanes(self.clipping_planes)
else:
self.actor.mapper().SetClippingPlanes(None)
def clip_to_bounds(self, bounds):
"""
Clip the volume and move the slicing planes according to 6 boundary points
:param bounds: Six values in a list (xmin, xmax, ymin, ymax, zmin, zmax)
"""
planes = vtk.vtkPlanes()
planes.SetBounds(bounds)
# Normals are reversed with the above code
# so we fix that here with flip_normals=True
self.set_clipping_planes(planes, flip_normals=True)
def box_widget_update(self, widget=None, event=None):
"""
Clip the volume with the current box widget
:param widget: vtkBoxCutter
:param event: vtkEvent
"""
if widget is None:
return
planes = vtk.vtkPlanes()
widget.GetPlanes(planes)
self.set_clipping_planes(planes)
def set_clipping_planes(self, planes, flip_normals=False):
"""
Clip the volume and move the slicing planes according the given planes
:param planes: vtkPlanes
"""
vtk_n = planes.GetNormals()
vtk_pts = planes.GetPoints()
num_pts = vtk_pts.GetNumberOfPoints()
for plane_id in range(num_pts):
normal = vtk_n.GetTuple(plane_id)
origin = vtk_pts.GetPoint(plane_id)
plane = self.clipping_planes.GetItem(plane_id)
current_origin = np.array(plane.GetOrigin())
# We don't need to check the normal because
# we prevent box cutter rotation in our case
if np.linalg.norm(current_origin - origin) < 0.1:
continue
plane.SetOrigin(origin)
if flip_normals:
normal = np.array(normal)*-1
plane.SetNormal(normal)
self.update_slicer(plane_id, origin, normal)
self.clipping_planes.Modified()
self.actor.GetMapper().Update()
def set_alpha_map(self, alpha_map, alpha_factor=None):
"""
Set alpha map to the volume view
:param alpha_map: 2D list of scalar values and alpha values
:param alpha_factor: Alpha factor
"""
if alpha_map is None:
if self.model.luts.current is None:
return
alpha_map = self.model.luts.current.alpha_map
if alpha_factor is None:
alpha_factor = self.alpha_factor
if len(np.array(alpha_map).shape) > 1:
volume_alpha_map = np.ones_like(alpha_map).astype(float)
volume_alpha_map[:] = alpha_map[:]
volume_alpha_map[:, 1] *= alpha_factor
self.actor.alpha(volume_alpha_map)
else:
self.actor.alpha(np.array(alpha_map) * alpha_factor)
def set_color_map(self, color_map=None, alpha_map=None):
"""
Set the color and alpha map to the view objects
:param color_map: Nested list of scalar values and rgb colors
like [[0, [0.0, 0.0, 0.0]], [8, [0.5, 0.8, 0.3]], ...]
:param alpha_map: 2D list of scalar values and alpha values
"""
lut = self.model.luts.current
if color_map is None and lut is not None:
color_map = lut.color_map
if alpha_map is None and lut is not None:
alpha_map = lut.alpha_map
if color_map is None:
return
self.actor.cmap(color_map)
self.set_alpha_map(alpha_map)
if lut is not None:
for surface in self.model.isosurfaces:
surface._mapper.SetLookupTable(lut.opaque_lut)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.apply_lut(lut.mapped_lut)
else:
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
slicer.set_color_map(color_map, alpha_map)
def disable_shading(self):
"""
Disable volume shading
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.ShadeOff()
self.actor.SetProperty(volumeProperty)
def enable_shading(self, ambient=0.6, diffuse=0.8, specular=0.9):
"""
Enable volume shading
TODO: See if this method is useful
"""
volumeProperty = self.actor.GetProperty()
volumeProperty.SetInterpolationTypeToLinear()
volumeProperty.ShadeOn()
volumeProperty.SetAmbient(ambient)
volumeProperty.SetDiffuse(diffuse)
volumeProperty.SetSpecular(specular)
volumeProperty.SetScalarOpacityUnitDistance(1)
self.actor.SetProperty(volumeProperty)
def toggle_slices_visibility(self):
"""
Toggle slices visibility
"""
self.model.slices_visible = not self.model.slices_visible
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
self.update_slicer(slicer)
if slicer.actor is not None:
slicer.actor.SetVisibility(self.model.slices_visible)
def toggle_hollow(self):
"""
Toggle hollow mode for volume rendering. This is intended
to work only on segmented (annotated) volumes.
"""
volume_property = self.actor.GetProperty()
# Shout at VTK devs: it's twisted to name properties Disable and then have DisableOff...
disabled = bool(volume_property.GetDisableGradientOpacity())
if disabled:
volume_property.DisableGradientOpacityOff()
alpha_gradient = vtk.vtkPiecewiseFunction()
alpha_gradient.AddPoint(0, 0.0)
alpha_gradient.AddPoint(1, 0.75)
alpha_gradient.AddPoint(2, 1.0)
volume_property.SetGradientOpacity(alpha_gradient)
else:
volume_property.DisableGradientOpacityOn()
return not disabled
def get_value_from_xyz(self, position, normal_step=None, avoid_values=0, cast_to_int=True, none_as_zero=False):
"""
Get a scalar value from the volume with respect to XYZ coordinates and optionally a normal step,
that is the normal on which to probe multiplied by the distance you want to travel further into
the volume to pick a correct value. Often the "surface point" on a volume with non-uniform transparency
is at the boundary between transparent (let's say a 0 value is transparent) and more opaque parts.
So you need to go further into the "cloud" so to speak, in order to find the values you want.
:param position: 3D array
:param normal_step: A vector normal multiplied by the lookup distance, in case the raw position yields
bad or unwanted results
:param avoid_values: Try and find other values than this
:param cast_to_int: Whether the value should be cast to integer
:return: Scalar value
"""
if isinstance(avoid_values, int) or isinstance(avoid_values, float):
avoid_values = [avoid_values]
# TODO: see if this is faster? To be tested
# ijk_result = [0.0, 0.0, 0.0]
# volume_actor._data.TransformPhysicalPointToContinuousIndex(xyz, ijk_result)
# volume_actor._data.GetPoint(ijk_result)
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if not valid_id or (value in avoid_values):
if normal_step is not None:
position += normal_step
pt_id = self.actor._data.FindPoint(*position)
valid_id = 0 < pt_id < self.scalars.GetNumberOfValues()
value = self.scalars.GetValue(pt_id) if valid_id else None
if cast_to_int and value is not None:
value = int(value)
if value is None and none_as_zero:
value = 0
return value
def raycast(self, origin, screen_position):
"""
Shorthand for pick() method
"""
return self.pick(origin, screen_position)
def pick(self, origin, screen_position):
"""
Find the nearest intersection, even on a sliced volume, with the ray formed
by an origin and a screen-space position (given by VTK when you click on an actor)
:param origin: Origin of the vector
:param screen_position: 2D position on screen. This is given by vtk events like MouseRelease
:return: The nearest position and its related value queried in the volume image
"""
self.picker.Pick(*screen_position[:2], 0, self.plot.renderer)
position = np.array(self.picker.GetPickPosition())
ray = position - origin
distance = np.linalg.norm(ray)
normal = ray / distance
# Go half a voxel further to make sure we don't hit "void"
vol_position = position # + normal * self.model.resolution / 2
probe_position = position + normal * self.model.resolution * 10
closest_dist = distance
slice_position = None
# See if the line hits any of the slicers (that are image planes)
for slicer_id in self.slicers:
slicer = self.slicers[slicer_id]
if slicer.got_slice:
hits = slicer.actor.intersectWithLine(origin, probe_position)
if len(hits) != 1:
continue
new_dist = np.linalg.norm(position - hits[0])
if new_dist < closest_dist and new_dist < self.model.resolution * 2:
closest_dist = new_dist
slice_position = hits[0]
if slice_position is None:
position = vol_position
else:
position = slice_position
value = self.get_value_from_xyz(position, normal * self.model.resolution * 4)
return position, value
def add_probe(self, origin, destination, resolution=40, radius=10, color_map=None,
screen_space=True, min_v=None, max_v=None, add_to_scene=True):
"""
Add a series of points along a line probe
:param origin: Probe origin
:param destination: Probe destination point
:param resolution: Number of (equidistant) points that will be probed along that line
:param radius: Radius of the points
:param color_map: Scalars color map
:param screen_space: Whether the points are screen space or spheres
:param min_v: Min scalar value
:param max_v: Max scalar value
:param add_to_scene: Whether the new probe is added to scene
:return: Points
"""
if color_map is None:
color_map = self.model.luts.current.color_map
positions, values = self.probe(origin, destination, resolution)
points_obj = obj.Points(positions, values=values, radius=radius, screen_space=screen_space,
color_map=color_map, min_v=min_v, max_v=max_v)
points_obj.origin = origin
points_obj.destination = destination
# Dynamic properties assignment
points_obj.target = self.actor
points_obj.target_controller = self
if add_to_scene:
self.plot.add(points_obj)
return points_obj
def update_probe(self, origin, destination, points_obj):
"""
Update a probe with given start and end points
:param origin: Start point
:param destination: End point
:param points_obj: Points object
"""
resolution = points_obj._polydata.GetPoints().GetNumberOfPoints()
positions, values = self.probe(origin, destination, resolution)
points_obj.update_data(positions, values)
def probe(self, origin, destination, resolution=40):
"""
Probe a volume with a line
:param origin: Origin of the line probe
:param destination: Destination of the line probe
:param resolution: Number of point samples along the probe
:return: Positions and values
"""
origin = np.array(origin)
destination = np.array(destination)
distance = np.linalg.norm(destination - origin)
ray = destination - origin
ray_norm = ray / distance
step = distance / resolution
positions = [origin + ray_norm * p_id * step for p_id in range(resolution)]
values = np.array([self.get_value_from_xyz(point, none_as_zero=True) for point in positions])
return positions, values
def set_interactive_subsampling(self, on=False):
"""
Set volume subsampling on or off.
This is enabled by default in VTK and we disable it by default in IBLViewer
:param on: Whether volume subsampling in interactive mode is on or off
"""
#self.plot.window.SetDesiredUpdateRate(0)
#self.actor._mapper.SetInteractiveUpdateRate(0)
self.model.interactive_subsampling = on
self.actor._mapper.SetAutoAdjustSampleDistances(on)
if on:
self.actor._mapper.InteractiveAdjustSampleDistancesOn()
else:
self.actor._mapper.InteractiveAdjustSampleDistancesOff()
def isosurface(self, label, exceptions=[0], force_rebuild=False, set_current=True, to_int=True, split_meshes=True):
"""
Creates a surface mesh (isosurface) of a segmented/labelled volume for the given value.
Unlike general isosurfacing, this method extracts only the surface mesh of the
desired region/label/segmentation, not of all values from 0 to label.
:param label: Label (scalar) value found in the volume
:param exceptions: If the label is found in the exceptions list, isosurfacing will not occur
:param force_rebuild: Whether rebuilding is forced in case we find an existing mesh for the given label
:param set_current: Whether the label is set as the current one in the model
:param to_int: Whether the label is cast to integer
:param split_meshes: Whether we split meshes when multiple ones are found
:return: A list of all manifold meshes for the given label
"""
if label is None or label in exceptions:
return
if to_int:
label = int(label)
existing_meshes = self.model.isosurfaces.get(label)
if existing_meshes is not None and not force_rebuild:
return existing_meshes
lut = self.model.luts.current
simple_lut = vtk.vtkLookupTable()
simple_lut.SetNumberOfColors(1)
simple_lut.SetTableRange(0, 1)
simple_lut.SetScaleToLinear()
simple_lut.SetTableValue(0, 0, 0, 0, 0)
simple_lut.SetTableValue(1, *lut.mapped_lut.GetTableValue(label))
simple_lut.Build()
# Generate object boundaries from labelled volume
discrete = vtk.vtkDiscreteMarchingCubes()
discrete.SetInputData(self.actor.imagedata())
discrete.GenerateValues(1, label, label)
smoothing_iterations = 15
pass_band = 0.001
feature_angle = 120.0
smoother = vtk.vtkWindowedSincPolyDataFilter()
smoother.SetInputConnection(discrete.GetOutputPort())
smoother.SetNumberOfIterations(smoothing_iterations)
smoother.BoundarySmoothingOff()
smoother.FeatureEdgeSmoothingOff()
smoother.SetFeatureAngle(feature_angle)
smoother.SetPassBand(pass_band)
smoother.NonManifoldSmoothingOn()
smoother.NormalizeCoordinatesOn()
smoother.Update()
self.model.isosurfaces[label] = []
#splitter = vtk.vtkExtractPolyDataGeometry()
if split_meshes:
splitter = vtk.vtkPolyDataConnectivityFilter()
splitter.SetInputConnection(smoother.GetOutputPort())
splitter.SetExtractionModeToAllRegions()
splitter.ColorRegionsOn()
splitter.Update()
for region_id in range(splitter.GetNumberOfExtractedRegions()):
#splitter.AddSpecifiedRegion(region_id)
#splitter.Update()
#poly = vtk.vtkPolyData()
#poly.ShallowCopy(splitter.GetOutput())
threshold = vtk.vtkThreshold()
threshold.SetInputConnection(splitter.GetOutputPort())
threshold.ThresholdBetween(region_id, region_id)
threshold.Update()
actor = vedo.Mesh(threshold.GetOutput())
#actor._mapper.SetScalarRange(min_value, lut.scalar_max)
#actor._mapper.SetUseLookupTableScalarRange(True)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
#actor.cmap(lut.scalar_lut, np.ones(poly.GetNumberOfVerts())*label)
else:
poly = smoother.GetOutput()
actor = vedo.Mesh(poly)
actor._mapper.SetLookupTable(simple_lut)
actor._mapper.ScalarVisibilityOn()
actor.name = 'Isosurface_' + str(label)
self.model.isosurfaces[label].append(actor)
'''
pdnorm = vtk.vtkPolyDataNormals()
pdnorm.SetInputData(smoother.GetOutput())
pdnorm.ComputePointNormalsOn()
pdnorm.ComputeCellNormalsOn()
pdnorm.FlipNormalsOff()
pdnorm.ConsistencyOn()
pdnorm.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(smoother.GetOutputPort())
mapper.SetLookupTable(lut.scalar_lut)
mapper.SetScalarRange(min_value, lut.scalar_max)
'''
if set_current:
self.model.isosurfaces.set_current(label)
return self.model.isosurfaces[label]
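# Illustrative, hedged usage sketch: wiring a VolumeModel into a VolumeController.
# The offscreen vedo Plotter stands in for the application's plot and the random
# array stands in for real imaging data; both are placeholders, not real inputs.
def _example_volume_controller():
    model = VolumeModel(resolution=25)
    model.data = np.random.rand(50, 50, 50).astype(np.float32)
    model.compute_size()
    plot = vedo.Plotter(offscreen=True)
    controller = VolumeController(plot, model, initialize=True, add_to_scene=True)
    controller.set_opacity(0.8)
    return controller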
@dataclass
class SlicerModel:
PREFIX = '[Slicer]_'
MIN_SLAB_THICKNESS = 1.0 #um
__count = 0
def unique_name():
SlicerModel.__count += 1
return f'{SlicerModel.PREFIX}_{SlicerModel.__count}'
name: str = field(default_factory=unique_name)
# 0, 1 or 2. See the normal for axis orientation
axis: int = None
value: float = 0.0
bounds: np.ndarray = None
#thickness: float = 0.0
origin: np.ndarray = np.array([0.0, 0.0, 0.0])
normal: np.ndarray = np.array([1.0, 0.0, 0.0])
clipping_planes: vtk.vtkPlaneCollection = None
def get_box_plane_id(self):
"""
Get the plane id
:return: Int
"""
if self.axis is None:
return
offset = 0 if self.normal[self.axis] < 0 else 1
return self.axis * 2 + offset
def get_axis_aligned_info(self, vtk_axis):
"""
VTK stores box clipping planes in the order:
-X to +X: 0, 1
-Y to +Y: 2, 3
-Z to +Z: 4, 5
This method retrieves what is the XYZ axis (0, 1 or 2)
and its orientation sign
:return: Int axis and float orientation
"""
orientation = -1.0 if vtk_axis % 2 == 0 else 1.0
axis = (vtk_axis - vtk_axis % 2) // 2
return axis, orientation
def align_to_axis(self, axis, dimensions=None):
"""
Set the axis of the slicer
:param axis: See parameter vtk_axis in SlicerModel.get_axis_aligned_info()
:param dimensions: Dimensions of the volume
"""
if not isinstance(axis, int):
return
normal = np.zeros(3).astype(float)
xyz_axis, orientation = self.get_axis_aligned_info(axis)
normal[xyz_axis] = orientation
self.axis = xyz_axis
if dimensions is not None and orientation < 0:
self.origin = np.zeros(3)
self.origin[xyz_axis] = dimensions[xyz_axis]
self.normal = normal
def flip_normal(self):
"""
Flip the normal of the slicer
"""
self.normal *= -1.0
self.check_normal()
if isinstance(self.axis, int):
self.axis *= -1
def check_normal(self):
"""
Check if the normal is axis-aligned.
If not, the axis is set to None.
"""
zeros = self.normal == 0
if len(self.normal[zeros]) >= 2:
self.axis = 0
def update(self, value=None, normal=None, axis=None):
"""
Update slicer
:param value: Origin of the slicing plane
:param normal: Normal of the slicing plane
:param axis: Axis, if the plane is axis-aligned
:return: True if model changed, False if it didn't
"""
if not(isinstance(value, int) or isinstance(value, float)):
if normal is None:
normal = self.normal
if normal is None:
return False
if normal[1] == 0 and normal[2] == 0:
axis = 0 #if normal[0] > 0 else 1
elif normal[0] == 0 and normal[2] == 0:
axis = 1 #if normal[1] > 0 else 1
elif normal[0] == 0 and normal[1] == 0:
axis = 2 #if normal[2] > 0 else 1
if axis is not None:
value = value[axis]
if axis is None:
axis = self.axis
if self.value == value:
return False
if axis is not None:
self.value = value
self.origin = np.array(normal) * value
else:
self.value = None
self.origin = value
self.normal = normal
self.axis = axis
return True
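# Illustrative, hedged usage sketch: an axis-aligned slicer. VTK box planes are ordered
# (-X, +X, -Y, +Y, -Z, +Z), so vtk_axis 3 resolves to the +Y face; dimensions are made up.
def _example_slicer_model():
    slicer = SlicerModel()
    slicer.align_to_axis(3, dimensions=np.array([528, 320, 456]))
    assert slicer.axis == 1                  # Y axis
    assert slicer.get_box_plane_id() == 3    # +Y plane
    return slicer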
class SlicerView():
slices = {}
def __init__(self, plot, volume_view, slicer_model, standalone=True):
"""
Constructor
:param plot: Plot instance
:param volume_view: VolumeView instance
:param slicer_model: SlicerModel instance
:param standalone: Whether the slice is a standalone actor that
can be clicked. Set this to False if you want to use transparency,
at the expense that, because of a VTK bug, you won't be able to
click on it anymore, requiring you to code another way of detecting
where the user clicked. See more in initialize_mapper()
"""
self.plot = plot
self.volume_view = volume_view
self.model = slicer_model
self.actor = None
self.filter = None
self.reslice = None
self.slice_type = -1
self.depth_peeling_enabled = None
self.standalone = standalone
self.got_slice = False
self.color_map = None
self.alpha_map = None
self.initialize()
def initialize(self, render=False):
"""
Initialize the slicer object
"""
if self.filter is None:
self.filter = vtk.vtkImageDataGeometryFilter()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
# Adding empty actor so that it's updated later on
self.plot.add(self.actor, render=render)
self.actor.lighting('off')
self.actor.name = self.model.name
self.initialize_mapper()
def initialize_mapper(self):
"""
Initialize the object mapper
"""
mapper = self.actor._mapper
mapper.SetScalarModeToUsePointData() #SetScalarModeToUsePointFieldData
mapper.SetColorModeToMapScalars()
mapper.ScalarVisibilityOn()
# We operate on static volumes thanks to the double LUT mapping implemented here
mapper.SetStatic(True)
# Without using scalar range, the mapping will be off
mapper.SetUseLookupTableScalarRange(True)
# We prevent this actor from being pickable as a result of the bug described below
# when we want to use transparency on the slice.
self.actor.pickable(self.standalone)
if self.standalone:
# There is a bug in VTK 9 that prevents clicking on transparent objects
# as reported on vedo's tracker https://github.com/marcomusy/vedo/issues/291
# The "Force opaque fix" below should be gone with the next VTK update hopefully.
# In the meantime, we use this.
# TODO: remove this when this bug is fixed in VTK
self.actor.ForceOpaqueOn()
else:
# We bypass the transparent selection bug when a VolumeView has multiple slicers
# like in box mode because the click detection occurs on the volume and we perform
# an additional test to see if a slicer yields a nearby result. If it does,
# the result is like clicking on the slice and we get transparency for free.
pass
# Make sure we have depth peeling activated, otherwise transparency with volumes
# will look weird and in the wrong order
self.plot.renderer.UseDepthPeelingOn()
self.plot.renderer.UseDepthPeelingForVolumesOn()
segmented = self.volume_view.model.is_segmented()
if segmented:
# This very line below will mess up the entire slice coloring if:
# - you have a segmented volume and this is set to True
# - you have a non-segmented (like raw MRI, CT) volume and this is set to False
mapper.SetInterpolateScalarsBeforeMapping(not segmented)
mapper.Update()
def set_color_map(self, color_map, alpha_map=None):
"""
Set a color map to the slice
:param color_map: Color map, can be a string, a list of colors or more.
See vedo documentation.
"""
self.color_map = color_map
if alpha_map is not None:
self.alpha_map = alpha_map
if self.got_slice and color_map is not None:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
def set_slice_type(self, slice_type):
"""
Set the slice type. 0 for axial, 1 for free slicing
:param slice_type: Int value
"""
if slice_type == 0 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.volume_view.actor.imagedata())
elif slice_type == 1 and self.slice_type != slice_type:
self.slice_type = slice_type
self.filter.SetInputData(self.reslice.GetOutput())
def slice_on_normal(self, origin, normal):
"""
Slice a volume with a plane oriented by the given normal.
This allows slicing in all directions.
:param origin: Origin of the slicing plane
:param normal: Normal of the slicing plane
:return: Mesh object with the slice as an image texture
"""
'''
mapper = vtk.vtkImageResliceMapper()
mapper.SetInputData(self.volume_view.actor._data)
mapper.SliceFacesCameraOff()
mapper.SliceAtFocalPointOff()
mapper.JumpToNearestSliceOn()
mapper.SetImageSampleFactor(2)
mapper.BorderOn()
mapper.BackgroundOff()
mapper.UpdateInformation()
mapper.GetSlicePlane().SetOrigin(*origin)
mapper.GetSlicePlane().SetNormal(*normal)
mapper.GetSlicePlane().Modified()
mapper.Modified()
mapper.Update()
self.actor = vtk.vtkImageSlice()
self.actor.SetMapper(mapper)
prop = vtk.vtkImageProperty()
if True:
prop.SetInterpolationTypeToLinear()
else:
prop.SetInterpolationTypeToNearest()
self.actor.SetProperty(prop)
return
'''
if self.reslice is None:
reslice = vtk.vtkImageReslice()
reslice.SetInputData(self.volume_view.actor._data)
#reslice.SetInputData(image)
reslice.SetOutputDimensionality(2)
reslice.SetAutoCropOutput(False)
#reslice.SetInterpolationModeToLinear()
reslice.SetInterpolationModeToNearestNeighbor()
reslice.SetSlabNumberOfSlices(1)
reslice.SetOutputSpacing(self.volume_view.get_spacing())
reslice.ReleaseDataFlagOn()
self.reslice = reslice
self.set_slice_type(1)
M, T = utils.get_transformation_matrix(origin, normal)
self.reslice.SetResliceAxes(M)
self.reslice.Update()
self.filter.Update()
if self.actor is None:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
else:
self.actor._update(self.filter.GetOutput())
self.initialize_mapper()
self.actor.SetOrientation(T.GetOrientation())
self.actor.SetPosition(origin)
self.got_slice = True
return self.actor
def x_slice(self, i):
"""
Extract the slice at index `i` of volume along x-axis.
:param i: I index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if i <= 1 or i > nx - 1:
return False
self.filter.SetExtent(i, i, 0, ny, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def y_slice(self, j):
"""
Extract the slice at index `j` of volume along y-axis.
:param j: J index
"""
self.set_slice_type(0)
#nx, ny, nz = self.volume_view.model.dimensions / resolution
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if j <= 1 or j > ny - 1:
return False
self.filter.SetExtent(0, nx, j, j, 0, nz)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def z_slice(self, k):
"""
Extract the slice at index `k` of volume along z-axis.
:param k: K index
"""
self.set_slice_type(0)
nx, ny, nz = self.volume_view.actor.GetMapper().GetInput().GetDimensions()
if k <= 1 or k > nz - 1:
return False
self.filter.SetExtent(0, nx, 0, ny, k, k)
self.filter.Update()
if self.actor is not None:
self.actor._update(self.filter.GetOutput())
else:
self.actor = vedo.Mesh(self.filter.GetOutput())
self.initialize_mapper()
self.got_slice = True
return True
def slice_on_axis(self, value=None, normal=None, axis=None, use_reslice=False):
"""
Slice on standard X, Y or Z axis
:param value: Value on the given axis
:param normal: Axis normal, can be either +1.0 or -1.0 along that axis
:param axis: Axis integer, 0 for X, 1 for Y, 2 for Z
:param use_reslice: if True, this enables vtkImageReslice which is useful when
the normal is not aligned to either X, Y or Z. If you use it on an axis-aligned
normal, some color inaccuracies will appear if you don't tweak the vtkImageResliceMapper.
This is why the default is False.
:return: Result boolean, whether the slice occurred or not
"""
resolution = self.volume_view.model.resolution
volume_dimensions = self.volume_view.model.dimensions
'''
if normal[axis] < 0:
if value > 0:
# Make value consistent with given normal.
value *= normal[axis]
value = volume_dimensions[axis] + value
'''
in_volume_slice = int(value) // resolution
if use_reslice:
self.slice_on_normal(normal * value, normal)
return
if axis == 0:
result = self.x_slice(in_volume_slice)
elif axis == 1:
result = self.y_slice(in_volume_slice)
elif axis == 2:
result = self.z_slice(in_volume_slice)
return result
def update(self):
"""
Update slice object according to data in the model
"""
had_slice = self.got_slice
result = True
if isinstance(self.model.axis, int) and 0 <= self.model.axis <= 2:
result = self.slice_on_axis(self.model.value, self.model.normal, self.model.axis)
else:
self.slice_on_normal(self.model.origin, self.model.normal)
if not result:
self.plot.remove(self.actor)
self.got_slice = False
return
#self.actor.pos(*(self.volume_view.actor.pos()-self.actor.pos()))
lut = self.volume_view.model.luts.current
if lut is not None:
'''
This is VTK for you...a mesh can use a vtkLookupTable for RGBA mapping
BUT volumes require vtkColorTransferFunction (RGB) and vtkPiecewiseFunction (alpha)
So we have to put a color map, alpha map and a vtkLookupTable
built from both maps in a LUTModel.
Alternatively, we could update the LUT with alpha values but it's a pain.
ctf = self.volume_view.actor.GetProperty().GetRGBTransferFunction()
lut = vedo.utils.ctf2lut(self.volume_view.actor)
otf = self.volume_view.actor.GetProperty().GetScalarOpacity
# using "ctf" would work only for colors, not for transparency!
self.apply_lut(ctf)
'''
self.apply_lut(lut.mapped_lut)
else:
if self.alpha_map is None:
self.actor.cmap(self.color_map)
else:
self.actor.cmap(self.color_map, alpha=self.alpha_map)
if self.model.clipping_planes is not None:
self.actor.mapper().SetClippingPlanes(self.model.clipping_planes)
if not had_slice:
self.plot.add(self.actor, render=True)
def apply_lut(self, lut=None):
"""
Apply a LUT to the volume
:param lut: vtkLookupTable
:param actor: The actor to receive this
"""
if self.actor is None or lut is None:
return
mapper = self.actor._mapper
mapper.SetLookupTable(lut)
|
[
"vedo.colors.getColor",
"numpy.random.rand",
"iblviewer.utils.get_transformation_matrix",
"nrrd.read",
"vtk.vtkPlane",
"iblviewer.objects.Points",
"vtk.vtkImageAppend",
"numpy.array",
"vedo.io.loadImageData",
"vtk.vtkImageReslice",
"vedo.colorMap",
"numpy.linalg.norm",
"vedo.loadImageData",
"logging.info",
"os.path.exists",
"vedo.download",
"iblviewer.utils.change_file_name",
"vtk.vtkDiscreteMarchingCubes",
"numpy.max",
"numpy.linspace",
"numpy.min",
"iblviewer.utils.time_diff",
"numpy.maximum",
"vtk.vtkPiecewiseFunction",
"vtk.vtkPlanes",
"dataclasses.field",
"glob.glob",
"collections.OrderedDict",
"vtk.vtkLookupTable",
"vtk.vtkVolumePicker",
"numpy.ones",
"vedo.colors.printc",
"vtk.vtkWindowedSincPolyDataFilter",
"vtk.vtkImageDataGeometryFilter",
"vtk.vtkGPUVolumeRayCastMapper",
"os.path.isfile",
"vtk.vtkImageMagnitude",
"vtk.vtkPlaneCollection",
"vtk.vtkImageData",
"vedo.utils.isSequence",
"vtk.vtkBMPReader",
"numpy.transpose",
"pandas.Series",
"nrrd.write",
"vtk.vtkVolume.__init__",
"numpy.ones_like",
"vtk.vtkFixedPointVolumeRayCastMapper",
"numpy.unique",
"numpy.minimum",
"vtk.vtkPolyDataConnectivityFilter",
"vedo.io.download",
"vtk.vtkThreshold",
"datetime.datetime.now",
"numpy.zeros",
"vedo.numpy2vtk",
"vedo.BaseGrid.__init__",
"vtk.vtkSmartVolumeMapper",
"vtk.vtkOpenGLGPUVolumeRayCastMapper",
"vedo.Mesh"
] |
[((753, 787), 'dataclasses.field', 'field', ([], {'default_factory': 'unique_name'}), '(default_factory=unique_name)\n', (758, 787), False, 'from dataclasses import dataclass, field\n'), ((841, 874), 'dataclasses.field', 'field', ([], {'default_factory': 'Collection'}), '(default_factory=Collection)\n', (846, 874), False, 'from dataclasses import dataclass, field\n'), ((892, 933), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : [1, 1, 1])'}), '(default_factory=lambda : [1, 1, 1])\n', (897, 933), False, 'from dataclasses import dataclass, field\n'), ((1442, 1475), 'dataclasses.field', 'field', ([], {'default_factory': 'Collection'}), '(default_factory=Collection)\n', (1447, 1475), False, 'from dataclasses import dataclass, field\n'), ((1502, 1535), 'dataclasses.field', 'field', ([], {'default_factory': 'Collection'}), '(default_factory=Collection)\n', (1507, 1535), False, 'from dataclasses import dataclass, field\n'), ((1566, 1599), 'dataclasses.field', 'field', ([], {'default_factory': 'Collection'}), '(default_factory=Collection)\n', (1571, 1599), False, 'from dataclasses import dataclass, field\n'), ((55813, 55847), 'dataclasses.field', 'field', ([], {'default_factory': 'unique_name'}), '(default_factory=unique_name)\n', (55818, 55847), False, 'from dataclasses import dataclass, field\n'), ((56028, 56053), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (56036, 56053), True, 'import numpy as np\n'), ((56079, 56104), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (56087, 56104), True, 'import numpy as np\n'), ((2599, 2616), 'numpy.min', 'np.min', (['self.data'], {}), '(self.data)\n', (2605, 2616), True, 'import numpy as np\n'), ((2641, 2658), 'numpy.max', 'np.max', (['self.data'], {}), '(self.data)\n', (2647, 2658), True, 'import numpy as np\n'), ((6526, 6556), 'numpy.transpose', 'np.transpose', (['self.data', 'shape'], {}), '(self.data, shape)\n', (6538, 6556), True, 'import numpy as np\n'), ((7033, 7107), 'logging.info', 'logging.info', (['"""\nBuilding appropriate volume from Allen data source..."""'], {}), '("""\nBuilding appropriate volume from Allen data source...""")\n', (7045, 7107), False, 'import logging\n'), ((10414, 10442), 'vtk.vtkVolume.__init__', 'vtk.vtkVolume.__init__', (['self'], {}), '(self)\n', (10436, 10442), False, 'import vtk\n'), ((10451, 10479), 'vedo.BaseGrid.__init__', 'vedo.BaseGrid.__init__', (['self'], {}), '(self)\n', (10473, 10479), False, 'import vedo\n'), ((19031, 19051), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ([], {}), '()\n', (19049, 19051), False, 'import vtk\n'), ((19073, 19093), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ([], {}), '()\n', (19091, 19093), False, 'import vtk\n'), ((24790, 24803), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24801, 24803), False, 'from collections import OrderedDict\n'), ((33103, 33124), 'vtk.vtkVolumePicker', 'vtk.vtkVolumePicker', ([], {}), '()\n', (33122, 33124), False, 'import vtk\n'), ((35001, 35025), 'vtk.vtkPlaneCollection', 'vtk.vtkPlaneCollection', ([], {}), '()\n', (35023, 35025), False, 'import vtk\n'), ((35661, 35685), 'vtk.vtkPlaneCollection', 'vtk.vtkPlaneCollection', ([], {}), '()\n', (35683, 35685), False, 'import vtk\n'), ((38045, 38060), 'vtk.vtkPlanes', 'vtk.vtkPlanes', ([], {}), '()\n', (38058, 38060), False, 'import vtk\n'), ((38523, 38538), 'vtk.vtkPlanes', 'vtk.vtkPlanes', ([], {}), '()\n', (38536, 38538), False, 'import vtk\n'), ((46463, 46482), 'numpy.linalg.norm', 'np.linalg.norm', (['ray'], {}), 
'(ray)\n', (46477, 46482), True, 'import numpy as np\n'), ((48549, 48679), 'iblviewer.objects.Points', 'obj.Points', (['positions'], {'values': 'values', 'radius': 'radius', 'screen_space': 'screen_space', 'color_map': 'color_map', 'min_v': 'min_v', 'max_v': 'max_v'}), '(positions, values=values, radius=radius, screen_space=\n screen_space, color_map=color_map, min_v=min_v, max_v=max_v)\n', (48559, 48679), True, 'import iblviewer.objects as obj\n'), ((49805, 49821), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (49813, 49821), True, 'import numpy as np\n'), ((49844, 49865), 'numpy.array', 'np.array', (['destination'], {}), '(destination)\n', (49852, 49865), True, 'import numpy as np\n'), ((49885, 49921), 'numpy.linalg.norm', 'np.linalg.norm', (['(destination - origin)'], {}), '(destination - origin)\n', (49899, 49921), True, 'import numpy as np\n'), ((52223, 52243), 'vtk.vtkLookupTable', 'vtk.vtkLookupTable', ([], {}), '()\n', (52241, 52243), False, 'import vtk\n'), ((52589, 52619), 'vtk.vtkDiscreteMarchingCubes', 'vtk.vtkDiscreteMarchingCubes', ([], {}), '()\n', (52617, 52619), False, 'import vtk\n'), ((52834, 52869), 'vtk.vtkWindowedSincPolyDataFilter', 'vtk.vtkWindowedSincPolyDataFilter', ([], {}), '()\n', (52867, 52869), False, 'import vtk\n'), ((65918, 65965), 'iblviewer.utils.get_transformation_matrix', 'utils.get_transformation_matrix', (['origin', 'normal'], {}), '(origin, normal)\n', (65949, 65965), True, 'import iblviewer.utils as utils\n'), ((1768, 1779), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1776, 1779), True, 'import numpy as np\n'), ((1819, 1830), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1827, 1830), True, 'import numpy as np\n'), ((2001, 2026), 'numpy.array', 'np.array', (['self.data.shape'], {}), '(self.data.shape)\n', (2009, 2026), True, 'import numpy as np\n'), ((3901, 3940), 'vedo.download', 'vedo.download', (['file_path'], {'verbose': '(False)'}), '(file_path, verbose=False)\n', (3914, 3940), False, 'import vedo\n'), ((5186, 5200), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5198, 5200), False, 'from datetime import datetime\n'), ((5229, 5305), 'iblviewer.utils.change_file_name', 'utils.change_file_name', (['file_path', 'None', 'None', 'VolumeModel.NORMALIZED_SUFFIX'], {}), '(file_path, None, None, VolumeModel.NORMALIZED_SUFFIX)\n', (5251, 5305), True, 'import iblviewer.utils as utils\n'), ((5321, 5350), 'os.path.exists', 'os.path.exists', (['new_file_path'], {}), '(new_file_path)\n', (5335, 5350), False, 'import os\n'), ((7175, 7190), 'numpy.unique', 'np.unique', (['data'], {}), '(data)\n', (7184, 7190), True, 'import numpy as np\n'), ((7275, 7292), 'pandas.Series', 'pd.Series', (['labels'], {}), '(labels)\n', (7284, 7292), True, 'import pandas as pd\n'), ((7976, 8030), 'logging.info', 'logging.info', (["('Saving volume data under ' + write_path)"], {}), "('Saving volume data under ' + write_path)\n", (7988, 8030), False, 'import logging\n'), ((8043, 8088), 'nrrd.write', 'nrrd.write', (['write_path', 'data'], {'index_order': '"""C"""'}), "(write_path, data, index_order='C')\n", (8053, 8088), False, 'import nrrd\n'), ((10953, 10984), 'vtk.vtkGPUVolumeRayCastMapper', 'vtk.vtkGPUVolumeRayCastMapper', ([], {}), '()\n', (10982, 10984), False, 'import vtk\n'), ((11665, 11683), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (11681, 11683), False, 'import vtk\n'), ((11698, 11729), 'vedo.utils.isSequence', 'vedo.utils.isSequence', (['inputobj'], {}), '(inputobj)\n', (11719, 11729), False, 'import vedo\n'), 
((21981, 22017), 'numpy.linspace', 'np.linspace', (['s_min', 's_max', 'num_steps'], {}), '(s_min, s_max, num_steps)\n', (21992, 22017), True, 'import numpy as np\n'), ((30005, 30024), 'numpy.array', 'np.array', (['([res] * 3)'], {}), '([res] * 3)\n', (30013, 30024), True, 'import numpy as np\n'), ((43270, 43296), 'vtk.vtkPiecewiseFunction', 'vtk.vtkPiecewiseFunction', ([], {}), '()\n', (43294, 43296), False, 'import vtk\n'), ((53427, 53462), 'vtk.vtkPolyDataConnectivityFilter', 'vtk.vtkPolyDataConnectivityFilter', ([], {}), '()\n', (53460, 53462), False, 'import vtk\n'), ((54730, 54745), 'vedo.Mesh', 'vedo.Mesh', (['poly'], {}), '(poly)\n', (54739, 54745), False, 'import vedo\n'), ((57437, 57448), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (57445, 57448), True, 'import numpy as np\n'), ((60515, 60547), 'vtk.vtkImageDataGeometryFilter', 'vtk.vtkImageDataGeometryFilter', ([], {}), '()\n', (60545, 60547), False, 'import vtk\n'), ((65344, 65365), 'vtk.vtkImageReslice', 'vtk.vtkImageReslice', ([], {}), '()\n', (65363, 65365), False, 'import vtk\n'), ((4015, 4051), 'nrrd.read', 'nrrd.read', (['downloaded_temp_file_path'], {}), '(downloaded_temp_file_path)\n', (4024, 4051), False, 'import nrrd\n'), ((4093, 4138), 'vedo.loadImageData', 'vedo.loadImageData', (['downloaded_temp_file_path'], {}), '(downloaded_temp_file_path)\n', (4111, 4138), False, 'import vedo\n'), ((4227, 4264), 'nrrd.read', 'nrrd.read', (['file_path'], {'index_order': '"""C"""'}), "(file_path, index_order='C')\n", (4236, 4264), False, 'import nrrd\n'), ((4306, 4335), 'vedo.loadImageData', 'vedo.loadImageData', (['file_path'], {}), '(file_path)\n', (4324, 4335), False, 'import vedo\n'), ((10690, 10723), 'vedo.io.download', 'download', (['inputobj'], {'verbose': '(False)'}), '(inputobj, verbose=False)\n', (10698, 10723), False, 'from vedo.io import loadImageData, download\n'), ((10749, 10773), 'os.path.isfile', 'os.path.isfile', (['inputobj'], {}), '(inputobj)\n', (10763, 10773), False, 'import os\n'), ((11049, 11086), 'vtk.vtkOpenGLGPUVolumeRayCastMapper', 'vtk.vtkOpenGLGPUVolumeRayCastMapper', ([], {}), '()\n', (11084, 11086), False, 'import vtk\n'), ((20122, 20159), 'vedo.colors.getColor', 'vedo.colors.getColor', (['color_map[r_id]'], {}), '(color_map[r_id])\n', (20142, 20159), False, 'import vedo\n'), ((20184, 20199), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (20192, 20199), True, 'import numpy as np\n'), ((31135, 31177), 'numpy.linalg.norm', 'np.linalg.norm', (['(center - self.model.center)'], {}), '(center - self.model.center)\n', (31149, 31177), True, 'import numpy as np\n'), ((35150, 35164), 'vtk.vtkPlane', 'vtk.vtkPlane', ([], {}), '()\n', (35162, 35164), False, 'import vtk\n'), ((39310, 39349), 'numpy.linalg.norm', 'np.linalg.norm', (['(current_origin - origin)'], {}), '(current_origin - origin)\n', (39324, 39349), True, 'import numpy as np\n'), ((47149, 47183), 'numpy.linalg.norm', 'np.linalg.norm', (['(position - hits[0])'], {}), '(position - hits[0])\n', (47163, 47183), True, 'import numpy as np\n'), ((53962, 53980), 'vtk.vtkThreshold', 'vtk.vtkThreshold', ([], {}), '()\n', (53978, 53980), False, 'import vtk\n'), ((57197, 57208), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (57205, 57208), True, 'import numpy as np\n'), ((59074, 59090), 'numpy.array', 'np.array', (['normal'], {}), '(normal)\n', (59082, 59090), True, 'import numpy as np\n'), ((2239, 2249), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2246, 2249), True, 'import numpy as np\n'), ((11146, 11172), 
'vtk.vtkSmartVolumeMapper', 'vtk.vtkSmartVolumeMapper', ([], {}), '()\n', (11170, 11172), False, 'import vtk\n'), ((11828, 11848), 'vtk.vtkImageAppend', 'vtk.vtkImageAppend', ([], {}), '()\n', (11846, 11848), False, 'import vtk\n'), ((13049, 13067), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (13065, 13067), False, 'import vtk\n'), ((19617, 19642), 'numpy.random.rand', 'np.random.rand', (['num_steps'], {}), '(num_steps)\n', (19631, 19642), True, 'import numpy as np\n'), ((20314, 20336), 'numpy.maximum', 'np.maximum', (['color', '(0.0)'], {}), '(color, 0.0)\n', (20324, 20336), True, 'import numpy as np\n'), ((20365, 20387), 'numpy.minimum', 'np.minimum', (['color', '(1.0)'], {}), '(color, 1.0)\n', (20375, 20387), True, 'import numpy as np\n'), ((21378, 21423), 'vedo.colorMap', 'vedo.colorMap', (['value', 'color_map', 's_min', 's_max'], {}), '(value, color_map, s_min, s_max)\n', (21391, 21423), False, 'import vedo\n'), ((22206, 22263), 'vedo.colorMap', 'vedo.colorMap', (['mock_values[r_id]', 'color_map', 's_min', 's_max'], {}), '(mock_values[r_id], color_map, s_min, s_max)\n', (22219, 22263), False, 'import vedo\n'), ((39472, 39488), 'numpy.array', 'np.array', (['normal'], {}), '(normal)\n', (39480, 39488), True, 'import numpy as np\n'), ((40152, 40171), 'numpy.array', 'np.array', (['alpha_map'], {}), '(alpha_map)\n', (40160, 40171), True, 'import numpy as np\n'), ((40215, 40238), 'numpy.ones_like', 'np.ones_like', (['alpha_map'], {}), '(alpha_map)\n', (40227, 40238), True, 'import numpy as np\n'), ((40441, 40460), 'numpy.array', 'np.array', (['alpha_map'], {}), '(alpha_map)\n', (40449, 40460), True, 'import numpy as np\n'), ((10848, 10867), 'glob.glob', 'glob.glob', (['inputobj'], {}), '(inputobj)\n', (10857, 10867), False, 'import glob\n'), ((11232, 11270), 'vtk.vtkFixedPointVolumeRayCastMapper', 'vtk.vtkFixedPointVolumeRayCastMapper', ([], {}), '()\n', (11268, 11270), False, 'import vtk\n'), ((12048, 12066), 'vtk.vtkBMPReader', 'vtk.vtkBMPReader', ([], {}), '()\n', (12064, 12066), False, 'import vtk\n'), ((12167, 12190), 'vtk.vtkImageMagnitude', 'vtk.vtkImageMagnitude', ([], {}), '()\n', (12188, 12190), False, 'import vtk\n'), ((12540, 12558), 'numpy.array', 'np.array', (['inputobj'], {}), '(inputobj)\n', (12548, 12558), True, 'import numpy as np\n'), ((12630, 12670), 'vedo.numpy2vtk', 'vedo.numpy2vtk', (['inputobj'], {'dtype': 'np.float'}), '(inputobj, dtype=np.float)\n', (12644, 12670), False, 'import vedo\n'), ((13242, 13327), 'vedo.colors.printc', 'vedo.colors.printc', (['"""Error: must set dimensions (dims keyword) in Volume."""'], {'c': '"""r"""'}), "('Error: must set dimensions (dims keyword) in Volume.',\n c='r')\n", (13260, 13327), False, 'import vedo\n'), ((5623, 5644), 'iblviewer.utils.time_diff', 'utils.time_diff', (['time'], {}), '(time)\n', (5638, 5644), True, 'import iblviewer.utils as utils\n'), ((14629, 14652), 'vedo.io.loadImageData', 'loadImageData', (['inputobj'], {}), '(inputobj)\n', (14642, 14652), False, 'from vedo.io import loadImageData, download\n'), ((14680, 14765), 'vedo.colors.printc', 'vedo.colors.printc', (['"""Volume(): cannot understand input type:\n"""', 'inputtype'], {'c': '"""r"""'}), "('Volume(): cannot understand input type:\\n', inputtype,\n c='r')\n", (14698, 14765), False, 'import vedo\n'), ((14577, 14610), 'vedo.io.download', 'download', (['inputobj'], {'verbose': '(False)'}), '(inputobj, verbose=False)\n', (14585, 14610), False, 'from vedo.io import loadImageData, download\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 11:11:56 2020
This program is used to plot a polarization map from a VLBI fits image.
You should specify the input fits image by -i or --infile,
the output file by -o or --outfile,
contour levels by -l or --levs,
the contour base by -c or --cmul,
polarization parameters by -p or --pol: "icut pcut inc scale",
the plot window by -w or --win,
the restoring beam position by -b or --bpos,
and the figure size by -f or --figsize.
Installation:
1. copy file
chmod a+x mapplot.py
cp mapplot.py ~/myapp
2. set environment variables
Add the following line to ~/.bashrc
   export PATH=$PATH:/home/username/myapp
source ~/.bashrc
Running like this:
mapplot.py -w <win> -f <figsize> -n <normalize> <infile> <cmul>
mapplot.py -i <input file> -o <out.pdf> -c <cmul> -w <win> -p <pol>
Examples:
1. mapplot.py -i cta102.fits -o cta102-color.pdf -c 1.8e-3 -w '18 -8 -20 6' -f '7 6' -n 'power 0.5'
2. mapplot.py -w '18 -8 -20 6' -f '4.0 6' -n 'power 0.5' cta102.fits 1.8e-3
https://matplotlib.org/3.1.1/tutorials/colors/colormaps.html
https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.colors.Normalize.html#matplotlib.colors.Normalize
@author: <NAME>
Shanghai Astronomical Observatory, Chinese Academy of Sciences
E-mail: <EMAIL>; <EMAIL>
"""
import sys
import getopt
from astropy.io import fits
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.colors as mcolors
def add_beam(ax, win, h, bpos=None, pad=2.0):
if bpos==None :
x = win[0] - pad * h['bmaj']*3.6E6
y = win[2] + pad * h['bmaj']*3.6E6
bpos = (x, y)
bmaj = h['bmaj'] * 3.6E6
bmin = h['bmin'] * 3.6E6
bpa = 90 - h['bpa']
e = Ellipse(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray')
ax.add_artist(e)
def annotate(ax, notefile=''):
if notefile != '':
tab = Table.read(notefile, format='csv')
for t in tab:
ax.text(t['x'], t['y'], t['text'])
# ax.annotate('%s' % h['object'], xy=(0.125,0.91), xycoords='figure fraction')
# ax.annotate('%.1f GHz' % (h['crval3']/1.0E9), xy=(0.83, 0.91), xycoords='figure fraction')
def cut_cmap(cmap, N_cut=0):
# cmap = mcolors.Colormap(cmap)
cmap = plt.get_cmap(cmap)
x = np.arange(N_cut, 256) / 256.0
color_index = cmap(x)
cmap = mcolors.ListedColormap(color_index)
return cmap
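# Illustrative example (not part of the original script): cut_cmap keeps only
# entries N_cut..255 of the 256-colour lookup built above, which is handy for
# dropping the darkest colours of a map. The helper name below is hypothetical.
def _cut_cmap_example():
    trimmed = cut_cmap('rainbow', N_cut=32)  # discard the first 32 colours
    return trimmed.N  # 224 colours remain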
def get_normalize(args, vmin=0.0, vmax=1.0):
if args == '':
norm = mcolors.Normalize(vmin, vmax)
args = args.split(' ')
name = args[0]
if name == 'linear':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.Normalize(vmin, vmax, True)
elif name == 'power':
if len(args)==1:
gamma = 0.5
if len(args)==2:
gamma = float(args[1])
elif len(args)==4:
gamma, vmin, vmax = np.array(args[1:], dtype='f4')
if gamma < 1.0 and vmin < 0.0:
vmin = 0.0
norm = mcolors.PowerNorm(gamma, vmin, vmax, True)
elif name == 'log':
if len(args)==3:
vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.LogNorm(vmin, vmax)
elif name == 'symlog':
if len(args)==2:
linthresh = float(args[1])
linscale = 1.0
elif len(args)==3:
linthresh, linscale = np.array(args[1:], dtype='f4')
elif len(args)==5:
linthresh, linscale, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.SymLogNorm(linthresh, linscale, vmin, vmax)
elif name == 'twoslope':
if len(args)==2:
vcenter = float(args[1])
elif len(args)==4:
vcenter, vmin, vmax = np.array(args[1:], dtype='f4')
norm = mcolors.TwoSlopeNorm(vcenter, vmin, vmax)
return norm
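# Illustrative example (not part of the original script): how the -n/--norm
# spec strings parsed above map onto matplotlib norms, e.g. 'power 0.5' gives
# a square-root stretch. The helper name below is hypothetical.
def _normalize_examples():
    specs = ['linear 0.0 1.0', 'power 0.5', 'log 1e-4 1e-1']
    return {s: get_normalize(s, vmin=0.0, vmax=1.0) for s in specs}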
def add_annotation(ax, infile=''):
if infile == '':
return
with open(infile, 'r') as f:
for line in f.readlines():
row = line.split(',')
row = [col.strip() for col in row]
typ = row[0]
args = row[1:]
if typ == 'text':
x, y, text = args
x, y = float(x), float(y)
ax.text(x, y, text)
elif typ == 'arrow':
x1, y1, x2, y2 = np.array(args, dtype='f4')
ax.annotate("", xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'annotation':
x1, y1, x2, y2 = np.array(args[:-1], dtype='f4')
text = args[-1]
ax.annotate(text, xy=(x1, y1), xytext=(x2, y2),
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
elif typ == 'ellipse':
x, y, majax, minax, pa = np.array(args, dtype='f4')
e = Ellipse((x,y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-')
ax.add_artist(e)
def set_axis(ax, w):
ax.set_aspect('equal')
ax.set_xlabel('Relative R.A. (mas)')
ax.set_ylabel('Relative Dec. (mas)')
ax.set_xlim(w[0],w[1])
ax.set_ylim(w[2],w[3])
ax.tick_params(which='both', direction='in', length=6, right=True, top=True)
ax.tick_params(which='minor',length=4)
ax.minorticks_on()
def word2pix(w, h):
if w == None:
W = [0, h['naxis1'], 0, h['naxis2']]
else:
x0, x1, y0, y1 = w
X0 = h['crpix1'] + x0/(h['cdelt1']*3.6E6)
Y0 = h['crpix2'] + y0/(h['cdelt2']*3.6E6)
X1 = h['crpix1'] + x1/(h['cdelt1']*3.6E6)
Y1 = h['crpix2'] + y1/(h['cdelt2']*3.6E6)
W = [int(X0), int(X1), int(Y0), int(Y1)]
return W
def pix2word(W, h):
if W == None:
W = [0, h['naxis1'], 0, h['naxis2']]
X0, X1, Y0, Y1 = W
x0 = h['cdelt1']*3.6E6 * (X0-h['crpix1'])
y0 = h['cdelt2']*3.6E6 * (Y0-h['crpix2'])
x1 = h['cdelt1']*3.6E6 * (X1-h['crpix1'])
y1 = h['cdelt2']*3.6E6 * (Y1-h['crpix2'])
w = [x0, x1, y0, y1]
return w
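# Illustrative example (not part of the original script): word2pix and
# pix2word invert each other for a minimal header-like dict. The factor 3.6E6
# converts cdelt from degrees per pixel to milliarcseconds per pixel. The
# header values below are made up for demonstration only.
def _coords_example():
    h = {'naxis1': 512, 'naxis2': 512, 'crpix1': 257.0, 'crpix2': 257.0,
         'cdelt1': -2.7777777778e-08, 'cdelt2': 2.7777777778e-08}  # ~0.1 mas/px
    win = [18.0, -8.0, -20.0, 6.0]  # mas, as used with the -w option
    W = word2pix(win, h)  # pixel window
    return W, pix2word(W, h)  # back to (approximately) the original window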
def savefig(outfile, dpi=100):
if outfile.lower().endswith('.pdf') :
plt.savefig(outfile)
elif outfile.lower().endswith('.jpg') or outfile.lower().endswith('.jpeg'):
plt.savefig(outfile, dpi=dpi)
elif outfile.lower().endswith('.png'):
plt.savefig(outfile, dpi=dpi)
def mapplot(infile, cmul, outfile='', win=None, levs=None, bpos=None,
figsize=None, dpi=100, annotationfile='', cmap='', N_cut=0,
norm='', fraction=0.05):
hdul = fits.open(infile)
h = hdul[0].header
# img = hdul[0].data[0, 0, :, :]
if levs==None:
levs = cmul*np.array([-1,1,2,4,8,16,32,64,128,256,512,1024,2048,4096])
# print(win)
if figsize == None :
figsize = (6, 6)
if win == None:
win = pix2word(None, h)
W = word2pix(None, h)
else:
W = word2pix(win, h)
img = hdul[0].data[0, 0, W[2]:W[3], W[0]:W[1]]
if cmap == '':
cmap = 'rainbow'
cmap = cut_cmap(cmap, N_cut)
vmin, vmax = np.min(img), np.max(img)
if norm == '':
norm = 'linear %.3f %.3f' % (vmin, vmax)
norm = get_normalize(norm, vmin, vmax)
fig, ax = plt.subplots()
fig.set_size_inches(figsize)
set_axis(ax, win)
add_beam(ax, win, h, bpos=bpos)
add_annotation(ax, annotationfile)
ax.contour(img, levs, extent=win,
linewidths=0.5, colors='k')
pcm = ax.imshow(img, extent=win, origin='lower',
interpolation='none', cmap=cmap, norm=norm)
cbar = fig.colorbar(pcm, ax=ax, fraction=fraction)
# cbar.ax.minorticks_off()
cbar.ax.tick_params('both',direction='in',right=True,top=True,which='both')
cbar.ax.tick_params(axis='y', labelrotation=90)
fig.tight_layout(pad=0.5)
if outfile != '':
savefig(outfile, dpi)
hdul.close()
def myhelp():
print('Help: mapplot.py -w "18 -8 -20 6" -f "7 6" -n "power 0.5" <cta102.fits> <1.8e-3>')
print(' or: mapplot.py -i cta102.fits -o cta102.png -w "18 -8 -20 6" -f "7 6" -n "power 0.5"')
def main(argv):
# infile = r'3c66a-calib/circe-beam.fits'
infile = ''
outfile = ''
annotationfile = ''
cmul = ''
win = None
levs = None
bpos = None
figsize = None
dpi = 100
colormap = ''
N_cut = 0
norm = ''
fraction = 0.05
try:
opts, args = getopt.getopt(argv, "hi:c:o:w:l:b:f:d:a:n:N:",
['help', 'infile=', 'cmul=', 'outfile=', 'win=',
'bpos=', 'figsize=', 'dpi=', 'annotatefile=', 'levs=', 'colormap=',
'N_cut=', 'norm=', 'fraction='])
except getopt.GetoptError:
myhelp()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
myhelp()
elif opt in ('-i', '--infile'):
infile = arg
elif opt in ('-c', '--cmul'):
cmul = arg
elif opt in ('-o', '--outfile'):
outfile = arg
elif opt in ('-w', '--win'):
win = arg
elif opt in ('-l', '--levs'):
levs = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-b', '--bpos'):
bpos = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-f', '--figsize'):
figsize = np.array(arg.split(), dtype=np.float64).tolist()
elif opt in ('-d', '--dpi'):
dpi = int(arg)
elif opt in ('-a', '--annotatefile'):
annotationfile = arg
elif opt in ('--colormap', ):
colormap = arg
elif opt in ('-N', '--N_cut'):
N_cut = int(arg)
elif opt in ('-n', '--norm'):
norm = arg
elif opt in ('--fraction',):
fraction = float(arg)
if infile=='' and len(args)==2:
infile, cmul = args
if infile=='' and len(args)==3:
infile, outfile, cmul = args
if infile=='' and len(args)==4:
infile, outfile, cmul, win = args
if outfile == '':
outfile = infile.split('.')[0] + '.pdf'
cmul = float(cmul)
if type(win) == str:
win = np.array(win.split(), dtype=np.float64).tolist()
mapplot(infile, cmul, outfile=outfile, win=win, levs=levs, bpos=bpos,
figsize=figsize, dpi=dpi, annotationfile=annotationfile,
cmap=colormap, N_cut=N_cut, norm=norm, fraction=fraction)
if __name__ == '__main__' :
main(sys.argv[1:])
|
[
"getopt.getopt",
"matplotlib.pyplot.savefig",
"sys.exit",
"matplotlib.colors.LogNorm",
"numpy.min",
"matplotlib.colors.ListedColormap",
"numpy.max",
"numpy.array",
"matplotlib.colors.PowerNorm",
"matplotlib.colors.SymLogNorm",
"astropy.table.Table.read",
"astropy.io.fits.open",
"matplotlib.colors.Normalize",
"matplotlib.patches.Ellipse",
"matplotlib.colors.TwoSlopeNorm",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.get_cmap"
] |
[((1726, 1788), 'matplotlib.patches.Ellipse', 'Ellipse', (['bpos', 'bmaj', 'bmin'], {'angle': 'bpa', 'ec': '"""k"""', 'facecolor': '"""gray"""'}), "(bpos, bmaj, bmin, angle=bpa, ec='k', facecolor='gray')\n", (1733, 1788), False, 'from matplotlib.patches import Ellipse\n'), ((2198, 2216), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (2210, 2216), True, 'import matplotlib.pyplot as plt\n'), ((2283, 2318), 'matplotlib.colors.ListedColormap', 'mcolors.ListedColormap', (['color_index'], {}), '(color_index)\n', (2305, 2318), True, 'import matplotlib.colors as mcolors\n'), ((5830, 5847), 'astropy.io.fits.open', 'fits.open', (['infile'], {}), '(infile)\n', (5839, 5847), False, 'from astropy.io import fits\n'), ((6405, 6419), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6417, 6419), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1901), 'astropy.table.Table.read', 'Table.read', (['notefile'], {'format': '"""csv"""'}), "(notefile, format='csv')\n", (1877, 1901), False, 'from astropy.table import Table\n'), ((2222, 2243), 'numpy.arange', 'np.arange', (['N_cut', '(256)'], {}), '(N_cut, 256)\n', (2231, 2243), True, 'import numpy as np\n'), ((2403, 2432), 'matplotlib.colors.Normalize', 'mcolors.Normalize', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (2420, 2432), True, 'import matplotlib.colors as mcolors\n'), ((2570, 2605), 'matplotlib.colors.Normalize', 'mcolors.Normalize', (['vmin', 'vmax', '(True)'], {}), '(vmin, vmax, True)\n', (2587, 2605), True, 'import matplotlib.colors as mcolors\n'), ((5455, 5475), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (5466, 5475), True, 'import matplotlib.pyplot as plt\n'), ((6270, 6281), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (6276, 6281), True, 'import numpy as np\n'), ((6283, 6294), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (6289, 6294), True, 'import numpy as np\n'), ((7457, 7661), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hi:c:o:w:l:b:f:d:a:n:N:"""', "['help', 'infile=', 'cmul=', 'outfile=', 'win=', 'bpos=', 'figsize=',\n 'dpi=', 'annotatefile=', 'levs=', 'colormap=', 'N_cut=', 'norm=',\n 'fraction=']"], {}), "(argv, 'hi:c:o:w:l:b:f:d:a:n:N:', ['help', 'infile=', 'cmul=',\n 'outfile=', 'win=', 'bpos=', 'figsize=', 'dpi=', 'annotatefile=',\n 'levs=', 'colormap=', 'N_cut=', 'norm=', 'fraction='])\n", (7470, 7661), False, 'import getopt\n'), ((2530, 2560), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (2538, 2560), True, 'import numpy as np\n'), ((2839, 2881), 'matplotlib.colors.PowerNorm', 'mcolors.PowerNorm', (['gamma', 'vmin', 'vmax', '(True)'], {}), '(gamma, vmin, vmax, True)\n', (2856, 2881), True, 'import matplotlib.colors as mcolors\n'), ((5555, 5584), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {'dpi': 'dpi'}), '(outfile, dpi=dpi)\n', (5566, 5584), True, 'import matplotlib.pyplot as plt\n'), ((5933, 6004), 'numpy.array', 'np.array', (['[-1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]'], {}), '([-1, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096])\n', (5941, 6004), True, 'import numpy as np\n'), ((7712, 7723), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7720, 7723), False, 'import sys\n'), ((2978, 3005), 'matplotlib.colors.LogNorm', 'mcolors.LogNorm', (['vmin', 'vmax'], {}), '(vmin, vmax)\n', (2993, 3005), True, 'import matplotlib.colors as mcolors\n'), ((5627, 5656), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {'dpi': 'dpi'}), '(outfile, dpi=dpi)\n', 
(5638, 5656), True, 'import matplotlib.pyplot as plt\n'), ((2752, 2782), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (2760, 2782), True, 'import numpy as np\n'), ((2938, 2968), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (2946, 2968), True, 'import numpy as np\n'), ((3272, 3323), 'matplotlib.colors.SymLogNorm', 'mcolors.SymLogNorm', (['linthresh', 'linscale', 'vmin', 'vmax'], {}), '(linthresh, linscale, vmin, vmax)\n', (3290, 3323), True, 'import matplotlib.colors as mcolors\n'), ((3901, 3927), 'numpy.array', 'np.array', (['args'], {'dtype': '"""f4"""'}), "(args, dtype='f4')\n", (3909, 3927), True, 'import numpy as np\n'), ((3483, 3524), 'matplotlib.colors.TwoSlopeNorm', 'mcolors.TwoSlopeNorm', (['vcenter', 'vmin', 'vmax'], {}), '(vcenter, vmin, vmax)\n', (3503, 3524), True, 'import matplotlib.colors as mcolors\n'), ((4091, 4122), 'numpy.array', 'np.array', (['args[:-1]'], {'dtype': '"""f4"""'}), "(args[:-1], dtype='f4')\n", (4099, 4122), True, 'import numpy as np\n'), ((3143, 3173), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (3151, 3173), True, 'import numpy as np\n'), ((4313, 4339), 'numpy.array', 'np.array', (['args'], {'dtype': '"""f4"""'}), "(args, dtype='f4')\n", (4321, 4339), True, 'import numpy as np\n'), ((4348, 4422), 'matplotlib.patches.Ellipse', 'Ellipse', (['(x, y)', 'majax', 'minax'], {'angle': 'pa', 'lw': '(0.5)', 'fc': '"""none"""', 'ec': '"""k"""', 'ls': '"""-"""'}), "((x, y), majax, minax, angle=pa, lw=0.5, fc='none', ec='k', ls='-')\n", (4355, 4422), False, 'from matplotlib.patches import Ellipse\n'), ((3232, 3262), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (3240, 3262), True, 'import numpy as np\n'), ((3443, 3473), 'numpy.array', 'np.array', (['args[1:]'], {'dtype': '"""f4"""'}), "(args[1:], dtype='f4')\n", (3451, 3473), True, 'import numpy as np\n')]
|
"""Tests for quantization"""
import numpy as np
import unittest
import os
import shutil
import yaml
import tensorflow as tf
def build_fake_yaml():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
accuracy_criterion:
relative: 0.01
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
def build_fake_yaml2():
fake_yaml = '''
model:
name: fake_yaml
framework: tensorflow
inputs: x
outputs: op_to_store
device: cpu
evaluation:
accuracy:
metric:
topk: 1
tuning:
strategy:
name: random
exit_policy:
max_trials: 5
accuracy_criterion:
relative: -0.01
workspace:
path: saved
'''
y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
with open('fake_yaml2.yaml', "w", encoding="utf-8") as f:
yaml.dump(y, f)
f.close()
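# Note: fake_yaml2 above differs from fake_yaml only in its tuning section --
# 'exit_policy: max_trials: 5' caps the random strategy at five attempts, and
# the negative accuracy criterion ('relative: -0.01') is presumably never met
# by the fake model, so tuning should stop only when the trial budget runs out.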
def build_fake_model():
try:
graph = tf.Graph()
graph_def = tf.GraphDef()
with tf.Session() as sess:
x = tf.placeholder(tf.float64, shape=(1, 3, 3, 1), name='x')
y = tf.constant(np.random.random((2, 2, 1, 1)), name='y')
op = tf.nn.conv2d(input=x, filter=y, strides=[
1, 1, 1, 1], padding='VALID', name='op_to_store')
sess.run(tf.global_variables_initializer())
constant_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['op_to_store'])
graph_def.ParseFromString(constant_graph.SerializeToString())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
except:
graph = tf.Graph()
graph_def = tf.compat.v1.GraphDef()
with tf.compat.v1.Session() as sess:
x = tf.compat.v1.placeholder(tf.float64, shape=(1, 3, 3, 1), name='x')
y = tf.compat.v1.constant(np.random.random((2, 2, 1, 1)), name='y')
op = tf.nn.conv2d(input=x, filters=y, strides=[
1, 1, 1, 1], padding='VALID', name='op_to_store')
sess.run(tf.compat.v1.global_variables_initializer())
constant_graph = tf.compat.v1.graph_util.convert_variables_to_constants(sess, sess.graph_def, [
'op_to_store'])
graph_def.ParseFromString(constant_graph.SerializeToString())
with graph.as_default():
tf.import_graph_def(graph_def, name='')
return graph
class TestQuantization(unittest.TestCase):
@classmethod
def setUpClass(self):
self.constant_graph = build_fake_model()
build_fake_yaml()
build_fake_yaml2()
@classmethod
def tearDownClass(self):
os.remove('fake_yaml.yaml')
os.remove('fake_yaml2.yaml')
shutil.rmtree("saved", ignore_errors=True)
def test_ru_random_one_trial(self):
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = self.constant_graph
quantizer()
def test_ru_random_max_trials(self):
from neural_compressor.experimental import Quantization, common
quantizer = Quantization('fake_yaml2.yaml')
dataset = quantizer.dataset('dummy', (100, 3, 3, 1), label=True)
quantizer.calib_dataloader = common.DataLoader(dataset)
quantizer.eval_dataloader = common.DataLoader(dataset)
quantizer.model = self.constant_graph
quantizer()
if __name__ == "__main__":
unittest.main()
|
[
"yaml.load",
"unittest.main",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"os.remove",
"tensorflow.graph_util.convert_variables_to_constants",
"tensorflow.Graph",
"tensorflow.compat.v1.placeholder",
"numpy.random.random",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.GraphDef",
"neural_compressor.experimental.common.DataLoader",
"tensorflow.compat.v1.graph_util.convert_variables_to_constants",
"tensorflow.nn.conv2d",
"tensorflow.compat.v1.GraphDef",
"yaml.dump",
"tensorflow.import_graph_def",
"tensorflow.global_variables_initializer",
"shutil.rmtree",
"neural_compressor.experimental.Quantization"
] |
[((619, 663), 'yaml.load', 'yaml.load', (['fake_yaml'], {'Loader': 'yaml.SafeLoader'}), '(fake_yaml, Loader=yaml.SafeLoader)\n', (628, 663), False, 'import yaml\n'), ((1296, 1340), 'yaml.load', 'yaml.load', (['fake_yaml'], {'Loader': 'yaml.SafeLoader'}), '(fake_yaml, Loader=yaml.SafeLoader)\n', (1305, 1340), False, 'import yaml\n'), ((4415, 4430), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4428, 4430), False, 'import unittest\n'), ((735, 750), 'yaml.dump', 'yaml.dump', (['y', 'f'], {}), '(y, f)\n', (744, 750), False, 'import yaml\n'), ((1413, 1428), 'yaml.dump', 'yaml.dump', (['y', 'f'], {}), '(y, f)\n', (1422, 1428), False, 'import yaml\n'), ((1500, 1510), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1508, 1510), True, 'import tensorflow as tf\n'), ((1532, 1545), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1543, 1545), True, 'import tensorflow as tf\n'), ((3374, 3401), 'os.remove', 'os.remove', (['"""fake_yaml.yaml"""'], {}), "('fake_yaml.yaml')\n", (3383, 3401), False, 'import os\n'), ((3411, 3439), 'os.remove', 'os.remove', (['"""fake_yaml2.yaml"""'], {}), "('fake_yaml2.yaml')\n", (3420, 3439), False, 'import os\n'), ((3451, 3493), 'shutil.rmtree', 'shutil.rmtree', (['"""saved"""'], {'ignore_errors': '(True)'}), "('saved', ignore_errors=True)\n", (3464, 3493), False, 'import shutil\n'), ((3633, 3663), 'neural_compressor.experimental.Quantization', 'Quantization', (['"""fake_yaml.yaml"""'], {}), "('fake_yaml.yaml')\n", (3645, 3663), False, 'from neural_compressor.experimental import Quantization, common\n'), ((3776, 3802), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (3793, 3802), False, 'from neural_compressor.experimental import Quantization, common\n'), ((3840, 3866), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (3857, 3866), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4075, 4106), 'neural_compressor.experimental.Quantization', 'Quantization', (['"""fake_yaml2.yaml"""'], {}), "('fake_yaml2.yaml')\n", (4087, 4106), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4219, 4245), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (4236, 4245), False, 'from neural_compressor.experimental import Quantization, common\n'), ((4283, 4309), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['dataset'], {}), '(dataset)\n', (4300, 4309), False, 'from neural_compressor.experimental import Quantization, common\n'), ((1560, 1572), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1570, 1572), True, 'import tensorflow as tf\n'), ((1599, 1655), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {'shape': '(1, 3, 3, 1)', 'name': '"""x"""'}), "(tf.float64, shape=(1, 3, 3, 1), name='x')\n", (1613, 1655), True, 'import tensorflow as tf\n'), ((1745, 1840), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'x', 'filter': 'y', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""op_to_store"""'}), "(input=x, filter=y, strides=[1, 1, 1, 1], padding='VALID', name\n ='op_to_store')\n", (1757, 1840), True, 'import tensorflow as tf\n'), ((1957, 2045), 'tensorflow.graph_util.convert_variables_to_constants', 'tf.graph_util.convert_variables_to_constants', (['sess', 'sess.graph_def', "['op_to_store']"], {}), "(sess, sess.graph_def, [\n 'op_to_store'])\n", (2001, 2045), True, 'import tensorflow 
as tf\n'), ((2179, 2218), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (2198, 2218), True, 'import tensorflow as tf\n'), ((2249, 2259), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2257, 2259), True, 'import tensorflow as tf\n'), ((2281, 2304), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (2302, 2304), True, 'import tensorflow as tf\n'), ((1685, 1715), 'numpy.random.random', 'np.random.random', (['(2, 2, 1, 1)'], {}), '((2, 2, 1, 1))\n', (1701, 1715), True, 'import numpy as np\n'), ((1892, 1925), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1923, 1925), True, 'import tensorflow as tf\n'), ((2319, 2341), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2339, 2341), True, 'import tensorflow as tf\n'), ((2368, 2434), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float64'], {'shape': '(1, 3, 3, 1)', 'name': '"""x"""'}), "(tf.float64, shape=(1, 3, 3, 1), name='x')\n", (2392, 2434), True, 'import tensorflow as tf\n'), ((2534, 2629), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'x', 'filters': 'y', 'strides': '[1, 1, 1, 1]', 'padding': '"""VALID"""', 'name': '"""op_to_store"""'}), "(input=x, filters=y, strides=[1, 1, 1, 1], padding='VALID',\n name='op_to_store')\n", (2546, 2629), True, 'import tensorflow as tf\n'), ((2757, 2854), 'tensorflow.compat.v1.graph_util.convert_variables_to_constants', 'tf.compat.v1.graph_util.convert_variables_to_constants', (['sess', 'sess.graph_def', "['op_to_store']"], {}), "(sess, sess.graph_def,\n ['op_to_store'])\n", (2811, 2854), True, 'import tensorflow as tf\n'), ((3057, 3096), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (3076, 3096), True, 'import tensorflow as tf\n'), ((2474, 2504), 'numpy.random.random', 'np.random.random', (['(2, 2, 1, 1)'], {}), '((2, 2, 1, 1))\n', (2490, 2504), True, 'import numpy as np\n'), ((2682, 2725), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (2723, 2725), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
"""
@package ion_functions.qc_functions
@file ion_functions/qc_functions.py
@author <NAME>
@brief Module containing QC functions ported from matlab samples in DPS documents
"""
from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month
import time
import numpy as np
import numexpr as ne
from scipy.interpolate import LinearNDInterpolator
from ion_functions import utils
from ion_functions.utils import fill_value
# try to load the OOI logging module, using default Python logging module if
# unavailable
try:
from ooi.logging import log
except ImportError:
import logging
log = logging.getLogger('ion-functions')
def is_fill(arr):
return np.atleast_1d(arr)[-1] == -9999. # Not the normal fill value, it's hardcoded to the QC params
def is_none(arr):
return arr is None or (np.atleast_1d(arr)[-1] == None)
def dataqc_globalrangetest_minmax(dat, dat_min, dat_max, strict_validation=False):
'''
Python wrapper for dataqc_globalrangetest
Combines the min/max arguments into list for dataqc_globalrangetest
'''
if is_none(dat_min) or is_none(dat_max) or is_fill(dat_min) or is_fill(dat_max):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_globalrangetest(dat, [np.atleast_1d(dat_min)[-1], np.atleast_1d(dat_max)[-1]], strict_validation=strict_validation)
def dataqc_globalrangetest(dat, datlim, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. Returns 1 for presumably good data and 0 for
data presumed bad.
Implemented by:
2010-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance improvements by adding
strict_validation flag.
Usage:
qcflag = dataqc_globalrangetest(dat, datlim, strict_validation)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = Input dataset, any scalar or vector. Must be numeric and real.
datlim = Two-element vector with the minimum and maximum values
considered to be valid.
strict_validation = Flag (default is False) to assert testing of input
types (e.g. isreal, isnumeric)
References:
OOI (2012). Data Product Specification for Global Range Test. Document
Control Number 1341-10004. https://alfresco.oceanobservatories.org
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10004_Data_Product_SPEC_GLBLRNG_OOI.pdf)
"""
dat = np.atleast_1d(dat)
datlim = np.atleast_1d(datlim)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isnumeric(datlim).all():
raise ValueError('\'datlim\' must be numeric')
if not utils.isreal(datlim).all():
raise ValueError('\'datlim\' must be real')
if len(datlim) < 2: # Must have at least 2 elements
raise ValueError('\'datlim\' must have at least 2 elements')
    return ((datlim.min() <= dat) & (dat <= datlim.max())).astype('int8')
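# Illustrative example (not part of the original OOI code): a minimal call to
# the global range test. The helper name and numbers below are hypothetical.
def _globalrangetest_example():
    # 10 and 20 fall inside [0, 25] and flag as 1; 30 falls outside and flags 0
    return dataqc_globalrangetest([10, 20, 30], [0, 25])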
def dataqc_localrangetest_wrapper(dat, datlim, datlimz, dims, pval_callback):
if is_none(datlim) or np.all(np.atleast_1d(datlim).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
    if is_none(datlimz) or np.all(np.atleast_1d(datlimz).flatten() == -9999):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(dims):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
if is_none(pval_callback):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
z = []
for dim in dims:
if dim == 'month':
# Convert time vector to vector of months
v = pval_callback('time')
v = np.asanyarray(v, dtype=np.float)
v = ntp_to_month(v)
z.append(v)
else:
# Fetch the dimension from the callback method
v = pval_callback(dim)
z.append(v)
if len(dims)>1:
z = np.column_stack(z)
else:
z = z[0]
datlimz = datlimz[:,0]
return dataqc_localrangetest(dat, z, datlim, datlimz)
def dataqc_localrangetest(dat, z, datlim, datlimz, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements fall into a
user-defined valid range. This range is not constant but varies with
measurement location. Returns 1 for presumably good data and 0 for data
presumed bad.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = dataqc_localrangetest(dat, z, datlim, datlimz)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric real scalar or column vector.
z = location of measurement dat. must have same # of rows as dat and
same # of columns as datlimz
datlim = two column array with the minimum (column 1) and maximum
(column 2) values considered valid.
datlimz = array with the locations where datlim is given. must have
same # of rows as datlim and same # of columns as z.
References:
OOI (2012). Data Product Specification for Local Range Test. Document
Control Number 1341-10005. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10005_Data_Product_SPEC_LOCLRNG_OOI.pdf)
"""
if strict_validation:
# check if dat and datlim are matrices
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a matrix')
if not utils.ismatrix(datlim):
raise ValueError('\'datlim\' must be a matrix')
# check if all inputs are numeric and real
for k, arg in {'dat': dat, 'z': z, 'datlim': datlim,
'datlimz': datlimz}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
if len(datlim.shape) == 3 and datlim.shape[0] == 1:
datlim = datlim.reshape(datlim.shape[1:])
if len(datlimz.shape) == 3 and datlimz.shape[0] == 1:
datlimz = datlimz.reshape(datlimz.shape[1:])
# test size and shape of the input arrays datlimz and datlim, setting test
# variables.
array_size = datlimz.shape
if len(array_size) == 1:
numlim = array_size[0]
ndim = 1
else:
numlim = array_size[0]
ndim = array_size[1]
array_size = datlim.shape
tmp1 = array_size[0]
tmp2 = array_size[1]
if tmp1 != numlim:
raise ValueError('\'datlim\' and \'datlimz\' must '
'have the same number of rows.')
if tmp2 != 2:
raise ValueError('\'datlim\' must be structured as 2-D array '
'with exactly 2 columns and 1 through N rows.')
# test the size and shape of the z input array
array_size = z.shape
if len(array_size) == 1:
num = array_size[0]
tmp2 = 1
else:
num = array_size[0]
tmp2 = array_size[1]
if tmp2 != ndim:
raise ValueError('\'z\' must have the same number of columns '
'as \'datlimz\'.')
if num != dat.size:
        raise ValueError('Length of \'dat\' must match number of '
'rows in \'z\'')
# test datlim, values in column 2 must be greater than those in column 1
if not all(datlim[:, 1] > datlim[:, 0]):
raise ValueError('Second column values of \'datlim\' should be '
'greater than first column values.')
# calculate the upper and lower limits for the data set
if ndim == 1:
# determine the lower limits using linear interpolation
lim1 = np.interp(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)
# determine the upper limits using linear interpolation
lim2 = np.interp(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)
else:
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional lower limits
F = LinearNDInterpolator(datlimz, datlim[:, 0].reshape(numlim, 1))
lim1 = F(z).reshape(dat.size)
# Compute Delaunay Triangulation and use linear interpolation to
# determine the N-dimensional upper limits
F = LinearNDInterpolator(datlimz, datlim[:, 1].reshape(numlim, 1))
lim2 = F(z).reshape(dat.size)
# replace NaNs from above interpolations
ff = (np.isnan(lim1)) | (np.isnan(lim2))
lim1[ff] = np.max(datlim[:, 1])
lim2[ff] = np.min(datlim[:, 0])
# compute the qcflags
qcflag = (dat >= lim1) & (dat <= lim2)
return qcflag.astype('int8')
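# Illustrative example (not part of the original OOI code): a 1-D local range
# test whose valid envelope widens with the coordinate z. The helper name and
# numbers below are hypothetical.
def _localrangetest_example():
    dat = np.array([5.0, 15.0, 25.0])
    z = np.array([0.0, 50.0, 100.0])
    datlim = np.array([[0.0, 10.0], [0.0, 20.0]])  # [min, max] at each datlimz
    datlimz = np.array([0.0, 100.0])
    # the upper limit interpolates to 15 at z=50, so 15.0 passes; 25.0 exceeds
    # the limit of 20 at z=100 and should flag 0 -> expected flags [1, 1, 0]
    return dataqc_localrangetest(dat, z, datlim, datlimz)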
def dataqc_spiketest_wrapper(dat, acc, N, L, strict_validation=False):
if is_none(acc) or is_fill(acc) or is_none(N) or is_fill(N) or is_none(L) or is_fill(L):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_spiketest(dat, np.atleast_1d(acc)[-1], np.atleast_1d(N)[-1], np.atleast_1d(L)[-1], strict_validation=strict_validation)
def dataqc_spiketest(dat, acc, N=5, L=5, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for spikes.
Returns 1 for presumably good data and 0 for data presumed bad.
        The time series is divided into windows of length L (an odd integer
number). Then, window by window, each value is compared to its (L-1)
neighboring values: a range R of these (L-1) values is computed (max.
minus min.), and replaced with the measurement accuracy ACC if ACC>R. A
value is presumed to be good, i.e. no spike, if it deviates from the
mean of the (L-1) peers by less than a multiple of the range,
N*max(R,ACC).
Further than (L-1)/2 values from the start or end points, the peer
values are symmetrically before and after the test value. Within that
range of the start and end, the peers are the first/last L values
(without the test value itself).
The purpose of ACC is to restrict spike detection to deviations
exceeding a minimum threshold value (N*ACC) even if the data have
little variability. Use ACC=0 to disable this behavior.
Implemented by:
2012-07-28: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_spiketest(dat, acc, N, L)
where
qcflag = Boolean, 0 if value is outside range, else = 1.
dat = input data set, a numeric, real vector.
acc = Accuracy of any input measurement.
N = (optional, defaults to 5) Range multiplier, cf. above
        L = (optional, defaults to 5) Window length, cf. above
References:
OOI (2012). Data Product Specification for Spike Test. Document
Control Number 1341-10006. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10006_Data_Product_SPEC_SPKETST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'dat\' must be numeric')
if not utils.isreal(dat).all():
raise ValueError('\'dat\' must be real')
if not utils.isvector(dat):
raise ValueError('\'dat\' must be a vector')
for k, arg in {'acc': acc, 'N': N, 'L': L}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
dat = np.asanyarray(dat, dtype=np.float)
out = spikevalues(dat, L, N, acc)
return out
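# Illustrative example (not part of the original OOI code): an isolated spike
# in an otherwise smooth series. The helper name and numbers below are
# hypothetical; the exact flags come from the compiled spikevalues routine.
def _spiketest_example():
    dat = [1.0, 1.02, 0.98, 1.01, 25.0, 0.99, 1.0, 1.03, 0.97]
    # 25.0 deviates from its neighbours by far more than N*max(R, acc), so it
    # should be flagged 0 while the remaining values stay 1
    return dataqc_spiketest(dat, acc=0.1, N=5, L=5)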
def dataqc_polytrendtest_wrapper(dat, t, ord_n, nstd, strict_validation=False):
if is_none(ord_n) or is_fill(ord_n) or is_none(nstd) or is_fill(ord_n):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
return dataqc_polytrendtest(dat, t, np.atleast_1d(ord_n)[-1], np.atleast_1d(nstd)[-1], strict_validation=strict_validation)
def dataqc_polytrendtest(dat, t, ord_n=1, nstd=3, strict_validation=False):
"""
Description:
Data quality control algorithm testing if measurements contain a
significant portion of a polynomial. Returns 1 if this is not the case,
else 0.
The purpose of this test is to check if a significant fraction of the
variability in a time series can be explained by a drift, possibly
interpreted as a sensor drift. This drift is assumed to be a polynomial
of order ORD. Use ORD=1 to consider a linear drift
The time series dat is passed to MatLab's POLYFIT routine to obtain a
polynomial fit PP to dat, and the difference dat-PP is compared to the
original dat. If the standard deviation of (dat-PP) is less than that
of dat by a factor of NSTD, the time series is assumed to contain a
significant trend (output will be 0), else not (output will be 1).
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
2013-05-30: <NAME>. Performance optimizations.
Usage:
qcflag = dataqc_polytrendtest(dat, t, ord_n, nstd, strict_validation)
where
        qcflag = Boolean, 0 if a trend is detected, 1 elsewhere.
dat = Input dataset, a numeric real vector.
t = time record associated with dat
ord_n (optional, defaults to 1) = Polynomial order.
nstd (optional, defaults to 3) = Factor by how much the standard
deviation must be reduced before qcflag switches from 1 to 0
strict_validation (optional, defaults to False) = Flag asserting
testing of inputs.
References:
OOI (2012). Data Product Specification for Trend Test. Document
Control Number 1341-10007. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10007_Data_Product_SPEC_TRNDTST_OOI.pdf)
"""
dat = np.atleast_1d(dat)
t = np.atleast_1d(t)
if strict_validation:
for k, arg in {'dat': dat, 't': t, 'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
for k, arg in {'dat': dat, 't': t}.iteritems():
if not utils.isvector(arg):
raise ValueError('\'{0}\' must be a vector'.format(k))
for k, arg in {'ord_n': ord_n, 'nstd': nstd}.iteritems():
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
ord_n = int(round(abs(ord_n)))
nstd = int(abs(nstd))
ll = len(dat)
# Not needed because time is incorporated as 't'
# t = range(ll)
pp = np.polyfit(t, dat, ord_n)
datpp = np.polyval(pp, t)
# test for a trend
if np.atleast_1d((np.std(dat - datpp) * nstd) < np.std(dat)).all():
trndtst = 0
else:
trndtst = 1
# insure output size equals input, even though test yields a single value.
qcflag = np.ones(dat.shape).astype('int8') * trndtst
return qcflag
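# Illustrative example (not part of the original OOI code): the trend test on
# a series with and without a strong linear drift. The helper name and
# numbers below are hypothetical.
def _polytrendtest_example():
    t = np.arange(20.0)
    drifting = 0.5 * t + np.sin(t)  # drift dominates -> should flag 0
    steady = np.sin(t)               # no drift -> should flag 1
    return dataqc_polytrendtest(drifting, t), dataqc_polytrendtest(steady, t)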
def dataqc_stuckvaluetest_wrapper(x, reso, num, strict_validation=False):
if is_none(reso) or is_fill(reso) or is_none(num) or is_fill(num):
out = np.empty(x.shape, np.int8)
out.fill(-99)
return out
return dataqc_stuckvaluetest(x, np.atleast_1d(reso)[-1], np.atleast_1d(num)[-1], strict_validation=strict_validation)
def dataqc_stuckvaluetest(x, reso, num=10, strict_validation=False):
"""
Description:
Data quality control algorithm testing a time series for "stuck
values", i.e. repeated occurences of one value. Returns 1 for
presumably good data and 0 for data presumed bad.
Implemented by:
2012-10-29: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
qcflag = =dataqc_stuckvaluetest(x, RESO, NUM);
where
qcflag = Boolean output: 0 where stuck values are found, 1 elsewhere.
x = Input time series (vector, numeric).
reso = Resolution; repeat values less than reso apart will be
considered "stuck values".
num = Minimum number of successive values within reso of each other
that will trigger the "stuck value". num is optional and defaults
to 10 if omitted or empty.
References:
OOI (2012). Data Product Specification for Stuck Value Test. Document
Control Number 1341-10008. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> Controlled >> 1000 System Level >>
1341-10008_Data_Product_SPEC_STUCKVL_OOI.pdf)
"""
dat = np.atleast_1d(x)
if strict_validation:
if not utils.isnumeric(dat).all():
raise ValueError('\'x\' must be numeric')
if not utils.isvector(dat):
raise ValueError('\'x\' must be a vector')
if not utils.isreal(dat).all():
raise ValueError('\'x\' must be real')
for k, arg in {'reso': reso, 'num': num}.iteritems():
if not utils.isnumeric(arg).all():
raise ValueError('\'{0}\' must be numeric'.format(k))
if not utils.isscalar(arg):
raise ValueError('\'{0}\' must be a scalar'.format(k))
if not utils.isreal(arg).all():
raise ValueError('\'{0}\' must be real'.format(k))
num = np.abs(num)
dat = np.asanyarray(dat, dtype=np.float)
ll = len(x)
if ll < num:
# Warn - 'num' is greater than len(x), returning zeros
out = np.zeros(dat.size, dtype='int8')
else:
out = stuckvalues(dat, reso, num)
return out
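# Illustrative example (not part of the original OOI code): a run of repeated
# readings long enough to trip the stuck value test. The helper name and
# numbers below are hypothetical; the exact flags come from the compiled
# stuckvalues routine.
def _stuckvaluetest_example():
    x = [4.83, 1.40] + [3.20] * 12 + [4.09, 2.97]
    # twelve identical readings exceed num=10, so that run should flag 0
    return dataqc_stuckvaluetest(x, reso=0.001, num=10)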
def dataqc_gradienttest_wrapper(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
if is_none(ddatdx) or is_fill(ddatdx) or is_none(mindx) or is_fill(mindx) or is_none(startdat) or is_fill(startdat) or is_none(toldat) or is_fill(toldat):
out = np.empty(dat.shape, dtype=np.int8)
out.fill(-99)
return out
outqc = dataqc_gradienttest(dat, x, [-np.atleast_1d(ddatdx)[-1], np.atleast_1d(ddatdx)[-1]], np.atleast_1d(mindx)[-1], np.atleast_1d(startdat)[-1], np.atleast_1d(toldat)[-1], strict_validation=strict_validation)
return outqc
def dataqc_gradienttest(dat, x, ddatdx, mindx, startdat, toldat, strict_validation=False):
"""
Description
Data quality control algorithm testing if changes between successive
data points fall within a certain range.
Input data dat are given as a function of coordinate x. The algorithm
will flag dat values as bad if the change deltaDAT/deltaX between
successive dat values exceeds thresholds given in ddatdx. Once the
        threshold is exceeded, subsequent dat values are considered bad until a dat
value returns to within toldat of the last known good value.
It is possible to remove data points that are too close together in x
coordinates (use mindx).
By default, the first value of dat is considered good. To change this,
use startdat and toldat to set as the first good data point the first
one that comes within toldat of startdat.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
startdat, toldat);
where
outdat = same as dat except that NaNs and values not meeting mindx are
removed.
outx = same as x except that NaNs and values not meeting mindx are
removed.
outqc = output quality control flags for outdat. 0 means bad data, 1
means good data.
dat = input dataset, a numeric real vector.
x = coordinate (e.g. time, distance) along which dat is given. Must be
of the same size as dat and strictly increasing.
ddatdx = two-element vector defining the valid range of ddat/dx
from one point to the next.
mindx = scalar. minimum dx for which this test will be applied (data
that are less than mindx apart will be deleted). defaults to zero
if NaN/empty.
startdat = start value (scalar) of dat that is presumed good. defaults
to first non-NaN value of dat if NaN/empty.
        toldat = tolerance value (scalar) for dat; threshold to within which
            dat must return to be counted as good again, after a ddatdx
            threshold has been exceeded and bad data detected.
References:
OOI (2012). Data Product Specification for Gradient Test. Document
Control Number 1341-100010.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
"""
if strict_validation:
if not utils.isvector(dat) or not utils.isvector(x):
raise ValueError('\'dat\' and \'x\' must be vectors')
if len(dat) != len(x):
            raise ValueError('\'dat\' and \'x\' must be of equal length')
if not all(np.diff(x) > 0):
raise ValueError('\'x\' must be montonically increasing')
dat = np.asanyarray(dat, dtype=np.float).flatten()
x = np.asanyarray(x, dtype=np.float).flatten()
if np.isnan(mindx):
mindx = 0
mindx = mindx or 0
if np.isnan(startdat):
startdat = 0
startdat = startdat or 0
# No strict validation here, they are scalards and they must be validated
# before going into the C-layer
if not utils.isscalar(mindx):
raise ValueError("'mindx' must be scalar, NaN, or empty.")
if not utils.isscalar(startdat):
raise ValueError("'startdat' must be scalar, NaN, or empty.")
# Confirm that there are still data points left, else abort:
if np.abs(x[0] - x[-1]) < mindx:
out = np.zeros(x.shape)
out.fill(1)
log.warn('Too few values to inspect')
return out
grad_min = ddatdx[0]
grad_max = ddatdx[1]
out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)
return out
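# Illustrative example (not part of the original OOI code): a gradient test
# with one implausible jump. The helper name and numbers below are
# hypothetical; the exact flags come from the compiled gradientvalues routine.
def _gradienttest_example():
    x = np.arange(6.0)
    dat = np.array([3.0, 3.1, 3.2, 9.0, 3.3, 3.4])
    # changes are limited to +/-1 per unit x; the jump to 9.0 should flag 0,
    # and flags recover once dat returns to within toldat of the last good value
    return dataqc_gradienttest(dat, x, [-1.0, 1.0], mindx=0, startdat=3.0, toldat=0.5)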
def dataqc_solarelevation(lon, lat, dt):
"""
Description
Computes instantaneous no-sky solar radiation and altitude from date
and time stamp and position data. It is put together from expressions
taken from Appendix E in the 1978 edition of Almanac for Computers,
Nautical Almanac Office, U.S. Naval Observatory. They are reduced
accuracy expressions valid for the years 1800-2100. Solar declination
computed from these expressions is accurate to at least 1'. The solar
constant (1368.0 W/m^2) represents a mean of satellite measurements
made over the last sunspot cycle (1979-1995) taken from Coffey et al
(1995), Earth System Monitor, 6, 6-10.
This code is a python implementation of soradna1.m available in Air-Sea
Toolbox.
Implemented by:
1997-03-08: Version 1.0 (author unknown) of soradna1.m.
1998-08-28: Version 1.1 (author unknown) of soradna1.m.
1999-08-05: Version 2.0 (author unknown) of soradna1.m.
2013-04-07: <NAME>. Initial python implementation. Note,
this function is derived from old, unmaintained code. More robust
implementations exist (e.g. PyEphem and PySolar) that will probably
calculate these values more accurately.
Usage:
z, sorad = dataqc_solarelevation(lon, lat, dt)
where
z = solar altitude [degrees]
sorad = no atmosphere solar radiation [W m^-2]
lon = longitude (east is positive) [decimal degress]
lat = latitude [decimal degrees]
dt = date and time stamp in UTC [seconds since 1970-01-01]
    Examples:
dt = 1329177600 # 2012-02-14 00:00:00
z, sorad = dataqc_solarelevation(120, 30, dt)
z = 15.1566, sorad = 366.8129
    References:
        OOI (2012). Data Product Specification for Solar Elevation. Document
        Control Number 1341-100011.
        https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
        >> Controlled >> 1000 System Level >>
        1341-10011_Data_Product_SPEC_SOLRELV_OOI.pdf)
"""
# Test lengths and types of inputs. Latitude and longitude must be the same
    # size and can either be a scalar or a vector. The date and time stamp
# can also be either a scalar or a vector. If all three inputs are vectors,
# they must be of the same length.
if len(lon) != len(lat):
raise ValueError('\'lon\' and \'lat\' must be the same size')
if utils.isvector(lon) and utils.isvector(lat) and utils.isvector(dt):
# test their lengths
if not len(lon) == len(lat) == len(dt):
raise ValueError('If all inputs are vectors, these must all '
'be of the same length')
# set constants (using values from as_consts.m)
# ------ short-wave flux calculations
# the solar constant [W m^-2] represents a mean of satellite measurements
# made over the last sunspot cycle (1979-1995), taken from Coffey et al.
# (1995), Earth System Monitor, 6, 6-10.
solar_const = 1368.0
# Create a time tuple in UTC from the Epoch time input, and then create
# scalars or numpy arrays of time elements for subsequent calculations.
ldt = len(dt)
yy = np.zeros(ldt, dtype=np.int)
mn = np.zeros(ldt, dtype=np.int)
dd = np.zeros(ldt, dtype=np.int)
hh = np.zeros(ldt, dtype=np.int)
mm = np.zeros(ldt, dtype=np.int)
ss = np.zeros(ldt, dtype=np.int)
for i in range(ldt):
# create time tuple in UTC
gtime = time.gmtime(dt[i])
# create scalar elements
yy[i] = gtime[0]
mn[i] = gtime[1]
dd[i] = gtime[2]
hh[i] = gtime[3]
mm[i] = gtime[4]
ss[i] = gtime[5]
#constants used in function
deg2rad = np.pi / 180.0
rad2deg = 1 / deg2rad
# compute Universal Time in hours
utime = hh + (mm + ss / 60.0) / 60.0
# compute Julian ephemeris date in days (Day 1 is 1 Jan 4713 B.C. which
# equals -4712 Jan 1)
jed = (367.0 * yy - np.fix(7.0*(yy+np.fix((mn+9)/12.0))/4.0)
+ np.fix(275.0*mn/9.0) + dd + 1721013 + utime / 24.0)
# compute interval in Julian centuries since 1900
jc_int = (jed - 2415020.0) / 36525.0
# compute mean anomaly of the sun
ma_sun = 358.475833 + 35999.049750 * jc_int - 0.000150 * jc_int**2
ma_sun = (ma_sun - np.fix(ma_sun/360.0) * 360.0) * deg2rad
# compute mean longitude of sun
ml_sun = 279.696678 + 36000.768920 * jc_int + 0.000303 * jc_int**2
ml_sun = (ml_sun - np.fix(ml_sun/360.0) * 360.0) * deg2rad
# compute mean anomaly of Jupiter
ma_jup = 225.444651 + 2880.0 * jc_int + 154.906654 * jc_int
ma_jup = (ma_jup - np.fix(ma_jup/360.0) * 360.0) * deg2rad
# compute longitude of the ascending node of the moon's orbit
an_moon = (259.183275 - 1800 * jc_int - 134.142008 * jc_int
+ 0.002078 * jc_int**2)
an_moon = (an_moon - np.fix(an_moon/360.0) * 360.0 + 360.0) * deg2rad
# compute mean anomaly of Venus
ma_ven = (212.603219 + 58320 * jc_int + 197.803875 * jc_int
+ 0.001286 * jc_int**2)
ma_ven = (ma_ven - np.fix(ma_ven/360.0) * 360.0) * deg2rad
# compute sun theta
theta = (0.397930 * np.sin(ml_sun) + 0.009999 * np.sin(ma_sun-ml_sun)
+ 0.003334 * np.sin(ma_sun+ml_sun) - 0.000208 * jc_int
* np.sin(ml_sun) + 0.000042 * np.sin(2*ma_sun+ml_sun) - 0.000040
* np.cos(ml_sun) - 0.000039 * np.sin(an_moon-ml_sun) - 0.000030
* jc_int * np.sin(ma_sun-ml_sun) - 0.000014
* np.sin(2*ma_sun-ml_sun) - 0.000010
* np.cos(ma_sun-ml_sun-ma_jup) - 0.000010 * jc_int
* np.sin(ma_sun+ml_sun))
# compute sun rho
rho = (1.000421 - 0.033503 * np.cos(ma_sun) - 0.000140 * np.cos(2*ma_sun)
+ 0.000084 * jc_int * np.cos(ma_sun) - 0.000033
* np.sin(ma_sun-ma_jup) + 0.000027 * np.sin(2.*ma_sun-2.*ma_ven))
# compute declination
decln = np.arcsin(theta/np.sqrt(rho))
# compute equation of time (in seconds of time)
l = 276.697 + 0.98564734 * (jed-2415020.0)
l = (l - 360.0 * np.fix(l/360.0)) * deg2rad
eqt = (-97.8 * np.sin(l) - 431.3 * np.cos(l) + 596.6 * np.sin(2*l)
- 1.9 * np.cos(2*l) + 4.0 * np.sin(3*l) + 19.3 * np.cos(3*l)
- 12.7 * np.sin(4*l))
eqt = eqt / 60.0
# compute local hour angle from global hour angle
gha = 15.0 * (utime-12) + 15.0 * eqt / 60.0
lha = gha - lon
# compute radius vector
rv = np.sqrt(rho)
# compute solar altitude
sz = (np.sin(deg2rad*lat) * np.sin(decln) + np.cos(deg2rad*lat)
* np.cos(decln) * np.cos(deg2rad*lha))
z = rad2deg * np.arcsin(sz)
# compute solar radiation outside atmosphere (defaults to 0 when solar
# altitude is below the horizon)
sorad = (solar_const / rv**2) * np.sin(deg2rad * z)
sorad[z < 0] = 0
return (z, sorad)
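def _example_solarelevation():
    """
    Illustrative usage sketch only (not part of the original DPS code).
    The docstring example above passes scalars, but this implementation
    calls len() on its inputs, so one-element arrays are used here.
    """
    dt = np.array([1329177600])  # 2012-02-14 00:00:00 UTC
    z, sorad = dataqc_solarelevation(np.array([120.0]), np.array([30.0]), dt)
    return z, sorad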
def dataqc_propagateflags_wrapper(strict_validation=False, *args):
'''
This is a function that wraps dataqc_propagateflags for use in ION
It accepts a variable number of vector arguments (of the same shape) and calls dataqc_propagateflags
'''
if not strict_validation:
shapes = np.array([i.shape[0] for i in args])
if not (shapes == shapes[0]).all():
raise ValueError('Input vectors are not the same shape')
return dataqc_propagateflags(np.array(args), strict_validation=strict_validation)
def dataqc_propagateflags(inflags, strict_validation=False):
"""
Description:
Propagate "bad" qc flags (from an arbitrary number of source datasets)
to another (derived) dataset.
Consider data from an oceanographic CTD (conductivity, temperature, and
pressure) instrument. From these three time series, you want to compute
salinity. If any of the three source data (conductivity, temperature,
pressure) is of bad quality, the salinity will be bad as well. You can
feed your QC assessment of the former three into this routine, which
will then give you the combined assessment for the derived (here:
salinity) property.
Implemented by:
2012-07-17: DPS authored by <NAME>. Example code provided
for Matlab.
2013-04-06: <NAME>. Initial python implementation.
Usage:
outflag = dataqc_propagateflags(inflags)
where
outflag = a 1-by-N boolean vector that contains 1 where all of the
inflags are 1, and 0 otherwise.
inflags = an M-by-N boolean matrix, where each of the M rows contains
flags of an independent data set such that "0" means bad data and
"1" means good data.
References:
OOI (2012). Data Product Specification for Combined QC Flags. Document
Control Number 1341-100012.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10012_Data_Product_SPEC_CMBNFLG_OOI.pdf)
"""
if strict_validation:
if not utils.islogical(inflags):
raise ValueError('\'inflags\' must be \'0\' or \'1\' '
'integer flag array')
array_size = inflags.shape
nrows = array_size[0]
if nrows < 2:
raise ValueError('\'inflags\' must be at least a two-dimensional array')
outflag = np.all(inflags, 0)
return outflag.astype('int8')
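def _example_propagateflags():
    """
    Illustrative sketch only (not part of the original DPS code): combine
    the QC flags of three hypothetical source series into the flag vector
    of a derived product.
    """
    inflags = np.array([[1, 1, 0, 1],
                        [1, 0, 1, 1],
                        [1, 1, 1, 1]])
    # expected result: array([1, 0, 0, 1], dtype=int8)
    return dataqc_propagateflags(inflags)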
def dataqc_condcompress(p_orig, p_new, c_orig, cpcor=-9.57e-8):
"""
Description:
Implementation of the Sea-Bird conductivity compressibility correction,
scaling the input conductivity based on ratio of the original pressure
and the updated pressure.
Implemented by:
2013-04-07: Christopher Wingard. Initial python implementation.
Usage:
c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor)
where
c_new = updated conductivity record [S/m]
p_orig = original pressure used to calculate original conductivity,
this is typically the L1a PRESWAT [dbar]
p_new = updated pressure, typically L1b PRESWAT [dbar]
c_orig = original conductivity record, typically L1a CONDWAT [S/m]
cpcor = pressure correction coefficient used to calculate original
conductivity, default is -9.57e-8
References:
OOI (2012). Data Product Specification for Conductivity Compressibility
Correction. Document Control Number 1341-10030.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
>> Controlled >> 1000 System Level >>
1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf)
"""
c_new = c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)
return c_new
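def _example_condcompress():
    """
    Illustrative sketch only (not part of the original DPS code): rescale a
    hypothetical conductivity record after a pressure update, using the
    default Sea-Bird cpcor coefficient.
    """
    c_orig = np.array([3.5, 3.6])        # S/m
    p_orig = np.array([1000.0, 1005.0])  # dbar
    p_new = np.array([1010.0, 1015.0])   # dbar
    return dataqc_condcompress(p_orig, p_new, c_orig)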
|
[
"logging.getLogger",
"numpy.sqrt",
"numpy.polyfit",
"ion_functions.utils.islogical",
"numpy.column_stack",
"numpy.asanyarray",
"ion_functions.utils.isnumeric",
"numpy.array",
"numpy.sin",
"ion_functions.qc.qc_extensions.gradientvalues",
"numpy.fix",
"ion_functions.qc.qc_extensions.ntp_to_month",
"numpy.diff",
"numpy.max",
"numpy.polyval",
"numpy.empty",
"ooi.logging.log.warn",
"numpy.min",
"ion_functions.qc.qc_extensions.stuckvalues",
"ion_functions.utils.isvector",
"ion_functions.utils.ismatrix",
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"ion_functions.qc.qc_extensions.spikevalues",
"numpy.interp",
"numpy.cos",
"ion_functions.utils.isscalar",
"numpy.std",
"time.gmtime",
"numpy.atleast_1d",
"numpy.arcsin",
"numpy.zeros",
"numpy.all",
"ion_functions.utils.isreal"
] |
[((2766, 2784), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (2779, 2784), True, 'import numpy as np\n'), ((2798, 2819), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (2811, 2819), True, 'import numpy as np\n'), ((9354, 9374), 'numpy.max', 'np.max', (['datlim[:, 1]'], {}), '(datlim[:, 1])\n', (9360, 9374), True, 'import numpy as np\n'), ((9390, 9410), 'numpy.min', 'np.min', (['datlim[:, 0]'], {}), '(datlim[:, 0])\n', (9396, 9410), True, 'import numpy as np\n'), ((12027, 12045), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (12040, 12045), True, 'import numpy as np\n'), ((12664, 12698), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (12677, 12698), True, 'import numpy as np\n'), ((12714, 12741), 'ion_functions.qc.qc_extensions.spikevalues', 'spikevalues', (['dat', 'L', 'N', 'acc'], {}), '(dat, L, N, acc)\n', (12725, 12741), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((15208, 15226), 'numpy.atleast_1d', 'np.atleast_1d', (['dat'], {}), '(dat)\n', (15221, 15226), True, 'import numpy as np\n'), ((15235, 15251), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (15248, 15251), True, 'import numpy as np\n'), ((16104, 16129), 'numpy.polyfit', 'np.polyfit', (['t', 'dat', 'ord_n'], {}), '(t, dat, ord_n)\n', (16114, 16129), True, 'import numpy as np\n'), ((16142, 16159), 'numpy.polyval', 'np.polyval', (['pp', 't'], {}), '(pp, t)\n', (16152, 16159), True, 'import numpy as np\n'), ((18121, 18137), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (18134, 18137), True, 'import numpy as np\n'), ((18861, 18872), 'numpy.abs', 'np.abs', (['num'], {}), '(num)\n', (18867, 18872), True, 'import numpy as np\n'), ((18883, 18917), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (18896, 18917), True, 'import numpy as np\n'), ((22914, 22929), 'numpy.isnan', 'np.isnan', (['mindx'], {}), '(mindx)\n', (22922, 22929), True, 'import numpy as np\n'), ((22979, 22997), 'numpy.isnan', 'np.isnan', (['startdat'], {}), '(startdat)\n', (22987, 22997), True, 'import numpy as np\n'), ((23659, 23726), 'ion_functions.qc.qc_extensions.gradientvalues', 'gradientvalues', (['dat', 'x', 'grad_min', 'grad_max', 'mindx', 'startdat', 'toldat'], {}), '(dat, x, grad_min, grad_max, mindx, startdat, toldat)\n', (23673, 23726), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((27028, 27055), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27036, 27055), True, 'import numpy as np\n'), ((27065, 27092), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27073, 27092), True, 'import numpy as np\n'), ((27102, 27129), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27110, 27129), True, 'import numpy as np\n'), ((27139, 27166), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27147, 27166), True, 'import numpy as np\n'), ((27176, 27203), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27184, 27203), True, 'import numpy as np\n'), ((27213, 27240), 'numpy.zeros', 'np.zeros', (['ldt'], {'dtype': 'np.int'}), '(ldt, dtype=np.int)\n', (27221, 27240), True, 'import numpy as np\n'), ((30314, 30326), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (30321, 30326), True, 'import 
numpy as np\n'), ((33207, 33225), 'numpy.all', 'np.all', (['inflags', '(0)'], {}), '(inflags, 0)\n', (33213, 33225), True, 'import numpy as np\n'), ((656, 690), 'logging.getLogger', 'logging.getLogger', (['"""ion-functions"""'], {}), "('ion-functions')\n", (673, 690), False, 'import logging\n'), ((1209, 1243), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (1217, 1243), True, 'import numpy as np\n'), ((3623, 3657), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3631, 3657), True, 'import numpy as np\n'), ((3790, 3824), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3798, 3824), True, 'import numpy as np\n'), ((3902, 3936), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (3910, 3936), True, 'import numpy as np\n'), ((4023, 4057), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (4031, 4057), True, 'import numpy as np\n'), ((4520, 4538), 'numpy.column_stack', 'np.column_stack', (['z'], {}), '(z)\n', (4535, 4538), True, 'import numpy as np\n'), ((8558, 8620), 'numpy.interp', 'np.interp', (['z', 'datlimz', 'datlim[:, 0]'], {'left': 'np.nan', 'right': 'np.nan'}), '(z, datlimz, datlim[:, 0], left=np.nan, right=np.nan)\n', (8567, 8620), True, 'import numpy as np\n'), ((8700, 8762), 'numpy.interp', 'np.interp', (['z', 'datlimz', 'datlim[:, 1]'], {'left': 'np.nan', 'right': 'np.nan'}), '(z, datlimz, datlim[:, 1], left=np.nan, right=np.nan)\n', (8709, 8762), True, 'import numpy as np\n'), ((9304, 9318), 'numpy.isnan', 'np.isnan', (['lim1'], {}), '(lim1)\n', (9312, 9318), True, 'import numpy as np\n'), ((9323, 9337), 'numpy.isnan', 'np.isnan', (['lim2'], {}), '(lim2)\n', (9331, 9337), True, 'import numpy as np\n'), ((9694, 9728), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (9702, 9728), True, 'import numpy as np\n'), ((12929, 12963), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (12937, 12963), True, 'import numpy as np\n'), ((16622, 16648), 'numpy.empty', 'np.empty', (['x.shape', 'np.int8'], {}), '(x.shape, np.int8)\n', (16630, 16648), True, 'import numpy as np\n'), ((19028, 19060), 'numpy.zeros', 'np.zeros', (['dat.size'], {'dtype': '"""int8"""'}), "(dat.size, dtype='int8')\n", (19036, 19060), True, 'import numpy as np\n'), ((19085, 19112), 'ion_functions.qc.qc_extensions.stuckvalues', 'stuckvalues', (['dat', 'reso', 'num'], {}), '(dat, reso, num)\n', (19096, 19112), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((19402, 19436), 'numpy.empty', 'np.empty', (['dat.shape'], {'dtype': 'np.int8'}), '(dat.shape, dtype=np.int8)\n', (19410, 19436), True, 'import numpy as np\n'), ((23179, 23200), 'ion_functions.utils.isscalar', 'utils.isscalar', (['mindx'], {}), '(mindx)\n', (23193, 23200), False, 'from ion_functions import utils\n'), ((23280, 23304), 'ion_functions.utils.isscalar', 'utils.isscalar', (['startdat'], {}), '(startdat)\n', (23294, 23304), False, 'from ion_functions import utils\n'), ((23450, 23470), 'numpy.abs', 'np.abs', (['(x[0] - x[-1])'], {}), '(x[0] - x[-1])\n', (23456, 23470), True, 'import numpy as np\n'), ((23494, 23511), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (23502, 23511), True, 'import numpy as np\n'), ((23540, 23577), 'ooi.logging.log.warn', 
'log.warn', (['"""Too few values to inspect"""'], {}), "('Too few values to inspect')\n", (23548, 23577), False, 'from ooi.logging import log\n'), ((26249, 26268), 'ion_functions.utils.isvector', 'utils.isvector', (['lon'], {}), '(lon)\n', (26263, 26268), False, 'from ion_functions import utils\n'), ((26273, 26292), 'ion_functions.utils.isvector', 'utils.isvector', (['lat'], {}), '(lat)\n', (26287, 26292), False, 'from ion_functions import utils\n'), ((26297, 26315), 'ion_functions.utils.isvector', 'utils.isvector', (['dt'], {}), '(dt)\n', (26311, 26315), False, 'from ion_functions import utils\n'), ((27317, 27335), 'time.gmtime', 'time.gmtime', (['dt[i]'], {}), '(dt[i])\n', (27328, 27335), False, 'import time\n'), ((30492, 30505), 'numpy.arcsin', 'np.arcsin', (['sz'], {}), '(sz)\n', (30501, 30505), True, 'import numpy as np\n'), ((30655, 30674), 'numpy.sin', 'np.sin', (['(deg2rad * z)'], {}), '(deg2rad * z)\n', (30661, 30674), True, 'import numpy as np\n'), ((31028, 31064), 'numpy.array', 'np.array', (['[i.shape[0] for i in args]'], {}), '([i.shape[0] for i in args])\n', (31036, 31064), True, 'import numpy as np\n'), ((31212, 31226), 'numpy.array', 'np.array', (['args'], {}), '(args)\n', (31220, 31226), True, 'import numpy as np\n'), ((721, 739), 'numpy.atleast_1d', 'np.atleast_1d', (['arr'], {}), '(arr)\n', (734, 739), True, 'import numpy as np\n'), ((4267, 4299), 'numpy.asanyarray', 'np.asanyarray', (['v'], {'dtype': 'np.float'}), '(v, dtype=np.float)\n', (4280, 4299), True, 'import numpy as np\n'), ((4316, 4331), 'ion_functions.qc.qc_extensions.ntp_to_month', 'ntp_to_month', (['v'], {}), '(v)\n', (4328, 4331), False, 'from ion_functions.qc.qc_extensions import stuckvalues, spikevalues, gradientvalues, ntp_to_month\n'), ((6201, 6220), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (6215, 6220), False, 'from ion_functions import utils\n'), ((6295, 6317), 'ion_functions.utils.ismatrix', 'utils.ismatrix', (['datlim'], {}), '(datlim)\n', (6309, 6317), False, 'from ion_functions import utils\n'), ((9803, 9821), 'numpy.atleast_1d', 'np.atleast_1d', (['acc'], {}), '(acc)\n', (9816, 9821), True, 'import numpy as np\n'), ((9827, 9843), 'numpy.atleast_1d', 'np.atleast_1d', (['N'], {}), '(N)\n', (9840, 9843), True, 'import numpy as np\n'), ((9849, 9865), 'numpy.atleast_1d', 'np.atleast_1d', (['L'], {}), '(L)\n', (9862, 9865), True, 'import numpy as np\n'), ((12282, 12301), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (12296, 12301), False, 'from ion_functions import utils\n'), ((13045, 13065), 'numpy.atleast_1d', 'np.atleast_1d', (['ord_n'], {}), '(ord_n)\n', (13058, 13065), True, 'import numpy as np\n'), ((13071, 13090), 'numpy.atleast_1d', 'np.atleast_1d', (['nstd'], {}), '(nstd)\n', (13084, 13090), True, 'import numpy as np\n'), ((16727, 16746), 'numpy.atleast_1d', 'np.atleast_1d', (['reso'], {}), '(reso)\n', (16740, 16746), True, 'import numpy as np\n'), ((16752, 16770), 'numpy.atleast_1d', 'np.atleast_1d', (['num'], {}), '(num)\n', (16765, 16770), True, 'import numpy as np\n'), ((18278, 18297), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (18292, 18297), False, 'from ion_functions import utils\n'), ((19575, 19595), 'numpy.atleast_1d', 'np.atleast_1d', (['mindx'], {}), '(mindx)\n', (19588, 19595), True, 'import numpy as np\n'), ((19601, 19624), 'numpy.atleast_1d', 'np.atleast_1d', (['startdat'], {}), '(startdat)\n', (19614, 19624), True, 'import numpy as np\n'), ((19630, 19651), 'numpy.atleast_1d', 
'np.atleast_1d', (['toldat'], {}), '(toldat)\n', (19643, 19651), True, 'import numpy as np\n'), ((22810, 22844), 'numpy.asanyarray', 'np.asanyarray', (['dat'], {'dtype': 'np.float'}), '(dat, dtype=np.float)\n', (22823, 22844), True, 'import numpy as np\n'), ((22863, 22895), 'numpy.asanyarray', 'np.asanyarray', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (22876, 22895), True, 'import numpy as np\n'), ((29479, 29502), 'numpy.sin', 'np.sin', (['(ma_sun + ml_sun)'], {}), '(ma_sun + ml_sun)\n', (29485, 29502), True, 'import numpy as np\n'), ((29710, 29745), 'numpy.sin', 'np.sin', (['(2.0 * ma_sun - 2.0 * ma_ven)'], {}), '(2.0 * ma_sun - 2.0 * ma_ven)\n', (29716, 29745), True, 'import numpy as np\n'), ((29794, 29806), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (29801, 29806), True, 'import numpy as np\n'), ((30119, 30132), 'numpy.sin', 'np.sin', (['(4 * l)'], {}), '(4 * l)\n', (30125, 30132), True, 'import numpy as np\n'), ((30367, 30388), 'numpy.sin', 'np.sin', (['(deg2rad * lat)'], {}), '(deg2rad * lat)\n', (30373, 30388), True, 'import numpy as np\n'), ((30389, 30402), 'numpy.sin', 'np.sin', (['decln'], {}), '(decln)\n', (30395, 30402), True, 'import numpy as np\n'), ((30453, 30474), 'numpy.cos', 'np.cos', (['(deg2rad * lha)'], {}), '(deg2rad * lha)\n', (30459, 30474), True, 'import numpy as np\n'), ((32902, 32926), 'ion_functions.utils.islogical', 'utils.islogical', (['inflags'], {}), '(inflags)\n', (32917, 32926), False, 'from ion_functions import utils\n'), ((860, 878), 'numpy.atleast_1d', 'np.atleast_1d', (['arr'], {}), '(arr)\n', (873, 878), True, 'import numpy as np\n'), ((1325, 1347), 'numpy.atleast_1d', 'np.atleast_1d', (['dat_min'], {}), '(dat_min)\n', (1338, 1347), True, 'import numpy as np\n'), ((1353, 1375), 'numpy.atleast_1d', 'np.atleast_1d', (['dat_max'], {}), '(dat_max)\n', (1366, 1375), True, 'import numpy as np\n'), ((15670, 15689), 'ion_functions.utils.isvector', 'utils.isvector', (['arg'], {}), '(arg)\n', (15684, 15689), False, 'from ion_functions import utils\n'), ((15848, 15867), 'ion_functions.utils.isscalar', 'utils.isscalar', (['arg'], {}), '(arg)\n', (15862, 15867), False, 'from ion_functions import utils\n'), ((16399, 16417), 'numpy.ones', 'np.ones', (['dat.shape'], {}), '(dat.shape)\n', (16406, 16417), True, 'import numpy as np\n'), ((18646, 18665), 'ion_functions.utils.isscalar', 'utils.isscalar', (['arg'], {}), '(arg)\n', (18660, 18665), False, 'from ion_functions import utils\n'), ((19547, 19568), 'numpy.atleast_1d', 'np.atleast_1d', (['ddatdx'], {}), '(ddatdx)\n', (19560, 19568), True, 'import numpy as np\n'), ((22477, 22496), 'ion_functions.utils.isvector', 'utils.isvector', (['dat'], {}), '(dat)\n', (22491, 22496), False, 'from ion_functions import utils\n'), ((22504, 22521), 'ion_functions.utils.isvector', 'utils.isvector', (['x'], {}), '(x)\n', (22518, 22521), False, 'from ion_functions import utils\n'), ((28148, 28170), 'numpy.fix', 'np.fix', (['(ma_sun / 360.0)'], {}), '(ma_sun / 360.0)\n', (28154, 28170), True, 'import numpy as np\n'), ((28319, 28341), 'numpy.fix', 'np.fix', (['(ml_sun / 360.0)'], {}), '(ml_sun / 360.0)\n', (28325, 28341), True, 'import numpy as np\n'), ((28485, 28507), 'numpy.fix', 'np.fix', (['(ma_jup / 360.0)'], {}), '(ma_jup / 360.0)\n', (28491, 28507), True, 'import numpy as np\n'), ((28931, 28953), 'numpy.fix', 'np.fix', (['(ma_ven / 360.0)'], {}), '(ma_ven / 360.0)\n', (28937, 28953), True, 'import numpy as np\n'), ((29415, 29447), 'numpy.cos', 'np.cos', (['(ma_sun - ml_sun - ma_jup)'], {}), '(ma_sun - ml_sun 
- ma_jup)\n', (29421, 29447), True, 'import numpy as np\n'), ((29675, 29698), 'numpy.sin', 'np.sin', (['(ma_sun - ma_jup)'], {}), '(ma_sun - ma_jup)\n', (29681, 29698), True, 'import numpy as np\n'), ((29929, 29946), 'numpy.fix', 'np.fix', (['(l / 360.0)'], {}), '(l / 360.0)\n', (29935, 29946), True, 'import numpy as np\n'), ((30087, 30100), 'numpy.cos', 'np.cos', (['(3 * l)'], {}), '(3 * l)\n', (30093, 30100), True, 'import numpy as np\n'), ((30405, 30426), 'numpy.cos', 'np.cos', (['(deg2rad * lat)'], {}), '(deg2rad * lat)\n', (30411, 30426), True, 'import numpy as np\n'), ((30437, 30450), 'numpy.cos', 'np.cos', (['decln'], {}), '(decln)\n', (30443, 30450), True, 'import numpy as np\n'), ((2862, 2882), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (2877, 2882), False, 'from ion_functions import utils\n'), ((2962, 2979), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (2974, 2979), False, 'from ion_functions import utils\n'), ((3056, 3079), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['datlim'], {}), '(datlim)\n', (3071, 3079), False, 'from ion_functions import utils\n'), ((3162, 3182), 'ion_functions.utils.isreal', 'utils.isreal', (['datlim'], {}), '(datlim)\n', (3174, 3182), False, 'from ion_functions import utils\n'), ((12088, 12108), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (12103, 12108), False, 'from ion_functions import utils\n'), ((12188, 12205), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (12200, 12205), False, 'from ion_functions import utils\n'), ((16236, 16247), 'numpy.std', 'np.std', (['dat'], {}), '(dat)\n', (16242, 16247), True, 'import numpy as np\n'), ((18180, 18200), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['dat'], {}), '(dat)\n', (18195, 18200), False, 'from ion_functions import utils\n'), ((18370, 18387), 'ion_functions.utils.isreal', 'utils.isreal', (['dat'], {}), '(dat)\n', (18382, 18387), False, 'from ion_functions import utils\n'), ((19520, 19541), 'numpy.atleast_1d', 'np.atleast_1d', (['ddatdx'], {}), '(ddatdx)\n', (19533, 19541), True, 'import numpy as np\n'), ((22712, 22722), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (22719, 22722), True, 'import numpy as np\n'), ((27867, 27891), 'numpy.fix', 'np.fix', (['(275.0 * mn / 9.0)'], {}), '(275.0 * mn / 9.0)\n', (27873, 27891), True, 'import numpy as np\n'), ((28720, 28743), 'numpy.fix', 'np.fix', (['(an_moon / 360.0)'], {}), '(an_moon / 360.0)\n', (28726, 28743), True, 'import numpy as np\n'), ((29365, 29392), 'numpy.sin', 'np.sin', (['(2 * ma_sun - ml_sun)'], {}), '(2 * ma_sun - ml_sun)\n', (29371, 29392), True, 'import numpy as np\n'), ((29636, 29650), 'numpy.cos', 'np.cos', (['ma_sun'], {}), '(ma_sun)\n', (29642, 29650), True, 'import numpy as np\n'), ((30066, 30079), 'numpy.sin', 'np.sin', (['(3 * l)'], {}), '(3 * l)\n', (30072, 30079), True, 'import numpy as np\n'), ((3566, 3587), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (3579, 3587), True, 'import numpy as np\n'), ((3733, 3754), 'numpy.atleast_1d', 'np.atleast_1d', (['datlim'], {}), '(datlim)\n', (3746, 3754), True, 'import numpy as np\n'), ((6567, 6587), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (6582, 6587), False, 'from ion_functions import utils\n'), ((6685, 6702), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (6697, 6702), False, 'from ion_functions import utils\n'), ((12444, 12464), 'ion_functions.utils.isnumeric', 
'utils.isnumeric', (['arg'], {}), '(arg)\n', (12459, 12464), False, 'from ion_functions import utils\n'), ((12562, 12579), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (12574, 12579), False, 'from ion_functions import utils\n'), ((15384, 15404), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (15399, 15404), False, 'from ion_functions import utils\n'), ((15502, 15519), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (15514, 15519), False, 'from ion_functions import utils\n'), ((16206, 16225), 'numpy.std', 'np.std', (['(dat - datpp)'], {}), '(dat - datpp)\n', (16212, 16225), True, 'import numpy as np\n'), ((18528, 18548), 'ion_functions.utils.isnumeric', 'utils.isnumeric', (['arg'], {}), '(arg)\n', (18543, 18548), False, 'from ion_functions import utils\n'), ((18758, 18775), 'ion_functions.utils.isreal', 'utils.isreal', (['arg'], {}), '(arg)\n', (18770, 18775), False, 'from ion_functions import utils\n'), ((29317, 29340), 'numpy.sin', 'np.sin', (['(ma_sun - ml_sun)'], {}), '(ma_sun - ml_sun)\n', (29323, 29340), True, 'import numpy as np\n'), ((29586, 29604), 'numpy.cos', 'np.cos', (['(2 * ma_sun)'], {}), '(2 * ma_sun)\n', (29592, 29604), True, 'import numpy as np\n'), ((30046, 30059), 'numpy.cos', 'np.cos', (['(2 * l)'], {}), '(2 * l)\n', (30052, 30059), True, 'import numpy as np\n'), ((29259, 29283), 'numpy.sin', 'np.sin', (['(an_moon - ml_sun)'], {}), '(an_moon - ml_sun)\n', (29265, 29283), True, 'import numpy as np\n'), ((29558, 29572), 'numpy.cos', 'np.cos', (['ma_sun'], {}), '(ma_sun)\n', (29564, 29572), True, 'import numpy as np\n'), ((30015, 30028), 'numpy.sin', 'np.sin', (['(2 * l)'], {}), '(2 * l)\n', (30021, 30028), True, 'import numpy as np\n'), ((29231, 29245), 'numpy.cos', 'np.cos', (['ml_sun'], {}), '(ml_sun)\n', (29237, 29245), True, 'import numpy as np\n'), ((29975, 29984), 'numpy.sin', 'np.sin', (['l'], {}), '(l)\n', (29981, 29984), True, 'import numpy as np\n'), ((29995, 30004), 'numpy.cos', 'np.cos', (['l'], {}), '(l)\n', (30001, 30004), True, 'import numpy as np\n'), ((29181, 29208), 'numpy.sin', 'np.sin', (['(2 * ma_sun + ml_sun)'], {}), '(2 * ma_sun + ml_sun)\n', (29187, 29208), True, 'import numpy as np\n'), ((27828, 27851), 'numpy.fix', 'np.fix', (['((mn + 9) / 12.0)'], {}), '((mn + 9) / 12.0)\n', (27834, 27851), True, 'import numpy as np\n'), ((29153, 29167), 'numpy.sin', 'np.sin', (['ml_sun'], {}), '(ml_sun)\n', (29159, 29167), True, 'import numpy as np\n'), ((29096, 29119), 'numpy.sin', 'np.sin', (['(ma_sun + ml_sun)'], {}), '(ma_sun + ml_sun)\n', (29102, 29119), True, 'import numpy as np\n'), ((29020, 29034), 'numpy.sin', 'np.sin', (['ml_sun'], {}), '(ml_sun)\n', (29026, 29034), True, 'import numpy as np\n'), ((29048, 29071), 'numpy.sin', 'np.sin', (['(ma_sun - ml_sun)'], {}), '(ma_sun - ml_sun)\n', (29054, 29071), True, 'import numpy as np\n')]
|
from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution
from mlagents.envs.base_env import BatchedStepResult, AgentGroupSpec
from mlagents.envs.exception import UnityEnvironmentException
import numpy as np
from typing import List
def step_result_to_brain_info(
step_result: BatchedStepResult,
group_spec: AgentGroupSpec,
agent_id_prefix: int = None,
) -> BrainInfo:
n_agents = step_result.n_agents()
vis_obs_indices = []
vec_obs_indices = []
for index, observation in enumerate(step_result.obs):
if len(observation.shape) == 2:
vec_obs_indices.append(index)
elif len(observation.shape) == 4:
vis_obs_indices.append(index)
else:
raise UnityEnvironmentException(
"Invalid input received from the environment, the observation should "
"either be a vector of float or a PNG image"
)
if len(vec_obs_indices) == 0:
vec_obs = np.zeros((n_agents, 0), dtype=np.float32)
else:
vec_obs = np.concatenate([step_result.obs[i] for i in vec_obs_indices], axis=1)
vis_obs = [step_result.obs[i] for i in vis_obs_indices]
mask = np.ones((n_agents, np.sum(group_spec.action_size)), dtype=np.float32)
if group_spec.is_action_discrete():
mask = np.ones(
(n_agents, np.sum(group_spec.discrete_action_branches)), dtype=np.float32
)
if step_result.action_mask is not None:
mask = 1 - np.concatenate(step_result.action_mask, axis=1)
if agent_id_prefix is None:
agent_ids = [str(ag_id) for ag_id in list(step_result.agent_id)]
else:
agent_ids = [f"${agent_id_prefix}-{ag_id}" for ag_id in step_result.agent_id]
return BrainInfo(
vis_obs,
vec_obs,
list(step_result.reward),
agent_ids,
list(step_result.done),
list(step_result.max_step),
mask,
)
def group_spec_to_brain_parameters(
name: str, group_spec: AgentGroupSpec
) -> BrainParameters:
vec_size = np.sum(
[shape[0] for shape in group_spec.observation_shapes if len(shape) == 1]
)
vis_sizes = [shape for shape in group_spec.observation_shapes if len(shape) == 3]
cam_res = [CameraResolution(s[0], s[1], s[2]) for s in vis_sizes]
a_size: List[int] = []
if group_spec.is_action_discrete():
a_size += list(group_spec.discrete_action_branches)
vector_action_space_type = 0
else:
a_size += [group_spec.action_size]
vector_action_space_type = 1
return BrainParameters(
name, int(vec_size), cam_res, a_size, [], vector_action_space_type
)
|
[
"mlagents.trainers.brain.CameraResolution",
"numpy.sum",
"numpy.zeros",
"mlagents.envs.exception.UnityEnvironmentException",
"numpy.concatenate"
] |
[((990, 1031), 'numpy.zeros', 'np.zeros', (['(n_agents, 0)'], {'dtype': 'np.float32'}), '((n_agents, 0), dtype=np.float32)\n', (998, 1031), True, 'import numpy as np\n'), ((1060, 1129), 'numpy.concatenate', 'np.concatenate', (['[step_result.obs[i] for i in vec_obs_indices]'], {'axis': '(1)'}), '([step_result.obs[i] for i in vec_obs_indices], axis=1)\n', (1074, 1129), True, 'import numpy as np\n'), ((2261, 2295), 'mlagents.trainers.brain.CameraResolution', 'CameraResolution', (['s[0]', 's[1]', 's[2]'], {}), '(s[0], s[1], s[2])\n', (2277, 2295), False, 'from mlagents.trainers.brain import BrainInfo, BrainParameters, CameraResolution\n'), ((1220, 1250), 'numpy.sum', 'np.sum', (['group_spec.action_size'], {}), '(group_spec.action_size)\n', (1226, 1250), True, 'import numpy as np\n'), ((749, 898), 'mlagents.envs.exception.UnityEnvironmentException', 'UnityEnvironmentException', (['"""Invalid input received from the environment, the observation should either be a vector of float or a PNG image"""'], {}), "(\n 'Invalid input received from the environment, the observation should either be a vector of float or a PNG image'\n )\n", (774, 898), False, 'from mlagents.envs.exception import UnityEnvironmentException\n'), ((1358, 1401), 'numpy.sum', 'np.sum', (['group_spec.discrete_action_branches'], {}), '(group_spec.discrete_action_branches)\n', (1364, 1401), True, 'import numpy as np\n'), ((1502, 1549), 'numpy.concatenate', 'np.concatenate', (['step_result.action_mask'], {'axis': '(1)'}), '(step_result.action_mask, axis=1)\n', (1516, 1549), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from skimage.color import rgb2gray
from skimage.io import imread, imsave
from scipy.misc import toimage
import numpy as np
import wrapper as wr
###########################################################
# IMAGE IO
###########################################################
def imload_rgb(path):
"""Load and return an RGB image in the range [0, 1]."""
return imread(path) / 255.0
def save_img(image, imgname, use_JPEG=False):
"""Save image as either .jpeg or .png"""
if use_JPEG:
imsave(imgname+".JPEG", image)
else:
toimage(image,
cmin=0.0, cmax=1.0).save(imgname+".png")
###########################################################
# IMAGE MANIPULATION
###########################################################
def adjust_contrast(image, contrast_level):
"""Return the image scaled to a certain contrast level in [0, 1].
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
assert(contrast_level >= 0.0), "contrast_level too low."
assert(contrast_level <= 1.0), "contrast_level too high."
return (1-contrast_level)/2.0 + image.dot(contrast_level)
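def _example_adjust_contrast():
    """Illustrative sketch only (not part of the original module): with
    contrast_level = 0.1 a full-range grayscale image is compressed into
    [0.45, 0.55] around mid-gray."""
    img = np.array([0.0, 0.5, 1.0])
    return adjust_contrast(img, 0.1)  # approximately [0.45, 0.5, 0.55]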
def grayscale_contrast(image, contrast_level):
"""Convert to grayscale. Adjust contrast.
parameters:
- image: a numpy.ndarray
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
"""
return adjust_contrast(rgb2gray(image), contrast_level)
def uniform_noise(image, width, contrast_level, rng):
"""Convert to grayscale. Adjust contrast. Apply uniform noise.
parameters:
- image: a numpy.ndarray
- width: a scalar indicating width of additive uniform noise
-> then noise will be in range [-width, width]
- contrast_level: a scalar in [0, 1]; with 1 -> full contrast
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
image = grayscale_contrast(image, contrast_level)
return apply_uniform_noise(image, -width, width, rng)
###########################################################
# HELPER FUNCTIONS
###########################################################
def apply_uniform_noise(image, low, high, rng=None):
"""Apply uniform noise to an image, clip outside values to 0 and 1.
parameters:
- image: a numpy.ndarray
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
nrow = image.shape[0]
ncol = image.shape[1]
image = image + get_uniform_noise(low, high, nrow, ncol, rng)
#clip values
image = np.where(image < 0, 0, image)
image = np.where(image > 1, 1, image)
assert is_in_bounds(image, 0, 1), "values <0 or >1 occurred"
return image
def get_uniform_noise(low, high, nrow, ncol, rng=None):
"""Return uniform noise within [low, high) of size (nrow, ncol).
parameters:
- low: lower bound of noise within [low, high)
- high: upper bound of noise within [low, high)
- nrow: number of rows of desired noise
- ncol: number of columns of desired noise
- rng: a np.random.RandomState(seed=XYZ) to make it reproducible
"""
if rng is None:
return np.random.uniform(low=low, high=high,
size=(nrow, ncol))
else:
return rng.uniform(low=low, high=high,
size=(nrow, ncol))
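def _example_reproducible_noise():
    """Illustrative sketch only (not part of the original module): the same
    seed yields an identical noise field, which is why the rng argument
    exists."""
    noise_a = get_uniform_noise(-0.1, 0.1, 4, 4, np.random.RandomState(seed=0))
    noise_b = get_uniform_noise(-0.1, 0.1, 4, 4, np.random.RandomState(seed=0))
    return np.allclose(noise_a, noise_b)  # True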
def is_in_bounds(mat, low, high):
"""Return wether all values in 'mat' fall between low and high.
parameters:
- mat: a numpy.ndarray
- low: lower bound (inclusive)
- high: upper bound (inclusive)
"""
return np.all(np.logical_and(mat >= low, mat <= high))
def eidolon_partially_coherent_disarray(image, reach, coherence, grain):
"""Return parametrically distorted images (produced by Eidolon factory.
For more information on the effect of different distortions, please
have a look at the paper: Koenderink et al., JoV 2017,
Eidolons: Novel stimuli for vision research).
- image: a numpy.ndarray
- reach: float, controlling the strength of the manipulation
- coherence: a float within [0, 1] with 1 = full coherence
- grain: float, controlling how fine-grained the distortion is
"""
return wr.partially_coherent_disarray(wr.data_to_pic(image),
reach, coherence, grain)
###########################################################
# MAIN METHOD FOR TESTING & DEMONSTRATION PURPOSES
###########################################################
if __name__ == "__main__":
print("""This main method should generate manipulated
images in the directory where it was executed.""")
use_JPEG = False # either JPEG or PNG
img = imload_rgb("test_image.JPEG")
###################################################
# A) Example for color-experiment:
# - convert to grayscale
###################################################
img_grayscale = rgb2gray(img)
save_img(img_grayscale, "test_image_grayscale", use_JPEG)
###################################################
# B) Example for contrast-experiment:
# - convert to grayscale and
# - reduce contrast to nominal contrast of 10%
###################################################
contrast_level_1 = 0.1
img_low_contrast = grayscale_contrast(image=img,
contrast_level=contrast_level_1)
save_img(img_low_contrast, "test_image_low_contrast", use_JPEG)
###################################################
# C) Example for noise-experiment:
# - convert to grayscale and
# - reduce contrast to 30% and
# - apply uniform noise with width 0.1
###################################################
noise_width = 0.1
contrast_level_2 = 0.3
rng = np.random.RandomState(seed=42)
img_noisy = uniform_noise(image=img, width=noise_width,
contrast_level=contrast_level_2,
rng=rng)
save_img(img_noisy, "test_image_noisy", use_JPEG)
###################################################
# D) Example for eidolon-experiment:
# - use partially_coherent_disarray
###################################################
grain = 10.0
coherence = 1.0
reach = 8.0
img_eidolon = eidolon_partially_coherent_disarray(img, reach,
coherence, grain)
save_img(img_eidolon, "test_image_eidolon", use_JPEG)
|
[
"skimage.color.rgb2gray",
"numpy.logical_and",
"numpy.where",
"scipy.misc.toimage",
"skimage.io.imread",
"skimage.io.imsave",
"numpy.random.uniform",
"wrapper.data_to_pic",
"numpy.random.RandomState"
] |
[((2702, 2731), 'numpy.where', 'np.where', (['(image < 0)', '(0)', 'image'], {}), '(image < 0, 0, image)\n', (2710, 2731), True, 'import numpy as np\n'), ((2744, 2773), 'numpy.where', 'np.where', (['(image > 1)', '(1)', 'image'], {}), '(image > 1, 1, image)\n', (2752, 2773), True, 'import numpy as np\n'), ((5095, 5108), 'skimage.color.rgb2gray', 'rgb2gray', (['img'], {}), '(img)\n', (5103, 5108), False, 'from skimage.color import rgb2gray\n'), ((5972, 6002), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (5993, 6002), True, 'import numpy as np\n'), ((397, 409), 'skimage.io.imread', 'imread', (['path'], {}), '(path)\n', (403, 409), False, 'from skimage.io import imread, imsave\n'), ((537, 569), 'skimage.io.imsave', 'imsave', (["(imgname + '.JPEG')", 'image'], {}), "(imgname + '.JPEG', image)\n", (543, 569), False, 'from skimage.io import imread, imsave\n'), ((1471, 1486), 'skimage.color.rgb2gray', 'rgb2gray', (['image'], {}), '(image)\n', (1479, 1486), False, 'from skimage.color import rgb2gray\n'), ((3309, 3365), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'low', 'high': 'high', 'size': '(nrow, ncol)'}), '(low=low, high=high, size=(nrow, ncol))\n', (3326, 3365), True, 'import numpy as np\n'), ((3749, 3783), 'numpy.logical_and', 'np.logical_and', (['(mat >= 0)', '(mat <= 1)'], {}), '(mat >= 0, mat <= 1)\n', (3763, 3783), True, 'import numpy as np\n'), ((4394, 4415), 'wrapper.data_to_pic', 'wr.data_to_pic', (['image'], {}), '(image)\n', (4408, 4415), True, 'import wrapper as wr\n'), ((587, 621), 'scipy.misc.toimage', 'toimage', (['image'], {'cmin': '(0.0)', 'cmax': '(1.0)'}), '(image, cmin=0.0, cmax=1.0)\n', (594, 621), False, 'from scipy.misc import toimage\n')]
|
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.utils import random_interferometer
from strawberryfields.apps import data, sample, subgraph, plot
import plotly
import networkx as nx
import numpy as np
class GBS:
def __init__(self, samples=[], min_pho=16, max_pho=30, subgraph_size=8, max_count=2000):
self.samples = samples
self.min_pho = min_pho
self.max_pho = max_pho
self.subgraph_size = subgraph_size
self.max_count = max_count
def graphDensity(self, samples, min_pho, max_pho, subgraph_size, max_count):
dense = subgraph.search(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)
dense_freq = []
for k in range(subgraph_size, min_pho+1):
dense_freq.append([k,len(dense[k])])
return dense, dense_freq
def graphFreqScore(self, d_freqs, max_freq):
x,y = [], []
for i in range(len(d_freqs)):
for j in range(len(d_freqs[i])):
n,f = d_freqs[i][j][0],d_freqs[i][j][1]
x.append(n*f)
N = len(d_freqs[i])
y.append((1/max_freq)*(np.sum(x)/N))
x = []
min_y = np.min(y)
y = [min_y/x for x in y]
return y, y.index(max(y))
def runJob(self, eng):
num_subsystem = 8
prog = sf.Program(num_subsystem, name="remote_job")
U = random_interferometer(4)
with prog.context as q:
# Initial squeezed states
# Allowed values are r=1.0 or r=0.0
ops.S2gate(1.0) | (q[0], q[4])
ops.S2gate(1.0) | (q[1], q[5])
ops.S2gate(1.0) | (q[3], q[7])
# Interferometer on the signal modes (0-3)
ops.Interferometer(U) | (q[0], q[1], q[2], q[3])
ops.BSgate(0.543, 0.123) | (q[2], q[0])
ops.Rgate(0.453) | q[1]
ops.MZgate(0.65, -0.54) | (q[2], q[3])
# *Same* interferometer on the idler modes (4-7)
ops.Interferometer(U) | (q[4], q[5], q[6], q[7])
ops.BSgate(0.543, 0.123) | (q[6], q[4])
ops.Rgate(0.453) | q[5]
ops.MZgate(0.65, -0.54) | (q[6], q[7])
ops.MeasureFock() | q
results = eng.run(prog, shots=10)
# state = results.state
# measurements = results.samples
return results.samples
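# Illustrative usage sketch only (not part of the original class). It assumes
# a configured StrawberryFields account; "X8" is used here as a hypothetical
# remote device name. runJob() then submits the 8-mode program defined above.
#
#   gbs = GBS()
#   samples = gbs.runJob(sf.RemoteEngine("X8"))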
|
[
"strawberryfields.Program",
"strawberryfields.ops.BSgate",
"strawberryfields.ops.MZgate",
"strawberryfields.ops.MeasureFock",
"strawberryfields.utils.random_interferometer",
"strawberryfields.apps.subgraph.search",
"numpy.sum",
"strawberryfields.ops.Interferometer",
"numpy.min",
"strawberryfields.ops.S2gate",
"strawberryfields.ops.Rgate"
] |
[((621, 700), 'strawberryfields.apps.subgraph.search', 'subgraph.search', (['samples', 'pl_graph', 'subgraph_size', 'min_pho'], {'max_count': 'max_count'}), '(samples, pl_graph, subgraph_size, min_pho, max_count=max_count)\n', (636, 700), False, 'from strawberryfields.apps import data, sample, subgraph, plot\n'), ((1212, 1221), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1218, 1221), True, 'import numpy as np\n'), ((1359, 1403), 'strawberryfields.Program', 'sf.Program', (['num_subsystem'], {'name': '"""remote_job"""'}), "(num_subsystem, name='remote_job')\n", (1369, 1403), True, 'import strawberryfields as sf\n'), ((1416, 1440), 'strawberryfields.utils.random_interferometer', 'random_interferometer', (['(4)'], {}), '(4)\n', (1437, 1440), False, 'from strawberryfields.utils import random_interferometer\n'), ((1571, 1586), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1581, 1586), False, 'from strawberryfields import ops\n'), ((1614, 1629), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1624, 1629), False, 'from strawberryfields import ops\n'), ((1657, 1672), 'strawberryfields.ops.S2gate', 'ops.S2gate', (['(1.0)'], {}), '(1.0)\n', (1667, 1672), False, 'from strawberryfields import ops\n'), ((1756, 1777), 'strawberryfields.ops.Interferometer', 'ops.Interferometer', (['U'], {}), '(U)\n', (1774, 1777), False, 'from strawberryfields import ops\n'), ((1817, 1841), 'strawberryfields.ops.BSgate', 'ops.BSgate', (['(0.543)', '(0.123)'], {}), '(0.543, 0.123)\n', (1827, 1841), False, 'from strawberryfields import ops\n'), ((1869, 1885), 'strawberryfields.ops.Rgate', 'ops.Rgate', (['(0.453)'], {}), '(0.453)\n', (1878, 1885), False, 'from strawberryfields import ops\n'), ((1905, 1928), 'strawberryfields.ops.MZgate', 'ops.MZgate', (['(0.65)', '(-0.54)'], {}), '(0.65, -0.54)\n', (1915, 1928), False, 'from strawberryfields import ops\n'), ((2018, 2039), 'strawberryfields.ops.Interferometer', 'ops.Interferometer', (['U'], {}), '(U)\n', (2036, 2039), False, 'from strawberryfields import ops\n'), ((2079, 2103), 'strawberryfields.ops.BSgate', 'ops.BSgate', (['(0.543)', '(0.123)'], {}), '(0.543, 0.123)\n', (2089, 2103), False, 'from strawberryfields import ops\n'), ((2131, 2147), 'strawberryfields.ops.Rgate', 'ops.Rgate', (['(0.453)'], {}), '(0.453)\n', (2140, 2147), False, 'from strawberryfields import ops\n'), ((2167, 2190), 'strawberryfields.ops.MZgate', 'ops.MZgate', (['(0.65)', '(-0.54)'], {}), '(0.65, -0.54)\n', (2177, 2190), False, 'from strawberryfields import ops\n'), ((2219, 2236), 'strawberryfields.ops.MeasureFock', 'ops.MeasureFock', ([], {}), '()\n', (2234, 2236), False, 'from strawberryfields import ops\n'), ((1163, 1172), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (1169, 1172), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Modules to support data reduction in Python.
The main purpose of the base module ``Data_Reduction`` is to provide a
suplerclass with a good set of attributes and methods to cover all common needs.
The base module is also able to read data from a text file as a ``numpy``
structured array. This is done with a class called ``DataGetterMixin`` which
must be invoked after the base class has been initiated.
The module function ``examine_text_data_file()`` reveals the structure of the
file(s) that provide the data..
Examples
========
Here we initiate a base class after mixing in the data getter. The first line o
the file has column names but the first three columns are all under one
name ``UTC`` so we specify column widths to consider the first three columns
to be one column. We use the names from the first line of the file, which
could have been done with an ``open()``, ``readline()``, and ``close()``::
mixIn(Observation, DataGetterMixin)
obs = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs.open_datafile('t12127.10',
delimiter=[17,16,3,11,7,9,8,2,6],
skip_header=1,
names="UTC Epoch Chan Tsys Int Az El Diode Level".split())
Now the data getter is already mixed in to Observation so we don't need to do
it again. In this case we specify the names of the columns, changing ``Int`` to
``Integr``::
obs2 = Observation(dss=28, date="2012/127", project="SolarPatrol")
obs2.open_datafile('t12127.10', skip_header=1,
names="Year DOY UTC Epoch Chan Tsys Integr Az El Diode Level".split())
The class Map inherits from DataGetterMixin, so no explicit mixin required::
obsmap = Map(dss=84, date="2020/163", project="SolarPatrol")
obsmap.initialize('sim-venus.dat', source="Venus")
Let's examine ``obsmap``. We have only one signal column::
In [3]: obsmap.channel.keys()
Out[3]: dict_keys(['xl'])
In [4]: obsmap.channel['xl'].keys()
Out[4]: dict_keys(['freq', 'bw', 'pol', 'ifmode', 'atten', 'power'])
"""
# standard Python modules
import datetime
import glob
import h5py
import logging
import math
import matplotlib.dates as MPLd
import numpy as NP
import os
import re
import readline
import scipy.interpolate
import scipy.fftpack
import Astronomy as A
import Astronomy.DSN_coordinates as coords
import Astronomy.Ephem as AE
import DatesTimes as DT
import local_dirs
import Math.clusters as VQ # vector quantization
import support
# enable raw_input Tab completion
readline.parse_and_bind("tab: complete")
logger = logging.getLogger(__name__) # module logger
class Observation(object):
"""
superclass for a data structure and methods
Attributes
==========
aliases - (dict) data keys to replace those in original data
channel - (dict) signal paths, e.g., different freqs and pols
data - (dict) original data, e.g., read from file or database
DOY - (int) day of year of observation
end - (float) UNIX time at the end
latitude - (float) from obs
logger - (logging.Logger)
longitude - (float) from obs
name - (str) user assigned, defaults to YEAR/DOY
numdata - (int) number of data samples
obs - (AE.DSS) observatory
session - (Session) set of observations, parent to Observation
session_path - (str) directory for session files
start - (float) UNIX time at the beginning
year - (int) year of observation
**Reserved Column Names**
These column names are recognized. They are also the keys for attribute
``data``.
These quantities must be present in some form::
unixtime (float) UNIX time in sec
chan_name (str) channel name
integr (float) integration (exposure) in sec
azel (float,float) azimuth and elevation in decimal deg
power (float) power level if only a single channel
Optional::
diode (float) 0 or power in K (integers OK)
level (float) (unidentified -- in ``tlog`` table)
cryotemp (float) cryostat temp in K
windspeed (float) km/hr
winddir (float) deg
ambtemp (float) deg C
pressure (float) mbar
Columns to be computed::
mpldatenum (float) matplotlib ``datenum``
Alternative for ``power``::
tsys (float) system temperature (calibrated power)
top (float) alternative for ``tsys`` (used in DSN)
vfc_counts (int) VFC counts (rate times ``integr``)
Any column with a name which is not a reserved name is assumed to be
power-like data from the channel with that name, unless that name is in a
list provided to the argument ``ignore`` in the method ``get_data_channels``
of the class ``DataGetterMixin``.
Alternative for ``unixtime``::
year (int) year of observation
doy (int) day of year
utc (str) HH:MM:SS
timestr (str) something like 2020/06/14/14:22:21.00
Alternative for ``chan_name``::
chan (int) index in receiver channel names
Alternative for ``azel``::
radec (float,float) precessed right ascension in decimal hours and
precessed declination in decimal deg
radec1950 (float,float) mean right ascension in decimal hours and
mean declination in decimal deg at epoch
radec2000 (float,float) mean right ascension in decimal hours and
mean declination at epoch in decimal deg
az (float) azimuth in decimal deg
el (float) elevation in decimal deg
ra (float) precessed right ascension in decimal hours
dec (float) precessed declination in decimal deg
ra1950 (float) mean right ascension in decimal hours at epoch
dec1950 (float) mean declination in decimal deg at epoch
ra2000 (float) mean right ascension in decimal hours at epoch
dec2000 (float) mean declination in decimal deg at epoch
Notes
=====
* The ``data`` structure is a dict.
* The value of a ``data`` item is either a numpy array or a object
like ``float``, ``int``, or ``str``.
* The keys have reserved words defined above and will be lowercase.
* Items with other keys may be added, typically by a child class.
* Coordinates shall be in pairs, `e.g. ``azel``, ``radec``. (This way you
never get one without the other.)
"""
reserved = ['unixtime','chan_name','integr','az','el','year','doy','utc',
'timestr','chan','tsys','top','diode','level','cryotemp',
'windspeed','winddir','ambtemp','pressure',
'ra','dec','ra1950','dec1950','ra2000','dec2000']
power_keys = ['tsys', 'top', 'vfc_counts', 'power']
def __init__(self, parent=None, name=None, dss=None,
date=None, project=None):
"""
Create a base Observation object.
This is not meant to be initialized by itself. A subclass generally
determines how data are read in. However, method ``initialize()``
provides a basic data read capability using ``numpy.genfromtxt()``
and creates the object's data structure.
Args:
parent (Session): session to which this observation belongs
name (str): an identifier; default is station ID + "obs"
dss (int): station number
date (str): "YEAR/DOY"
project (str): directory under /usr/local/projects
"""
self.logger = logging.getLogger(logger.name+".Observation")
self.session = parent
# observatory must be specified
if dss:
self.obs = coords.DSS(dss)
self.longitude = self.obs.long*180/math.pi # deg
self.latitude = self.obs.lat*180/math.pi # deg
else:
self.logger.error("__init__: requires observatory location")
raise Exception("Where were the data taken?")
# give the object a name
if name:
self.name = name
else:
self.name = "DSS"+str(dss)+"obs"
self.logger = logging.getLogger(logger.name+".Observation")
# the observation was part of some project
if project:
self.project = project
else:
self.logger.error("__init__: requires a project")
raise Exception("Where are the session's working files?")
# the observation was done on some date
if date:
y,d = date.split('/')
self.year = int(y);
self.DOY = int(d)
projdatapath, self.sessionpath, rawdatapath = \
get_obs_dirs(project, dss, self.year, self.DOY,
datafmt=None)
self.logger.debug("__init__: session path: %s", self.sessionpath)
else:
self.logger.error("__init__: requires a date")
raise Exception("When were the date taken?")
# accomodate subclass arguments
self.aliases = {}
# what I really want to do here is see if this was called by a subclass,
# in which case I do not try to get the channel info until this
# initialization has finished.
#
#if hasattr(self, "get_data_channels"):
# channels = self, get_data_channels()
# self.make_channels(channels)
#else:
# self.logger.info("__init__: initialize() may now be called")
def splitkey(self, longlat):
"""
Checks for presence of coordinates in pairs or singles
@param longlat : "azel", or "radec", or "radecEPOC"
@type longlat : str
"""
longitude = longlat[:2] # 'az' or 'ra'
if len(longlat) > 5: # has epoch
epoch = longlat[-4:]
longitude += epoch
latitude = longlat[2:-4]+epoch
else: # date of observation
latitude = longlat[2:]
epoch = None
return longitude, latitude, epoch
def check_for(self, data, longlat):
"""
Checks for separate coordinates and splits if coord pairs
Args:
data (dict): attribute ``data``
longlat (str): "azel", or "radec", or "radecEPOC"
"""
longitude, latitude, epoch = self.splitkey(longlat)
if longitude in data.dtype.names and \
latitude in data.dtype.names:
self.logger.debug("check_for: data has %s and %s", longitude, latitude)
self.data[longitude] = data[longitude]
self.data[latitude] = data[latitude]
return True
elif longlat in data.dtype.names:
self.logger.debug("check_for: data has %s", longlat)
self.data[longitude],self.data[latitude] = map(None, *data[longlat])
self.logger.debug("check_for: added %s and %s to data",
longitude, latitude)
return True
else:
# coords need to be computed from other coords
return False
def unpack_to_complex(self, rawdata):
"""
Converts a sequence of alternating real/imag samples to complex
@param rawdata : alternating real and imaginary bytes
@type rawdata : numpy array of signed int8
@return: numpy array of complex
"""
datalen = len(rawdata)
real = rawdata[0:datalen:2]
imag = rawdata[1:datalen:2]
data = real + 1j*imag
return data
def sideband_separate(self, data):
"""
Converts a complex spectrum array and returns two reals with USB and LSB
This applies a Hilbert transform to the complex data.
"""
usb = (data.real + scipy.fftpack.hilbert(data).imag)
lsb = (scipy.fftpack.hilbert(data).real + data.imag)
return lsb,usb
class Channel(support.PropertiedClass):
"""
Class for a signal path
"""
def __init__(self, parent, name, freq=None, bw=None, pol=None, IFtype=None,
atten=None):
"""
Notes
=====
The properties can be accessed as if the class were a dict.
Arguments
=========
freq:float or int: center frequency in MHz
bw:float or int: bandwidth in MHz
pol:str: polarization code
"""
support.PropertiedClass.__init__(self)
self.parent = parent
self.logger = logging.getLogger(self.parent.name+".Channel")
self.logger.debug("__init__: created %s", self.logger.name)
self.logger.debug("__init__: parent is %s", self.parent)
self.name = name
self.data['freq'] = freq
self.data['bw'] = bw
self.data['pol'] = pol
self.data['ifmode'] = IFtype
self.data['atten'] = atten
class DataGetterMixin(object):
"""
Class for getting data from a CSV file.
"""
def initialize(self, filename, delimiter=" ", names=True, skip_header=0,
source=None):
"""
Get the data and make a data structure for the observations.
This is not included by default in ``__init__()`` to keep it simple for
subclasses.
Args:
filename (str): name only, required; the path is provided
delimiter (str): what separates the columns
names (bool): the first line has column names
skip_header (int) : number of rows to skip
"""
# get the data
data = self.open_datafile(filename, delimiter=delimiter, names=names,
skip_header=skip_header)
# get the signal columns and names
metadata, signals = self.get_data_channels(data)
# create Channel objects for the signal properties
self.make_channels(signals)
# create the data structure
self.make_data_struct(data, metadata, signals)
# compute the offsets from the source center for each data point
if source:
self.get_offsets(source=source)
else:
self.logger.warning("initialize: no source specified; no offsets")
def open_datafile(self, filename, delimiter=" ", names=True, skip_header=0):
"""
Opens and reads a data file
    This is used by ``Malargue`` (one data file) and ``GAVRT`` (one data file
for each signal).
Args:
filename (str): text data file name
delimiter (str): separator between columns (default: whitespace)
names (bool): file row has column names (default: True)
skip_header (int): number of rows to skip at beginning of file
Returns:
ndarray:
"""
data = NP.genfromtxt(self.sessionpath+filename,
delimiter=delimiter,
dtype=None,
names=names,
case_sensitive='lower',
skip_header=skip_header,
encoding=None)
return data
def get_data_channels(self, data, ignore=None):
"""
Gets or sets the names of the signal columns
Column names are separated into metadata and signals. Names in
    ``ignore`` are ignored.  Names in ``aliases`` are replaced.
Args:
data (ndarray): data read from text file
ignore (list of str): columns to ignore; default None
Returns:
(list of str, list of str): metadata, signals
"""
names = data.dtype.names
metadata = []
signals = []
for name in names:
if ignore:
if name in ignore:
          continue  # actually skip ignored columns ('pass' fell through)
if name.casefold() in map(str.casefold, self.aliases):
key = self.aliases[name].lower() # we use only lower case names
else:
key = name.lower()
self.logger.debug("get_data_channels: doing %s for %s", key, name)
if key in map(str.casefold, Observation.reserved):
if key.casefold() in ['top', 'tsys']:
signals.append(key)
else:
metadata.append(key)
else:
signals.append(key)
self.logger.debug("get_data_channels: signals: %s", signals)
self.logger.debug("get_data_channels: metadata: %s", metadata)
return metadata, signals
def make_data_struct(self, data, metadata, signals):
"""
Takes a text table with headers and converts it into a numpy ``ndarray``.
That means that a column can be extracted using `data[label]`.
Args
====
data: (ndarray) the data from the text file
metadata: (list of str) the column names for metadata
signals: (list of str) the column names for power-like data
"""
# get the known columns:
self.data = {}
self.numdata = len(data)
#self.logger.debug("make_data_struct: using aliases: %s", self.aliases)
# get columns that are not metadata; each has power for a channel
for signal in signals:
#self.logger.debug("make_data_struct: for signal: %s", signal)
#if signal in self.aliases.items():
# get the key in 'data' which matches 'value' in 'aliases'
# power = data[next(key for key, value in self.aliases.items()
# if value == signal)][idx]
#else:
# power = data[signal]
#self.channel[signal]['power'] = power
self.channel[signal]['power'] = data[signal]
# get UNIX time
if 'unixtime' in metadata:
if 'unixtime' in data.dtype.names:
self.data['unixtime'] = data['unixtime']
else:
# look up the equivalent of UNIX time in the data table
self.data['unixtime'] = data[next(key
for key, value in self.aliases.items()
if value == 'unixtime')]
# compute other convenient forms of time
self.data['datetime'] = [] # Python datetime.date
self.data['date_num'] = [] # matplotlib.dates date number
for idx in list(range(self.numdata)):
if 'unixtime' in data.dtype.names:
tm = data['unixtime'][idx]
else:
tm = data[next(key for key, value in self.aliases.items()
if value == 'unixtime')][idx]
dt = datetime.datetime.utcfromtimestamp(tm)
self.data['datetime'].append(dt)
self.data['date_num'].append(MPLd.date2num(dt))
self.start = self.data['unixtime'][0]
self.end = self.data['unixtime'][-1]
else:
# figure out how to process the time data columns
pass
# compute alternate coordinates
if self.check_for(data, 'azel'):
# azel exists; compute radec if needed; then radec2000 if needed
if self.check_for(data, 'radec'):
pass
else:
self.radec_from_azel()
if self.check_for(data, 'radec2000'):
# ra2000 and dec2000 already exist
pass
else:
self.radec2000_from_radec()
elif self.check_for(data, 'radec2000'):
# coordinates exist; compute back to azimuth and elevation
if self.check_for(data, 'radec'):
pass
else:
# compute observed RA and dec
self.radec_from_radec2000()
if self.check_for(data, 'azel'):
pass
else:
self.azel_from_radec()
# in here check for 'radec'
else:
self.logger.error("no coordinates found in data")
raise Exception("check INFO logging for columns found")
self.start = self.data['unixtime'].min()
self.end = self.data['unixtime'].max()
def make_channels(self, signals, props=None):
"""
Assign properties to the channels.
The prop keys are "freq", "pol", and "IFtype".
Args:
props (dict of dicts): signal channel properties.
"""
self.channel = {}
for ch in signals:
chindex = signals.index(ch)
if props:
self.channel[ch] = self.Channel(self, ch,
freq =props[ch]['freq'],
bw =props[ch]['bw'],
pol =props[ch]['pol'],
IFtype=props[ch]['IFtype'],
atten =props[ch]['atten'])
else:
self.channel[ch] = self.Channel(self, ch)
class GriddingMixin(object):
"""
Class for all the data and methods associated with a raster scan map
  It is expected that the class using this mixin is already a subclass of
  ``Observation``, so these methods can rely on its attributes.
Attrs:
cfg (dict):
data (numpy array): from ``Observation``
logger (logging.Logger): replaces ``Observation`` logger
name (str): replaces ``Observation`` name
session (Session):
source (str):
step (float): map step size
"""
def get_grid_stepsize(self, xy=None):
"""
Determine the stepsize of gridded data
This assumes xdec and dec data increase incrementally by 'stepsize'.
    The sequences may repeat in a sawtooth-like series.  The number of
    'xdec' and 'dec' points is several times the grid size.
Arguments:
xy (tuple or list) - X-array and Y-array (default Map.data)
"""
# get the absolute value of coordinate intervals
if xy:
dxdecs = abs(xy[0][1:] - xy[0][:-1])
ddecs = abs(xy[1][1:] - xy[1][:-1])
else:
dxdecs = abs(self.data['xdec_offset'][1:]-self.data['xdec_offset'][:-1])
ddecs = abs(self.data['dec_offset'][1:] -self.data['dec_offset'][:-1])
# form array of X,Y pairs
coords = NP.array(list(zip(dxdecs,ddecs)))
# expect two clusters (default)
cluster_pos = VQ.find_clusters(coords).round(4) # tenths of mdeg
# return the non-zero intervals
return cluster_pos[0].max(), cluster_pos[1].max()
def regrid(self, width=1.0, height=1.0, step=None, power_key=None):
"""
converts a map from observed coordinates to map coordinates
If ``step`` is not given then the step size will be the average step size
in X and the average step in Y. In this case, the effect is to make a
    regular grid even if the original positions were not exact, e.g. due to pointing error.
@param width : map width in deg
@type width : float
@param height : map height in deg
@type height : float
@param step : map step size in X and Y in deg
@type step : (float, float)
@param power_key : dict key of Z-value
@type power_key : str
"""
# what is the power-like quantity?
if power_key:
pass
else:
# take the first that matches
for key in Observation.power_keys:
if key in self.data:
power_key = key
self.logger.info("regrid: using '%s'", power_key)
break
else:
continue
if power_key:
pass
else:
self.logger.error("regrid: no power data key found")
return None
    if step is None:
# use the original stepsize
self.xstep, self.ystep = self.get_grid_stepsize()
else:
self.xstep, self.ystep = step
self.data['grid_x'] = NP.arange(
-width/2, width/2+self.xstep/2, self.xstep/2)
self.data['grid_y'] = NP.arange(
-height/2,height/2+self.ystep/2, self.ystep/2)
self.logger.debug("regrid: grid shape is %dx%d", len(self.data['grid_x']),
len(self.data['grid_y']))
self.data['grid_z'] = {}
for chnl in self.channel:
self.logger.debug("regrid: processing %s", chnl)
points = list(zip(self.data['xdec_offset'],self.data['dec_offset']))
self.logger.debug("regrid: %d positions", len(points))
values = self.data[power_key][chnl]
self.logger.debug("regrid: %d values", len(values))
xi, yi = NP.meshgrid(self.data['grid_x'], self.data['grid_y'])
try:
self.data['grid_z'][chnl] = scipy.interpolate.griddata(points, values,
(xi, yi), method='nearest')
except ValueError as details:
self.logger.error("regrid: gridding failed: %s", str(details))
self.logger.debug("regrid: channel %s length of points is %d",
chnl, len(points))
self.logger.debug("regrid: channel %s length of values is %d", chnl,
len(values))
continue
def radec_from_azel(self):
"""
compute RA and dec from az and el
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
dt = self.data['datetime'][idx]
# format time as (YEAR, DOY.fff)
time_tuple = (dt.year,
DT.day_of_year(dt.year,dt.month,dt.day)
+ ( dt.hour
+ dt.minute/60.
+ dt.second/3600.
+ dt.microsecond/3600./1e6)/24.)
azimuth = self.data['az'][idx]
elevation = self.data['el'][idx]
# compute
ra,dec = A.AzEl_to_RaDec(azimuth, elevation,
self.latitude,
-self.longitude,
time_tuple)
RA.append(ra)
decs.append(dec)
      RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def radec2000_from_radec(self):
"""
compute RA2000 and dec2000 from observed RA and dec
"""
RA2000 = []; decs2000 = []; RAdec2000 = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
      ra = self.data['ra'][idx]
      dec = self.data['dec'][idx]
# compute
ra2000,dec2000 = A.apparent_to_J2000(MJD,UT,
ra, dec,
self.longitude, self.latitude)
RA2000.append(ra2000)
decs2000.append(dec2000)
RAdec2000.append((ra2000,dec2000))
self.data['ra2000'] = RA2000
    self.data['dec2000'] = decs2000
self.data['radec2000'] = RAdec2000
def radec_from_radec2000(self):
"""
compute apparent RA and dec. from J2000 RA and dec
"""
RA = []; decs = []; RAdecs = []
for idx in list(range(self.numdata)):
# setup
tm = self.data['unixtime'][idx]
mjd = DT.UnixTime_to_MJD(tm)
MJD = int(mjd)
UT = 24*(mjd-MJD)
ra2000 = self.data['ra2000'][idx]
dec2000 = self.data['dec2000'][idx]
# compute
ra, dec = A.J2000_to_apparent(MJD, UT,
ra2000*math.pi/12, dec2000*math.pi/180)
RA.append(ra)
decs.append(dec)
RAdecs.append((ra,dec))
self.data['ra'] = RA
self.data['dec'] = decs
self.data['radec'] = RAdecs
def azel_from_radec(self):
"""
compute azimuth and elevation from apparent right ascension and declination
"""
azs = []; els = []; azels = []
for idx in list(range(self.numdata)):
# setup
ra = self.data['ra'][idx]
dec = self.data['dec'][idx]
timetuple = self.data['datetime'][idx].timetuple()
year = timetuple.tm_year
doy = timetuple.tm_yday + (timetuple.tm_hour
+(timetuple.tm_min+timetuple.tm_sec/60)/60)/24
# compute
az, el = A.RaDec_to_AzEl(ra, dec,
self.latitude, self.longitude, (year,doy))
azs.append(az)
els.append(el)
azels.append((az,el))
self.data['az'] = azs
self.data['el'] = els
self.data['azel'] = azels
def get_offsets(self, source="Sun", xdec_ofst=0., dec_ofst=0.):
"""
Generates a map in coordinates relative to a source
If the source is the default, the position of the Sun will be computed for
the time of each sample. IT SEEMS LIKE A GOOD IDEA TO DO THIS FOR PLANETS
ALSO.
This adds elements with keys ``xdec_offset`` and ``dec_offset`` to the
attribute ``data``.
@param source : source at map center
@type source : ephem source instance
@param xdec_ofst : relative X-dec position of sample
@type xdec_ofst : float
@param dec_ofst : relative dec position of sample
@type dec_ofst : float
@return: (dxdecs,ddecs) in degrees
"""
if source.lower() == "sun":
src = AE.ephem.Sun()
else:
src = AE.calibrator(source)
self.data['dec_offset'] = []
self.data['xdec_offset'] = []
for count in range(len(self.data['unixtime'])):
dt = datetime.datetime.utcfromtimestamp(
self.data['unixtime'][count])
if type(src) == AE.Quasar:
pass
else:
src.compute(dt)
ra_center = src.ra*12/math.pi # hours
dec_center = src.dec*180/math.pi # degrees
decrad = src.dec
# right ascension increases to the left, cross-dec to the right
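      # (the factor 15 converts RA hours to degrees; cos(dec) projects onto cross-dec)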
self.data['xdec_offset'].append(xdec_ofst -
(self.data['ra'][count] - ra_center)*15*math.cos(decrad) )
self.data['dec_offset'].append( dec_ofst +
self.data['dec'][count] - dec_center)
# change list to NP.array
self.data['xdec_offset'] = NP.array(self.data['xdec_offset'])
self.data['dec_offset'] = NP.array(self.data['dec_offset'])
class Map(Observation, GriddingMixin):
"""
Map class without special features for GAVRT and Malargue
Most of the methods are mixed in to avoid conflicting with subclasses
"""
def __init__(self, parent=None, name=None, dss=None, date=None, project=None):
"""
Create a Map object
Args:
parent (Session): an observing session to which this belongs
name (str): an identifier, like a scan number
dss (int): station where the data were taken
date (str): date of observation as "YEAR/DOY"
project (str): project for which this observation was made
"""
Observation.__init__(self, parent=parent, name=name, dss=dss, date=date,
project=project)
class Recording(h5py.File):
"""
Class for raw data
This is typically the contents of a data file transcribed into a standard
format. It may be the data of one Observation object, or data for multiple
Observation objects, or contain part of the data for an Observation object.
  If the data being curated are not in a standard project, and they are not
  in a standard place, then ``path`` and ``name`` must be given explicitly.
"""
def __init__(self, session=None, path=None, date=None, dss=None, name=None):
"""
Initialize a metadata container and data directory
Args
====
session (Session): required, unless:
path (str) : location of raw data files
date
"""
self.logger = logging.getLogger(logger.name+".Recording")
if session:
self.session = session
if not name:
name = session.project + "-" + str(session.year) + "-" + \
('%03d' % session.doy) + "-dss" + str(session.dss)+".info"
self.year = session.year
self.doy = session.doy
self.dss = session.dss
self.project = session.project
self.session_dir = session.session_dir
elif path and name:
self.session = Session() # for its methods and attributes
self.session_dir = path
self.name = name
else:
raise RuntimeError("either a session or a path and filename required")
h5py.File.__init__(self, name, 'w')
self.attrs['project'] = self.project
self.attrs['dss'] = self.dss
self.attrs['year'] = self.year
self.attrs['doy'] = self.doy
class Session(object):
"""
Base class for an observing session on a given year and DOY
Public Attributes::
doy (int) - day of year for session
logger (logging.Logger) - logging.Logger object
parent (object) - a data reduction session (mult. observ. sessions)
    year (int) - year of the session
    project (str) - project code
session_dir (str) - path to results from this session
A session usually refers to a telescope, date and project. This will
normally define a path to the session directory.
"""
def __init__(self, parent=None, date=None, project=None, dss=None,
path=None):
"""
initialize data reduction for one observing session
Args
====
parent: (object) optional class for a data reduction tool
date: (str) required, format YEAR/DOY
project: (str) required
dss (int) required
path (str) optional
    If `path` is given for a non-standard location of the observing files, and it does
not exist, it will be created. Then the Recording and Observation instances
must be directed to where the files are.
"""
self.logger = logging.getLogger(logger.name+".Session")
if parent:
self.session = parent
if date and project and dss:
y,d = date.split('/')
      self.year = int(y)
self.doy = int(d)
self.project = project
self.dss = dss
self.name = "'%s %4d/%03d'" % (self.project, self.year, self.doy)
else:
self.logger.error("__init__: missing DSS or year or DOY or project")
raise Exception("Where and when and for what project were the data taken?")
self.find_session_dir(path=path)
def find_session_dir(self, path=None):
"""
find or make the sessions directory
Args:
path (str) - explicit path to files
"""
self.logger.debug("find_session_dir: entered for path=%s", path)
if path:
self.session_dir = path
else:
obs_dir = local_dirs.projects_dir + self.project \
+"/Observations/dss"+str(self.dss)+"/"
self.session_dir = obs_dir+ "%4d" % self.year +"/"+ "%03d" % self.doy +"/"
if not os.path.exists(self.session_dir):
os.makedirs(self.session_dir, mode=0o775)
def select_data_files(self, datapath=None, name_pattern="", auto=True,
load_hdf=False):
"""
Provide the user with menu to select data files.
Finding the right data store is complicated as there are many kinds of data
files
* If datapath is ...RA_data/HDF5/... then the files could be .h5 (Ashish)
or .hdf5 (Dean).
* If datapath is ...RA_data/FITS/... then the extent is .fits.
* If datapath is ...project_data/... then the extent is .pkl
* If datapath is ...projects/... (default) then the extent is probably
.csv or .dat or .prd.
@param datapath : path to top of the tree where the DSS subdirectories are
@type datapath : str
@param name_pattern : pattern for selecting file names, e.g. source
@type name_pattern : str
@param load_hdf : use RA_data/HDF5 directory if True
@type load_hdf : bool
    @param auto : take all files found
@type auto : bool
@return: list of str
"""
# Get the data files to be processed
self.logger.debug("select_data_files: looking in %s", datapath)
if name_pattern:
name,extent = os.path.splitext(name_pattern)
if extent.isalpha(): # a proper extent with no wildcards
# take name pattern as is
pass
else:
# only one * at front and back of pattern
name_pattern = "*"+name_pattern.rstrip('*')+"*"
else:
# no pattern specified. All files.
name_pattern = "*"
self.logger.debug("select_data_files: for pattern %s", name_pattern)
if datapath:
if re.search('HDF5', datapath):
load_hdf = True
elif re.search('project_data', datapath):
load_hdf = False
datafiles = support.text.select_files(datapath+name_pattern+"[0-9].pkl")
elif re.search('FITS', datapath):
datafiles = support.text.select_files(datapath+name_pattern+".fits")
if load_hdf:
full = datapath+name_pattern+".h*5"
else:
full = datapath+name_pattern
else:
full = self.session_dir + name_pattern
self.logger.debug("select_data_files: from: %s", full)
if auto:
datafiles = glob.glob(full)
else:
datafiles = support.text.select_files(full)
self.logger.debug("select_data_files: found %s", datafiles)
if datafiles == []:
self.logger.error(
"select_data_files: None found. Is the data directory mounted?")
raise RuntimeError('No data files found.')
if type(datafiles) == str:
datafiles = [datafiles]
self.logger.info("select_data_files: to be processed: %s", datafiles)
return datafiles
class Spectrum(Observation):
"""
Class for spectra
"""
def __init__(self):
"""
needs a spectrum attribute
"""
self.logger = logging.getLogger(logger.name+".Spectrum")
def get_num_chans(self, linefreq, bandwidth, max_vel_width):
"""
compute the base 2 number of output channels for the specified resolution
"""
kmpspMHz = 300000./linefreq
BW_kmps = bandwidth*kmpspMHz
est_num_chan_out = BW_kmps/max_vel_width
self.logger.debug("get_num_chans: estimated num chans out = %d",
est_num_chan_out)
return 2**int(math.ceil(math.log(est_num_chan_out,2)))
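  # Hedged worked example (illustrative numbers, not from the original source):
  #   linefreq=1420.4 MHz, bandwidth=10 MHz, max_vel_width=0.5 km/s gives
  #   ~211 km/s/MHz, BW ~2112 km/s, ~4224 channels, rounded up to 2**13 = 8192.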
def reduce_spectrum_channels(self, refval, refpix, delta,
num_chan=1024, axis=0):
"""
Reduce the number of channels in the spectrum.
The default option is to reduce the spectrum to a specified number of
channels with a default of 1024. The input spectrum is presumed to have
2**N channels so that num_chan/num_chan_in is an integer.
If 'spectrum' is an N-D array, then the spectrum axis is given by 'axis'
which defaults to 0.
'delta' is negative for lower sideband or reversed double sideband spectra.
@param spectrum : spectrum values
@type spectrum : list or nparray
@param refval : X-axis value at the reference pixel of 'spectrum'
@type refval : float
@param refpix : reference pixel for 'spectrum'
@type refpix : int
@param delta : interval between pixels on the X-axis
@type delta : float
@param num_chan : optional number of channels to be returned (default: 2^10)
@type num_chan : int
@return: numpy.array
"""
if math.log(num_chan,2) % 1:
raise RuntimeError("num_chan = %d is not a power of 2", num_chan)
if type(self.spectrum) == NP.ndarray:
num_chans_in = self.spectrum.shape[axis]
else:
num_chans_in = len(self.spectrum)
if math.log(num_chans_in,2) % 1:
raise RuntimeError("input spectrum length = %d is not a power of 2",
num_chans_in)
self.logger.debug("reduce_spectrum_channels: %d channels in", num_chans_in)
    num_chan_avg = num_chans_in//num_chan  # integer; both counts are powers of 2
newrefpix = refpix/num_chan_avg
self.logger.debug("reduce_spectrum_channels: refpix from %d to %d",
refpix, newrefpix)
newdelta = delta*num_chan_avg
self.logger.debug("reduce_spectrum_channels: delta from %.3f to %.3f",
delta, newdelta)
newrefval = refval + delta*(num_chan_avg/2 - 1)
self.logger.debug("reduce_spectrum_channels: refval from %.3f to %.3f",
refval, newrefval)
self.logger.debug("reduce_spectrum_channels: averaging %d channels", num_chan_avg)
    specout = NP.array([self.spectrum[index*num_chan_avg:(index+1)*num_chan_avg].mean()
for index in range(num_chan)])
self.logger.debug("reduce_spectrum_channels: %d channels out", num_chan)
return specout, newrefval, newrefpix, newdelta
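  # Hedged worked example (illustrative numbers, not from the original source):
  #   16384 channels in with num_chan=1024 averages groups of 16 channels;
  #   refpix 8192 -> 512, and delta and refval are rescaled to the coarser grid.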
def get_freq_array(self, bandwidth, n_chans):
"""
Create an array of frequencies for the channels of a backend
@param bandwidth : bandwidth
@type bandwidth : float
@param n_chans : number of channels
@type n_chans : int
@return: frequency of each channel in same units as bandwidth
"""
return NP.arange(n_chans)*float(bandwidth)/n_chans
def freq_to_chan(frequency,bandwidth,n_chans):
"""
Returns the channel number where a given frequency is to be found.
@param frequency : frequency of channel in sane units as bandwidth.
@type frequency : float
@param bandwidth : upper limit of spectrometer passband
@type bandwidth : float
@param n_chans : number of channels in the spectrometer
@type n_chans : int
@return: channel number (int)
"""
if frequency < 0:
frequency = bandwidth + frequency
if frequency > bandwidth:
raise RuntimeError("that frequency is too high.")
return round(float(frequency)/bandwidth*n_chans) % n_chans
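  # Hedged worked examples (illustrative numbers, not from the original source;
  # note this helper is defined without ``self``, so it behaves like a static function):
  #   freq_to_chan(250., 1000., 1024)  -> round(0.25*1024) % 1024 = 256
  #   freq_to_chan(-100., 1000., 1024) -> -100 maps to 900 -> round(921.6) % 1024 = 922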
def get_smoothed_bandshape(self, degree = None, poly_order=15):
"""
Do a Gaussian smoothing of the spectrum and then fit a polynomial.
Optionally, the raw and smoothed data and the fitted polynomial can be
plotted.
Note
====
``numpy.polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False)``
Least squares polynomial fit.
Fit a polynomial::
p(x) = p[0] * x**deg + ... + p[deg]
of degree deg to points (x, y).
Returns a vector of coefficients p that minimises the squared error.
@param spectrum : input data
@type spectrum : list of float
    @param degree : number of samples to smooth over (Gaussian FWHM)
@type degree : int
@param poly_order : order of the polynomial
@type poly_order : int
@param plot : plotting option
@type plot : boolean
@return: (polynomial_coefficient, smoothed_spectrum)
"""
    if degree is None:
      degree = len(self.spectrum)//100  # must be an int; used for slicing below
# normalize the spectrum so max is 1 and convert to dB.
max_lev = NP.max(self.spectrum)
norm_spec = NP.array(self.spectrum)/float(max_lev)
norm_spec_db = 10*NP.log10(norm_spec)
# do a Gaussian smoothing
norm_spec_db_smoothed = smoothListGaussian(norm_spec_db, degree=degree)
# deal with the edges by making them equal to the smoothed end points
norm_spec_db_smoothed_resized = NP.ones(len(self.spectrum))
# left end
norm_spec_db_smoothed_resized[0:degree] = norm_spec_db_smoothed[0]
# middle
norm_spec_db_smoothed_resized[degree:degree+len(norm_spec_db_smoothed)] = \
norm_spec_db_smoothed
# right end
norm_spec_db_smoothed_resized[degree+len(norm_spec_db_smoothed):] = \
norm_spec_db_smoothed[-1]
    # least-squares polynomial fit of order ``poly_order``, as described in the docstring
    poly = NP.polyfit(NP.arange(len(norm_spec_db_smoothed_resized)),
                      norm_spec_db_smoothed_resized, poly_order)
    return poly, norm_spec_db_smoothed_resized
# ------------------------ module functions -------------------------------
def examine_text_data_file(filename):
"""
Examine a file to guide ``genfromtxt()``
Things to look for::
* Is there a header line with column names? If not, use argument ``names``.
* Is the number of names equal to the number of columns? If not::
- use argument ``names`` and ``skip_header=1``, or
- use argument ``delimiter`` with a list of column widths
and ``skip_header=1``.
"""
print(examine_text_data_file.__doc__)
fd = open(filename, "r")
lines = fd.readlines()
fd.close()
topline = lines[0].strip().split()
print(" 1 2 3 4 5 6 7")
print("01234567890123456789012345678901234567890123456789012345678901234567890123456789")
print(lines[0].strip())
print(lines[1].strip())
print(" ...")
print(lines[-1].strip())
data = NP.genfromtxt(filename, dtype=None, names=None, skip_header=1, encoding=None)
print("%d datatypes:" % len(data.dtype.fields))
for item in data.dtype.fields:
print(item, data.dtype.fields[item])
def get_obs_dirs(project, station, year, DOY, datafmt=None):
"""
Returns the directories where data and working files are kept
@param project : project code string, e.g., RRL
@type project : str
@param station : DSN station number
@type station : int
@param year : year of observation
@type year : int
@param DOY : day of year of observations
@type DOY : int
@param datafmt : raw data format
@type datafmt : str
"""
#logger.debug("get_obs_dirs: type %s for %s, DSS%d, %4d/%03d",
# datafmt, project, station, year, DOY)
obspath = "dss%2d/%4d/%03d/" % (station,year,DOY)
if project:
projdatapath = "/usr/local/project_data/"+project+"/"+obspath
projworkpath = "/usr/local/projects/"+project+"/Observations/"+obspath
else:
projdatapath = ""
projworkpath = ""
if datafmt:
rawdatapath = "/usr/local/RA_data/"+datafmt+"/"+obspath
else:
rawdatapath = ""
return projdatapath, projworkpath, rawdatapath
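# Hedged worked example (hypothetical project/station/date, not from the original source):
#   get_obs_dirs("RRL", 43, 2019, 123, datafmt="HDF5") returns
#   ("/usr/local/project_data/RRL/dss43/2019/123/",
#    "/usr/local/projects/RRL/Observations/dss43/2019/123/",
#    "/usr/local/RA_data/HDF5/dss43/2019/123/")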
# --------- old stuff to be discarded, but still needed for now ---------------
def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
"""
Provides project, station, year and DOY, asking as needed.
It follows one of several possible paths to get to the session::
proj - path through /usr/local/projects/<project>
hdf5 - path through /usr/local/RA_data/HDF5
fits - path through /usr/local/RA_data/FITS
wvsr - path through /data
@param project : optional name as defined in /usr/local/projects
@type project : str
@param dss : optional station number
@type dss : int
@param date : optional YYYY/DDD
@type date : str
@return: project, DSS, year, DOY.
"""
def get_directory(path):
"""
"""
# only one trailing /
path = path.rstrip('/')+"/*"
logger.debug("get_obs_session:get_directory: from %s", path)
names = glob.glob(path)
if names:
dirs = []
for name in names:
if os.path.isdir(name):
dirs.append(os.path.basename(name))
dirs.sort()
for name in dirs:
print((name), end=' ')
return input('\n>')
else:
return []
def from_wvsr_dir():
"""
this needs to be completed and tested on crab14 or an auto host
"""
session = get_directory(local_dirs.wvsr_dir)
return session
cwd = os.getcwd()
# get the project
if project:
pass
else:
os.chdir(local_dirs.projects_dir)
project = get_directory(local_dirs.projects_dir)
logger.debug("from_wvsr_dir: project is %s", project)
projectpath = local_dirs.projects_dir+project
  # follow the requested path option
if path[:4].lower() == 'wvsr':
# special call
print("from_wvsr_dir()")
if path[:4].lower() == 'proj':
os.chdir(projectpath+"/Observations/")
elif path[:4].lower() == 'hdf5':
os.chdir(local_dirs.hdf5_dir)
elif path[:4].lower() == 'fits':
os.chdir(local_dirs.fits_dir)
# get the station
if dss:
pass
else:
# This seems odd but get_directory() needs '/' and int does not
station = get_directory(os.getcwd()+"/").rstrip('/')
dss = int(station[-2:])
stationpath = os.getcwd()+"/dss"+str(dss)
# get the date
if date:
items = date.split('/')
year = int(items[0])
DOY = int(items[1])
else:
year = int(get_directory(stationpath))
yearpath = stationpath+"/"+str(year)
DOY = int(get_directory(yearpath))
os.chdir(cwd)
return project, dss, year, DOY
|
[
"logging.getLogger",
"Astronomy.apparent_to_J2000",
"datetime.datetime.utcfromtimestamp",
"numpy.log10",
"math.log",
"math.cos",
"numpy.array",
"Math.clusters.find_clusters",
"Astronomy.J2000_to_apparent",
"numpy.genfromtxt",
"numpy.arange",
"re.search",
"os.path.exists",
"readline.parse_and_bind",
"support.text.select_files",
"numpy.max",
"os.path.isdir",
"Astronomy.Ephem.calibrator",
"DatesTimes.UnixTime_to_MJD",
"numpy.meshgrid",
"h5py.File.__init__",
"glob.glob",
"matplotlib.dates.date2num",
"os.path.splitext",
"Astronomy.RaDec_to_AzEl",
"support.PropertiedClass.__init__",
"Astronomy.Ephem.ephem.Sun",
"DatesTimes.day_of_year",
"Astronomy.AzEl_to_RaDec",
"os.makedirs",
"os.getcwd",
"os.chdir",
"Astronomy.DSN_coordinates.DSS",
"os.path.basename"
] |
[((2597, 2637), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: complete"""'], {}), "('tab: complete')\n", (2620, 2637), False, 'import readline\n'), ((2648, 2675), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2665, 2675), False, 'import logging\n'), ((43188, 43265), 'numpy.genfromtxt', 'NP.genfromtxt', (['filename'], {'dtype': 'None', 'names': 'None', 'skip_header': '(1)', 'encoding': 'None'}), '(filename, dtype=None, names=None, skip_header=1, encoding=None)\n', (43201, 43265), True, 'import numpy as NP\n'), ((45758, 45769), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (45767, 45769), False, 'import os\n'), ((46816, 46829), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (46824, 46829), False, 'import os\n'), ((7573, 7620), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Observation')"], {}), "(logger.name + '.Observation')\n", (7590, 7620), False, 'import logging\n'), ((8097, 8144), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Observation')"], {}), "(logger.name + '.Observation')\n", (8114, 8144), False, 'import logging\n'), ((14217, 14378), 'numpy.genfromtxt', 'NP.genfromtxt', (['(self.sessionpath + filename)'], {'delimiter': 'delimiter', 'dtype': 'None', 'names': 'names', 'case_sensitive': '"""lower"""', 'skip_header': 'skip_header', 'encoding': 'None'}), "(self.sessionpath + filename, delimiter=delimiter, dtype=None,\n names=names, case_sensitive='lower', skip_header=skip_header, encoding=None\n )\n", (14230, 14378), True, 'import numpy as NP\n'), ((22713, 22778), 'numpy.arange', 'NP.arange', (['(-width / 2)', '(width / 2 + self.xstep / 2)', '(self.xstep / 2)'], {}), '(-width / 2, width / 2 + self.xstep / 2, self.xstep / 2)\n', (22722, 22778), True, 'import numpy as NP\n'), ((22830, 22897), 'numpy.arange', 'NP.arange', (['(-height / 2)', '(height / 2 + self.ystep / 2)', '(self.ystep / 2)'], {}), '(-height / 2, height / 2 + self.ystep / 2, self.ystep / 2)\n', (22839, 22897), True, 'import numpy as NP\n'), ((28963, 28997), 'numpy.array', 'NP.array', (["self.data['xdec_offset']"], {}), "(self.data['xdec_offset'])\n", (28971, 28997), True, 'import numpy as NP\n'), ((29028, 29061), 'numpy.array', 'NP.array', (["self.data['dec_offset']"], {}), "(self.data['dec_offset'])\n", (29036, 29061), True, 'import numpy as NP\n'), ((30517, 30562), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Recording')"], {}), "(logger.name + '.Recording')\n", (30534, 30562), False, 'import logging\n'), ((31174, 31209), 'h5py.File.__init__', 'h5py.File.__init__', (['self', 'name', '"""w"""'], {}), "(self, name, 'w')\n", (31192, 31209), False, 'import h5py\n'), ((32576, 32619), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Session')"], {}), "(logger.name + '.Session')\n", (32593, 32619), False, 'import logging\n'), ((36501, 36545), 'logging.getLogger', 'logging.getLogger', (["(logger.name + '.Spectrum')"], {}), "(logger.name + '.Spectrum')\n", (36518, 36545), False, 'import logging\n'), ((41525, 41546), 'numpy.max', 'NP.max', (['self.spectrum'], {}), '(self.spectrum)\n', (41531, 41546), True, 'import numpy as NP\n'), ((45289, 45304), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (45298, 45304), False, 'import glob\n'), ((45825, 45858), 'os.chdir', 'os.chdir', (['local_dirs.projects_dir'], {}), '(local_dirs.projects_dir)\n', (45833, 45858), False, 'import os\n'), ((46154, 46194), 'os.chdir', 'os.chdir', (["(projectpath + '/Observations/')"], {}), "(projectpath + '/Observations/')\n", (46162, 
46194), False, 'import os\n'), ((7710, 7725), 'Astronomy.DSN_coordinates.DSS', 'coords.DSS', (['dss'], {}), '(dss)\n', (7720, 7725), True, 'import Astronomy.DSN_coordinates as coords\n'), ((11986, 12024), 'support.PropertiedClass.__init__', 'support.PropertiedClass.__init__', (['self'], {}), '(self)\n', (12018, 12024), False, 'import support\n'), ((12072, 12120), 'logging.getLogger', 'logging.getLogger', (["(self.parent.name + '.Channel')"], {}), "(self.parent.name + '.Channel')\n", (12089, 12120), False, 'import logging\n'), ((23444, 23497), 'numpy.meshgrid', 'NP.meshgrid', (["self.data['grid_x']", "self.data['grid_y']"], {}), "(self.data['grid_x'], self.data['grid_y'])\n", (23455, 23497), True, 'import numpy as NP\n'), ((24731, 24810), 'Astronomy.AzEl_to_RaDec', 'A.AzEl_to_RaDec', (['azimuth', 'elevation', 'self.latitude', '(-self.longitude)', 'time_tuple'], {}), '(azimuth, elevation, self.latitude, -self.longitude, time_tuple)\n', (24746, 24810), True, 'import Astronomy as A\n'), ((25326, 25348), 'DatesTimes.UnixTime_to_MJD', 'DT.UnixTime_to_MJD', (['tm'], {}), '(tm)\n', (25344, 25348), True, 'import DatesTimes as DT\n'), ((25489, 25557), 'Astronomy.apparent_to_J2000', 'A.apparent_to_J2000', (['MJD', 'UT', 'ra', 'dec', 'self.longitude', 'self.latitude'], {}), '(MJD, UT, ra, dec, self.longitude, self.latitude)\n', (25508, 25557), True, 'import Astronomy as A\n'), ((26099, 26121), 'DatesTimes.UnixTime_to_MJD', 'DT.UnixTime_to_MJD', (['tm'], {}), '(tm)\n', (26117, 26121), True, 'import DatesTimes as DT\n'), ((26281, 26357), 'Astronomy.J2000_to_apparent', 'A.J2000_to_apparent', (['MJD', 'UT', '(ra2000 * math.pi / 12)', '(dec2000 * math.pi / 180)'], {}), '(MJD, UT, ra2000 * math.pi / 12, dec2000 * math.pi / 180)\n', (26300, 26357), True, 'import Astronomy as A\n'), ((27075, 27143), 'Astronomy.RaDec_to_AzEl', 'A.RaDec_to_AzEl', (['ra', 'dec', 'self.latitude', 'self.longitude', '(year, doy)'], {}), '(ra, dec, self.latitude, self.longitude, (year, doy))\n', (27090, 27143), True, 'import Astronomy as A\n'), ((28094, 28108), 'Astronomy.Ephem.ephem.Sun', 'AE.ephem.Sun', ([], {}), '()\n', (28106, 28108), True, 'import Astronomy.Ephem as AE\n'), ((28131, 28152), 'Astronomy.Ephem.calibrator', 'AE.calibrator', (['source'], {}), '(source)\n', (28144, 28152), True, 'import Astronomy.Ephem as AE\n'), ((28283, 28347), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (["self.data['unixtime'][count]"], {}), "(self.data['unixtime'][count])\n", (28317, 28347), False, 'import datetime\n'), ((33606, 33638), 'os.path.exists', 'os.path.exists', (['self.session_dir'], {}), '(self.session_dir)\n', (33620, 33638), False, 'import os\n'), ((33646, 33685), 'os.makedirs', 'os.makedirs', (['self.session_dir'], {'mode': '(509)'}), '(self.session_dir, mode=509)\n', (33657, 33685), False, 'import os\n'), ((34858, 34888), 'os.path.splitext', 'os.path.splitext', (['name_pattern'], {}), '(name_pattern)\n', (34874, 34888), False, 'import os\n'), ((35293, 35320), 're.search', 're.search', (['"""HDF5"""', 'datapath'], {}), "('HDF5', datapath)\n", (35302, 35320), False, 'import re\n'), ((35874, 35889), 'glob.glob', 'glob.glob', (['full'], {}), '(full)\n', (35883, 35889), False, 'import glob\n'), ((35918, 35949), 'support.text.select_files', 'support.text.select_files', (['full'], {}), '(full)\n', (35943, 35949), False, 'import support\n'), ((38060, 38081), 'math.log', 'math.log', (['num_chan', '(2)'], {}), '(num_chan, 2)\n', (38068, 38081), False, 'import math\n'), ((38304, 38329), 'math.log', 
'math.log', (['num_chans_in', '(2)'], {}), '(num_chans_in, 2)\n', (38312, 38329), False, 'import math\n'), ((41563, 41586), 'numpy.array', 'NP.array', (['self.spectrum'], {}), '(self.spectrum)\n', (41571, 41586), True, 'import numpy as NP\n'), ((41624, 41643), 'numpy.log10', 'NP.log10', (['norm_spec'], {}), '(norm_spec)\n', (41632, 41643), True, 'import numpy as NP\n'), ((46232, 46261), 'os.chdir', 'os.chdir', (['local_dirs.hdf5_dir'], {}), '(local_dirs.hdf5_dir)\n', (46240, 46261), False, 'import os\n'), ((46550, 46561), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46559, 46561), False, 'import os\n'), ((17776, 17814), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['tm'], {}), '(tm)\n', (17810, 17814), False, 'import datetime\n'), ((21262, 21286), 'Math.clusters.find_clusters', 'VQ.find_clusters', (['coords'], {}), '(coords)\n', (21278, 21286), True, 'import Math.clusters as VQ\n'), ((35357, 35392), 're.search', 're.search', (['"""project_data"""', 'datapath'], {}), "('project_data', datapath)\n", (35366, 35392), False, 'import re\n'), ((39777, 39795), 'numpy.arange', 'NP.arange', (['n_chans'], {}), '(n_chans)\n', (39786, 39795), True, 'import numpy as NP\n'), ((45371, 45390), 'os.path.isdir', 'os.path.isdir', (['name'], {}), '(name)\n', (45384, 45390), False, 'import os\n'), ((46301, 46330), 'os.chdir', 'os.chdir', (['local_dirs.fits_dir'], {}), '(local_dirs.fits_dir)\n', (46309, 46330), False, 'import os\n'), ((17893, 17910), 'matplotlib.dates.date2num', 'MPLd.date2num', (['dt'], {}), '(dt)\n', (17906, 17910), True, 'import matplotlib.dates as MPLd\n'), ((24407, 24448), 'DatesTimes.day_of_year', 'DT.day_of_year', (['dt.year', 'dt.month', 'dt.day'], {}), '(dt.year, dt.month, dt.day)\n', (24421, 24448), True, 'import DatesTimes as DT\n'), ((35439, 35503), 'support.text.select_files', 'support.text.select_files', (["(datapath + name_pattern + '[0-9].pkl')"], {}), "(datapath + name_pattern + '[0-9].pkl')\n", (35464, 35503), False, 'import support\n'), ((35511, 35538), 're.search', 're.search', (['"""FITS"""', 'datapath'], {}), "('FITS', datapath)\n", (35520, 35538), False, 'import re\n'), ((36953, 36982), 'math.log', 'math.log', (['est_num_chan_out', '(2)'], {}), '(est_num_chan_out, 2)\n', (36961, 36982), False, 'import math\n'), ((28771, 28787), 'math.cos', 'math.cos', (['decrad'], {}), '(decrad)\n', (28779, 28787), False, 'import math\n'), ((35560, 35620), 'support.text.select_files', 'support.text.select_files', (["(datapath + name_pattern + '.fits')"], {}), "(datapath + name_pattern + '.fits')\n", (35585, 35620), False, 'import support\n'), ((45414, 45436), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (45430, 45436), False, 'import os\n'), ((46477, 46488), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (46486, 46488), False, 'import os\n')]
|
import numpy as np
from wordreps import WordReps
from algebra import cosine, normalize
import tensorflow as tf
import random
from dataset import DataSet
import CGRE_Model
from Eval import eval_SemEval
import sklearn.preprocessing
# ============ End Imports ============
class Training():
def __init__(self):
# Compositional relation embeddings (G1) Hyperparameters
self.batchSize=100
G1_HL=3
G1_Hdim=WR.dim
G1_BN=True #boolean variable T/F for batch normalization on G1 MLP
G1_l2_reg=0.001 # L2 regularization coefficient
self.G1_pkeep=1.0 # 1.0 means no Dropout applied during training on G1
# LSTM pattern encoding (G2) Hyperparameters
G2_HL=1
G2_Hdim=WR.dim
self.G2_pkeep=1.0 # 1.0 means no Dropout applied during training on G2
activ='tanh'
# Create relational model instance
self.RelModel=CGRE_Model.CGRE(activ,self.batchSize)
self.RelModel.G1_model(Ea,G1_BN,G1_HL,G1_Hdim,G1_l2_reg)
self.RelModel.G2_rnn_model(DS.max_length,G2_HL,G2_Hdim)
# --------------------------------------------------
def Train_Model(self):
# Hyperparameters
epochs=500
hist_loss=[]
hist_acc=[]
winn_loss=1e7
win_acc=-1
# Discriminator Hyperparameters (for Rel-Rep-alignment model)
D_HL=0
D_Hdim=WR.dim
D_BN=False # boolean variable T/F for batch normalization on D
self.D_pkeep=1.0 # 1.0 means no Dropout applied during training on the Discriminator D
D_l2_reg=0.001 # L2 regularization coefficient (to perform l2 regularized cross-entropy)
Train = DS.Training_triplesIDs
Train_Relations=set([rel for (a,b,p,w,rel) in Train])
Num_of_Classes=len(Train_Relations)
print ("Number of relation labels for cross-entropy objective=",Num_of_Classes)
# Assign ids to relations
Rel2id={}
i=0
for rel in Train_Relations:
Rel2id[rel]=i
i+=1
Train_dic={}
for (a,b,p,w,rel) in Train:
Train_dic.setdefault((a,b,rel),[])
Train_dic[(a,b,rel)].append((p,w))
Training_patterns=set([p for (_,_,p,_,_) in Train])
print ('Number of training patterns after removing test instances=',len(Training_patterns))
Train_list=list(Train_dic.keys())
print ("Number of training word-pairs (a,b,[(p,w)])",len(Train_list))
self.RelModel.define_loss(D_HL,D_Hdim,D_BN,D_l2_reg,Num_of_Classes)
self.RelModel.optimize()
self.sess=tf.Session()
self.sess.run(tf.global_variables_initializer())
print ("==========================================================================")
for epoch in range(epochs):
# Randomly shuffle training instances for each epoch
random.shuffle(Train_list)
# performance every 20 steps
if epoch%1==0:
Pair_Embeddings=self.Gen_Pair_Embeddings()
acc_1,corr_1=eval_SemEval(Pair_Embeddings,'Test')
acc_2,corr_2=eval_SemEval(Pair_Embeddings,'Valid')
acc_3,corr_3=eval_SemEval(Pair_Embeddings,'All')
print ("Epoch:%d, Acc_Test:%f, Acc_Valid:%f, Acc_All:%f, Corr_Test:%f, Corr_Valid:%f, Corr_All:%f"%(epoch,acc_1,acc_2,acc_3,corr_1,corr_2,corr_3))
hist_acc.append(acc_2)
# For early stopping
if acc_2>win_acc:
win_acc=acc_2
self.Save_Trained_Model()
print ("Parameters and Pair-Embeddings are changed...")
best_epoch=epoch
patient_cnt=0
else:
patient_cnt+=1
if patient_cnt>10:
print ("early stopping ... epoch number %d"%epoch)
print ("Winner acc:%f at epoch:%d"%(win_acc,best_epoch))
# break
# Training
for minibatch in next_batch(self.batchSize,Train_list):
a_ids,b_ids,labels=shred_tuples(minibatch)
Train_Y=np.zeros((len(minibatch),Num_of_Classes))
for i,rel in enumerate(labels):
rel_id=Rel2id[rel]
Train_Y[i,rel_id]=1.0
train_data={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:self.G1_pkeep,\
self.RelModel.is_training:True,self.RelModel.D_pkeep:self.D_pkeep}
minibatch_patterns=[Train_dic[(a,b,rel)] for (a,b,rel) in minibatch]
max_num_of_patterns,pattern_seq,early_stop,weights=Pattern_Sequences(a_ids,b_ids,minibatch_patterns)
train_data[self.RelModel.max_num_of_patterns]=max_num_of_patterns
train_data[self.RelModel.patterns_ids]=pattern_seq
train_data[self.RelModel.early_stop]=early_stop
train_data[self.RelModel.weights]=weights
train_data[self.RelModel.G2_pkeep]=self.G2_pkeep
# Loss options
train_data[self.RelModel.Y_]=Train_Y
self.sess.run(self.RelModel.train_step,feed_dict=train_data)
# --------------------------------------------------
def Save_Trained_Model(self):
Pair_Embeddings_dic=self.Gen_Pair_Embeddings()
np.save("res/Pair_Embeddings.npy",Pair_Embeddings_dic)
# --------------------------------------------------
def Gen_Pair_Embeddings(self):
word_pairs_ids=[(DS.word2id[a],DS.word2id[b]) for (a,b) in DS.Test_Pairs]
a_ids=[t[0] for t in word_pairs_ids]
b_ids=[t[1] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings1=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings1=sklearn.preprocessing.normalize(Pair_Embeddings1,axis=1,norm='l2') #L2 norm of r(a,b)
a_ids=[t[1] for t in word_pairs_ids]
b_ids=[t[0] for t in word_pairs_ids]
dic={self.RelModel.a_ids:a_ids,self.RelModel.b_ids:b_ids,self.RelModel.G1_pkeep:1.0,self.RelModel.is_training:False}
Pair_Embeddings2=self.sess.run(self.RelModel.Last_G1_output,feed_dict=dic)
# Pair_Embeddings2=sklearn.preprocessing.normalize(Pair_Embeddings2,axis=1,norm='l2') #L2 norm of r(b,a)
Pair_Embeddings=np.hstack((Pair_Embeddings1,Pair_Embeddings2))
Pair_Embeddings_dic={}
for i,(a,b) in enumerate(DS.Test_Pairs):
Pair_Embeddings_dic[(a,b)]=Pair_Embeddings[i]
return Pair_Embeddings_dic
# ============ End of the Evaluation class ============
def next_batch(batchSize,data):
# loop over our dataset in mini-batches of size `batchSize`
for i in np.arange(0, len(data), batchSize):
# yield the current batched data
yield data[i:i + batchSize]
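# Hedged usage sketch (illustrative list, not from the original source):
#   list(next_batch(2, [1, 2, 3, 4, 5])) -> [[1, 2], [3, 4], [5]]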
# -------------------------------------------------------
def shred_tuples(tuples):
a_ids=[t[0] for t in tuples]
b_ids=[t[1] for t in tuples]
labels=[t[2] for t in tuples]
return a_ids,b_ids,labels
# -------------------------------------------------------
def Pattern_Sequences(a_ids,b_ids,minibatch_patterns):
max_num_of_patterns=np.max([len(L) for L in minibatch_patterns])
min_num_of_patterns=np.min([len(L) for L in minibatch_patterns])
# print ("Max num of patterns:",max_num_of_patterns)
# print ("Min num of patterns:",min_num_of_patterns)
pattern_seq=np.zeros((len(a_ids)*max_num_of_patterns,DS.max_length+2),dtype=int) #+2 is for the targeted two entities a and b
early_stop=[0 for i in range(len(a_ids)*max_num_of_patterns)]
weights=[0.0 for i in range(len(a_ids)*max_num_of_patterns)]
for i in range(len(a_ids)):
set_of_patterns=minibatch_patterns[i]
for j in range(max_num_of_patterns):
if j<len(set_of_patterns):
pattern_id,w=set_of_patterns[j][0],set_of_patterns[j][1]
pattern=DS.id2Patterns[pattern_id]
words=pattern.strip().split(' ')
words.insert(0,DS.id2word[a_ids[i]])
words.append(DS.id2word[b_ids[i]])
early_stop[(i*max_num_of_patterns)+j]=len(words)
weights[(i*max_num_of_patterns)+j]=w
for k,word in enumerate(words):
pattern_seq[(i*max_num_of_patterns)+j,k]=DS.word2id[word]
return max_num_of_patterns,pattern_seq,early_stop,weights
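# Hedged sketch of the produced layout (hypothetical sizes, not from the original source):
#   for 2 word pairs with at most 3 patterns each, pattern_seq has shape
#   (2*3, DS.max_length+2); rows for missing patterns stay all-zero,
#   with early_stop=0 and weight=0.0.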
# -----------------------------------------------------------
if __name__=="__main__":
'''
Word Embeddings
'''
pretrained_glove_300=("../glove.6B.300d.zip","glove",300)
WR=WordReps()
norm=1
standardise=0
WR.Read_Embeddings_zip_file(pretrained_glove_300,norm,standardise)
WR.vects['<PAD>']=np.zeros(WR.dim)
# WR.vects['X']=np.random.rand(WR.dim)
# WR.vects['Y']=np.random.rand(WR.dim)
WR.vects['X']=np.random.normal(size=(WR.dim)).astype('float32')
WR.vects['Y']=np.random.normal(size=(WR.dim)).astype('float32')
'''
Dataset
'''
corpus='Wikipedia_English'
Train_dataset=('DiffVec',"DiffVec_Pairs")
Test_dataset=('SemEval',"SemEval_Pairs.txt")
labels_type='proxy'
Reverse_pairs=True
DS=DataSet(corpus,Train_dataset,Test_dataset,labels_type,Reverse_pairs)
id2Patterns="../Relational_Patterns/Patterns_Xmid5Y"
Patterns_per_pair="../Relational_Patterns/Patterns_Xmid5Y_PerPair"
DS.Retrieve_Patterns(id2Patterns,Patterns_per_pair)
Ea=DS.Generate_Embedding_Matrix(WR)
'''
Training & Evaluation
'''
Eval=Training()
Eval.Train_Model()
|
[
"numpy.random.normal",
"random.shuffle",
"CGRE_Model.CGRE",
"dataset.DataSet",
"wordreps.WordReps",
"numpy.hstack",
"tensorflow.Session",
"Eval.eval_SemEval",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"numpy.save"
] |
[((7593, 7603), 'wordreps.WordReps', 'WordReps', ([], {}), '()\n', (7601, 7603), False, 'from wordreps import WordReps\n'), ((7714, 7730), 'numpy.zeros', 'np.zeros', (['WR.dim'], {}), '(WR.dim)\n', (7722, 7730), True, 'import numpy as np\n'), ((8127, 8199), 'dataset.DataSet', 'DataSet', (['corpus', 'Train_dataset', 'Test_dataset', 'labels_type', 'Reverse_pairs'], {}), '(corpus, Train_dataset, Test_dataset, labels_type, Reverse_pairs)\n', (8134, 8199), False, 'from dataset import DataSet\n'), ((828, 866), 'CGRE_Model.CGRE', 'CGRE_Model.CGRE', (['activ', 'self.batchSize'], {}), '(activ, self.batchSize)\n', (843, 866), False, 'import CGRE_Model\n'), ((2289, 2301), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2299, 2301), True, 'import tensorflow as tf\n'), ((4539, 4594), 'numpy.save', 'np.save', (['"""res/Pair_Embeddings.npy"""', 'Pair_Embeddings_dic'], {}), "('res/Pair_Embeddings.npy', Pair_Embeddings_dic)\n", (4546, 4594), True, 'import numpy as np\n'), ((5540, 5587), 'numpy.hstack', 'np.hstack', (['(Pair_Embeddings1, Pair_Embeddings2)'], {}), '((Pair_Embeddings1, Pair_Embeddings2))\n', (5549, 5587), True, 'import numpy as np\n'), ((2318, 2351), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2349, 2351), True, 'import tensorflow as tf\n'), ((2529, 2555), 'random.shuffle', 'random.shuffle', (['Train_list'], {}), '(Train_list)\n', (2543, 2555), False, 'import random\n'), ((7827, 7856), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'WR.dim'}), '(size=WR.dim)\n', (7843, 7856), True, 'import numpy as np\n'), ((7892, 7921), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'WR.dim'}), '(size=WR.dim)\n', (7908, 7921), True, 'import numpy as np\n'), ((2671, 2708), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""Test"""'], {}), "(Pair_Embeddings, 'Test')\n", (2683, 2708), False, 'from Eval import eval_SemEval\n'), ((2725, 2763), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""Valid"""'], {}), "(Pair_Embeddings, 'Valid')\n", (2737, 2763), False, 'from Eval import eval_SemEval\n'), ((2780, 2816), 'Eval.eval_SemEval', 'eval_SemEval', (['Pair_Embeddings', '"""All"""'], {}), "(Pair_Embeddings, 'All')\n", (2792, 2816), False, 'from Eval import eval_SemEval\n')]
|
"""
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
With support for missing data.
"""
import numpy as np
import scipy as sci
from scipy import linalg
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
from .._hals_update import _hals_update
def mncp_hals(X, rank, mask, random_state=None, init='rand', **options):
"""
    Fits nonnegative CP Decomposition using the Hierarchical Alternating Least
Squares (HALS) Method. Supports missing data.
Parameters
----------
X : (I_1, ..., I_N) array_like
A real array with nonnegative entries and ``X.ndim >= 3``.
rank : integer
The `rank` sets the number of components to be computed.
mask : (I_1, ..., I_N) array_like
A binary tensor with the same shape as ``X``. All entries equal to zero
correspond to held out or missing data in ``X``. All entries equal to
one correspond to observed entries in ``X`` and the decomposition is
fit to these datapoints.
random_state : integer, RandomState instance or None, optional (default ``None``)
If integer, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by np.random.
init : str, or KTensor, optional (default ``'rand'``).
Specifies initial guess for KTensor factor matrices.
If ``'randn'``, Gaussian random numbers are used to initialize.
If ``'rand'``, uniform random numbers are used to initialize.
If KTensor instance, a copy is made to initialize the optimization.
options : dict, specifying fitting options.
tol : float, optional (default ``tol=1E-5``)
Stopping tolerance for reconstruction error.
max_iter : integer, optional (default ``max_iter = 500``)
Maximum number of iterations to perform before exiting.
min_iter : integer, optional (default ``min_iter = 1``)
Minimum number of iterations to perform before exiting.
max_time : integer, optional (default ``max_time = np.inf``)
Maximum computational time before exiting.
verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
Display progress.
Returns
-------
result : FitResult instance
Object which holds the fitted results. It provides the factor matrices
in form of a KTensor, ``result.factors``.
Notes
-----
    This implementation uses the Hierarchical Alternating Least Squares method.
References
----------
Cichocki, Andrzej, and <NAME>. "Fast local algorithms for
large scale nonnegative matrix and tensor factorizations."
IEICE transactions on fundamentals of electronics, communications and
computer sciences 92.3: 708-721, 2009.
Examples
--------
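    A minimal sketch with hypothetical shapes (not part of the original docstring)::

        import numpy as np
        X = np.random.rand(20, 15, 10)
        mask = np.random.rand(20, 15, 10) > 0.1    # ~90% of entries observed
        result = mncp_hals(X, rank=3, mask=mask)
        factors = result.factors                     # fitted KTensor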
"""
# Mask missing elements.
X = np.copy(X)
X[~mask] = np.linalg.norm(X[mask])
# Check inputs.
optim_utils._check_cpd_inputs(X, rank)
# Initialize problem.
U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
result = FitResult(U, 'NCP_HALS', **options)
    # Compute the norm of the observed (unmasked) entries.
normX = linalg.norm(X[mask].ravel())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Iterate the HALS algorithm until convergence or maxiter is reached
# i) compute the N gram matrices and multiply
# ii) Compute Khatri-Rao product
# iii) Update component U_1, U_2, ... U_N
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
while result.still_optimizing:
# First, HALS update.
for n in range(X.ndim):
            # Select all components except U_n
components = [U[j] for j in range(X.ndim) if j != n]
# i) compute the N-1 gram matrices
grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
# ii) Compute Khatri-Rao product
kr = khatri_rao(components)
p = unfold(X, n).dot(kr)
# iii) Update component U_n
_hals_update(U[n], grams, p)
# Then, update masked elements.
pred = U.full()
X[~mask] = pred[~mask]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update the optimization result, checks for convergence.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Compute objective function
# grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
# obj = np.sqrt( (sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX**2)) / normX
resid = X - pred
result.update(linalg.norm(resid.ravel()) / normX)
# end optimization loop, return result.
return result.finalize()
|
[
"tensortools.optimize.optim_utils._check_cpd_inputs",
"numpy.copy",
"tensortools.operations.khatri_rao",
"tensortools.operations.unfold",
"tensortools.optimize.FitResult",
"tensortools.optimize.optim_utils._get_initial_ktensor",
"numpy.linalg.norm"
] |
[((3073, 3083), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (3080, 3083), True, 'import numpy as np\n'), ((3099, 3122), 'numpy.linalg.norm', 'np.linalg.norm', (['X[mask]'], {}), '(X[mask])\n', (3113, 3122), True, 'import numpy as np\n'), ((3148, 3186), 'tensortools.optimize.optim_utils._check_cpd_inputs', 'optim_utils._check_cpd_inputs', (['X', 'rank'], {}), '(X, rank)\n', (3177, 3186), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((3229, 3290), 'tensortools.optimize.optim_utils._get_initial_ktensor', 'optim_utils._get_initial_ktensor', (['init', 'X', 'rank', 'random_state'], {}), '(init, X, rank, random_state)\n', (3261, 3290), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((3304, 3339), 'tensortools.optimize.FitResult', 'FitResult', (['U', '"""NCP_HALS"""'], {}), "(U, 'NCP_HALS', **options)\n", (3313, 3339), False, 'from tensortools.optimize import FitResult, optim_utils\n'), ((4188, 4210), 'tensortools.operations.khatri_rao', 'khatri_rao', (['components'], {}), '(components)\n', (4198, 4210), False, 'from tensortools.operations import unfold, khatri_rao\n'), ((4227, 4239), 'tensortools.operations.unfold', 'unfold', (['X', 'n'], {}), '(X, n)\n', (4233, 4239), False, 'from tensortools.operations import unfold, khatri_rao\n')]
|
"""
@author: <NAME> "Mayou36"
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED!
Contains several tools to convert, load, save and plot data
"""
import warnings
import os
import copy
import pandas as pd
import numpy as np
import uproot
import pickle
from . import dev_tool
# both produce error (27.07.2016) when importing them if run from main.py.
# No problem when run as main...
# from raredecay.tools import dev_tool
from .. import meta_config as meta_cfg
def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):
"""Search for best cut on value to still keep percent_sig_to_keep of signal
Parameters
----------
signal_data : 1-D numpy array
The signal
bkg_data : 1-D numpy array
The background data
percent_sig_to_keep : 0 < float <= 100
What percentage of the data to keep in order to apply the cuts.
"""
# if percent_sig_to_keep < 100:
    #     raise NotImplementedError("percentage of < 100 not yet implemented")
percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep
bkg_length_before = len(bkg_data)
bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length
lower_cut, upper_cut = np.percentile(signal_data, percentile)
cut_bkg = np.count_nonzero(
np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut)
)
rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length
return [lower_cut, upper_cut], rejected_bkg
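# Hedged usage sketch (hypothetical arrays, not from the original source):
#   sig = np.random.normal(0., 1., 1000)
#   bkg = np.random.normal(0., 3., 1000)
#   (lower, upper), rejected = apply_cuts(sig, bkg, percent_sig_to_keep=95)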
def make_root_dict(path_to_rootfile, tree_name, branches):
"""Returns a root_numpy compatible "root-dict" of a root-tree.
Parameters
----------
path_to_rootfile : str
The exact path to the root-tree including the filename. Example:
/home/user1/data/myRootTree1.root
tree_name : str
The name of the tree
branches : str or list[str, str, str,... ]
The branches of the tree to use
"""
output = dict(filenames=path_to_rootfile, treename=tree_name, branches=branches)
output = dev_tool.entries_to_str(output)
return output
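# Hedged usage sketch (added illustration): builds a root-dict using the example
# path from the docstring; the tree and branch names below are placeholders.
def _demo_make_root_dict():
    return make_root_dict("/home/user1/data/myRootTree1.root",
                          tree_name="DecayTree", branches=["B_PT", "B_ETA"])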
def add_to_rootfile(rootfile, new_branch, branch_name=None, overwrite=True):
"""Adds a new branch to a given root file.
.. warning:: Overwrite not working currently!
Parameters
----------
rootfile : root-dict
The ROOT-file where the data should be added
new_branch : numpy.array 1-D, list, root-dict
A one-dimensional numpy array that contains the data.
branch_name : str
        The name of the branch, resp. the name in the dtype of the array.
"""
from root_numpy import array2root
from rootpy.io import root_open
rootfile = dev_tool.entries_to_str(rootfile)
new_branch = dev_tool.entries_to_str(new_branch)
branch_name = dev_tool.entries_to_str(branch_name)
# get the right parameters
    # TODO: what is that if doing there? an assertion maybe?
write_mode = "update"
branch_name = "new_branch1" if branch_name is None else branch_name
if isinstance(rootfile, dict):
filename = rootfile.get("filenames")
treename = rootfile.get("treename")
new_branch = to_ndarray(new_branch)
# new_branch.dtype = [(branch_name, 'f8')]
# write to ROOT-file
write_to_root = False
if os.path.isfile(filename):
with root_open(filename, mode="a") as root_file:
tree = getattr(root_file, treename) # test
if not tree.has_branch(branch_name):
write_to_root = True
# array2tree(new_branch, tree=tree)
# f.write("", TObject.kOverwrite) # overwrite, does not create friends
else:
write_mode = "recreate"
write_to_root = True
if write_to_root:
arr = np.core.records.fromarrays([new_branch], names=branch_name)
array2root(arr=arr, filename=filename, treename=treename, mode=write_mode)
return 0
else:
return 1
# TODO: remove? outdated
def format_data_weights(data_to_shape, weights):
"""Format the data and the weights perfectly. Same length and more.
Change the data to pandas.DataFrame and fill the weights with ones where
nothing or None is specified. Returns both in lists.
Very useful to loop over several data and weights.
Parameters
----------
data_to_shape : (root_dict, numpy.array, pandas.DataFrame)
        The data to which the weights are applied. Usually of 2-D shape.
weights : (list, numpy.array, pandas.DataFrame, None)
The weights to be reshaped
*Best format* :
[array(weights),array(weights), None, array(weights),...]
*None* can be used if no special weights are specified.
        If weights contains fewer "weight-containing array-like objects" than
        data_to_shape does, the difference will be filled with *1*
Return
------
out : list(pandas.DataFrame(data), pandas.DataFrame(data),...)
Return a list containing data
out : list(numpy.array(weight), numpy.array(weight),...)
Return a list with the weights, converted and filled.
"""
    # convert the data
if not isinstance(data_to_shape, list):
data_to_shape = [data_to_shape]
data_to_shape = list(map(to_pandas, data_to_shape))
# convert the weights
if not isinstance(weights, list):
weights = [weights]
if weights[0] is not None:
if len(weights[0]) == 1:
weights = [weights]
# convert to pandas
assert isinstance(weights, list), "weights could not be converted to list"
for data_id, data in enumerate(data_to_shape):
if data_id >= len(weights):
weights.append(None)
if weights[data_id] is None:
weights[data_id] = np.array([1] * len(data))
weights[data_id] = to_pandas(weights[data_id]).squeeze().values
return data_to_shape, weights
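# Hedged usage sketch (added illustration, toy data): the second dataset has no
# weights given, so format_data_weights pads the weights list with ones.
def _demo_format_data_weights():
    data_sets = [np.random.rand(5, 2), np.random.rand(5, 2)]
    weights = [np.arange(1, 6)]  # weights only for the first dataset
    data_sets, weights = format_data_weights(data_sets, weights)
    for d, w in zip(data_sets, weights):
        print(d.shape, w)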
def obj_to_string(objects, separator=None):
"""Return a string containing all objects as strings, separated by the separator.
    Useful for automatic conversion of different types. The following objects
will automatically be converted:
- None will be omitted
Parameters
----------
objects : any object or list(obj, obj, ...) with a string representation
The objects will be converted to a string and concatenated, separated
by the separator.
separator : str
The separator between the objects. Default is " - ".
"""
objects = dev_tool.entries_to_str(objects)
if isinstance(objects, str): # no need to change things
return objects
separator = " - " if separator is None else separator
assert isinstance(separator, str), "Separator not a str"
objects = to_list(objects)
objects = [str(obj) for obj in objects if obj not in (None, "")] # remove Nones
string_out = ""
for word in objects:
string_out += word + separator if word != objects[-1] else word
return string_out
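# Hedged usage sketch (added illustration): Nones are dropped, everything else is
# converted to str and joined; assumes dev_tool.entries_to_str passes such lists through.
def _demo_obj_to_string():
    return obj_to_string(["cut", None, 42, "plot"], separator=" | ")  # -> "cut | 42 | plot"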
def is_root(data_to_check):
"""Check whether a given data is a root file. Needs dicts to be True."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, dict):
path_name = data_to_check.get("filenames")
# assert isinstance(path_name, str), ("'filenames' of the dictionary " +
# str(data_to_check) + "is not a string")
if path_name.endswith(meta_cfg.ROOT_DATATYPE):
flag = True
return flag
def is_list(data_to_check):
"""Check whether the given data is a list."""
flag = False
if isinstance(data_to_check, list):
flag = True
return flag
def is_ndarray(data_to_check):
"""Check whether a given data is an ndarray."""
flag = False
if isinstance(data_to_check, np.ndarray):
flag = True
return flag
def is_pickle(data_to_check):
"""Check if the file is a pickled file (checks the ending)."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, str):
if data_to_check.endswith(meta_cfg.PICKLE_DATATYPE):
flag = True
return flag
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
    - just iterating through the object will fail because it iterates through the
      characters of the string.
    - using list(obj) converts the tuple, leaves the list alone, but splits a string's
      characters into single elements of a new list.
    - using [obj] creates a list containing a string, but also wraps a list or a
      tuple in another list, which you did not want.
    Solution: use to_list(obj), which creates a new list in case the object is
    a single object (a string is a single object in this sense) or converts
    to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
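# Hedged illustration of the docstring above (added, not original code): a bare
# string is wrapped instead of being split into characters, containers become lists.
def _demo_to_list():
    assert to_list("a_branch") == ["a_branch"]   # not ['a', '_', 'b', ...]
    assert to_list(("a", "b")) == ["a", "b"]      # tuple converted to a list
    assert to_list(["a", "b"]) == ["a", "b"]      # list kept as a list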
def to_ndarray(data_in, float_array=False):
"""Convert data to numpy array (containing only floats).
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
import uproot
if is_root(data_in):
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
branches = to_list(data_in["branches"])
loaded = tree.arrays(branches, library="np")
loaded = np.stack([loaded[branch] for branch in branches])
if len(branches) == 1:
loaded = loaded[0]
data_in = loaded
# change numpy.void to normal floats
if isinstance(data_in, (pd.Series, pd.DataFrame)):
test_sample = data_in.iloc[0]
else:
test_sample = data_in[0]
if isinstance(test_sample, np.void):
data_in = np.array([val[0] for val in data_in])
if isinstance(data_in, (np.recarray, np.ndarray)):
data_in = data_in.tolist()
if is_list(data_in) or isinstance(data_in, pd.Series):
data_in = np.array(data_in)
if not isinstance(data_in[0], (int, float, str, bool)):
if float_array:
iter_data = copy.deepcopy(data_in)
# HACK
data_in = np.ndarray(shape=len(data_in), dtype=data_in.dtype)
# HACK END
for i, element in enumerate(iter_data):
if not isinstance(element, (int, float, str, bool)):
# does that work or should we iterate over copy?
try:
element_len = len(element)
except TypeError:
element_len = 1
if element_len > 1:
data_in[i] = to_ndarray(element)
float_array = False
elif element_len == 1:
data_in[i] = float(element)
warnings.warn("Could not force float array")
if float_array:
data_in = np.asfarray(data_in)
assert is_ndarray(data_in), "Error, could not convert data to numpy array"
return data_in
def to_pandas_old(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
# TODO: generalize
root_index_name = "__index__"
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
root_index = None
import root_numpy
if root_index_name in root_numpy.list_branches(
filename=data_in["filenames"], treename=data_in.get("treename")
):
root_index = root_numpy.root2array(
filenames=data_in["filenames"],
treename=data_in.get("treename"),
selection=data_in.get("selection"),
branches=root_index_name,
)
data_in = root_numpy.root2array(**data_in) # why **? it's a root dict
if is_list(data_in):
data_in = np.array(data_in)
if is_ndarray(data_in):
if (isinstance(columns, (list, tuple)) and len(columns) == 1) or isinstance(
columns, str
):
data_in = to_ndarray(data_in)
data_in = pd.DataFrame(data_in, columns=columns, index=root_index)
if index is not None:
data_in = data_in.loc[index]
elif isinstance(data_in, pd.DataFrame):
pass
else:
raise TypeError("Could not convert data to pandas. Data: " + data_in)
return data_in
def to_pandas(data_in, index=None, columns=None):
"""Convert data from numpy or root to pandas dataframe.
Convert data safely to pandas, whatever the format is.
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
data_in = dev_tool.entries_to_str(data_in)
if is_root(data_in):
if columns is None:
columns = data_in["branches"]
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
if "__index__" in tree.keys(): # legacy, we can also convert this
return to_pandas_old(data_in=data_in, index=index, columns=columns)
branches = to_list(columns)
loaded = tree.arrays(branches, library="pd")
if index is not None:
loaded = loaded.loc[index]
return loaded
else:
# HACK START
return to_pandas_old(data_in=data_in, index=index, columns=columns)
# HACK END
# from root_pandas import read_root
#
# root_pandas_numpy_map = dict(filenames='paths', treename='key', branches='columns',
# selection='where')
#
# if is_root(data_in):
# is_root2array = False
# for key, val in copy.deepcopy(list(data_in.items())):
# if key in root_pandas_numpy_map:
# is_root2array = True
# del data_in[key]
# data_in[root_pandas_numpy_map[key]] = val
# data_in['columns'] = to_list(data_in['columns'])
# if is_root2array:
# data_in['columns'] = ['noexpand:'+col for col in data_in['columns'] if not col.startswith('noexpand:')]
# remove the noexpand:
# data_in = read_root(**data_in) # why **? it's a root dict
# if is_list(data_in):
# data_in = np.array(data_in)
# if is_ndarray(data_in):
# if ((isinstance(columns, (list, tuple)) and len(columns) == 1) or
# isinstance(columns, string)):
#
# data_in = to_ndarray(data_in)
# data_in = pd.DataFrame(data_in, columns=columns)
# if index is not None:
# data_in = data_in.loc[index]
# elif isinstance(data_in, pd.DataFrame):
# pass
# else:
# raise TypeError("Could not convert data to pandas. Data: " + data_in)
# return data_in
def adv_return(return_value, save_name=None):
"""Save the value if save_name specified, otherwise just return input.
Can be wrapped around the return value. Without any arguments, the return
of your function will be exactly the same. With arguments, the value can
be saved (**pickled**) before it is returned.
Parameters
----------
return_value : any python object
The python object which should be pickled.
save_name : str, None
| The (file-)name for the pickled file. File-extension will be added \
automatically if specified in *raredecay.meta_config*.
| If *None* is passed, the object won't be pickled.
Return
------
out : python object
Return return_value without changes.
**Usage**:
Instead of a simple return statement
>>> return my_variable/my_object
one can use the **completely equivalent** statement
>>> return adv_return(my_variable/my_object)
If the return value should be saved in addition to be returned, use
>>> return adv_return(my_variable/my_object, save_name='my_object.pickle')
(*the .pickle ending is not required but added automatically if omitted*)
which returns the value and saves it.
"""
save_name = dev_tool.entries_to_str(save_name)
if save_name not in (None, False):
if isinstance(save_name, str):
save_name = meta_cfg.PICKLE_PATH + save_name
if not is_pickle(save_name):
save_name += "." + meta_cfg.PICKLE_DATATYPE
with open(str(save_name), "wb") as f:
pickle.dump(return_value, f, meta_cfg.PICKLE_PROTOCOL)
print(str(return_value) + " pickled to " + save_name)
else:
pass
# HACK how to solve logger problem?
# logger.error("Could not pickle data, name for file (" +
# str(save_name) + ") is not a string!" +
# "\n Therefore, the following data was only returned" +
# " but not saved! \n Data:" + str(return_value))
return return_value
def try_unpickle(file_to_unpickle, use_metapath_bkwcomp=False):
"""Try to unpickle a file and return, otherwise just return input."""
file_to_unpickle = dev_tool.entries_to_str(file_to_unpickle)
if is_pickle(file_to_unpickle):
extra_path = meta_cfg.PICKLE_PATH if use_metapath_bkwcomp else ""
with open(extra_path + file_to_unpickle, "rb") as f:
file_to_unpickle = pickle.load(f)
return file_to_unpickle
|
[
"root_numpy.array2root",
"copy.deepcopy",
"pickle.dump",
"pickle.load",
"numpy.logical_or",
"os.path.isfile",
"numpy.stack",
"numpy.array",
"numpy.asfarray",
"root_numpy.root2array",
"numpy.core.records.fromarrays",
"uproot.open",
"rootpy.io.root_open",
"pandas.DataFrame",
"numpy.percentile",
"warnings.warn"
] |
[((1340, 1378), 'numpy.percentile', 'np.percentile', (['signal_data', 'percentile'], {}), '(signal_data, percentile)\n', (1353, 1378), True, 'import numpy as np\n'), ((3382, 3406), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3396, 3406), False, 'import os\n'), ((1419, 1476), 'numpy.logical_or', 'np.logical_or', (['(bkg_data < lower_cut)', '(bkg_data > upper_cut)'], {}), '(bkg_data < lower_cut, bkg_data > upper_cut)\n', (1432, 1476), True, 'import numpy as np\n'), ((3841, 3900), 'numpy.core.records.fromarrays', 'np.core.records.fromarrays', (['[new_branch]'], {'names': 'branch_name'}), '([new_branch], names=branch_name)\n', (3867, 3900), True, 'import numpy as np\n'), ((3909, 3983), 'root_numpy.array2root', 'array2root', ([], {'arr': 'arr', 'filename': 'filename', 'treename': 'treename', 'mode': 'write_mode'}), '(arr=arr, filename=filename, treename=treename, mode=write_mode)\n', (3919, 3983), False, 'from root_numpy import array2root\n'), ((9929, 9978), 'numpy.stack', 'np.stack', (['[loaded[branch] for branch in branches]'], {}), '([loaded[branch] for branch in branches])\n', (9937, 9978), True, 'import numpy as np\n'), ((10302, 10339), 'numpy.array', 'np.array', (['[val[0] for val in data_in]'], {}), '([val[0] for val in data_in])\n', (10310, 10339), True, 'import numpy as np\n'), ((10507, 10524), 'numpy.array', 'np.array', (['data_in'], {}), '(data_in)\n', (10515, 10524), True, 'import numpy as np\n'), ((11449, 11469), 'numpy.asfarray', 'np.asfarray', (['data_in'], {}), '(data_in)\n', (11460, 11469), True, 'import numpy as np\n'), ((12447, 12479), 'root_numpy.root2array', 'root_numpy.root2array', ([], {}), '(**data_in)\n', (12468, 12479), False, 'import root_numpy\n'), ((12552, 12569), 'numpy.array', 'np.array', (['data_in'], {}), '(data_in)\n', (12560, 12569), True, 'import numpy as np\n'), ((12779, 12835), 'pandas.DataFrame', 'pd.DataFrame', (['data_in'], {'columns': 'columns', 'index': 'root_index'}), '(data_in, columns=columns, index=root_index)\n', (12791, 12835), True, 'import pandas as pd\n'), ((3421, 3450), 'rootpy.io.root_open', 'root_open', (['filename'], {'mode': '"""a"""'}), "(filename, mode='a')\n", (3430, 3450), False, 'from rootpy.io import root_open\n'), ((9715, 9748), 'uproot.open', 'uproot.open', (["data_in['filenames']"], {}), "(data_in['filenames'])\n", (9726, 9748), False, 'import uproot\n'), ((10633, 10655), 'copy.deepcopy', 'copy.deepcopy', (['data_in'], {}), '(data_in)\n', (10646, 10655), False, 'import copy\n'), ((11365, 11409), 'warnings.warn', 'warnings.warn', (['"""Could not force float array"""'], {}), "('Could not force float array')\n", (11378, 11409), False, 'import warnings\n'), ((13503, 13536), 'uproot.open', 'uproot.open', (["data_in['filenames']"], {}), "(data_in['filenames'])\n", (13514, 13536), False, 'import uproot\n'), ((18030, 18044), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (18041, 18044), False, 'import pickle\n'), ((17057, 17111), 'pickle.dump', 'pickle.dump', (['return_value', 'f', 'meta_cfg.PICKLE_PROTOCOL'], {}), '(return_value, f, meta_cfg.PICKLE_PROTOCOL)\n', (17068, 17111), False, 'import pickle\n')]
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.mxnet.mx_reshape_to_reshape import MXReshapeToReshape
from openvino.tools.mo.ops.Reverse import Reverse
from openvino.tools.mo.ops.mxreshape import MXReshape
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.common.replacement import FrontReplacementOp
from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.reshape import Reshape
from openvino.tools.mo.ops.shape import Shape
from openvino.tools.mo.ops.squeeze import Squeeze
from openvino.tools.mo.ops.unsqueeze import Unsqueeze
class MXReshapeReverse(FrontReplacementOp):
"""
    If a reshape layer has reverse=True, the special values are inferred from right to left.
    The replacer simulates this behaviour: the replaced subgraph reverses the input shape and
    the special dims, applies the reshape, and then reverses the result back.
    Resulting subgraph: reshape(reverse=True) -> reverse -> reshape(reverse=False) -> reverse.
"""
op = 'MXReshape'
enabled = True
def run_before(self):
return [MXReshapeToReshape]
def replace_sub_graph(self, graph: Graph, match: dict):
mxreshape = match['op']
if not mxreshape.reverse:
return
shape_node = Shape(graph, dict(name=mxreshape.id + '/Shape')).create_node()
forward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardUnsqueeze'))
forward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/ForwardReverse', axis=1)).create_node()
forward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/ForwardSqueeze'))
reshape_node = Reshape(graph, dict(name=mxreshape.id + '/Reshape')).create_node()
shape_node.in_port(0).connect(mxreshape.in_port(0).get_source())
mxreshape.in_port(0).get_connection().set_destination(reshape_node.in_port(0))
forward_reverse_unsqueeze_node.in_port(0).connect(shape_node.out_port(0))
forward_reverse_node.in_port(0).connect(forward_reverse_unsqueeze_node.out_port(0))
forward_reverse_squeeze_node.in_port(0).connect(forward_reverse_node.out_port(0))
reshape_node.in_port(1).connect(forward_reverse_squeeze_node.out_port(0))
reshape_shape_node = create_op_node_with_second_input(graph, Reshape, int64_array(np.flip(mxreshape.dim, 0)),
dict(name=str(mxreshape.id) + '/ReshapeShape'))
if np.sum(np.in1d([-2, -3, -4], mxreshape.dim), axis=0):
reshape_shape_node = MXReshape(graph, dict(name=mxreshape.id + '/Reshape',
dim=int64_array(np.flip(mxreshape.dim, 0)))).create_node()
reshape_shape_node.in_port(0).connect(reshape_node.out_port(0))
backward_shape_node = Shape(graph, dict(name=mxreshape.id + '/BackwardShape')).create_node()
backward_reverse_unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardUnsqueeze'))
backward_reverse_node = Reverse(graph, dict(name=mxreshape.id + '/BackwardReverse', axis=1)).create_node()
backward_reverse_squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]),
dict(name=str(mxreshape.id) + '/BackwardSqueeze'))
backward_reshape_node = Reshape(graph, dict(name=mxreshape.id + '/BackwardReshape')).create_node()
backward_shape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reverse_unsqueeze_node.in_port(0).connect(backward_shape_node.out_port(0))
backward_reverse_node.in_port(0).connect(backward_reverse_unsqueeze_node.out_port(0))
backward_reverse_squeeze_node.in_port(0).connect(backward_reverse_node.out_port(0))
backward_reshape_node.in_port(0).connect(reshape_shape_node.out_port(0))
backward_reshape_node.in_port(1).connect(backward_reverse_squeeze_node.out_port(0))
mxreshape.out_port(0).get_connection().set_source(backward_reshape_node.out_port(0))
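# Hedged illustration in plain numpy (added, not part of the replacer): what
# reverse=True shape inference means. Only the special values 0 (copy input dim)
# and -1 (infer) are handled here; -2/-3/-4 still need MXReshape as above.
def _simulate_reverse_reshape(data, dim):
    rev_shape = list(data.shape)[::-1]
    rev_dim = list(dim)[::-1]
    # resolve special values right-to-left by working on the reversed arrays
    resolved = [rev_shape[i] if d == 0 else d for i, d in enumerate(rev_dim)]
    if -1 in resolved:
        known = int(np.prod([d for d in resolved if d != -1]))
        resolved[resolved.index(-1)] = data.size // known
    # flip the resolved shape back before the actual reshape
    return data.reshape(tuple(resolved[::-1]))
    # e.g. a (10, 5, 4) input with dim=(-1, 0) becomes (50, 4) with reverse=True,
    # while reverse=False would give (40, 5).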
|
[
"numpy.in1d",
"numpy.flip",
"openvino.tools.mo.front.common.partial_infer.utils.int64_array"
] |
[((1606, 1622), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[0]'], {}), '([0])\n', (1617, 1622), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((1952, 1968), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[0]'], {}), '([0])\n', (1963, 1968), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((2936, 2972), 'numpy.in1d', 'np.in1d', (['[-2, -3, -4]', 'mxreshape.dim'], {}), '([-2, -3, -4], mxreshape.dim)\n', (2943, 2972), True, 'import numpy as np\n'), ((3434, 3450), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[0]'], {}), '([0])\n', (3445, 3450), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((3784, 3800), 'openvino.tools.mo.front.common.partial_infer.utils.int64_array', 'int64_array', (['[0]'], {}), '([0])\n', (3795, 3800), False, 'from openvino.tools.mo.front.common.partial_infer.utils import int64_array\n'), ((2780, 2805), 'numpy.flip', 'np.flip', (['mxreshape.dim', '(0)'], {}), '(mxreshape.dim, 0)\n', (2787, 2805), True, 'import numpy as np\n'), ((3123, 3148), 'numpy.flip', 'np.flip', (['mxreshape.dim', '(0)'], {}), '(mxreshape.dim, 0)\n', (3130, 3148), True, 'import numpy as np\n')]
|
# <NAME> (<EMAIL>)
from __future__ import division, print_function
from builtins import range
import numpy as np
import scipy.stats as ss
import mlpaper.constants as cc
import mlpaper.mlpaper as bt
import mlpaper.perf_curves as pc
from mlpaper.classification import DEFAULT_NGRID, curve_boot
from mlpaper.test_constants import FPR
from mlpaper.util import area, interp1d
_FPR = FPR / 3.0 # Divide by number of test funcs
def fail_check_stat(fail, runs, expect_p_fail, fpr):
pvals_2side = [ss.binom_test(ff, runs, expect_p_fail) for ff in fail]
pvals_1side = [ss.binom_test(ff, runs, expect_p_fail, alternative="greater") for ff in fail]
    # Note that we are not doing a multiple-comparison correction between the
    # two-sided and one-sided tests.
print(fail)
print(pvals_2side)
assert np.min(pvals_2side) >= fpr / len(pvals_2side)
print(pvals_1side)
assert np.min(pvals_1side) >= fpr / len(pvals_1side)
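# Hedged usage note (added): e.g. fail_check_stat([4, 6], 100, 0.05, _FPR) checks that
# observing 4 and 6 failures in 100 runs is consistent with a nominal 5% failure rate;
# the binomial p-values only trip the asserts when a count is far from the expected 5.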
def test_boot(runs=100):
N = 201
confidence = 0.95
# Drawing more seeds than we need to be safe
seeds = np.nditer(np.random.randint(low=0, high=int(1e6), size=runs * 5))
def run_trial(y_true, y_score, y_score_ref, true_curve, curve_f, seed, x_grid=None):
epsilon = 1e-6
curve, _ = curve_f(y_true, y_score[:, 1])
auc, = area(*curve)
curve, _ = curve_f(y_true, y_score_ref[:, 1])
auc_ref, = area(*curve)
true_value, = area(*true_curve)
np.random.seed(seed)
(auc_, EB, pval), curve = curve_boot(
y_true, y_score, ref=true_value, curve_f=curve_f, confidence=confidence, x_grid=x_grid
)
true_curve_grid, = interp1d(curve[cc.XGRID].values, *true_curve)
assert auc_ == auc
fail_EB = np.abs(auc - true_value) > EB
# Could also test distn with 1-sided KS test but this easier for now
fail_P = pval < 1.0 - confidence
fail_curve = (true_curve_grid < curve[cc.LB].values - epsilon) | (
curve[cc.UB].values + epsilon < true_curve_grid
)
assert (x_grid is None) or np.all(curve[cc.XGRID].values == x_grid)
np.random.seed(seed)
(auc_, EB_, pval), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=False, x_grid=x_grid
)
assert auc_ == auc
assert EB_ == EB
# Could also test distn with 1-sided KS test but this easier for now
fail_P2 = pval < 1.0 - confidence
assert np.all(curve_.values == curve.values)
np.random.seed(seed)
(auc_, EB, pval_), curve_ = curve_boot(
y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=True, x_grid=x_grid
)
assert auc_ == auc
fail_EB2 = np.abs(auc - auc_ref) > EB
# Could also test distn with 1-sided KS test but this easier for now
assert pval_ == pval
assert np.all(curve_.values == curve.values)
return fail_EB, fail_P, fail_EB2, fail_P2, fail_curve
fail = [0] * 12
fail_curve_roc = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_ap = np.zeros(DEFAULT_NGRID, dtype=int)
fail_curve_prg = np.zeros(DEFAULT_NGRID, dtype=int)
for ii in range(runs):
mu = np.random.randn(2)
S = np.random.randn(2, 2)
S = np.dot(S, S.T)
# Coverage, esp at edges, is worse for imbalanced data. See issue #20.
p = 0.5
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 1.0]]), pc.LINEAR)
y_true = np.random.rand(N) <= p
y_score = np.random.multivariate_normal(mu, S, size=N)
if np.random.randn() <= 0.5: # resample to test dupes
idx = np.random.choice(N, size=N, replace=True)
y_score = y_score[idx, :]
y_score, y_score_ref = y_score.T
y_score = np.stack((np.zeros(N), y_score), axis=1)
y_score_ref = np.stack((np.zeros(N), y_score_ref), axis=1)
# Coverage doesn't hold at edges, hence [0.05, 0.95]. See issue #20.
x_grid = np.linspace(0.05, 0.95, DEFAULT_NGRID)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.roc_curve, next(seeds), x_grid
)
fail[0] += fail_EB
fail[1] += fail_P
fail[2] += fail_EB2
fail[3] += fail_P2
fail_curve_roc += fail_curve
true_curve = (np.array([[0.0, 1.0]]), np.array([[p, p]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.recall_precision_curve, next(seeds), x_grid
)
fail[4] += fail_EB
fail[5] += fail_P
fail[6] += fail_EB2
fail[7] += fail_P2
fail_curve_ap += fail_curve
x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID)
true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 0.0]]), pc.PREV)
fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial(
y_true, y_score, y_score_ref, true_curve, pc.prg_curve, next(seeds), x_grid
)
fail[8] += fail_EB
fail[9] += fail_P
fail[10] += fail_EB2
fail[11] += fail_P2
fail_curve_prg += fail_curve
sub_FPR = _FPR / 4.0
expect_p_fail = 1.0 - confidence
fail_check_stat(fail, runs, expect_p_fail, sub_FPR)
print("ROC curve")
fail_check_stat(fail_curve_roc, runs, expect_p_fail, sub_FPR)
print("RP curve")
fail_check_stat(fail_curve_ap, runs, expect_p_fail, sub_FPR)
print("PRG curve")
fail_check_stat(fail_curve_prg, runs, expect_p_fail, sub_FPR)
def test_boot_mean(runs=100):
N = 201
confidence = 0.95
fail = 0
for ii in range(runs):
mu = np.random.randn()
S = np.abs(np.random.randn())
x = mu + S * np.random.randn(N)
mu_est = np.mean(x)
EB = bt.boot_EB(x, confidence=0.95)
fail += np.abs(mu - mu_est) > EB
expect_p_fail = 1.0 - confidence
print("boot mean")
fail_check_stat([fail], runs, expect_p_fail, _FPR)
def test_boot_EB_and_test(runs=100):
"""Arguably this should do out to its own file since it tests bt core."""
mu = np.random.randn()
stdev = np.abs(np.random.randn())
N = 201
confidence = 0.95
def run_trial(x, true_value):
_, _, CI = bt._boot_EB_and_test(x, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI = (true_value < LB) or (UB < true_value)
_, pval, CI = bt._boot_EB_and_test(x - true_value, confidence=confidence, return_CI=True)
LB, UB = CI
fail_CI2 = (0 < LB) or (UB < 0)
fail_P = pval < 1.0 - confidence
return fail_CI, fail_CI2, fail_P
fail = [0] * 3
for ii in range(runs):
x = mu + stdev * np.random.randn(N)
fail_CI, fail_CI2, fail_P = run_trial(x, mu)
fail[0] += fail_CI
fail[1] += fail_CI2
fail[2] += fail_P
expect_p_fail = 1.0 - confidence
print("boot mean and test")
fail_check_stat(fail, runs, expect_p_fail, _FPR)
if __name__ == "__main__":
np.random.seed(56467)
test_boot()
test_boot_mean()
test_boot_EB_and_test()
print("passed")
|
[
"numpy.random.rand",
"mlpaper.util.area",
"numpy.array",
"builtins.range",
"numpy.mean",
"mlpaper.classification.curve_boot",
"scipy.stats.binom_test",
"mlpaper.util.interp1d",
"numpy.dot",
"numpy.linspace",
"numpy.random.seed",
"numpy.min",
"mlpaper.mlpaper.boot_EB",
"numpy.abs",
"numpy.random.multivariate_normal",
"numpy.random.choice",
"mlpaper.mlpaper._boot_EB_and_test",
"numpy.random.randn",
"numpy.zeros",
"numpy.all"
] |
[((3095, 3129), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3103, 3129), True, 'import numpy as np\n'), ((3150, 3184), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3158, 3184), True, 'import numpy as np\n'), ((3206, 3240), 'numpy.zeros', 'np.zeros', (['DEFAULT_NGRID'], {'dtype': 'int'}), '(DEFAULT_NGRID, dtype=int)\n', (3214, 3240), True, 'import numpy as np\n'), ((3255, 3266), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (3260, 3266), False, 'from builtins import range\n'), ((5793, 5804), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (5798, 5804), False, 'from builtins import range\n'), ((6271, 6288), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (6286, 6288), True, 'import numpy as np\n'), ((6831, 6842), 'builtins.range', 'range', (['runs'], {}), '(runs)\n', (6836, 6842), False, 'from builtins import range\n'), ((7178, 7199), 'numpy.random.seed', 'np.random.seed', (['(56467)'], {}), '(56467)\n', (7192, 7199), True, 'import numpy as np\n'), ((500, 538), 'scipy.stats.binom_test', 'ss.binom_test', (['ff', 'runs', 'expect_p_fail'], {}), '(ff, runs, expect_p_fail)\n', (513, 538), True, 'import scipy.stats as ss\n'), ((574, 635), 'scipy.stats.binom_test', 'ss.binom_test', (['ff', 'runs', 'expect_p_fail'], {'alternative': '"""greater"""'}), "(ff, runs, expect_p_fail, alternative='greater')\n", (587, 635), True, 'import scipy.stats as ss\n'), ((815, 834), 'numpy.min', 'np.min', (['pvals_2side'], {}), '(pvals_2side)\n', (821, 834), True, 'import numpy as np\n'), ((895, 914), 'numpy.min', 'np.min', (['pvals_1side'], {}), '(pvals_1side)\n', (901, 914), True, 'import numpy as np\n'), ((1309, 1321), 'mlpaper.util.area', 'area', (['*curve'], {}), '(*curve)\n', (1313, 1321), False, 'from mlpaper.util import area, interp1d\n'), ((1395, 1407), 'mlpaper.util.area', 'area', (['*curve'], {}), '(*curve)\n', (1399, 1407), False, 'from mlpaper.util import area, interp1d\n'), ((1431, 1448), 'mlpaper.util.area', 'area', (['*true_curve'], {}), '(*true_curve)\n', (1435, 1448), False, 'from mlpaper.util import area, interp1d\n'), ((1458, 1478), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1472, 1478), True, 'import numpy as np\n'), ((1513, 1616), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'true_value', 'curve_f': 'curve_f', 'confidence': 'confidence', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=true_value, curve_f=curve_f, confidence=\n confidence, x_grid=x_grid)\n', (1523, 1616), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((1661, 1706), 'mlpaper.util.interp1d', 'interp1d', (['curve[cc.XGRID].values', '*true_curve'], {}), '(curve[cc.XGRID].values, *true_curve)\n', (1669, 1706), False, 'from mlpaper.util import area, interp1d\n'), ((2130, 2150), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2144, 2150), True, 'import numpy as np\n'), ((2187, 2310), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'y_score_ref', 'curve_f': 'curve_f', 'confidence': 'confidence', 'pairwise_CI': '(False)', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=\n confidence, pairwise_CI=False, x_grid=x_grid)\n', (2197, 2310), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((2514, 2551), 'numpy.all', 'np.all', (['(curve_.values == curve.values)'], {}), '(curve_.values == curve.values)\n', 
(2520, 2551), True, 'import numpy as np\n'), ((2561, 2581), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2575, 2581), True, 'import numpy as np\n'), ((2618, 2740), 'mlpaper.classification.curve_boot', 'curve_boot', (['y_true', 'y_score'], {'ref': 'y_score_ref', 'curve_f': 'curve_f', 'confidence': 'confidence', 'pairwise_CI': '(True)', 'x_grid': 'x_grid'}), '(y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=\n confidence, pairwise_CI=True, x_grid=x_grid)\n', (2628, 2740), False, 'from mlpaper.classification import DEFAULT_NGRID, curve_boot\n'), ((2952, 2989), 'numpy.all', 'np.all', (['(curve_.values == curve.values)'], {}), '(curve_.values == curve.values)\n', (2958, 2989), True, 'import numpy as np\n'), ((3281, 3299), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), '(2)\n', (3296, 3299), True, 'import numpy as np\n'), ((3312, 3333), 'numpy.random.randn', 'np.random.randn', (['(2)', '(2)'], {}), '(2, 2)\n', (3327, 3333), True, 'import numpy as np\n'), ((3346, 3360), 'numpy.dot', 'np.dot', (['S', 'S.T'], {}), '(S, S.T)\n', (3352, 3360), True, 'import numpy as np\n'), ((3474, 3511), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.99)', 'DEFAULT_NGRID'], {}), '(0.0, 0.99, DEFAULT_NGRID)\n', (3485, 3511), True, 'import numpy as np\n'), ((3651, 3695), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'S'], {'size': 'N'}), '(mu, S, size=N)\n', (3680, 3695), True, 'import numpy as np\n'), ((4119, 4157), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.95)', 'DEFAULT_NGRID'], {}), '(0.05, 0.95, DEFAULT_NGRID)\n', (4130, 4157), True, 'import numpy as np\n'), ((4886, 4923), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.99)', 'DEFAULT_NGRID'], {}), '(0.0, 0.99, DEFAULT_NGRID)\n', (4897, 4923), True, 'import numpy as np\n'), ((5819, 5836), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5834, 5836), True, 'import numpy as np\n'), ((5933, 5943), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (5940, 5943), True, 'import numpy as np\n'), ((5957, 5987), 'mlpaper.mlpaper.boot_EB', 'bt.boot_EB', (['x'], {'confidence': '(0.95)'}), '(x, confidence=0.95)\n', (5967, 5987), True, 'import mlpaper.mlpaper as bt\n'), ((6308, 6325), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (6323, 6325), True, 'import numpy as np\n'), ((6416, 6478), 'mlpaper.mlpaper._boot_EB_and_test', 'bt._boot_EB_and_test', (['x'], {'confidence': 'confidence', 'return_CI': '(True)'}), '(x, confidence=confidence, return_CI=True)\n', (6436, 6478), True, 'import mlpaper.mlpaper as bt\n'), ((6579, 6654), 'mlpaper.mlpaper._boot_EB_and_test', 'bt._boot_EB_and_test', (['(x - true_value)'], {'confidence': 'confidence', 'return_CI': '(True)'}), '(x - true_value, confidence=confidence, return_CI=True)\n', (6599, 6654), True, 'import mlpaper.mlpaper as bt\n'), ((1752, 1776), 'numpy.abs', 'np.abs', (['(auc - true_value)'], {}), '(auc - true_value)\n', (1758, 1776), True, 'import numpy as np\n'), ((2080, 2120), 'numpy.all', 'np.all', (['(curve[cc.XGRID].values == x_grid)'], {}), '(curve[cc.XGRID].values == x_grid)\n', (2086, 2120), True, 'import numpy as np\n'), ((2804, 2825), 'numpy.abs', 'np.abs', (['(auc - auc_ref)'], {}), '(auc - auc_ref)\n', (2810, 2825), True, 'import numpy as np\n'), ((3534, 3556), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (3542, 3556), True, 'import numpy as np\n'), ((3558, 3580), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (3566, 3580), True, 'import numpy as np\n'), ((3610, 
3627), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (3624, 3627), True, 'import numpy as np\n'), ((3707, 3724), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (3722, 3724), True, 'import numpy as np\n'), ((3777, 3818), 'numpy.random.choice', 'np.random.choice', (['N'], {'size': 'N', 'replace': '(True)'}), '(N, size=N, replace=True)\n', (3793, 3818), True, 'import numpy as np\n'), ((4492, 4514), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (4500, 4514), True, 'import numpy as np\n'), ((4516, 4534), 'numpy.array', 'np.array', (['[[p, p]]'], {}), '([[p, p]])\n', (4524, 4534), True, 'import numpy as np\n'), ((4946, 4968), 'numpy.array', 'np.array', (['[[0.0, 1.0]]'], {}), '([[0.0, 1.0]])\n', (4954, 4968), True, 'import numpy as np\n'), ((4970, 4992), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (4978, 4992), True, 'import numpy as np\n'), ((5856, 5873), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (5871, 5873), True, 'import numpy as np\n'), ((6005, 6024), 'numpy.abs', 'np.abs', (['(mu - mu_est)'], {}), '(mu - mu_est)\n', (6011, 6024), True, 'import numpy as np\n'), ((3926, 3937), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3934, 3937), True, 'import numpy as np\n'), ((3989, 4000), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (3997, 4000), True, 'import numpy as np\n'), ((5896, 5914), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (5911, 5914), True, 'import numpy as np\n'), ((6869, 6887), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (6884, 6887), True, 'import numpy as np\n')]
|
from typing import Any, Dict
import numpy as np
import pandas as pd
import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
class TestTimeSeriesDailyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsds = tss.TimeSeriesDailyStudy(ts)
tsds.execute()
class TestTimeSeriesMinutelyStudy(hut.TestCase):
def test_usual_case(self) -> None:
idx = pd.date_range("2018-12-31", "2019-01-31", freq="5T")
vals = np.random.randn(len(idx))
ts = pd.Series(vals, index=idx)
tsms = tss.TimeSeriesMinutelyStudy(ts, freq_name="5 minutes")
tsms.execute()
class TestMapDictToDataframeTest1(hut.TestCase):
def test1(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict, functions=stat_funcs
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test2(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
add_prefix=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
def test3(self) -> None:
stat_funcs = {
"norm_": stats.apply_normality_test,
"adf_": stats.apply_adf_test,
"kpss_": stats.apply_kpss_test,
}
result_dict = self._get_dict_of_series(1)
actual = tss.map_dict_to_dataframe(
dict_=result_dict,
functions=stat_funcs,
progress_bar=False,
)
actual_string = hut.convert_df_to_string(actual, index=True)
self.check_string(actual_string)
@staticmethod
def _get_series(seed: int) -> pd.Series:
arparams = np.array([0.75, -0.25])
maparams = np.array([0.65, 0.35])
arma_process = sig_gen.ArmaProcess(arparams, maparams)
date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"}
series = arma_process.generate_sample(
date_range_kwargs=date_range, seed=seed
)
return series
def _get_dict_of_series(self, seed: int) -> Dict[Any, pd.Series]:
n_items = 15
test_keys = ["test_key_" + str(x) for x in range(n_items)]
result_dict = {key: self._get_series(seed) for key in test_keys}
return result_dict
|
[
"pandas.Series",
"core.timeseries_study.TimeSeriesDailyStudy",
"core.timeseries_study.map_dict_to_dataframe",
"core.artificial_signal_generators.ArmaProcess",
"numpy.array",
"helpers.unit_test.convert_df_to_string",
"pandas.date_range",
"core.timeseries_study.TimeSeriesMinutelyStudy"
] |
[((323, 364), 'pandas.date_range', 'pd.date_range', (['"""2018-12-31"""', '"""2019-01-31"""'], {}), "('2018-12-31', '2019-01-31')\n", (336, 364), True, 'import pandas as pd\n'), ((419, 445), 'pandas.Series', 'pd.Series', (['vals'], {'index': 'idx'}), '(vals, index=idx)\n', (428, 445), True, 'import pandas as pd\n'), ((461, 489), 'core.timeseries_study.TimeSeriesDailyStudy', 'tss.TimeSeriesDailyStudy', (['ts'], {}), '(ts)\n', (485, 489), True, 'import core.timeseries_study as tss\n'), ((617, 669), 'pandas.date_range', 'pd.date_range', (['"""2018-12-31"""', '"""2019-01-31"""'], {'freq': '"""5T"""'}), "('2018-12-31', '2019-01-31', freq='5T')\n", (630, 669), True, 'import pandas as pd\n'), ((724, 750), 'pandas.Series', 'pd.Series', (['vals'], {'index': 'idx'}), '(vals, index=idx)\n', (733, 750), True, 'import pandas as pd\n'), ((766, 820), 'core.timeseries_study.TimeSeriesMinutelyStudy', 'tss.TimeSeriesMinutelyStudy', (['ts'], {'freq_name': '"""5 minutes"""'}), "(ts, freq_name='5 minutes')\n", (793, 820), True, 'import core.timeseries_study as tss\n'), ((1159, 1225), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs'}), '(dict_=result_dict, functions=stat_funcs)\n', (1184, 1225), True, 'import core.timeseries_study as tss\n'), ((1272, 1316), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (1296, 1316), True, 'import helpers.unit_test as hut\n'), ((1623, 1711), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs', 'add_prefix': '(False)'}), '(dict_=result_dict, functions=stat_funcs,\n add_prefix=False)\n', (1648, 1711), True, 'import core.timeseries_study as tss\n'), ((1779, 1823), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (1803, 1823), True, 'import helpers.unit_test as hut\n'), ((2130, 2220), 'core.timeseries_study.map_dict_to_dataframe', 'tss.map_dict_to_dataframe', ([], {'dict_': 'result_dict', 'functions': 'stat_funcs', 'progress_bar': '(False)'}), '(dict_=result_dict, functions=stat_funcs,\n progress_bar=False)\n', (2155, 2220), True, 'import core.timeseries_study as tss\n'), ((2288, 2332), 'helpers.unit_test.convert_df_to_string', 'hut.convert_df_to_string', (['actual'], {'index': '(True)'}), '(actual, index=True)\n', (2312, 2332), True, 'import helpers.unit_test as hut\n'), ((2457, 2480), 'numpy.array', 'np.array', (['[0.75, -0.25]'], {}), '([0.75, -0.25])\n', (2465, 2480), True, 'import numpy as np\n'), ((2500, 2522), 'numpy.array', 'np.array', (['[0.65, 0.35]'], {}), '([0.65, 0.35])\n', (2508, 2522), True, 'import numpy as np\n'), ((2546, 2585), 'core.artificial_signal_generators.ArmaProcess', 'sig_gen.ArmaProcess', (['arparams', 'maparams'], {}), '(arparams, maparams)\n', (2565, 2585), True, 'import core.artificial_signal_generators as sig_gen\n')]
|
import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd
if __name__ == "__main__":
gen=gs.GeneralStats()
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
print("data = ", data)
print("data1 = ", data1)
res=gen.average(data,rowvar=True)
res1=gen.average(data1,rowvar=True)
print("data平均值 = ",res)
print("data1平均值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.median(data,rowvar=True)
res1=gen.median(data1,rowvar=True)
print("data中位值 = ",res)
print("data1中位值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.mode(data,rowvar=True)
res1=gen.mode(data1,rowvar=True)
print("data众数值 = ",res)
print("data1众数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
    res=gen.quantile(data,0.5,rowvar=True,interpolation='lower')  # if the element count is even, the 0.5 quantile with interpolation='midpoint' equals the median
    res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower')  # if the element count is odd, the 0.5 quantile with interpolation='lower' equals the median
    print("data 0.5 quantile = ",res)
    print("data1 0.5 quantile = ",res1)
res=gen.quantile(data,0.25,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower')
print("data 0.25分位数值s = ",res)
print("data1 0.25分位数值 = ",res1)
res=gen.quantile(data,0.75,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower')
print("data 0.75分位数值 = ",res)
print("data1 0.75分位数值 = ",res1)
res=gen.quantile(data,1.0,rowvar=True,interpolation='lower')
res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower')
print("data 1.0分位数值 = ",res)
print("data1 1.0分位数值 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.range(data,rowvar=True)
res1=gen.range(data1,rowvar=True)
print("data极差 = ",res)
print("data1极差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.variance(data,rowvar=True)
res1=gen.variance(data1,rowvar=True)
print("data方差 = ",res)
print("data1方差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.standard_dev(data,rowvar=True)
res1=gen.standard_dev(data1,rowvar=True)
print("data标准差 = ",res)
print("data1标准差 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([1,2,3,4,5])
res=gen.skewness(data,rowvar=True)
res1=gen.skewness(data1,rowvar=True)
print("data偏度 = ",res)
print("data1偏度 = ",res1)
res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])])
print("使用scipy skew方法验证的data偏度 = ",res)
res1=np.array(skew(data1))
print("使用scipy skew方法验证的data1偏度 = ",res1)
data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]])
data1=np.array([53, 61, 49, 66, 78, 47])
res=gen.kurtosis(data,rowvar=True)
res1=gen.kurtosis(data1,rowvar=True)
print("data峰度 = ",res)
print("data1峰度 = ",res1)
data_0=pd.Series(data[0])
data_1=pd.Series(data[1])
data_2=pd.Series(data[2])
data_3=pd.Series(data[3])
print("使用pandas kurt方法验证的data峰度 = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()])
data1=pd.Series(data1)
print("使用pandas kurt方法验证的data1峰度 = ",data1.kurt())
|
[
"GeneralStats.GeneralStats",
"numpy.array",
"pandas.Series",
"scipy.stats.skew"
] |
[((178, 195), 'GeneralStats.GeneralStats', 'gs.GeneralStats', ([], {}), '()\n', (193, 195), True, 'import GeneralStats as gs\n'), ((208, 286), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (216, 286), True, 'import numpy as np\n'), ((295, 320), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (303, 320), True, 'import numpy as np\n'), ((531, 609), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (539, 609), True, 'import numpy as np\n'), ((618, 643), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (626, 643), True, 'import numpy as np\n'), ((790, 868), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (798, 868), True, 'import numpy as np\n'), ((877, 902), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (885, 902), True, 'import numpy as np\n'), ((1045, 1123), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (1053, 1123), True, 'import numpy as np\n'), ((1132, 1157), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1140, 1157), True, 'import numpy as np\n'), ((2074, 2152), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2082, 2152), True, 'import numpy as np\n'), ((2161, 2186), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2169, 2186), True, 'import numpy as np\n'), ((2329, 2407), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2337, 2407), True, 'import numpy as np\n'), ((2416, 2441), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2424, 2441), True, 'import numpy as np\n'), ((2590, 2668), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2598, 2668), True, 'import numpy as np\n'), ((2677, 2702), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2685, 2702), True, 'import numpy as np\n'), ((2861, 2939), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (2869, 2939), True, 'import numpy as np\n'), ((2948, 2973), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2956, 2973), True, 'import numpy as np\n'), ((3323, 3401), 'numpy.array', 'np.array', (['[[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]]'], {}), '([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])\n', (3331, 3401), True, 'import numpy as np\n'), ((3410, 3444), 'numpy.array', 'np.array', (['[53, 61, 49, 66, 78, 47]'], {}), '([53, 61, 49, 66, 78, 47])\n', (3418, 3444), True, 'import numpy as np\n'), ((3597, 
3615), 'pandas.Series', 'pd.Series', (['data[0]'], {}), '(data[0])\n', (3606, 3615), True, 'import pandas as pd\n'), ((3628, 3646), 'pandas.Series', 'pd.Series', (['data[1]'], {}), '(data[1])\n', (3637, 3646), True, 'import pandas as pd\n'), ((3659, 3677), 'pandas.Series', 'pd.Series', (['data[2]'], {}), '(data[2])\n', (3668, 3677), True, 'import pandas as pd\n'), ((3690, 3708), 'pandas.Series', 'pd.Series', (['data[3]'], {}), '(data[3])\n', (3699, 3708), True, 'import pandas as pd\n'), ((3820, 3836), 'pandas.Series', 'pd.Series', (['data1'], {}), '(data1)\n', (3829, 3836), True, 'import pandas as pd\n'), ((3251, 3262), 'scipy.stats.skew', 'skew', (['data1'], {}), '(data1)\n', (3255, 3262), False, 'from scipy.stats import skew\n'), ((3129, 3142), 'scipy.stats.skew', 'skew', (['data[0]'], {}), '(data[0])\n', (3133, 3142), False, 'from scipy.stats import skew\n'), ((3143, 3156), 'scipy.stats.skew', 'skew', (['data[1]'], {}), '(data[1])\n', (3147, 3156), False, 'from scipy.stats import skew\n'), ((3157, 3170), 'scipy.stats.skew', 'skew', (['data[2]'], {}), '(data[2])\n', (3161, 3170), False, 'from scipy.stats import skew\n'), ((3171, 3184), 'scipy.stats.skew', 'skew', (['data[3]'], {}), '(data[3])\n', (3175, 3184), False, 'from scipy.stats import skew\n')]
|
"""
A simple, good-looking plot
===========================
Demoing some simple features of matplotlib
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(5, 4), dpi=72)
axes = fig.add_axes([0.01, 0.01, .98, 0.98])
X = np.linspace(0, 2, 200)
Y = np.sin(2*np.pi*X)
plt.plot(X, Y, lw=2)
plt.ylim(-1.1, 1.1)
plt.grid()
plt.show()
|
[
"matplotlib.pyplot.grid",
"matplotlib.use",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show"
] |
[((146, 167), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (160, 167), False, 'import matplotlib\n'), ((207, 241), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)', 'dpi': '(72)'}), '(figsize=(5, 4), dpi=72)\n', (217, 241), True, 'import matplotlib.pyplot as plt\n'), ((291, 313), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(200)'], {}), '(0, 2, 200)\n', (302, 313), True, 'import numpy as np\n'), ((318, 339), 'numpy.sin', 'np.sin', (['(2 * np.pi * X)'], {}), '(2 * np.pi * X)\n', (324, 339), True, 'import numpy as np\n'), ((336, 356), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {'lw': '(2)'}), '(X, Y, lw=2)\n', (344, 356), True, 'import matplotlib.pyplot as plt\n'), ((357, 376), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (365, 376), True, 'import matplotlib.pyplot as plt\n'), ((377, 387), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (385, 387), True, 'import matplotlib.pyplot as plt\n'), ((389, 399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (397, 399), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import scipy.sparse as sps
from mars.tensor.execution.core import Executor
from mars import tensor as mt
from mars.tensor.expressions.datasource import tensor, ones, zeros, arange
from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, \
flip, flipud, fliplr, repeat, tile, isin
from mars.tensor.expressions.merge import stack
from mars.tensor.expressions.reduction import all as tall
class Test(unittest.TestCase):
def setUp(self):
self.executor = Executor('numpy')
def testRechunkExecution(self):
raw = np.random.random((11, 8))
arr = tensor(raw, chunks=3)
arr2 = arr.rechunk(4)
res = self.executor.execute_tensor(arr2)
self.assertTrue(np.array_equal(res[0], raw[:4, :4]))
self.assertTrue(np.array_equal(res[1], raw[:4, 4:]))
self.assertTrue(np.array_equal(res[2], raw[4:8, :4]))
self.assertTrue(np.array_equal(res[3], raw[4:8, 4:]))
self.assertTrue(np.array_equal(res[4], raw[8:, :4]))
self.assertTrue(np.array_equal(res[5], raw[8:, 4:]))
def testCopytoExecution(self):
a = ones((2, 3), chunks=1)
b = tensor([3, -1, 3], chunks=2)
copyto(a, b, where=b > 1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.array([[3, 1, 3], [3, 1, 3]])
np.testing.assert_equal(res, expected)
def testAstypeExecution(self):
raw = np.random.random((10, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.astype('i8')))
raw = sps.random(10, 5, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.astype('i8').toarray()))
def testTransposeExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.T))
arr3 = transpose(arr, axes=(-2, -1, -3))
res = self.executor.execute_tensor(arr3, concat=True)
self.assertTrue(np.array_equal(res[0], raw.transpose(1, 2, 0)))
raw = sps.random(11, 8)
arr = tensor(raw, chunks=3)
arr2 = transpose(arr)
self.assertTrue(arr2.issparse())
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.T.toarray()))
def testSwapaxesExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], raw.swapaxes(2, 0)))
raw = sps.random(11, 8, density=.2)
arr = tensor(raw, chunks=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.toarray().swapaxes(1, 0)))
def testMoveaxisExecution(self):
x = zeros((3, 4, 5), chunks=2)
t = moveaxis(x, 0, -1)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (4, 5, 3))
t = moveaxis(x, -1, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 3, 4))
t = moveaxis(x, [0, 1], [-1, -2])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
t = moveaxis(x, [0, 1, 2], [-1, -2, -3])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
def testBroadcastToExecution(self):
raw = np.random.random((10, 5, 1))
arr = tensor(raw, chunks=2)
arr2 = broadcast_to(arr, (5, 10, 5, 6))
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0], np.broadcast_to(raw, (5, 10, 5, 6))))
    def testBroadcastArraysExecution(self):
x_data = [[1, 2, 3]]
x = tensor(x_data, chunks=1)
y_data = [[1], [2], [3]]
y = tensor(y_data, chunks=2)
a = broadcast_arrays(x, y)
res = [self.executor.execute_tensor(arr, concat=True)[0] for arr in a]
expected = np.broadcast_arrays(x_data, y_data)
for r, e in zip(res, expected):
np.testing.assert_equal(r, e)
def testWhereExecution(self):
raw_cond = np.random.randint(0, 2, size=(4, 4), dtype='?')
raw_x = np.random.rand(4, 1)
raw_y = np.random.rand(4, 4)
cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)
self.assertTrue(np.array_equal(res[0], np.where(raw_cond, raw_x, raw_y)))
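        # where() with a sparse condition and sparse operands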
raw_cond = sps.csr_matrix(np.random.randint(0, 2, size=(4, 4), dtype='?'))
raw_x = sps.random(4, 1, density=.1)
raw_y = sps.random(4, 4, density=.1)
cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)[0]
self.assertTrue(np.array_equal(res.toarray(),
np.where(raw_cond.toarray(), raw_x.toarray(), raw_y.toarray())))
def testReshapeExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunks=6)
y = x.reshape(-1, 30)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 30)))
y2 = x.reshape(10, -1)
res = self.executor.execute_tensor(y2, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(10, -1)))
y3 = x.reshape(-1)
res = self.executor.execute_tensor(y3, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1)))
y4 = x.ravel()
res = self.executor.execute_tensor(y4, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.ravel()))
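        # reshape a larger tensor into several different target shapes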
raw_data = np.random.rand(30, 100, 20)
x = tensor(raw_data, chunks=6)
y = x.reshape(-1, 20, 5, 5, 4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 20, 5, 5, 4)))
y2 = x.reshape(3000, 10, 2)
res = self.executor.execute_tensor(y2, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(3000, 10, 2)))
y3 = x.reshape(60, 25, 40)
res = self.executor.execute_tensor(y3, concat=True)
self.assertTrue(np.array_equal(res[0], raw_data.reshape(60, 25, 40)))
def testExpandDimsExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunks=6)
y = expand_dims(x, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 1)))
y = expand_dims(x, 0)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 0)))
y = expand_dims(x, 3)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 3)))
y = expand_dims(x, -1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -1)))
y = expand_dims(x, -4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -4)))
with self.assertRaises(np.AxisError):
expand_dims(x, -5)
with self.assertRaises(np.AxisError):
expand_dims(x, 4)
def testRollAxisExecution(self):
x = ones((3, 4, 5, 6), chunks=1)
y = rollaxis(x, 3, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.rollaxis(np.ones((3, 4, 5, 6)), 3, 1)))
def testAtleast1dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_1d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([1])))
self.assertTrue(np.array_equal(res[1], np.ones(3)))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast2dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_2d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([[1]])))
self.assertTrue(np.array_equal(res[1], np.atleast_2d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast3dExecution(self):
x = 1
y = ones(3, chunks=2)
z = ones((3, 4), chunks=2)
t = atleast_3d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.atleast_3d(x)))
self.assertTrue(np.array_equal(res[1], np.atleast_3d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.atleast_3d(np.ones((3, 4)))))
def testArgwhereExecution(self):
x = arange(6, chunks=2).reshape(2, 3)
t = argwhere(x > 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.argwhere(np.arange(6).reshape(2, 3) > 1)
self.assertTrue(np.array_equal(res, expected))
def testArraySplitExecution(self):
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = array_split(x, 3, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), 3, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = array_split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
def testSplitExecution(self):
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = split(x, 4, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), 4, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# hsplit
x = arange(120, chunks=3).reshape(2, 12, 5)
ss = hsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.hsplit(np.arange(120).reshape(2, 12, 5), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# vsplit
x = arange(48, chunks=3).reshape(8, 3, 2)
ss = vsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.vsplit(np.arange(48).reshape(8, 3, 2), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# dsplit
x = arange(48, chunks=3).reshape(2, 3, 8)
ss = dsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.dsplit(np.arange(48).reshape(2, 3, 8), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
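        # split on a sparse tensor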
x_data = sps.random(12, 8, density=.1)
x = tensor(x_data, chunks=3)
ss = split(x, 4, axis=0)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(x_data.toarray(), 4, axis=0)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r.toarray(), e) for r, e in zip(res, expected)]
def testRollExecution(self):
x = arange(10, chunks=2)
t = roll(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10), 2)
np.testing.assert_equal(res, expected)
x2 = x.reshape(2, 5)
t = roll(x2, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=0)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=1)
np.testing.assert_equal(res, expected)
def testSqueezeExecution(self):
data = np.array([[[0], [1], [2]]])
x = tensor(data, chunks=1)
t = squeeze(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data)
np.testing.assert_equal(res, expected)
t = squeeze(x, axis=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data, axis=2)
np.testing.assert_equal(res, expected)
def testPtpExecution(self):
x = arange(4, chunks=1).reshape(2, 2)
t = ptp(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ptp(np.arange(4).reshape(2, 2), axis=0)
np.testing.assert_equal(res, expected)
t = ptp(x, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ptp(np.arange(4).reshape(2, 2), axis=1)
np.testing.assert_equal(res, expected)
t = ptp(x)
res = self.executor.execute_tensor(t)[0]
expected = np.ptp(np.arange(4).reshape(2, 2))
np.testing.assert_equal(res, expected)
def testDiffExecution(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunks=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, n=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, n=2)
np.testing.assert_equal(res, expected)
data = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
x = tensor(data, chunks=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, axis=0)
np.testing.assert_equal(res, expected)
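        # diff also works on datetime64 values (the differences are timedelta64)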
x = mt.arange('1066-10-13', '1066-10-16', dtype=mt.datetime64)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64))
np.testing.assert_equal(res, expected)
def testEdiff1d(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunks=2)
t = ediff1d(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
to_begin = tensor(-99, chunks=2)
to_end = tensor([88, 99], chunks=2)
t = ediff1d(x, to_begin=to_begin, to_end=to_end)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data, to_begin=-99, to_end=np.array([88, 99]))
np.testing.assert_equal(res, expected)
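        # ediff1d flattens multi-dimensional input before taking differences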
data = [[1, 2, 4], [1, 6, 24]]
t = ediff1d(tensor(data, chunks=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
def testDigitizeExecution(self):
data = np.array([0.2, 6.4, 3.0, 1.6])
x = tensor(data, chunks=2)
bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
inds = digitize(x, bins)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins)
np.testing.assert_equal(res, expected)
b = tensor(bins, chunks=2)
inds = digitize(x, b)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins)
np.testing.assert_equal(res, expected)
data = np.array([1.2, 10.0, 12.4, 15.5, 20.])
x = tensor(data, chunks=2)
bins = np.array([0, 5, 10, 15, 20])
inds = digitize(x, bins, right=True)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins, right=True)
np.testing.assert_equal(res, expected)
inds = digitize(x, bins, right=False)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data, bins, right=False)
np.testing.assert_equal(res, expected)
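        # digitize on a sparse tensor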
data = sps.random(10, 1, density=.1) * 12
x = tensor(data, chunks=2)
bins = np.array([1.0, 2.0, 2.5, 4.0, 10.0])
inds = digitize(x, bins)
res = self.executor.execute_tensor(inds, concat=True)[0]
expected = np.digitize(data.toarray(), bins, right=False)
np.testing.assert_equal(res.toarray(), expected)
def testAverageExecution(self):
data = arange(1, 5, chunks=1)
t = average(data)
res = self.executor.execute_tensor(t)[0]
expected = np.average(np.arange(1, 5))
self.assertEqual(res, expected)
t = average(arange(1, 11, chunks=2), weights=arange(10, 0, -1, chunks=2))
res = self.executor.execute_tensor(t)[0]
expected = np.average(range(1, 11), weights=range(10, 0, -1))
self.assertEqual(res, expected)
data = arange(6, chunks=2).reshape((3, 2))
t = average(data, axis=1, weights=tensor([1./4, 3./4], chunks=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.average(np.arange(6).reshape(3, 2), axis=1, weights=(1./4, 3./4))
np.testing.assert_equal(res, expected)
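        # omitting axis when the weights' shape differs from the data should raise TypeError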
with self.assertRaises(TypeError):
average(data, weights=tensor([1./4, 3./4], chunks=2))
def testCovExecution(self):
data = np.array([[0, 2], [1, 1], [2, 0]]).T
x = tensor(data, chunks=1)
t = cov(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.cov(data)
np.testing.assert_equal(res, expected)
data_x = [-2.1, -1, 4.3]
data_y = [3, 1.1, 0.12]
x = tensor(data_x, chunks=1)
y = tensor(data_y, chunks=1)
X = stack((x, y), axis=0)
t = cov(x, y)
r = tall(t == cov(X))
self.assertTrue(self.executor.execute_tensor(r)[0])
def testCorrcoefExecution(self):
data_x = [-2.1, -1, 4.3]
data_y = [3, 1.1, 0.12]
x = tensor(data_x, chunks=1)
y = tensor(data_y, chunks=1)
t = corrcoef(x, y)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.corrcoef(data_x, data_y)
np.testing.assert_equal(res, expected)
def testFlipExecution(self):
a = arange(8, chunks=2).reshape((2, 2, 2))
t = flip(a, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 0)
np.testing.assert_equal(res, expected)
t = flip(a, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 1)
np.testing.assert_equal(res, expected)
t = flipud(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flipud(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
t = fliplr(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.fliplr(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
def testRepeatExecution(self):
a = repeat(3, 4)
res = self.executor.execute_tensor(a)[0]
expected = np.repeat(3, 4)
np.testing.assert_equal(res, expected)
x_data = np.random.randn(20, 30)
x = tensor(x_data, chunks=(3, 4))
t = repeat(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 2)
np.testing.assert_equal(res, expected)
t = repeat(x, 3, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 3, axis=1)
np.testing.assert_equal(res, expected)
t = repeat(x, np.arange(20), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
t = repeat(x, arange(20, chunks=5), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
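        # repeat on a sparse tensor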
x_data = sps.random(20, 30, density=.1)
x = tensor(x_data, chunks=(3, 4))
t = repeat(x, 2, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data.toarray(), 2, axis=1)
np.testing.assert_equal(res.toarray(), expected)
def testTileExecution(self):
a_data = np.array([0, 1, 2])
a = tensor(a_data, chunks=2)
t = tile(a, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, 2)
np.testing.assert_equal(res, expected)
t = tile(a, (2, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 2))
np.testing.assert_equal(res, expected)
t = tile(a, (2, 1, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 1, 2))
np.testing.assert_equal(res, expected)
b_data = np.array([[1, 2], [3, 4]])
b = tensor(b_data, chunks=1)
t = tile(b, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, 2)
np.testing.assert_equal(res, expected)
t = tile(b, (2, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, (2, 1))
np.testing.assert_equal(res, expected)
c_data = np.array([1, 2, 3, 4])
c = tensor(c_data, chunks=3)
t = tile(c, (4, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(c_data, (4, 1))
np.testing.assert_equal(res, expected)
def testIsInExecution(self):
element = 2 * arange(4, chunks=1).reshape((2, 2))
test_elements = [1, 2, 4, 8]
mask = isin(element, test_elements)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([2, 4])
np.testing.assert_equal(res, expected)
mask = isin(element, test_elements, invert=True)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements, invert=True)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([0, 6])
np.testing.assert_equal(res, expected)
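        # a Python set is passed through as in numpy: it is not unpacked into individual elements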
test_set = {1, 2, 4, 8}
mask = isin(element, test_set)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_set)
np.testing.assert_equal(res, expected)
'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21136, 21151), True, 'import numpy as np\n'), ((21250, 21260), 'mars.tensor.expressions.base.flip', 'flip', (['a', '(0)'], {}), '(a, 0)\n', (21254, 21260), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21393, 21431), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21416, 21431), True, 'import numpy as np\n'), ((21445, 21455), 'mars.tensor.expressions.base.flip', 'flip', (['a', '(1)'], {}), '(a, 1)\n', (21449, 21455), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21588, 21626), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21611, 21626), True, 'import numpy as np\n'), ((21640, 21649), 'mars.tensor.expressions.base.flipud', 'flipud', (['a'], {}), '(a)\n', (21646, 21649), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21781, 21819), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21804, 21819), True, 'import numpy as np\n'), ((21833, 21842), 'mars.tensor.expressions.base.fliplr', 'fliplr', (['a'], {}), '(a)\n', (21839, 21842), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21974, 22012), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (21997, 22012), True, 'import numpy as np\n'), ((22061, 22073), 'mars.tensor.expressions.base.repeat', 'repeat', (['(3)', '(4)'], {}), '(3, 4)\n', (22067, 22073), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((22143, 22158), 'numpy.repeat', 'np.repeat', (['(3)', '(4)'], {}), '(3, 4)\n', (22152, 22158), True, 'import numpy as np\n'), ((22167, 22205), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (22190, 22205), True, 'import numpy as np\n'), ((22224, 22247), 'numpy.random.randn', 'np.random.randn', (['(20)', '(30)'], {}), '(20, 30)\n', (22239, 22247), True, 'import numpy as np\n'), ((22260, 22289), 
'mars.tensor.expressions.datasource.tensor', 'tensor', (['x_data'], {'chunks': '(3, 4)'}), '(x_data, chunks=(3, 4))\n', (22266, 22289), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((22303, 22315), 'mars.tensor.expressions.base.repeat', 'repeat', (['x', '(2)'], {}), '(x, 2)\n', (22309, 22315), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((22398, 22418), 'numpy.repeat', 'np.repeat', (['x_data', '(2)'], {}), '(x_data, 2)\n', (22407, 22418), True, 'import numpy as np\n'), ((22427, 22465), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (22450, 22465), True, 'import numpy as np\n'), ((22479, 22499), 'mars.tensor.expressions.base.repeat', 'repeat', (['x', '(3)'], {'axis': '(1)'}), '(x, 3, axis=1)\n', (22485, 22499), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((22582, 22610), 'numpy.repeat', 'np.repeat', (['x_data', '(3)'], {'axis': '(1)'}), '(x_data, 3, axis=1)\n', (22591, 22610), True, 'import numpy as np\n'), ((22619, 22657), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (22642, 22657), True, 'import numpy as np\n'), ((22835, 22873), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (22858, 22873), True, 'import numpy as np\n'), ((23058, 23096), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (23081, 23096), True, 'import numpy as np\n'), ((23115, 23146), 'scipy.sparse.random', 'sps.random', (['(20)', '(30)'], {'density': '(0.1)'}), '(20, 30, density=0.1)\n', (23125, 23146), True, 'import scipy.sparse as sps\n'), ((23158, 23187), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['x_data'], {'chunks': '(3, 4)'}), '(x_data, chunks=(3, 4))\n', (23164, 23187), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((23201, 23221), 'mars.tensor.expressions.base.repeat', 'repeat', (['x', '(2)'], {'axis': '(1)'}), '(x, 2, axis=1)\n', (23207, 23221), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((23451, 23470), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (23459, 23470), True, 'import numpy as np\n'), ((23483, 23507), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['a_data'], {'chunks': '(2)'}), '(a_data, chunks=2)\n', (23489, 23507), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((23521, 23531), 'mars.tensor.expressions.base.tile', 'tile', (['a', '(2)'], {}), '(a, 2)\n', (23525, 23531), False, 'from mars.tensor.expressions.base import 
copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((23614, 23632), 'numpy.tile', 'np.tile', (['a_data', '(2)'], {}), '(a_data, 2)\n', (23621, 23632), True, 'import numpy as np\n'), ((23641, 23679), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (23664, 23679), True, 'import numpy as np\n'), ((23693, 23708), 'mars.tensor.expressions.base.tile', 'tile', (['a', '(2, 2)'], {}), '(a, (2, 2))\n', (23697, 23708), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((23791, 23814), 'numpy.tile', 'np.tile', (['a_data', '(2, 2)'], {}), '(a_data, (2, 2))\n', (23798, 23814), True, 'import numpy as np\n'), ((23823, 23861), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (23846, 23861), True, 'import numpy as np\n'), ((23875, 23893), 'mars.tensor.expressions.base.tile', 'tile', (['a', '(2, 1, 2)'], {}), '(a, (2, 1, 2))\n', (23879, 23893), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((23976, 24002), 'numpy.tile', 'np.tile', (['a_data', '(2, 1, 2)'], {}), '(a_data, (2, 1, 2))\n', (23983, 24002), True, 'import numpy as np\n'), ((24011, 24049), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (24034, 24049), True, 'import numpy as np\n'), ((24068, 24094), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (24076, 24094), True, 'import numpy as np\n'), ((24107, 24131), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['b_data'], {'chunks': '(1)'}), '(b_data, chunks=1)\n', (24113, 24131), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((24145, 24155), 'mars.tensor.expressions.base.tile', 'tile', (['b', '(2)'], {}), '(b, 2)\n', (24149, 24155), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((24238, 24256), 'numpy.tile', 'np.tile', (['b_data', '(2)'], {}), '(b_data, 2)\n', (24245, 24256), True, 'import numpy as np\n'), ((24265, 24303), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (24288, 24303), True, 'import numpy as np\n'), ((24317, 24332), 'mars.tensor.expressions.base.tile', 'tile', (['b', '(2, 1)'], {}), '(b, (2, 1))\n', (24321, 24332), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, 
atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((24415, 24438), 'numpy.tile', 'np.tile', (['b_data', '(2, 1)'], {}), '(b_data, (2, 1))\n', (24422, 24438), True, 'import numpy as np\n'), ((24447, 24485), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (24470, 24485), True, 'import numpy as np\n'), ((24504, 24526), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (24512, 24526), True, 'import numpy as np\n'), ((24539, 24563), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['c_data'], {'chunks': '(3)'}), '(c_data, chunks=3)\n', (24545, 24563), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((24577, 24592), 'mars.tensor.expressions.base.tile', 'tile', (['c', '(4, 1)'], {}), '(c, (4, 1))\n', (24581, 24592), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((24675, 24698), 'numpy.tile', 'np.tile', (['c_data', '(4, 1)'], {}), '(c_data, (4, 1))\n', (24682, 24698), True, 'import numpy as np\n'), ((24707, 24745), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (24730, 24745), True, 'import numpy as np\n'), ((24891, 24919), 'mars.tensor.expressions.base.isin', 'isin', (['element', 'test_elements'], {}), '(element, test_elements)\n', (24895, 24919), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((25070, 25108), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25093, 25108), True, 'import numpy as np\n'), ((25203, 25219), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (25211, 25219), True, 'import numpy as np\n'), ((25228, 25266), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25251, 25266), True, 'import numpy as np\n'), ((25283, 25324), 'mars.tensor.expressions.base.isin', 'isin', (['element', 'test_elements'], {'invert': '(True)'}), '(element, test_elements, invert=True)\n', (25287, 25324), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((25488, 25526), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25511, 25526), True, 'import numpy as np\n'), ((25621, 25637), 'numpy.array', 'np.array', (['[0, 6]'], {}), '([0, 6])\n', (25629, 25637), True, 'import numpy as np\n'), ((25646, 25684), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25669, 25684), True, 'import 
numpy as np\n'), ((25733, 25756), 'mars.tensor.expressions.base.isin', 'isin', (['element', 'test_set'], {}), '(element, test_set)\n', (25737, 25756), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((25902, 25940), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (25925, 25940), True, 'import numpy as np\n'), ((1630, 1665), 'numpy.array_equal', 'np.array_equal', (['res[0]', 'raw[:4, :4]'], {}), '(res[0], raw[:4, :4])\n', (1644, 1665), True, 'import numpy as np\n'), ((1691, 1726), 'numpy.array_equal', 'np.array_equal', (['res[1]', 'raw[:4, 4:]'], {}), '(res[1], raw[:4, 4:])\n', (1705, 1726), True, 'import numpy as np\n'), ((1752, 1788), 'numpy.array_equal', 'np.array_equal', (['res[2]', 'raw[4:8, :4]'], {}), '(res[2], raw[4:8, :4])\n', (1766, 1788), True, 'import numpy as np\n'), ((1814, 1850), 'numpy.array_equal', 'np.array_equal', (['res[3]', 'raw[4:8, 4:]'], {}), '(res[3], raw[4:8, 4:])\n', (1828, 1850), True, 'import numpy as np\n'), ((1876, 1911), 'numpy.array_equal', 'np.array_equal', (['res[4]', 'raw[8:, :4]'], {}), '(res[4], raw[8:, :4])\n', (1890, 1911), True, 'import numpy as np\n'), ((1937, 1972), 'numpy.array_equal', 'np.array_equal', (['res[5]', 'raw[8:, 4:]'], {}), '(res[5], raw[8:, 4:])\n', (1951, 1972), True, 'import numpy as np\n'), ((3055, 3084), 'numpy.array_equal', 'np.array_equal', (['res[0]', 'raw.T'], {}), '(res[0], raw.T)\n', (3069, 3084), True, 'import numpy as np\n'), ((5499, 5528), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (5522, 5528), True, 'import numpy as np\n'), ((5727, 5753), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_cond'], {'chunks': '(2)'}), '(raw_cond, chunks=2)\n', (5733, 5753), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((5755, 5778), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_x'], {'chunks': '(2)'}), '(raw_x, chunks=2)\n', (5761, 5778), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((5780, 5803), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_y'], {'chunks': '(2)'}), '(raw_y, chunks=2)\n', (5786, 5803), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6015, 6062), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(4, 4)', 'dtype': '"""?"""'}), "(0, 2, size=(4, 4), dtype='?')\n", (6032, 6062), True, 'import numpy as np\n'), ((6176, 6202), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_cond'], {'chunks': '(2)'}), '(raw_cond, chunks=2)\n', (6182, 6202), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6204, 6227), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_x'], {'chunks': '(2)'}), '(raw_x, chunks=2)\n', (6210, 6227), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((6229, 6252), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['raw_y'], {'chunks': '(2)'}), '(raw_y, chunks=2)\n', (6235, 6252), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((8922, 8940), 'mars.tensor.expressions.base.expand_dims', 
'expand_dims', (['x', '(-5)'], {}), '(x, -5)\n', (8933, 8940), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((9000, 9017), 'mars.tensor.expressions.base.expand_dims', 'expand_dims', (['x', '(4)'], {}), '(x, 4)\n', (9011, 9017), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((10838, 10867), 'numpy.array_equal', 'np.array_equal', (['res', 'expected'], {}), '(res, expected)\n', (10852, 10867), True, 'import numpy as np\n'), ((11211, 11240), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11234, 11240), True, 'import numpy as np\n'), ((11550, 11579), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11573, 11579), True, 'import numpy as np\n'), ((11937, 11966), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (11960, 11966), True, 'import numpy as np\n'), ((12264, 12293), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (12287, 12293), True, 'import numpy as np\n'), ((12624, 12653), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (12647, 12653), True, 'import numpy as np\n'), ((12980, 13009), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (13003, 13009), True, 'import numpy as np\n'), ((13336, 13365), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['r', 'e'], {}), '(r, e)\n', (13359, 13365), True, 'import numpy as np\n'), ((13962, 13975), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (13971, 13975), True, 'import numpy as np\n'), ((16857, 16915), 'numpy.arange', 'np.arange', (['"""1066-10-13"""', '"""1066-10-16"""'], {'dtype': 'np.datetime64'}), "('1066-10-13', '1066-10-16', dtype=np.datetime64)\n", (16866, 16915), True, 'import numpy as np\n'), ((17629, 17651), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['data'], {'chunks': '(2)'}), '(data, chunks=2)\n', (17635, 17651), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((18960, 18990), 'scipy.sparse.random', 'sps.random', (['(10)', '(1)'], {'density': '(0.1)'}), '(10, 1, density=0.1)\n', (18970, 18990), True, 'import scipy.sparse as sps\n'), ((19485, 19500), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (19494, 19500), True, 'import numpy as np\n'), ((19563, 19586), 'mars.tensor.expressions.datasource.arange', 'arange', (['(1)', '(11)'], {'chunks': '(2)'}), '(1, 11, chunks=2)\n', (19569, 19586), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20267, 20301), 'numpy.array', 'np.array', (['[[0, 2], [1, 1], [2, 0]]'], {}), '([[0, 2], [1, 1], [2, 0]])\n', (20275, 20301), True, 'import numpy as np\n'), ((22681, 22694), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (22690, 22694), True, 'import numpy as np\n'), ((22804, 22817), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (22813, 22817), 
True, 'import numpy as np\n'), ((22897, 22917), 'mars.tensor.expressions.datasource.arange', 'arange', (['(20)'], {'chunks': '(5)'}), '(20, chunks=5)\n', (22903, 22917), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((23027, 23040), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (23036, 23040), True, 'import numpy as np\n'), ((5055, 5090), 'numpy.broadcast_to', 'np.broadcast_to', (['raw', '(5, 10, 5, 6)'], {}), '(raw, (5, 10, 5, 6))\n', (5070, 5090), True, 'import numpy as np\n'), ((5945, 5977), 'numpy.where', 'np.where', (['raw_cond', 'raw_x', 'raw_y'], {}), '(raw_cond, raw_x, raw_y)\n', (5953, 5977), True, 'import numpy as np\n'), ((8157, 8184), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(1)'], {}), '(raw_data, 1)\n', (8171, 8184), True, 'import numpy as np\n'), ((8325, 8352), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(0)'], {}), '(raw_data, 0)\n', (8339, 8352), True, 'import numpy as np\n'), ((8493, 8520), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(3)'], {}), '(raw_data, 3)\n', (8507, 8520), True, 'import numpy as np\n'), ((8662, 8690), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(-1)'], {}), '(raw_data, -1)\n', (8676, 8690), True, 'import numpy as np\n'), ((8832, 8860), 'numpy.expand_dims', 'np.expand_dims', (['raw_data', '(-4)'], {}), '(raw_data, -4)\n', (8846, 8860), True, 'import numpy as np\n'), ((9552, 9565), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (9560, 9565), True, 'import numpy as np\n'), ((9615, 9625), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (9622, 9625), True, 'import numpy as np\n'), ((9675, 9690), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (9682, 9690), True, 'import numpy as np\n'), ((9968, 9983), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (9976, 9983), True, 'import numpy as np\n'), ((10108, 10123), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (10115, 10123), True, 'import numpy as np\n'), ((10401, 10417), 'numpy.atleast_3d', 'np.atleast_3d', (['x'], {}), '(x)\n', (10414, 10417), True, 'import numpy as np\n'), ((10625, 10644), 'mars.tensor.expressions.datasource.arange', 'arange', (['(6)'], {'chunks': '(2)'}), '(6, chunks=2)\n', (10631, 10644), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((10921, 10941), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (10927, 10941), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((11659, 11679), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (11665, 11679), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((12356, 12377), 'mars.tensor.expressions.datasource.arange', 'arange', (['(120)'], {'chunks': '(3)'}), '(120, chunks=3)\n', (12362, 12377), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((12716, 12736), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (12722, 12736), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((13072, 13092), 'mars.tensor.expressions.datasource.arange', 'arange', (['(48)'], {'chunks': '(3)'}), '(48, chunks=3)\n', (13078, 13092), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((15187, 15206), 'mars.tensor.expressions.datasource.arange', 'arange', 
(['(4)'], {'chunks': '(1)'}), '(4, chunks=1)\n', (15193, 15206), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((17501, 17519), 'numpy.array', 'np.array', (['[88, 99]'], {}), '([88, 99])\n', (17509, 17519), True, 'import numpy as np\n'), ((19596, 19623), 'mars.tensor.expressions.datasource.arange', 'arange', (['(10)', '(0)', '(-1)'], {'chunks': '(2)'}), '(10, 0, -1, chunks=2)\n', (19602, 19623), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((19801, 19820), 'mars.tensor.expressions.datasource.arange', 'arange', (['(6)'], {'chunks': '(2)'}), '(6, chunks=2)\n', (19807, 19820), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((19879, 19915), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['[1.0 / 4, 3.0 / 4]'], {'chunks': '(2)'}), '([1.0 / 4, 3.0 / 4], chunks=2)\n', (19885, 19915), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((20722, 20728), 'mars.tensor.expressions.base.cov', 'cov', (['X'], {}), '(X)\n', (20725, 20728), False, 'from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, flip, flipud, fliplr, repeat, tile, isin\n'), ((21198, 21217), 'mars.tensor.expressions.datasource.arange', 'arange', (['(8)'], {'chunks': '(2)'}), '(8, chunks=2)\n', (21204, 21217), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((9246, 9267), 'numpy.ones', 'np.ones', (['(3, 4, 5, 6)'], {}), '((3, 4, 5, 6))\n', (9253, 9267), True, 'import numpy as np\n'), ((10047, 10057), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (10054, 10057), True, 'import numpy as np\n'), ((10481, 10491), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (10488, 10491), True, 'import numpy as np\n'), ((10556, 10571), 'numpy.ones', 'np.ones', (['(3, 4)'], {}), '((3, 4))\n', (10563, 10571), True, 'import numpy as np\n'), ((11109, 11122), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11118, 11122), True, 'import numpy as np\n'), ((11436, 11449), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11445, 11449), True, 'import numpy as np\n'), ((11835, 11848), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (11844, 11848), True, 'import numpy as np\n'), ((12150, 12163), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (12159, 12163), True, 'import numpy as np\n'), ((12528, 12542), 'numpy.arange', 'np.arange', (['(120)'], {}), '(120)\n', (12537, 12542), True, 'import numpy as np\n'), ((12886, 12899), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (12895, 12899), True, 'import numpy as np\n'), ((13242, 13255), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (13251, 13255), True, 'import numpy as np\n'), ((14172, 14185), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14181, 14185), True, 'import numpy as np\n'), ((14374, 14387), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14383, 14387), True, 'import numpy as np\n'), ((14584, 14597), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (14593, 14597), True, 'import numpy as np\n'), ((15338, 15350), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15347, 15350), True, 'import numpy as np\n'), ((15538, 15550), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15547, 15550), 
True, 'import numpy as np\n'), ((15717, 15729), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (15726, 15729), True, 'import numpy as np\n'), ((20004, 20016), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (20013, 20016), True, 'import numpy as np\n'), ((20187, 20223), 'mars.tensor.expressions.datasource.tensor', 'tensor', (['[1.0 / 4, 3.0 / 4]'], {'chunks': '(2)'}), '([1.0 / 4, 3.0 / 4], chunks=2)\n', (20193, 20223), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((21351, 21363), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21360, 21363), True, 'import numpy as np\n'), ((21546, 21558), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21555, 21558), True, 'import numpy as np\n'), ((21742, 21754), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21751, 21754), True, 'import numpy as np\n'), ((21935, 21947), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (21944, 21947), True, 'import numpy as np\n'), ((24802, 24821), 'mars.tensor.expressions.datasource.arange', 'arange', (['(4)'], {'chunks': '(1)'}), '(4, chunks=1)\n', (24808, 24821), False, 'from mars.tensor.expressions.datasource import tensor, ones, zeros, arange\n'), ((10781, 10793), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (10790, 10793), True, 'import numpy as np\n'), ((25017, 25029), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25026, 25029), True, 'import numpy as np\n'), ((25422, 25434), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25431, 25434), True, 'import numpy as np\n'), ((25854, 25866), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (25863, 25866), True, 'import numpy as np\n')]
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.table import Table, join
from os import chdir, system
from scipy.stats import norm as gauss_norm
from scipy.interpolate import splrep, splev  # required by the 'spline' option in fit_abund_trend/eval_abund_trend
from sys import argv
from getopt import getopt
# turn off polyfit ranking warnings
import warnings
warnings.filterwarnings('ignore')
def _prepare_pdf_data(means, stds, range, norm=True):
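    # sum one Gaussian per star (centre = abundance mean, width = its uncertainty)
    # on a fixed grid spanning `range`; if norm is set, rescale the sum to a unit peak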
x_vals = np.linspace(range[0], range[1], 250)
y_vals = np.zeros_like(x_vals)
# create and sum all PDF of stellar abundances
for d_m, d_s in zip(means, stds):
if np.isfinite([d_m, d_s]).all():
y_vals += gauss_norm.pdf(x_vals, loc=d_m, scale=d_s)
# return normalized summed pdf of all stars
if norm and np.nansum(y_vals) > 0.:
y_vals = 1. * y_vals/np.nanmax(y_vals)
return x_vals, y_vals
def _prepare_hist_data(d, bins, range, norm=True):
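    # histogram of d over `range`; returns left bin edges, (peak-normalised) bar heights and the bar width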
heights, edges = np.histogram(d, bins=bins, range=range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.nanmax(heights)
return edges[:-1], heights, width
def _evaluate_abund_trend_fit(orig, fit, idx, sigma_low, sigma_high):
    # difference to the original data
diff = orig - fit
std_diff = np.nanstd(diff[idx])
    # flag outliers that will be excluded from the next fitting step
idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low),
diff > (std_diff * sigma_high))
return np.logical_and(idx, ~idx_outlier)
def fit_abund_trend(p_data, a_data,
steps=3, sigma_low=2.5, sigma_high=2.5,
                    order=5, window=10, n_min_perc=10., func='poly'):
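    # iterative sigma-clipped fit of the a_data-vs-p_data trend, centred on the median
    # of p_data (p_offset); in each of `steps` iterations the chosen model ('poly',
    # 'cheb', 'legen' or 'spline') is fitted to the currently kept points and residual
    # outliers beyond sigma_low/sigma_high standard deviations are removed;
    # the loop ends early if fewer than n_min_perc % of the points remain or the
    # selection stops changing; returns ([coef, p_offset], residual std) or (None, None)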
idx_fit = np.logical_and(np.isfinite(p_data), np.isfinite(a_data))
data_len = np.sum(idx_fit)
n_fit_points_prev = np.sum(idx_fit)
if data_len <= order + 1:
return None, None
p_offset = np.nanmedian(p_data)
for i_f in range(steps): # number of sigma clipping steps
if func == 'cheb':
coef = np.polynomial.chebyshev.chebfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
coef = np.polynomial.legendre.legfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
coef = np.polyfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
coef = splrep(p_data[idx_fit] - p_offset, a_data[idx_fit], k=order, s=window)
f_data = splev(p_data - p_offset, coef)
idx_fit = _evaluate_abund_trend_fit(a_data, f_data, idx_fit, sigma_low, sigma_high)
n_fit_points = np.sum(idx_fit)
if 100.*n_fit_points/data_len < n_min_perc:
break
if n_fit_points == n_fit_points_prev:
break
else:
n_fit_points_prev = n_fit_points
a_std = np.nanstd(a_data - f_data)
return [coef, p_offset], a_std
def eval_abund_trend(p_data, m_data, func='poly'):
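    # evaluate a trend model previously returned by fit_abund_trend (m_data = [coef, p_offset])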
coef, p_offset = m_data
if func == 'cheb':
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
f_data = splev(p_data - p_offset, coef)
return f_data
simulation_dir = '/shared/data-camelot/cotar/'
data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/'
data_dir = '/shared/ebla/cotar/'
USE_DR3 = True
Q_FLAGS = True
P_INDIVIDUAL = False
suffix = ''
if len(argv) > 1:
# parse input options
opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])
# set parameters, depending on user inputs
print(opts)
for o, a in opts:
if o == '--dr3':
USE_DR3 = int(a) > 0
if o == '--suffix':
suffix += str(a)
if o == '--flags':
Q_FLAGS = int(a) > 0
if o == '--individual':
P_INDIVIDUAL = int(a) > 0
CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits')
tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits')
# remove cluster members from tails data
print('Cluster members all:', len(CG_data), len(tails_data))
idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True)
tails_data = tails_data[idx_not_in_cluster]
print('Cluster members all:', len(CG_data), len(tails_data))
if USE_DR3:
# cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits')
cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits')
fe_col = 'fe_h'
teff_col = 'teff'
q_flag = 'flag_sp'
suffix += '_DR3'
else:
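    # column names for non-DR3 data are not configured here, so fe_col/teff_col/q_flag
    # remain undefined when USE_DR3 is False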
pass
if Q_FLAGS:
suffix += '_flag0'
# determine all possible simulation subdirs
chdir(data_dir_clusters)
for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'):
chdir(cluster_dir)
print('Working on clusters in ' + cluster_dir)
for sub_dir in glob('*'):
current_cluster = '_'.join(sub_dir.split('_')[0:2])
source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id']
source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id']
idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg))
idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail))
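        # skip already produced plot files and folders holding per-star abundance plots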
if '.png' in sub_dir or 'individual-abund' in sub_dir:
continue
print(' ')
print(sub_dir)
chdir(sub_dir)
try:
g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t')
idx_init = np.in1d(cannon_data['source_id'], g_init['source_id'])
except:
idx_init = np.full(len(cannon_data), False)
try:
g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t')
g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster
g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster
g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1.,
g_in['in_cluster_prob'] >= 68.)]
idx_in = np.in1d(cannon_data['source_id'], g_in['source_id'])
idx_in_no_CG = np.logical_and(idx_in,
np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id'])))
except:
idx_in = np.full(len(cannon_data), False)
idx_in_no_CG = np.full(len(cannon_data), False)
try:
g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0,
g_out['in_cluster_prob'] <= 0)]
idx_out = np.in1d(cannon_data['source_id'], g_out['source_id'])
except:
idx_out = np.full(len(cannon_data), False)
chdir('..')
if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0:
print(' Some Galah lists are missing')
if USE_DR3:
abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)]
else:
abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3]
# abund_cols = ['e_' + cc for cc in abund_cols]
# rg = (0., 0.35)
# yt = [0., 0.1, 0.2, 0.3]
# medfix = '-snr-sigma_'
abund_cols = ['diff_' + cc for cc in abund_cols]
rg = (-0.45, 0.45)
yt = [-0.3, -0.15, 0.0, 0.15, 0.3]
medfix = '-detrended-snr_'
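        # plot detrended abundance differences ('diff_' columns, filled in per element below);
        # rg and yt set the y-range and tick positions used for these panels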
# ------------------------------------------------------------------------------
# NEW: plot with parameter dependency trends
# ------------------------------------------------------------------------------
bs = 40
x_cols_fig = 7
y_cols_fig = 5
param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]}
for param in ['snr_c2_iraf']: #list(param_lims.keys()):
cannon_data['abund_det'] = 0
cannon_data['abund_det_elems'] = 0
print('Estimating membership using parameter', param)
fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10))
for i_c, col in enumerate(abund_cols):
# print(col)
x_p = i_c % x_cols_fig
y_p = int(1. * i_c / x_cols_fig)
fit_x_param = 'teff'
cur_abund_col = '_'.join(col.split('_')[1:])
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col]
idx_val = np.isfinite(cannon_data[col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u4 = np.logical_and(idx_cg_memb, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
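                # fit the abundance-vs-Teff trend on valid initial members (idx_u2) and subtract
                # it from all stars; if the fit fails, the 'diff_' column is set to NaN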
fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2],
cannon_data[cur_abund_col][idx_u2],
order=3, steps=2, func='poly',
sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
if fit_model is not None:
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly')
else:
cannon_data['diff_' + cur_abund_col] = np.nan
ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5)))
ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add,
ylim=rg,
yticks=yt,)
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
rg = (-0.6, 0.6)
idx_val = np.isfinite(cannon_data[teff_col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
x_p = -1
y_p = -1
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1],
lw=0, s=3, color='C2', label='Field')
sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2],
order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10.,
func='poly')
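            # NOTE: this Fe/H trend fit is computed, but fit_model/col_std are not used further below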
if np.sum(idx_u5) > 0:
sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
                ax[-1, -3].legend(handles=[sl1, sl2, sl3, sl5])
else:
                ax[-1, -3].legend(handles=[sl1, sl2, sl3])
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
x_p = -2
y_p = -1
ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.])
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3)
# plt.show()
plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png', dpi=250)
plt.close(fig)
chdir('..')
|
[
"numpy.polyfit",
"numpy.array",
"numpy.isfinite",
"numpy.poly1d",
"numpy.histogram",
"numpy.polynomial.chebyshev.chebval",
"numpy.polynomial.legendre.legfit",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.nanmax",
"glob.glob",
"numpy.abs",
"getopt.getopt",
"numpy.nanstd",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"numpy.in1d",
"scipy.stats.norm.pdf",
"numpy.nansum",
"warnings.filterwarnings",
"numpy.polynomial.chebyshev.chebfit",
"astropy.table.Table.read",
"matplotlib.pyplot.subplots_adjust",
"numpy.polynomial.legendre.legval",
"numpy.logical_and",
"numpy.nanmedian",
"numpy.logical_or",
"os.chdir",
"numpy.sum",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((323, 356), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (346, 356), False, 'import warnings\n'), ((4237, 4302), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'clusters/Cantat-Gaudin_2018/members.fits')"], {}), "(data_dir + 'clusters/Cantat-Gaudin_2018/members.fits')\n", (4247, 4302), False, 'from astropy.table import Table, join\n'), ((4314, 4390), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'clusters/cluster_tails/members_open_gaia_tails.fits')"], {}), "(data_dir + 'clusters/cluster_tails/members_open_gaia_tails.fits')\n", (4324, 4390), False, 'from astropy.table import Table, join\n'), ((4513, 4580), 'numpy.in1d', 'np.in1d', (["tails_data['source_id']", "CG_data['source_id']"], {'invert': '(True)'}), "(tails_data['source_id'], CG_data['source_id'], invert=True)\n", (4520, 4580), True, 'import numpy as np\n'), ((5027, 5051), 'os.chdir', 'chdir', (['data_dir_clusters'], {}), '(data_dir_clusters)\n', (5032, 5051), False, 'from os import chdir, system\n'), ((5071, 5103), 'glob.glob', 'glob', (['"""Cluster_orbits_GaiaDR2_*"""'], {}), "('Cluster_orbits_GaiaDR2_*')\n", (5075, 5103), False, 'from glob import glob\n'), ((426, 462), 'numpy.linspace', 'np.linspace', (['range[0]', 'range[1]', '(250)'], {}), '(range[0], range[1], 250)\n', (437, 462), True, 'import numpy as np\n'), ((476, 497), 'numpy.zeros_like', 'np.zeros_like', (['x_vals'], {}), '(x_vals)\n', (489, 497), True, 'import numpy as np\n'), ((929, 968), 'numpy.histogram', 'np.histogram', (['d'], {'bins': 'bins', 'range': 'range'}), '(d, bins=bins, range=range)\n', (941, 968), True, 'import numpy as np\n'), ((981, 1008), 'numpy.abs', 'np.abs', (['(edges[0] - edges[1])'], {}), '(edges[0] - edges[1])\n', (987, 1008), True, 'import numpy as np\n'), ((1255, 1275), 'numpy.nanstd', 'np.nanstd', (['diff[idx]'], {}), '(diff[idx])\n', (1264, 1275), True, 'import numpy as np\n'), ((1332, 1411), 'numpy.logical_or', 'np.logical_or', (['(diff < -1.0 * std_diff * sigma_low)', '(diff > std_diff * sigma_high)'], {}), '(diff < -1.0 * std_diff * sigma_low, diff > std_diff * sigma_high)\n', (1345, 1411), True, 'import numpy as np\n'), ((1458, 1491), 'numpy.logical_and', 'np.logical_and', (['idx', '(~idx_outlier)'], {}), '(idx, ~idx_outlier)\n', (1472, 1491), True, 'import numpy as np\n'), ((1746, 1761), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (1752, 1761), True, 'import numpy as np\n'), ((1787, 1802), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (1793, 1802), True, 'import numpy as np\n'), ((1874, 1894), 'numpy.nanmedian', 'np.nanmedian', (['p_data'], {}), '(p_data)\n', (1886, 1894), True, 'import numpy as np\n'), ((3046, 3072), 'numpy.nanstd', 'np.nanstd', (['(a_data - f_data)'], {}), '(a_data - f_data)\n', (3055, 3072), True, 'import numpy as np\n'), ((3829, 3895), 'getopt.getopt', 'getopt', (['argv[1:]', '""""""', "['dr3=', 'suffix=', 'flags=', 'individual=']"], {}), "(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])\n", (3835, 3895), False, 'from getopt import getopt\n'), ((4794, 4846), 'astropy.table.Table.read', 'Table.read', (["(data_dir + 'GALAH_iDR3_main_191213.fits')"], {}), "(data_dir + 'GALAH_iDR3_main_191213.fits')\n", (4804, 4846), False, 'from astropy.table import Table, join\n'), ((5109, 5127), 'os.chdir', 'chdir', (['cluster_dir'], {}), '(cluster_dir)\n', (5114, 5127), False, 'from os import chdir, 
system\n'), ((5199, 5208), 'glob.glob', 'glob', (['"""*"""'], {}), "('*')\n", (5203, 5208), False, 'from glob import glob\n'), ((14816, 14827), 'os.chdir', 'chdir', (['""".."""'], {}), "('..')\n", (14821, 14827), False, 'from os import chdir, system\n'), ((1689, 1708), 'numpy.isfinite', 'np.isfinite', (['p_data'], {}), '(p_data)\n', (1700, 1708), True, 'import numpy as np\n'), ((1710, 1729), 'numpy.isfinite', 'np.isfinite', (['a_data'], {}), '(a_data)\n', (1721, 1729), True, 'import numpy as np\n'), ((2824, 2839), 'numpy.sum', 'np.sum', (['idx_fit'], {}), '(idx_fit)\n', (2830, 2839), True, 'import numpy as np\n'), ((3230, 3286), 'numpy.polynomial.chebyshev.chebval', 'np.polynomial.chebyshev.chebval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (3261, 3286), True, 'import numpy as np\n'), ((3328, 3382), 'numpy.polynomial.legendre.legval', 'np.polynomial.legendre.legval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (3357, 3382), True, 'import numpy as np\n'), ((5740, 5754), 'os.chdir', 'chdir', (['sub_dir'], {}), '(sub_dir)\n', (5745, 5754), False, 'from os import chdir, system\n'), ((7540, 7551), 'os.chdir', 'chdir', (['""".."""'], {}), "('..')\n", (7545, 7551), False, 'from os import chdir, system\n'), ((651, 693), 'scipy.stats.norm.pdf', 'gauss_norm.pdf', (['x_vals'], {'loc': 'd_m', 'scale': 'd_s'}), '(x_vals, loc=d_m, scale=d_s)\n', (665, 693), True, 'from scipy.stats import norm as gauss_norm\n'), ((758, 775), 'numpy.nansum', 'np.nansum', (['y_vals'], {}), '(y_vals)\n', (767, 775), True, 'import numpy as np\n'), ((811, 828), 'numpy.nanmax', 'np.nanmax', (['y_vals'], {}), '(y_vals)\n', (820, 828), True, 'import numpy as np\n'), ((1053, 1071), 'numpy.nanmax', 'np.nanmax', (['heights'], {}), '(heights)\n', (1062, 1071), True, 'import numpy as np\n'), ((2005, 2092), 'numpy.polynomial.chebyshev.chebfit', 'np.polynomial.chebyshev.chebfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit],\n order)\n', (2036, 2092), True, 'import numpy as np\n'), ((2110, 2166), 'numpy.polynomial.chebyshev.chebval', 'np.polynomial.chebyshev.chebval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (2141, 2166), True, 'import numpy as np\n'), ((2214, 2299), 'numpy.polynomial.legendre.legfit', 'np.polynomial.legendre.legfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit],\n order)\n', (2243, 2299), True, 'import numpy as np\n'), ((2317, 2371), 'numpy.polynomial.legendre.legval', 'np.polynomial.legendre.legval', (['(p_data - p_offset)', 'coef'], {}), '(p_data - p_offset, coef)\n', (2346, 2371), True, 'import numpy as np\n'), ((2418, 2480), 'numpy.polyfit', 'np.polyfit', (['(p_data[idx_fit] - p_offset)', 'a_data[idx_fit]', 'order'], {}), '(p_data[idx_fit] - p_offset, a_data[idx_fit], order)\n', (2428, 2480), True, 'import numpy as np\n'), ((3423, 3438), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (3432, 3438), True, 'import numpy as np\n'), ((5501, 5523), 'numpy.array', 'np.array', (['source_id_cg'], {}), '(source_id_cg)\n', (5509, 5523), True, 'import numpy as np\n'), ((5578, 5602), 'numpy.array', 'np.array', (['source_id_tail'], {}), '(source_id_tail)\n', (5586, 5602), True, 'import numpy as np\n'), ((5790, 5858), 'astropy.table.Table.read', 'Table.read', (['"""members_init_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('members_init_galah.csv', format='ascii', 
delimiter='\\t')\n", (5800, 5858), False, 'from astropy.table import Table, join\n'), ((5882, 5936), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_init['source_id']"], {}), "(cannon_data['source_id'], g_init['source_id'])\n", (5889, 5936), True, 'import numpy as np\n'), ((6046, 6118), 'astropy.table.Table.read', 'Table.read', (['"""possible_ejected-step1.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_ejected-step1.csv', format='ascii', delimiter='\\t')\n", (6056, 6118), False, 'from astropy.table import Table, join\n'), ((6138, 6216), 'astropy.table.Table.read', 'Table.read', (['"""possible_ejected-step1_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_ejected-step1_galah.csv', format='ascii', delimiter='\\t')\n", (6148, 6216), False, 'from astropy.table import Table, join\n'), ((6715, 6767), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_in['source_id']"], {}), "(cannon_data['source_id'], g_in['source_id'])\n", (6722, 6767), True, 'import numpy as np\n'), ((7097, 7175), 'astropy.table.Table.read', 'Table.read', (['"""possible_outside-step1_galah.csv"""'], {'format': '"""ascii"""', 'delimiter': '"""\t"""'}), "('possible_outside-step1_galah.csv', format='ascii', delimiter='\\t')\n", (7107, 7175), False, 'from astropy.table import Table, join\n'), ((7406, 7459), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "g_out['source_id']"], {}), "(cannon_data['source_id'], g_out['source_id'])\n", (7413, 7459), True, 'import numpy as np\n'), ((9017, 9071), 'matplotlib.pyplot.subplots', 'plt.subplots', (['y_cols_fig', 'x_cols_fig'], {'figsize': '(15, 10)'}), '(y_cols_fig, x_cols_fig, figsize=(15, 10))\n', (9029, 9071), True, 'import matplotlib.pyplot as plt\n'), ((11756, 11790), 'numpy.isfinite', 'np.isfinite', (['cannon_data[teff_col]'], {}), '(cannon_data[teff_col])\n', (11767, 11790), True, 'import numpy as np\n'), ((11956, 11988), 'numpy.logical_and', 'np.logical_and', (['idx_out', 'idx_val'], {}), '(idx_out, idx_val)\n', (11970, 11988), True, 'import numpy as np\n'), ((12010, 12043), 'numpy.logical_and', 'np.logical_and', (['idx_init', 'idx_val'], {}), '(idx_init, idx_val)\n', (12024, 12043), True, 'import numpy as np\n'), ((12065, 12096), 'numpy.logical_and', 'np.logical_and', (['idx_in', 'idx_val'], {}), '(idx_in, idx_val)\n', (12079, 12096), True, 'import numpy as np\n'), ((12118, 12151), 'numpy.logical_and', 'np.logical_and', (['idx_tail', 'idx_val'], {}), '(idx_tail, idx_val)\n', (12132, 12151), True, 'import numpy as np\n'), ((14564, 14658), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.97)', 'bottom': '(0.02)', 'left': '(0.04)', 'right': '(0.98)', 'hspace': '(0.3)', 'wspace': '(0.3)'}), '(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=\n 0.3, wspace=0.3)\n', (14583, 14658), True, 'import matplotlib.pyplot as plt\n'), ((14691, 14787), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png')"], {'dpi': '(250)'}), "('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix +\n '.png', dpi=250)\n", (14702, 14787), True, 'import matplotlib.pyplot as plt\n'), ((14796, 14810), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14805, 14810), True, 'import matplotlib.pyplot as plt\n'), ((598, 621), 'numpy.isfinite', 'np.isfinite', (['[d_m, d_s]'], {}), '([d_m, d_s])\n', (609, 621), True, 'import numpy as np\n'), ((2502, 2517), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (2511, 
2517), True, 'import numpy as np\n'), ((6312, 6404), 'numpy.logical_and', 'np.logical_and', (["(g_in_all['time_in_cluster'] >= 1.0)", "(g_in_all['in_cluster_prob'] >= 68.0)"], {}), "(g_in_all['time_in_cluster'] >= 1.0, g_in_all[\n 'in_cluster_prob'] >= 68.0)\n", (6326, 6404), True, 'import numpy as np\n'), ((6576, 6655), 'numpy.logical_and', 'np.logical_and', (["(g_in['time_in_cluster'] >= 1.0)", "(g_in['in_cluster_prob'] >= 68.0)"], {}), "(g_in['time_in_cluster'] >= 1.0, g_in['in_cluster_prob'] >= 68.0)\n", (6590, 6655), True, 'import numpy as np\n'), ((7265, 7341), 'numpy.logical_and', 'np.logical_and', (["(g_out['time_in_cluster'] <= 0)", "(g_out['in_cluster_prob'] <= 0)"], {}), "(g_out['time_in_cluster'] <= 0, g_out['in_cluster_prob'] <= 0)\n", (7279, 7341), True, 'import numpy as np\n'), ((7564, 7580), 'numpy.sum', 'np.sum', (['idx_init'], {}), '(idx_init)\n', (7570, 7580), True, 'import numpy as np\n'), ((7589, 7603), 'numpy.sum', 'np.sum', (['idx_in'], {}), '(idx_in)\n', (7595, 7603), True, 'import numpy as np\n'), ((7612, 7627), 'numpy.sum', 'np.sum', (['idx_out'], {}), '(idx_out)\n', (7618, 7627), True, 'import numpy as np\n'), ((9449, 9478), 'numpy.isfinite', 'np.isfinite', (['cannon_data[col]'], {}), '(cannon_data[col])\n', (9460, 9478), True, 'import numpy as np\n'), ((9613, 9645), 'numpy.logical_and', 'np.logical_and', (['idx_out', 'idx_val'], {}), '(idx_out, idx_val)\n', (9627, 9645), True, 'import numpy as np\n'), ((9671, 9704), 'numpy.logical_and', 'np.logical_and', (['idx_init', 'idx_val'], {}), '(idx_init, idx_val)\n', (9685, 9704), True, 'import numpy as np\n'), ((9730, 9761), 'numpy.logical_and', 'np.logical_and', (['idx_in', 'idx_val'], {}), '(idx_in, idx_val)\n', (9744, 9761), True, 'import numpy as np\n'), ((9787, 9823), 'numpy.logical_and', 'np.logical_and', (['idx_cg_memb', 'idx_val'], {}), '(idx_cg_memb, idx_val)\n', (9801, 9823), True, 'import numpy as np\n'), ((9849, 9882), 'numpy.logical_and', 'np.logical_and', (['idx_tail', 'idx_val'], {}), '(idx_tail, idx_val)\n', (9863, 9882), True, 'import numpy as np\n'), ((11841, 11890), 'numpy.logical_and', 'np.logical_and', (['idx_val', '(cannon_data[q_flag] == 0)'], {}), '(idx_val, cannon_data[q_flag] == 0)\n', (11855, 11890), True, 'import numpy as np\n'), ((12957, 12971), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (12963, 12971), True, 'import numpy as np\n'), ((13352, 13366), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (13358, 13366), True, 'import numpy as np\n'), ((13368, 13382), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (13374, 13382), True, 'import numpy as np\n'), ((13384, 13398), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (13390, 13398), True, 'import numpy as np\n'), ((14097, 14111), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (14103, 14111), True, 'import numpy as np\n'), ((14344, 14358), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (14350, 14358), True, 'import numpy as np\n'), ((14360, 14374), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (14366, 14374), True, 'import numpy as np\n'), ((14376, 14390), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (14382, 14390), True, 'import numpy as np\n'), ((6875, 6930), 'numpy.in1d', 'np.in1d', (["cannon_data['source_id']", "CG_data['source_id']"], {}), "(cannon_data['source_id'], CG_data['source_id'])\n", (6882, 6930), True, 'import numpy as np\n'), ((9537, 9586), 'numpy.logical_and', 'np.logical_and', (['idx_val', '(cannon_data[q_flag] == 0)'], {}), '(idx_val, cannon_data[q_flag] 
== 0)\n', (9551, 9586), True, 'import numpy as np\n'), ((11052, 11066), 'numpy.sum', 'np.sum', (['idx_u5'], {}), '(idx_u5)\n', (11058, 11066), True, 'import numpy as np\n'), ((11395, 11409), 'numpy.sum', 'np.sum', (['idx_u1'], {}), '(idx_u1)\n', (11401, 11409), True, 'import numpy as np\n'), ((11411, 11425), 'numpy.sum', 'np.sum', (['idx_u2'], {}), '(idx_u2)\n', (11417, 11425), True, 'import numpy as np\n'), ((11427, 11441), 'numpy.sum', 'np.sum', (['idx_u3'], {}), '(idx_u3)\n', (11433, 11441), True, 'import numpy as np\n'), ((11125, 11155), 'numpy.logical_and', 'np.logical_and', (['idx_u3', 'idx_u5'], {}), '(idx_u3, idx_u5)\n', (11139, 11155), True, 'import numpy as np\n')]
|
import argparse
import multiprocessing
import os
import random
import numpy as np
from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR
from models import SumOfBetaEce
random.seed(2020)
num_cores = multiprocessing.cpu_count()
NUM_BINS = 10
NUM_RUNS = 100
N_list = [100, 200, 500, 1000, 2000, 5000, 10000]
OUTPUT_DIR = RESULTS_DIR + "bayesian_reliability_comparison/"
def main(args) -> None:
# load data
categories, observations, confidences, idx2category, category2idx, labels = prepare_data(
DATAFILE_LIST[args.dataset], False)
# train a ground_truth ece model
if args.ground_truth_type == 'bayesian':
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
else:
ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=1e-3)
ground_truth_model.update_batch(confidences, observations)
results = np.zeros((args.num_runs, len(N_list), 5))
for run_id in range(args.num_runs):
tmp = list(zip(confidences, observations))
random.shuffle(tmp)
confidences, observations = zip(*tmp)
model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
for i in range(len(N_list)):
tmp = 0 if i == 0 else N_list[i - 1]
model.update_batch(confidences[tmp: N_list[i]], observations[tmp: N_list[i]])
results[run_id, i, 0] = N_list[i]
results[run_id, i, 1] = model.eval
results[run_id, i, 2] = model.frequentist_eval
results[run_id, i, 3] = model.calibration_estimation_error(ground_truth_model, args.weight_type)
results[run_id, i, 4] = model.frequentist_calibration_estimation_error(ground_truth_model, args.weight_type)
results_mean = np.mean(results, axis=0)
results_variance = np.std(results, axis=0)
    # OUTPUT_DIR is a module-level constant; without this declaration the augmented
    # assignment below would make the name local to main() and every later read of
    # OUTPUT_DIR in this function would raise UnboundLocalError.
    global OUTPUT_DIR
    if args.weight_type == 'online':
        OUTPUT_DIR += "online_weights/"

    # create the output directory if it does not already exist
    try:
        os.stat(OUTPUT_DIR)
    except OSError:
        os.mkdir(OUTPUT_DIR)
if args.ground_truth_type == 'frequentist':
filename_mean = OUTPUT_DIR + "frequentist_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
filename_std = OUTPUT_DIR + "frequentist_ground_truth_%s_pseudocount%d_std.csv" % (
args.dataset, args.pseudocount)
else:
filename_mean = OUTPUT_DIR + "bayesian_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
filename_std = OUTPUT_DIR + "bayesian_ground_truth_%s_pseudocount%d_std.csv" % (
args.dataset, args.pseudocount)
header = 'N, bayesian_ece, frequentist_ece, bayesian_estimation_error, frequentist_estimation_error'
np.savetxt(filename_mean, results_mean, delimiter=',', header=header)
np.savetxt(filename_std, results_variance, delimiter=',', header=header)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, default='cifar100', help='input dataset')
parser.add_argument('-pseudocount', type=int, default=1, help='strength of prior')
parser.add_argument('-ground_truth_type', type=str, default='bayesian',
help='compute ground truth in a Bayesian or frequentist way, bayesian or frequentist')
parser.add_argument('-weight_type', type=str, default='pool',
help='weigh each bin with all data or only data seen so far, online or pool')
parser.add_argument('--num_runs', type=int, default=NUM_RUNS, help='number of runs')
parser.add_argument('--num_bins', type=int, default=NUM_BINS, help='number of bins in reliability diagram')
args, _ = parser.parse_known_args()
if args.dataset not in DATASET_LIST:
raise ValueError("%s is not in DATASET_LIST." % args.dataset)
main(args)
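# Example invocation (a hypothetical run; the script file name is an assumption, while
# 'cifar100' is the argparse default and must be present in DATASET_LIST/DATAFILE_LIST):
#   python bayesian_reliability_comparison.py cifar100 -pseudocount 2 \
#       -ground_truth_type bayesian -weight_type pool --num_runs 100 --num_bins 10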
|
[
"data_utils.prepare_data",
"numpy.mean",
"random.shuffle",
"argparse.ArgumentParser",
"multiprocessing.cpu_count",
"random.seed",
"os.mkdir",
"numpy.savetxt",
"numpy.std",
"os.stat",
"models.SumOfBetaEce"
] |
[((195, 212), 'random.seed', 'random.seed', (['(2020)'], {}), '(2020)\n', (206, 212), False, 'import random\n'), ((225, 252), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (250, 252), False, 'import multiprocessing\n'), ((517, 565), 'data_utils.prepare_data', 'prepare_data', (['DATAFILE_LIST[args.dataset]', '(False)'], {}), '(DATAFILE_LIST[args.dataset], False)\n', (529, 565), False, 'from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR\n'), ((1798, 1822), 'numpy.mean', 'np.mean', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (1805, 1822), True, 'import numpy as np\n'), ((1846, 1869), 'numpy.std', 'np.std', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (1852, 1869), True, 'import numpy as np\n'), ((2701, 2770), 'numpy.savetxt', 'np.savetxt', (['filename_mean', 'results_mean'], {'delimiter': '""","""', 'header': 'header'}), "(filename_mean, results_mean, delimiter=',', header=header)\n", (2711, 2770), True, 'import numpy as np\n'), ((2775, 2847), 'numpy.savetxt', 'np.savetxt', (['filename_std', 'results_variance'], {'delimiter': '""","""', 'header': 'header'}), "(filename_std, results_variance, delimiter=',', header=header)\n", (2785, 2847), True, 'import numpy as np\n'), ((2891, 2916), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2914, 2916), False, 'import argparse\n'), ((686, 752), 'models.SumOfBetaEce', 'SumOfBetaEce', ([], {'num_bins': 'args.num_bins', 'pseudocount': 'args.pseudocount'}), '(num_bins=args.num_bins, pseudocount=args.pseudocount)\n', (698, 752), False, 'from models import SumOfBetaEce\n'), ((792, 847), 'models.SumOfBetaEce', 'SumOfBetaEce', ([], {'num_bins': 'args.num_bins', 'pseudocount': '(0.001)'}), '(num_bins=args.num_bins, pseudocount=0.001)\n', (804, 847), False, 'from models import SumOfBetaEce\n'), ((1068, 1087), 'random.shuffle', 'random.shuffle', (['tmp'], {}), '(tmp)\n', (1082, 1087), False, 'import random\n'), ((1151, 1217), 'models.SumOfBetaEce', 'SumOfBetaEce', ([], {'num_bins': 'args.num_bins', 'pseudocount': 'args.pseudocount'}), '(num_bins=args.num_bins, pseudocount=args.pseudocount)\n', (1163, 1217), False, 'from models import SumOfBetaEce\n'), ((1965, 1984), 'os.stat', 'os.stat', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (1972, 1984), False, 'import os\n'), ((2005, 2025), 'os.mkdir', 'os.mkdir', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (2013, 2025), False, 'import os\n')]
|
from sigvisa.learn.train_coda_models import get_shape_training_data
import numpy as np
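# NOTE: np.float (used below) has been deprecated since NumPy 1.20 and removed in 1.24;
# on current NumPy the builtin float("inf") or np.inf would be needed instead.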
X, y, evids = get_shape_training_data(runid=4, site="AS12", chan="SHZ", band="freq_2.0_3.0", phases=["P",], target="amp_transfer", max_acost=np.float("inf"), min_amp=-2)
np.savetxt("X.txt", X)
np.savetxt("y.txt", y)
np.savetxt("evids.txt", evids)
|
[
"numpy.float",
"numpy.savetxt"
] |
[((258, 280), 'numpy.savetxt', 'np.savetxt', (['"""X.txt"""', 'X'], {}), "('X.txt', X)\n", (268, 280), True, 'import numpy as np\n'), ((281, 303), 'numpy.savetxt', 'np.savetxt', (['"""y.txt"""', 'y'], {}), "('y.txt', y)\n", (291, 303), True, 'import numpy as np\n'), ((304, 334), 'numpy.savetxt', 'np.savetxt', (['"""evids.txt"""', 'evids'], {}), "('evids.txt', evids)\n", (314, 334), True, 'import numpy as np\n'), ((229, 244), 'numpy.float', 'np.float', (['"""inf"""'], {}), "('inf')\n", (237, 244), True, 'import numpy as np\n')]
|
import functools
import numpy as np
import math
import argparse
import ags_solver
import go_problems
import nlopt
import sys
from Simple import SimpleTuner
import itertools
from scipy.spatial import Delaunay
from scipy.optimize import differential_evolution
from scipy.optimize import basinhopping
from sdaopt import sda
from stochopy import Evolutionary
from pyOpt import Optimization
from pyOpt import MIDACO
import pyOpt
from shgo import shgo
from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass
from benchmark_tools.plot import plot_cmcs
from benchmark_tools.stats import save_stats, compute_stats
class AGSWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01, mixedFast=False):
params = self.class_name2params(class_name)
params.mixedFastMode = mixedFast
if dist_stop:
params.eps = 0
params.itersLimit = max_iters
self.solver = ags_solver.Solver()
self.solver.SetParameters(params)
self.dist_stop = dist_stop
self.eps = eps
def class_name2params(self, name):
params = ags_solver.Parameters()
if 'grish' in name:
params.r = 3
elif 'gklss2' in name:
params.r = 4.6
elif 'gklsh2' in name:
params.r = 6.5
elif 'gklss3' in name:
params.r = 3.7
elif 'gklsh3' in name:
params.r = 4.4
elif 'gklss4' in name:
params.r = 4.7
elif 'gklsh4' in name:
params.r = 4.9
elif 'gklss5' in name:
params.r = 4
params.evolventDensity = 10
elif 'gklsh5' in name:
params.r = 4
params.evolventDensity = 10
return params
def Solve(self, problem):
self.solver.SetProblem([lambda x: problem.Calculate(x)], *problem.GetBounds())
#self.solver.SetProblem(problem)
if not self.dist_stop:
point, val, idx = self.solver.Solve()
else:
opt_pt = np.array(problem.GetOptimumPoint())
point, val, idx = self.solver.Solve(lambda x: np.linalg.norm(np.array(x)-opt_pt, np.inf) < self.eps)
#calcCounters = self.solver.GetCalculationsStatistics()
calcCounters = problem.GetCalculationsStatistics()
return point, val, calcCounters
class SDAWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def Solve(self, problem):
lb, ub = problem.GetBounds()
ret = sda(lambda x: problem.Calculate(x), None, bounds=list(zip(lb, ub)), \
seed=100, maxfun=self.max_iters, visit=2.72, maxiter=self.max_iters)
n_evals = problem.GetCalculationsStatistics()
return ret.x, ret.fun, n_evals
class SCBasinhoppingWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def Solve(self, problem):
lb, ub = problem.GetBounds()
#pop_size = self.class_name2params(self.class_name)
class MyBounds(object):
def __init__(self, xmax=[1.1,1.1], xmin=[-1.1,-1.1] ):
self.xmax = np.array(xmax)
self.xmin = np.array(xmin)
def __call__(self, **kwargs):
x = kwargs["x_new"]
tmax = bool(np.all(x <= self.xmax))
tmin = bool(np.all(x >= self.xmin))
return tmax and tmin
x0 = [.5]*problem.GetDimension()
result = \
basinhopping(lambda x: problem.Calculate(x), x0, accept_test=MyBounds(ub, lb), seed=100, T=10, stepsize=0.3)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
class SCDEWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.class_name = class_name
def class_name2params(self, name):
if 'grish' in name:
popsize = 60
elif 'gklss2' in name:
popsize = 60
elif 'gklsh2' in name:
popsize = 60
elif 'gklss3' in name:
popsize = 70
elif 'gklsh3' in name:
popsize = 80
elif 'gklss4' in name:
popsize = 90
elif 'gklsh4' in name:
popsize = 100
elif 'gklss5' in name:
popsize = 120
elif 'gklsh5' in name:
popsize = 140
return popsize
def Solve(self, problem):
lb, ub = problem.GetBounds()
bounds = [(l, u) for l, u in zip(lb, ub)]
pop_size = self.class_name2params(self.class_name)
result = \
differential_evolution(
lambda x: problem.Calculate(x), bounds, mutation=(1.1,1.9),
tol=1e-12, maxiter=int(float(self.max_iters) / (pop_size*problem.GetDimension())), popsize=pop_size, disp=False, seed=100)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
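# NOTE: PyEvolveWrapper below depends on the pyevolve package (G1DList, GSimpleGA,
# Selectors, Initializators, Mutators, Consts), none of which are imported at the top
# of this file. A sketch of the import it would need, assuming pyevolve is installed
# (the wrapper is also not registered in the `algos` dict further down):
#   from pyevolve import G1DList, GSimpleGA, Selectors, Initializators, Mutators, Consts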
class PyEvolveWrapper(Solver):
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
def Solve(self, problem):
lb, ub = problem.GetBounds()
# Genome instance
genome = G1DList.G1DList(2)
genome.setParams(rangemin=lb[0], rangemax=ub[0], bestRawScore=-100, roundDecimal=2)
genome.initializator.set(Initializators.G1DListInitializatorReal)
genome.mutator.set(Mutators.G1DListMutatorRealGaussian)
# The evaluator function (objective function)
genome.evaluator.set(lambda x: problem.Calculate(x) + 100)
# Genetic Algorithm Instance
ga = GSimpleGA.GSimpleGA(genome)
ga.selector.set(Selectors.GRouletteWheel)
ga.minimax = Consts.minimaxType["minimize"]
ga.setGenerations(5000)
ga.setMutationRate(0.05)
ga.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
# Do the evolution, with stats dump
# frequency of 10 generations
ga.evolve(freq_stats=100)
# Best individual
best = ga.bestIndividual()
print ("\nBest individual score: %.2f" % (best.score - 100,))
print (best)
from bayes_opt import BayesianOptimization
class BOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
def Solve(self, problem):
lb, ub = problem.GetBounds()
bo = BayesianOptimization(lambda x, y: -problem.Calculate([x, y]),
{'x': (lb[0], ub[0]), 'y': (lb[1], ub[1])})
bo.maximize(init_points=5, n_iter=20, kappa=1.5)
n_evals = problem.GetCalculationsStatistics()
opt_val = -bo.res['max']['max_val']
opt_point = [bo.res['max']['max_params']['x'], bo.res['max']['max_params']['y']]
return opt_point, opt_val, n_evals
class SimpleWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.exploration = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
return 0.1
elif 'gklss2' in name:
return 0.15
elif 'gklsh2' in name:
return 0.15
elif 'gklss3' in name:
return 0.15
elif 'gklsh3' in name:
return 0.25
elif 'gklss4' in name:
return 0.2
elif 'gklsh4' in name:
return 0.25
def Solve(self, problem):
objective_function = lambda x: -problem.Calculate(x)
lb, ub = problem.GetBounds()
opt_pt = problem.GetOptimumPoint()
bounds = [[l, u] for l, u in zip(lb, ub)]
points = np.array([point for point in itertools.product(*bounds)])
tri = Delaunay(points)
optimization_domain_vertices = points[tri.simplices]
exploration = self.exploration # optional, default 0.15
tuner = SimpleTuner(optimization_domain_vertices, objective_function, \
exploration_preference=exploration,
stop_criterion=lambda x:np.linalg.norm(np.array(x)-opt_pt, np.inf) < self.eps)
tuner.optimize(self.max_iters)
opt_val, opt_point = tuner.get_best()
#tuner.plot() # only works in 2D
n_evals = problem.GetCalculationsStatistics()
return opt_point, -opt_val, n_evals
class NLOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, method=nlopt.GD_STOGO, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.method = method
self.max_iters = max_iters
self.pop_size = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
popsize = 150
elif 'gklss2' in name:
popsize = 200
elif 'gklsh2' in name:
popsize = 400
elif 'gklss3' in name:
popsize = 1000
elif 'gklsh3' in name:
popsize = 2000
elif 'gklss4' in name:
popsize = 8000
elif 'gklsh4' in name:
popsize = 16000
elif 'gklss5' in name:
popsize = 25000
elif 'gklsh5' in name:
popsize = 30000
return popsize
def Solve(self, problem):
lb, ub = problem.GetBounds()
self.opt = nlopt.opt(self.method, problem.GetDimension())
self.opt.set_local_optimizer(nlopt.opt(nlopt.LN_SBPLX, problem.GetDimension()))
self.opt.set_lower_bounds(lb)
self.opt.set_upper_bounds(ub)
self.opt.set_min_objective(lambda x, grad: problem.Calculate(x))
self.opt.set_maxeval(self.max_iters)
self.opt.set_xtol_rel(1e-13)
if self.method == nlopt.GN_CRS2_LM:
self.opt.set_population(self.pop_size)
x = self.opt.optimize([.5]*problem.GetDimension())
minf = self.opt.last_optimum_value()
n_evals = problem.GetCalculationsStatistics()
return x, minf, n_evals
class StochOpyWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
self.popsize = self.class_name2params(class_name)
def class_name2params(self, name):
if 'grish' in name:
popsize = 60
elif 'gklss2' in name:
popsize = 60
elif 'gklsh2' in name:
popsize = 60
elif 'gklss3' in name:
popsize = 70
elif 'gklsh3' in name:
popsize = 80
elif 'gklss4' in name:
popsize = 90
elif 'gklsh4' in name:
popsize = 100
elif 'gklss5' in name:
popsize = 120
elif 'gklsh5' in name:
popsize = 140
return popsize
def Solve(self, problem):
objective_function = lambda x: 50 + problem.Calculate(x)
lb, ub = problem.GetBounds()
ea = Evolutionary(objective_function, lower=lb, upper=ub, popsize=self.popsize, \
max_iter=int(self.max_iters/self.popsize), eps1=1e-16, eps2=1e-16)
xopt, gfit = ea.optimize(solver='cpso', sync=False, CR=0.4, F=0.5)
n_evals = problem.GetCalculationsStatistics()
return xopt, gfit, n_evals
class PyOptWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
def Solve(self, problem):
objective_function = lambda x: [problem.Calculate(x), 0, 0]
lb, ub = problem.GetBounds()
opt_prob = pyOpt.Optimization('Problem', objective_function)
opt_prob.addObj('f')
for i in range(problem.GetDimension()):
opt_prob.addVar('x'+str(i),'c',lower=lb[i],upper=ub[i],value=(lb[i] + ub[i])/2.)
midaco_none = MIDACO(pll_type=None)
midaco_none.setOption('IPRINT',-1)
midaco_none.setOption('ISEED', 100)
midaco_none.setOption('MAXEVAL',self.max_iters)
midaco_none.setOption('FOCUS', -4)
fstr, xstr, inform = midaco_none(opt_prob)
n_evals = problem.GetCalculationsStatistics()
return xstr, fstr[0], n_evals
class SHGOWrapper:
def __init__(self, dist_stop, max_iters, class_name, eps=0.01):
self.dist_stop = dist_stop
self.eps = eps
self.max_iters = max_iters
def Solve(self, problem):
objective_function = lambda x: problem.Calculate(x)
        bounds = list(zip(*problem.GetBounds()))  # materialize the bounds; zip() is a single-use iterator in Python 3
opts = {'maxfev': self.max_iters}
result = shgo(objective_function, bounds, options=opts)
n_evals = problem.GetCalculationsStatistics()
return result.x, result.fun, n_evals
algos = {'scd': SCDEWrapper, 'ags': AGSWrapper,
'agsd': functools.partial(AGSWrapper, mixedFast=True),
'direct': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT),
'directl': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT_L),
'stogo': functools.partial(NLOptWrapper, method=nlopt.GD_STOGO),
'mlsl': functools.partial(NLOptWrapper, method=nlopt.G_MLSL_LDS),
'crs': functools.partial(NLOptWrapper, method=nlopt.GN_CRS2_LM),
'simple': SimpleWrapper, 'scb': SCBasinhoppingWrapper,
'sda': SDAWrapper, 'stochopy': StochOpyWrapper, 'shgo': SHGOWrapper,
'pyopt': PyOptWrapper}
algo2cature = {'scd': 'Scipy DE', 'ags': 'AGS', 'direct': 'DIRECT', 'agsd': 'AGSd',
'directl': 'DIRECTl', 'simple': 'Simple',
'stogo': 'StoGO', 'mlsl': 'MLSL', 'crs':'CRS', 'scb': 'Scipy B-H',
'sda': 'SDA', 'stochopy': 'Stochopy', 'pysot': 'PySOT', 'pyopt': 'PyOpt', 'shgo': 'SHGO'}
serg_eps = {2: 0.01, 3: 0.01, 4: math.pow(1e-6, 1./4), 5: math.pow(1e-7, 1./5)}
def main(args):
wrapper_class = algos[args.algo]
if args.problems_class == 'grish':
problems = GrishClass()
else:
assert args.problems_dim > 1 and args.problems_dim < 6
if args.problems_class == 'gklss':
problems = GKLSClass(args.problems_dim, go_problems.GKLSClass.Simple)
else:
problems = GKLSClass(args.problems_dim, go_problems.GKLSClass.Hard)
eps = 0.01
if args.serg_eps:
eps = serg_eps[args.problems_dim]
wrapper = wrapper_class(args.dist_stop, args.max_iters, args.problems_class+str(args.problems_dim), eps=0.01)
calc_stats, solved_status = solve_class(problems, wrapper, verbose=args.verbose, eps_check=eps)
stats = compute_stats(calc_stats, solved_status)
print('Problems solved: {}'.format(stats['num_solved']))
for i, avg in enumerate(stats['avg_calcs'][:-1]):
print('Average number of calculations of constraint #{}: {}'.format(i, avg))
print('Average number of calculations of objective: {}'.format(stats['avg_calcs'][-1]))
#plot_cmcs([stats['cmc']], captures=[algo2cature(args.algo)], show=True, filename='')
save_stats(stats, args.stats_fname, capture=algo2cature[args.algo])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Sample for AGS solver')
parser.add_argument('--max_iters', type=int, default=10000, help='limit of iterations for the method')
parser.add_argument('--problems_class', type=str, choices=['grish','gklss','gklsh'], default='grish')
parser.add_argument('--algo', type=str, choices=algos.keys(), default='scd')
parser.add_argument('--problems_dim', type=int, default=2)
parser.add_argument('--verbose', action='store_true', help='Print additional info to console')
parser.add_argument('--dist_stop', action='store_true', help='Stop algorithm then the next point is close enough to the optimum')
parser.add_argument('--serg_eps', action='store_true')
parser.add_argument('--stats_fname', type=str, default='')
main(parser.parse_args())
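# Example invocation (hypothetical script/output names; assumes the bindings for the
# chosen solver, here AGS, are installed and importable):
#   python compare_solvers.py --algo ags --problems_class gklss --problems_dim 3 \
#       --max_iters 15000 --dist_stop --stats_fname ags_gklss3.json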
|
[
"benchmark_tools.core.solve_class",
"argparse.ArgumentParser",
"benchmark_tools.core.GrishClass",
"math.pow",
"benchmark_tools.stats.save_stats",
"benchmark_tools.stats.compute_stats",
"itertools.product",
"benchmark_tools.core.GKLSClass",
"numpy.array",
"functools.partial",
"shgo.shgo",
"pyOpt.MIDACO",
"scipy.spatial.Delaunay",
"ags_solver.Solver",
"numpy.all",
"ags_solver.Parameters",
"pyOpt.Optimization"
] |
[((13158, 13203), 'functools.partial', 'functools.partial', (['AGSWrapper'], {'mixedFast': '(True)'}), '(AGSWrapper, mixedFast=True)\n', (13175, 13203), False, 'import functools\n'), ((13224, 13284), 'functools.partial', 'functools.partial', (['NLOptWrapper'], {'method': 'nlopt.GN_ORIG_DIRECT'}), '(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT)\n', (13241, 13284), False, 'import functools\n'), ((13306, 13368), 'functools.partial', 'functools.partial', (['NLOptWrapper'], {'method': 'nlopt.GN_ORIG_DIRECT_L'}), '(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT_L)\n', (13323, 13368), False, 'import functools\n'), ((13388, 13442), 'functools.partial', 'functools.partial', (['NLOptWrapper'], {'method': 'nlopt.GD_STOGO'}), '(NLOptWrapper, method=nlopt.GD_STOGO)\n', (13405, 13442), False, 'import functools\n'), ((13461, 13517), 'functools.partial', 'functools.partial', (['NLOptWrapper'], {'method': 'nlopt.G_MLSL_LDS'}), '(NLOptWrapper, method=nlopt.G_MLSL_LDS)\n', (13478, 13517), False, 'import functools\n'), ((13535, 13591), 'functools.partial', 'functools.partial', (['NLOptWrapper'], {'method': 'nlopt.GN_CRS2_LM'}), '(NLOptWrapper, method=nlopt.GN_CRS2_LM)\n', (13552, 13591), False, 'import functools\n'), ((14130, 14154), 'math.pow', 'math.pow', (['(1e-06)', '(1.0 / 4)'], {}), '(1e-06, 1.0 / 4)\n', (14138, 14154), False, 'import math\n'), ((14155, 14179), 'math.pow', 'math.pow', (['(1e-07)', '(1.0 / 5)'], {}), '(1e-07, 1.0 / 5)\n', (14163, 14179), False, 'import math\n'), ((14821, 14888), 'benchmark_tools.core.solve_class', 'solve_class', (['problems', 'wrapper'], {'verbose': 'args.verbose', 'eps_check': 'eps'}), '(problems, wrapper, verbose=args.verbose, eps_check=eps)\n', (14832, 14888), False, 'from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass\n'), ((14901, 14941), 'benchmark_tools.stats.compute_stats', 'compute_stats', (['calc_stats', 'solved_status'], {}), '(calc_stats, solved_status)\n', (14914, 14941), False, 'from benchmark_tools.stats import save_stats, compute_stats\n'), ((15330, 15397), 'benchmark_tools.stats.save_stats', 'save_stats', (['stats', 'args.stats_fname'], {'capture': 'algo2cature[args.algo]'}), '(stats, args.stats_fname, capture=algo2cature[args.algo])\n', (15340, 15397), False, 'from benchmark_tools.stats import save_stats, compute_stats\n'), ((15439, 15499), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sample for AGS solver"""'}), "(description='Sample for AGS solver')\n", (15462, 15499), False, 'import argparse\n'), ((944, 963), 'ags_solver.Solver', 'ags_solver.Solver', ([], {}), '()\n', (961, 963), False, 'import ags_solver\n'), ((1121, 1144), 'ags_solver.Parameters', 'ags_solver.Parameters', ([], {}), '()\n', (1142, 1144), False, 'import ags_solver\n'), ((8158, 8174), 'scipy.spatial.Delaunay', 'Delaunay', (['points'], {}), '(points)\n', (8166, 8174), False, 'from scipy.spatial import Delaunay\n'), ((11978, 12027), 'pyOpt.Optimization', 'pyOpt.Optimization', (['"""Problem"""', 'objective_function'], {}), "('Problem', objective_function)\n", (11996, 12027), False, 'import pyOpt\n'), ((12220, 12241), 'pyOpt.MIDACO', 'MIDACO', ([], {'pll_type': 'None'}), '(pll_type=None)\n', (12226, 12241), False, 'from pyOpt import MIDACO\n'), ((12946, 12992), 'shgo.shgo', 'shgo', (['objective_function', 'bounds'], {'options': 'opts'}), '(objective_function, bounds, options=opts)\n', (12950, 12992), False, 'from shgo import shgo\n'), ((14290, 14302), 'benchmark_tools.core.GrishClass', 'GrishClass', ([], {}), '()\n', (14300, 14302), False, 
'from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass\n'), ((14442, 14500), 'benchmark_tools.core.GKLSClass', 'GKLSClass', (['args.problems_dim', 'go_problems.GKLSClass.Simple'], {}), '(args.problems_dim, go_problems.GKLSClass.Simple)\n', (14451, 14500), False, 'from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass\n'), ((14538, 14594), 'benchmark_tools.core.GKLSClass', 'GKLSClass', (['args.problems_dim', 'go_problems.GKLSClass.Hard'], {}), '(args.problems_dim, go_problems.GKLSClass.Hard)\n', (14547, 14594), False, 'from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass\n'), ((3378, 3392), 'numpy.array', 'np.array', (['xmax'], {}), '(xmax)\n', (3386, 3392), True, 'import numpy as np\n'), ((3421, 3435), 'numpy.array', 'np.array', (['xmin'], {}), '(xmin)\n', (3429, 3435), True, 'import numpy as np\n'), ((3542, 3564), 'numpy.all', 'np.all', (['(x <= self.xmax)'], {}), '(x <= self.xmax)\n', (3548, 3564), True, 'import numpy as np\n'), ((3594, 3616), 'numpy.all', 'np.all', (['(x >= self.xmin)'], {}), '(x >= self.xmin)\n', (3600, 3616), True, 'import numpy as np\n'), ((8115, 8141), 'itertools.product', 'itertools.product', (['*bounds'], {}), '(*bounds)\n', (8132, 8141), False, 'import itertools\n'), ((2144, 2155), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2152, 2155), True, 'import numpy as np\n'), ((8487, 8498), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (8495, 8498), True, 'import numpy as np\n')]
|
from argparse import ArgumentParser
import os
import numpy as np
from joblib import dump
from mldftdat.workflow_utils import SAVE_ROOT
from mldftdat.models.gp import *
from mldftdat.data import load_descriptors, filter_descriptors
import yaml
def parse_settings(args):
fname = args.datasets_list[0]
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
with open(os.path.join(fname, 'settings.yaml'), 'r') as f:
d = yaml.load(f, Loader=yaml.Loader)
args.gg_a0 = d.get('a0')
args.gg_amin = d.get('amin')
args.gg_facmul = d.get('fac_mul')
def parse_dataset(args, i, val=False):
if val:
fname = args.validation_set[2*i]
n = int(args.validation_set[2*i+1])
else:
fname = args.datasets_list[2*i]
n = int(args.datasets_list[2*i+1])
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
X, y, rho_data = load_descriptors(fname)
if val:
# offset in case repeat datasets are used
X, y, rho_data = X[n//2+1:,:], y[n//2+1:], rho_data[:,n//2+1:]
X, y, rho, rho_data = filter_descriptors(X, y, rho_data,
tol=args.density_cutoff)
print(X.shape, n)
if args.randomize:
inds = np.arange(X.shape[0])
np.random.shuffle(inds)
X = X[inds,:]
y = y[inds]
rho = rho[inds]
rho_data = rho_data[:,inds]
return X[::n,:], y[::n], rho[::n], rho_data[:,::n]
def parse_list(lststr, T=int):
return [T(substr) for substr in lststr.split(',')]
def main():
parser = ArgumentParser(description='Trains a GP exchange model')
parser.add_argument('save_file', type=str)
parser.add_argument('feature_file', type=str,
help='serialized FeatureList object in yaml format')
parser.add_argument('datasets_list', nargs='+',
help='pairs of dataset names and inverse sampling densities')
parser.add_argument('basis', metavar='basis', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('-r', '--randomize', action='store_true')
parser.add_argument('-c', '--density-cutoff', type=float, default=1e-4)
#parser.add_argument('-m', '--model-class', type=str, default=None)
#parser.add_argument('-k', '--kernel', help='kernel initialization strategy', type=str, default=None)
parser.add_argument('-s', '--seed', help='random seed', default=0, type=int)
parser.add_argument('-vs', '--validation-set', nargs='+')
parser.add_argument('-d', '--delete-k', action='store_true',
help='Delete L (LL^T=K the kernel matrix) to save disk space. Need to refit when reloading to calculate covariance.')
parser.add_argument('--heg', action='store_true', help='HEG exact constraint')
parser.add_argument('--tail', action='store_true', help='atomic tail exact constraint')
parser.add_argument('-o', '--desc-order', default=None,
help='comma-separated list of descriptor order with no spaces. must start with 0,1.')
parser.add_argument('-l', '--length-scale', default=None,
help='comma-separated list initial length-scale guesses')
parser.add_argument('--length-scale-mul', type=float, default=1.0,
help='Used for automatic length-scale initial guess')
parser.add_argument('-a', '--agpr', action='store_true',
help='Whether to use Additive RBF. If False, use RBF')
parser.add_argument('-as', '--agpr-scale', default=None)
parser.add_argument('-ao', '--agpr-order', default=2, type=int)
parser.add_argument('-an', '--agpr-nsingle', default=1, type=int)
parser.add_argument('-x', '--xed-y-code', default='CHACHIYO', type=str)
parser.add_argument('-on', '--optimize-noise', action='store_true',
help='Whether to optimzie exponent of density noise.')
parser.add_argument('-v', '--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--suffix', default=None, type=str,
help='customize data directories with this suffix')
args = parser.parse_args()
parse_settings(args)
np.random.seed(args.seed)
feature_list = FeatureList.load(args.feature_file)
if args.length_scale is not None:
args.length_scale = parse_list(args.length_scale, T=float)
if args.agpr_scale is not None:
args.agpr_scale = parse_list(args.agpr_scale, T=float)
if args.desc_order is not None:
args.desc_order = parse_list(args.desc_order)
assert len(args.datasets_list) % 2 == 0, 'Need pairs of entries for datasets list.'
assert len(args.datasets_list) != 0, 'Need training data'
nd = len(args.datasets_list) // 2
if args.validation_set is None:
nv = 0
else:
assert len(args.validation_set) % 2 == 0, 'Need pairs of entries for datasets list.'
nv = len(args.validation_set) // 2
X, y, rho, rho_data = parse_dataset(args, 0)
for i in range(1, nd):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i)
X = np.append(X, Xn, axis=0)
y = np.append(y, yn, axis=0)
rho = np.append(rho, rhon, axis=0)
rho_data = np.append(rho_data, rho_datan, axis=1)
if nv != 0:
Xv, yv, rhov, rho_datav = parse_dataset(args, 0, val=True)
for i in range(1, nv):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i, val=True)
Xv = np.append(Xv, Xn, axis=0)
yv = np.append(yv, yn, axis=0)
rhov = np.append(rhov, rhon, axis=0)
rho_datav = np.append(rho_datav, rho_datan, axis=1)
gpcls = DFTGPR
gpr = gpcls.from_settings(X, feature_list, args)
gpr.fit(X, y, add_heg=args.heg, add_tail=args.tail)
#if args.heg:
# gpr.add_heg_limit()
print('FINAL KERNEL', gpr.gp.kernel_)
if nv != 0:
pred = gpr.xed_to_y(gpr.predict(Xv), Xv)
abserr = np.abs(pred - gpr.xed_to_y(yv, Xv))
print('MAE VAL SET', np.mean(abserr))
# Always attach the arguments to the object to keep track of settings.
gpr.args = args
if args.delete_k:
gpr.L_ = None
dump(gpr, args.save_file)
if __name__ == '__main__':
main()
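# Example invocation (hypothetical file and dataset names; each dataset name must have
# a matching descriptor directory under SAVE_ROOT and is paired with an inverse
# sampling density):
#   python train_gp.py gpr_model.joblib feature_list.yaml DATASET_A 4 DATASET_B 8 \
#       def2-qzvppd --functional PBE -a -s 42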
|
[
"numpy.mean",
"argparse.ArgumentParser",
"os.path.join",
"yaml.load",
"numpy.append",
"mldftdat.data.load_descriptors",
"mldftdat.data.filter_descriptors",
"numpy.random.seed",
"joblib.dump",
"numpy.arange",
"numpy.random.shuffle"
] |
[((390, 480), 'os.path.join', 'os.path.join', (['SAVE_ROOT', '"""DATASETS"""', 'args.functional', 'args.basis', 'args.version', 'fname'], {}), "(SAVE_ROOT, 'DATASETS', args.functional, args.basis, args.\n version, fname)\n", (402, 480), False, 'import os\n'), ((1042, 1132), 'os.path.join', 'os.path.join', (['SAVE_ROOT', '"""DATASETS"""', 'args.functional', 'args.basis', 'args.version', 'fname'], {}), "(SAVE_ROOT, 'DATASETS', args.functional, args.basis, args.\n version, fname)\n", (1054, 1132), False, 'import os\n'), ((1191, 1214), 'mldftdat.data.load_descriptors', 'load_descriptors', (['fname'], {}), '(fname)\n', (1207, 1214), False, 'from mldftdat.data import load_descriptors, filter_descriptors\n'), ((1374, 1433), 'mldftdat.data.filter_descriptors', 'filter_descriptors', (['X', 'y', 'rho_data'], {'tol': 'args.density_cutoff'}), '(X, y, rho_data, tol=args.density_cutoff)\n', (1392, 1433), False, 'from mldftdat.data import load_descriptors, filter_descriptors\n'), ((1863, 1919), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Trains a GP exchange model"""'}), "(description='Trains a GP exchange model')\n", (1877, 1919), False, 'from argparse import ArgumentParser\n'), ((4702, 4727), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (4716, 4727), True, 'import numpy as np\n'), ((6664, 6689), 'joblib.dump', 'dump', (['gpr', 'args.save_file'], {}), '(gpr, args.save_file)\n', (6668, 6689), False, 'from joblib import dump\n'), ((593, 625), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.Loader'}), '(f, Loader=yaml.Loader)\n', (602, 625), False, 'import yaml\n'), ((1539, 1560), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (1548, 1560), True, 'import numpy as np\n'), ((1569, 1592), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (1586, 1592), True, 'import numpy as np\n'), ((5613, 5637), 'numpy.append', 'np.append', (['X', 'Xn'], {'axis': '(0)'}), '(X, Xn, axis=0)\n', (5622, 5637), True, 'import numpy as np\n'), ((5650, 5674), 'numpy.append', 'np.append', (['y', 'yn'], {'axis': '(0)'}), '(y, yn, axis=0)\n', (5659, 5674), True, 'import numpy as np\n'), ((5689, 5717), 'numpy.append', 'np.append', (['rho', 'rhon'], {'axis': '(0)'}), '(rho, rhon, axis=0)\n', (5698, 5717), True, 'import numpy as np\n'), ((5737, 5775), 'numpy.append', 'np.append', (['rho_data', 'rho_datan'], {'axis': '(1)'}), '(rho_data, rho_datan, axis=1)\n', (5746, 5775), True, 'import numpy as np\n'), ((5967, 5992), 'numpy.append', 'np.append', (['Xv', 'Xn'], {'axis': '(0)'}), '(Xv, Xn, axis=0)\n', (5976, 5992), True, 'import numpy as np\n'), ((6006, 6031), 'numpy.append', 'np.append', (['yv', 'yn'], {'axis': '(0)'}), '(yv, yn, axis=0)\n', (6015, 6031), True, 'import numpy as np\n'), ((6047, 6076), 'numpy.append', 'np.append', (['rhov', 'rhon'], {'axis': '(0)'}), '(rhov, rhon, axis=0)\n', (6056, 6076), True, 'import numpy as np\n'), ((6097, 6136), 'numpy.append', 'np.append', (['rho_datav', 'rho_datan'], {'axis': '(1)'}), '(rho_datav, rho_datan, axis=1)\n', (6106, 6136), True, 'import numpy as np\n'), ((532, 568), 'os.path.join', 'os.path.join', (['fname', '"""settings.yaml"""'], {}), "(fname, 'settings.yaml')\n", (544, 568), False, 'import os\n'), ((6503, 6518), 'numpy.mean', 'np.mean', (['abserr'], {}), '(abserr)\n', (6510, 6518), True, 'import numpy as np\n')]
|
"""
********************************
* Created by mohammed-alaa *
********************************
Spatial Dataloader implementing sequence api from keras (defines how to load a single item)
this loads batches of images for each iteration it returns [batch_size, height, width ,3] ndarrays
"""
import copy
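import os  # os.path.join is used below; an explicit import avoids relying on the star import to re-export it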
import random
import cv2
import numpy as np
import tensorflow.keras as keras
from .UCF_splitting_kernel import *
from .helpers import get_training_augmenter, get_validation_augmenter
class SpatialSequence(keras.utils.Sequence):
def __init__(self, data_to_load, data_root_path, batch_size, is_training, augmenter):
"""get data structure to load data"""
# list of (video names,frame/max_frame,label)
self.data_to_load = copy.deepcopy(data_to_load)
self.batch_size = batch_size
self.is_training = is_training
self.augmenter = copy.deepcopy(augmenter)
self.data_root_path = data_root_path
self.video_names, self.frames, self.labels = [list(one_of_three_tuples) for one_of_three_tuples in zip(*self.data_to_load)] # three lists
def __len__(self):
"""Denotes the number of batches per epoch"""
return (len(self.video_names) + self.batch_size - 1) // self.batch_size # ceiling div
def get_actual_length(self):
"""Denotes the total number of samples"""
return len(self.video_names)
def __getitem__(self, batch_start):
"""Gets one batch"""
batch_video_names = self.video_names[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]
batch_frames = self.frames[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]
batch_y = np.array(self.labels[batch_start * self.batch_size:(batch_start + 1) * self.batch_size])
batch_x = [] # could be less or equal batch size
#
for vid_id, _ in enumerate(batch_y):
if self.is_training: # max frame is given
frame_id = random.randint(1, batch_frames[vid_id]) # random frame (one based)
else:
frame_id = batch_frames[vid_id] # just as selected
batch_x.append(
cv2.cvtColor(cv2.imread(os.path.join(self.data_root_path, "v_" + batch_video_names[vid_id], 'frame{}'.format(str(frame_id).zfill(6)) + '.jpg')), cv2.COLOR_BGR2RGB)
)
if self.is_training:
return np.array(self.augmenter.augment_images(batch_x), dtype=np.float32) / 255.0, batch_y
else:
# no label needed since (test_video_to_label mapping) (dictionary of name to label) is returned
return batch_video_names, np.array(self.augmenter.augment_images(batch_x), dtype=np.float32) / 255.0
def shuffle_and_reset(self):
"""
new data for the next epoch
"""
random.shuffle(self.data_to_load)
self.video_names, self.frames, self.labels = [list(one_of_three_tuples) for one_of_three_tuples in zip(*self.data_to_load)] # shuffle all
class SpatialDataLoader:
def __init__(self, batch_size, testing_samples_per_video, width, height, log_stream=open("/tmp/null.log", "w"), augmenter_level=1, data_root_path='./jpegs_256/', ucf_list_path='./UCF_list/', ucf_split='01'):
"""
get the mapping and initialize the augmenter
"""
self.batch_size = batch_size
self.width, self.height = width, height
self.data_root_path = data_root_path
self.testing_samples_per_video = testing_samples_per_video
self.log_stream = log_stream
# split the training and testing videos
data_util_ = DataUtil(path=ucf_list_path, split=ucf_split)
self.train_video_to_label, self.test_video_to_label = data_util_.get_train_test_video_to_label_mapping() # name without v_ or .avi and small s .. name to numeric label starts at 0
# get video frames
self.video_frame_count = data_util_.get_video_frame_count() # name without v_ or .avi and small s
self.augmenter_level = augmenter_level
def run(self):
"""
get the data structure for training and validation
"""
train_loader = self.get_training_loader()
val_loader = self.get_testing_loader()
return train_loader, val_loader, self.test_video_to_label
def get_training_data_structure(self):
"""
get the data structure for training
"""
training_data_structure = [] # list of (video names,frame/max_frame,label)
for video_name in self.train_video_to_label: # sample from the whole video frames
training_data_structure.append((video_name, self.video_frame_count[video_name], self.train_video_to_label[video_name]))
return training_data_structure
def get_testing_data_structure(self):
"""
get the data structure for validation
"""
test_data_structure = [] # list of (video names,frame/max_frame,label)
for video_name in self.test_video_to_label:
nb_frame = self.video_frame_count[video_name]
interval = nb_frame // self.testing_samples_per_video
if interval == 0: # for videos shorter than self.testing_samples_per_video
interval = 1
# range is exclusive add one to be inclusive
# 1 > self.testing_samples_per_video * interval
for frame_idx in range(1, min(self.testing_samples_per_video * interval, nb_frame) + 1, interval):
test_data_structure.append((video_name, frame_idx, self.test_video_to_label[video_name]))
return test_data_structure
def get_training_loader(self):
"""
an instance of sequence loader for spatial model for parallel dataloading using keras sequence
"""
loader = SpatialSequence(data_to_load=self.get_training_data_structure(),
data_root_path=self.data_root_path,
batch_size=self.batch_size,
is_training=True,
augmenter=get_training_augmenter(height=self.height, width=self.width, augmenter_level=self.augmenter_level),
)
print('==> Training data :', len(loader.data_to_load), 'videos', file=self.log_stream)
print('==> Training data :', len(loader.data_to_load), 'videos')
return loader
def get_testing_loader(self):
"""
an instance of sequence loader for spatial model for parallel dataloading using keras sequence
"""
loader = SpatialSequence(data_to_load=self.get_testing_data_structure(),
data_root_path=self.data_root_path,
batch_size=self.batch_size,
is_training=False,
augmenter=get_validation_augmenter(height=self.height, width=self.width),
)
print('==> Validation data :', len(loader.data_to_load), 'frames', file=self.log_stream)
print('==> Validation data :', len(loader.data_to_load), 'frames')
return loader
if __name__ == '__main__':
    # SpatialDataLoader takes no use_multiprocessing/num_workers arguments; any
    # parallel loading is handled by keras' Sequence machinery at fit time.
    data_loader = SpatialDataLoader(batch_size=64,  # data_root_path="data",
                                      ucf_split='01',
                                      testing_samples_per_video=19, width=224, height=224)
train_loader, test_loader, test_video_level_label = data_loader.run()
print(len(train_loader))
print(len(test_loader))
print(train_loader.get_actual_length())
print(test_loader.get_actual_length())
    # a SpatialSequence is indexed directly; item 0 is the first (images, labels) batch
    print(train_loader[0][0].shape, train_loader[0][1].shape)
# import tqdm
# progress = tqdm.tqdm(train_loader.get_epoch_generator(), total=len(train_loader))
# for (sampled_frame, label) in progress:
# pass
import matplotlib.pyplot as plt
# preview raw data
def preview(data, labels):
# 3 channels
fig, axeslist = plt.subplots(ncols=8, nrows=8, figsize=(10, 10))
for i, sample in enumerate(data):
axeslist.ravel()[i].imshow(data[i])
axeslist.ravel()[i].set_title(labels[i])
axeslist.ravel()[i].set_axis_off()
plt.subplots_adjust(wspace=.4, hspace=.4)
print("train sample")
    # there is no get_epoch_generator(); grab the first training batch by indexing the sequence
    batch = train_loader[0]
    print(batch[0].shape, batch[1].shape)
    print(batch[1])
    preview(batch[0], batch[1])
print("test sample") # same name will be displayed testing_samples_per_video with no shuffling
    # the validation sequence yields (video names, images); labels come from test_video_level_label
    names, images = test_loader[0]
    labels = [test_video_level_label[name] for name in names]
    print(images.shape)
    print(names, labels)
    preview(images, labels)
|
[
"random.randint",
"random.shuffle",
"numpy.array",
"copy.deepcopy",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust"
] |
[((760, 787), 'copy.deepcopy', 'copy.deepcopy', (['data_to_load'], {}), '(data_to_load)\n', (773, 787), False, 'import copy\n'), ((890, 914), 'copy.deepcopy', 'copy.deepcopy', (['augmenter'], {}), '(augmenter)\n', (903, 914), False, 'import copy\n'), ((1705, 1798), 'numpy.array', 'np.array', (['self.labels[batch_start * self.batch_size:(batch_start + 1) * self.batch_size]'], {}), '(self.labels[batch_start * self.batch_size:(batch_start + 1) * self\n .batch_size])\n', (1713, 1798), True, 'import numpy as np\n'), ((2837, 2870), 'random.shuffle', 'random.shuffle', (['self.data_to_load'], {}), '(self.data_to_load)\n', (2851, 2870), False, 'import random\n'), ((8155, 8203), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(8)', 'nrows': '(8)', 'figsize': '(10, 10)'}), '(ncols=8, nrows=8, figsize=(10, 10))\n', (8167, 8203), True, 'import matplotlib.pyplot as plt\n'), ((8404, 8447), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.4)', 'hspace': '(0.4)'}), '(wspace=0.4, hspace=0.4)\n', (8423, 8447), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2029), 'random.randint', 'random.randint', (['(1)', 'batch_frames[vid_id]'], {}), '(1, batch_frames[vid_id])\n', (2004, 2029), False, 'import random\n')]
|
# ******************************************************
## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University.
## Reuse permitted under Gnu Public License, GPL v3.
# ******************************************************
from netCDF4 import Dataset
import numpy as np
import general_path
import accuflux
import ascraster
import get_surrounding_cells
import make_np_grid
def do(mask_asc_fn, mask_id, dum_asc, logical = "EQ", mask_type='np_grid'):
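    # Build a mask for the cells of mask_asc_fn whose value compares to mask_id under
    # `logical`. The return format depends on mask_type: "rowcol" -> (row, col) tuples,
    # "index" -> flat cell indices, "latlon" -> coordinates, "np_grid" -> a boolean
    # numpy grid in which the selected cells are set to False.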
dum_mask = ascraster.create_mask(mask_asc_fn, mask_id, logical = logical, numtype=int)
mask=[]
if mask_type=="rowcol":
for i in dum_mask:
mask.append(dum_asc.get_row_col_from_index(i))
elif mask_type=="index":
for i in dum_mask:
mask.append(i)
elif mask_type=="latlon":
for i in dum_mask:
mask.append(dum_asc.get_coord_from_index(i))
elif mask_type=="np_grid":
mask = np.zeros((dum_asc.nrows, dum_asc.ncols), dtype=bool)
mask[:,:] = True
for i in dum_mask:
row, col = dum_asc.get_row_col_from_index(i)
mask[row,col]=False
return mask
|
[
"ascraster.create_mask",
"numpy.zeros"
] |
[((503, 576), 'ascraster.create_mask', 'ascraster.create_mask', (['mask_asc_fn', 'mask_id'], {'logical': 'logical', 'numtype': 'int'}), '(mask_asc_fn, mask_id, logical=logical, numtype=int)\n', (524, 576), False, 'import ascraster\n'), ((930, 982), 'numpy.zeros', 'np.zeros', (['(dum_asc.nrows, dum_asc.ncols)'], {'dtype': 'bool'}), '((dum_asc.nrows, dum_asc.ncols), dtype=bool)\n', (938, 982), True, 'import numpy as np\n')]
|
from parameters import *
from library_time import *
from paths import *
import numpy as np
import pylab as plt
import matplotlib.pyplot as mplt
mplt.rc('text', usetex=True)
mplt.rcParams.update({'font.size': 16})
import logging, getopt, sys
import time
import os
##########################################################################################
# C O N F I G U R A T I O N
##########################################################################################
# activate ylim for w
var1 = w1
var3 = w3
var5 = w5
var10 = w10
var25 = w25
mode = "w" # u or w
##########################################################################################
# M A I N
##########################################################################################
if __name__ == "__main__":
if not os.path.exists('plots'):
os.makedirs('plots')
print('Created folder plots!')
if not os.path.exists('plots/integral'):
os.makedirs('plots/integral')
print('Created folder plots/integral!')
t = np.linspace(tmin, tmax, Nt)
r = np.linspace(0,R,Nr)
Ivar1 = np.zeros(Nt)
Ivar3 = np.zeros(Nt)
Ivar5 = np.zeros(Nt)
Ivar10 = np.zeros(Nt)
Ivar25 = np.zeros(Nt)
for i in range(Nt):
# /1000000 because of units
Ivar1[i] = integrate(var1, i,r, Nt)/1000000
Ivar3[i] = integrate(var3, i,r, Nt)/1000000
Ivar5[i] = integrate(var5, i,r, Nt)/1000000
Ivar10[i] = integrate(var10, i,r, Nt)/1000000
Ivar25[i] = integrate(var25, i,r, Nt)/1000000
mplt.plot(t, Ivar1, label=r'$\alpha = 1$')
mplt.plot(t, Ivar3, label=r'$\alpha = 3$')
mplt.plot(t, Ivar5, label=r'$\alpha = 5$')
mplt.plot(t, Ivar10, label=r'$\alpha = 10$')
mplt.plot(t, Ivar25, label=r'$\alpha = 25$')
mplt.xlim(tmin, tmax)
mplt.yscale('log')
mplt.xlabel(r'$t\quad [h]$')
    mplt.ylabel(r'$\bar{' + mode + r'}\quad [\mu mol]$')
##########################################################################################
# lim for w, because some values dont make sense
mplt.ylim(1e-11, 3e2)
# lim for w, because some values dont make sense
##########################################################################################
mplt.legend(loc=1, bbox_to_anchor=(1, 0.9))
mplt.tight_layout()
mplt.savefig('plots/integral/int'+mode+'.pdf', format='pdf')
mplt.show()
|
[
"os.path.exists",
"matplotlib.pyplot.savefig",
"os.makedirs",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.show"
] |
[((145, 173), 'matplotlib.pyplot.rc', 'mplt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (152, 173), True, 'import matplotlib.pyplot as mplt\n'), ((174, 213), 'matplotlib.pyplot.rcParams.update', 'mplt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (194, 213), True, 'import matplotlib.pyplot as mplt\n'), ((1057, 1084), 'numpy.linspace', 'np.linspace', (['tmin', 'tmax', 'Nt'], {}), '(tmin, tmax, Nt)\n', (1068, 1084), True, 'import numpy as np\n'), ((1093, 1114), 'numpy.linspace', 'np.linspace', (['(0)', 'R', 'Nr'], {}), '(0, R, Nr)\n', (1104, 1114), True, 'import numpy as np\n'), ((1131, 1143), 'numpy.zeros', 'np.zeros', (['Nt'], {}), '(Nt)\n', (1139, 1143), True, 'import numpy as np\n'), ((1157, 1169), 'numpy.zeros', 'np.zeros', (['Nt'], {}), '(Nt)\n', (1165, 1169), True, 'import numpy as np\n'), ((1183, 1195), 'numpy.zeros', 'np.zeros', (['Nt'], {}), '(Nt)\n', (1191, 1195), True, 'import numpy as np\n'), ((1209, 1221), 'numpy.zeros', 'np.zeros', (['Nt'], {}), '(Nt)\n', (1217, 1221), True, 'import numpy as np\n'), ((1235, 1247), 'numpy.zeros', 'np.zeros', (['Nt'], {}), '(Nt)\n', (1243, 1247), True, 'import numpy as np\n'), ((1585, 1627), 'matplotlib.pyplot.plot', 'mplt.plot', (['t', 'Ivar1'], {'label': '"""$\\\\alpha = 1$"""'}), "(t, Ivar1, label='$\\\\alpha = 1$')\n", (1594, 1627), True, 'import matplotlib.pyplot as mplt\n'), ((1632, 1674), 'matplotlib.pyplot.plot', 'mplt.plot', (['t', 'Ivar3'], {'label': '"""$\\\\alpha = 3$"""'}), "(t, Ivar3, label='$\\\\alpha = 3$')\n", (1641, 1674), True, 'import matplotlib.pyplot as mplt\n'), ((1679, 1721), 'matplotlib.pyplot.plot', 'mplt.plot', (['t', 'Ivar5'], {'label': '"""$\\\\alpha = 5$"""'}), "(t, Ivar5, label='$\\\\alpha = 5$')\n", (1688, 1721), True, 'import matplotlib.pyplot as mplt\n'), ((1726, 1770), 'matplotlib.pyplot.plot', 'mplt.plot', (['t', 'Ivar10'], {'label': '"""$\\\\alpha = 10$"""'}), "(t, Ivar10, label='$\\\\alpha = 10$')\n", (1735, 1770), True, 'import matplotlib.pyplot as mplt\n'), ((1775, 1819), 'matplotlib.pyplot.plot', 'mplt.plot', (['t', 'Ivar25'], {'label': '"""$\\\\alpha = 25$"""'}), "(t, Ivar25, label='$\\\\alpha = 25$')\n", (1784, 1819), True, 'import matplotlib.pyplot as mplt\n'), ((1824, 1845), 'matplotlib.pyplot.xlim', 'mplt.xlim', (['tmin', 'tmax'], {}), '(tmin, tmax)\n', (1833, 1845), True, 'import matplotlib.pyplot as mplt\n'), ((1850, 1868), 'matplotlib.pyplot.yscale', 'mplt.yscale', (['"""log"""'], {}), "('log')\n", (1861, 1868), True, 'import matplotlib.pyplot as mplt\n'), ((1873, 1901), 'matplotlib.pyplot.xlabel', 'mplt.xlabel', (['"""$t\\\\quad [h]$"""'], {}), "('$t\\\\quad [h]$')\n", (1884, 1901), True, 'import matplotlib.pyplot as mplt\n'), ((1906, 1959), 'matplotlib.pyplot.ylabel', 'mplt.ylabel', (["('$\\\\bar{' + mode + '}\\\\quad [\\\\mu mol]$')"], {}), "('$\\\\bar{' + mode + '}\\\\quad [\\\\mu mol]$')\n", (1917, 1959), True, 'import matplotlib.pyplot as mplt\n'), ((2102, 2125), 'matplotlib.pyplot.ylim', 'mplt.ylim', (['(1e-11)', '(300.0)'], {}), '(1e-11, 300.0)\n', (2111, 2125), True, 'import matplotlib.pyplot as mplt\n'), ((2273, 2316), 'matplotlib.pyplot.legend', 'mplt.legend', ([], {'loc': '(1)', 'bbox_to_anchor': '(1, 0.9)'}), '(loc=1, bbox_to_anchor=(1, 0.9))\n', (2284, 2316), True, 'import matplotlib.pyplot as mplt\n'), ((2321, 2340), 'matplotlib.pyplot.tight_layout', 'mplt.tight_layout', ([], {}), '()\n', (2338, 2340), True, 'import matplotlib.pyplot as mplt\n'), ((2345, 2409), 'matplotlib.pyplot.savefig', 'mplt.savefig', (["('plots/integral/int' 
+ mode + '.pdf')"], {'format': '"""pdf"""'}), "('plots/integral/int' + mode + '.pdf', format='pdf')\n", (2357, 2409), True, 'import matplotlib.pyplot as mplt\n'), ((2410, 2421), 'matplotlib.pyplot.show', 'mplt.show', ([], {}), '()\n', (2419, 2421), True, 'import matplotlib.pyplot as mplt\n'), ((820, 843), 'os.path.exists', 'os.path.exists', (['"""plots"""'], {}), "('plots')\n", (834, 843), False, 'import os\n'), ((853, 873), 'os.makedirs', 'os.makedirs', (['"""plots"""'], {}), "('plots')\n", (864, 873), False, 'import os\n'), ((924, 956), 'os.path.exists', 'os.path.exists', (['"""plots/integral"""'], {}), "('plots/integral')\n", (938, 956), False, 'import os\n'), ((966, 995), 'os.makedirs', 'os.makedirs', (['"""plots/integral"""'], {}), "('plots/integral')\n", (977, 995), False, 'import os\n')]
|
"""
echopype data model that keeps tracks of echo data and
its connection to data files.
"""
import os
import warnings
import datetime as dt
from echopype.utils import uwa
import numpy as np
import xarray as xr
class ModelBase(object):
"""Class for manipulating echo data that is already converted to netCDF."""
def __init__(self, file_path=""):
        self.file_path = file_path  # the file_path setter below checks the file name and format
self.noise_est_range_bin_size = 5 # meters per tile for noise estimation
self.noise_est_ping_size = 30 # number of pings per tile for noise estimation
self.MVBS_range_bin_size = 5 # meters per tile for MVBS
self.MVBS_ping_size = 30 # number of pings per tile for MVBS
self.Sv = None # calibrated volume backscattering strength
self.Sv_path = None # path to save calibrated results
self.Sv_clean = None # denoised volume backscattering strength
self.TS = None # calibrated target strength
self.TS_path = None # path to save TS calculation results
self.MVBS = None # mean volume backscattering strength
self._salinity = None
self._temperature = None
self._pressure = None
self._sound_speed = None
self._sample_thickness = None
self._range = None
self._seawater_absorption = None
@property
def salinity(self):
return self._salinity
@salinity.setter
def salinity(self, sal):
self._salinity = sal
@property
def pressure(self):
return self._pressure
@pressure.setter
def pressure(self, pres):
self._pressure = pres
@property
def temperature(self):
return self._temperature
@temperature.setter
def temperature(self, t):
self._temperature = t
@property
def sample_thickness(self):
return self._sample_thickness
@sample_thickness.setter
def sample_thickness(self, sth):
self._sample_thickness = sth
@property
def range(self):
return self._range
@range.setter
def range(self, rr):
self._range = rr
@property
def seawater_absorption(self):
return self._seawater_absorption
@seawater_absorption.setter
def seawater_absorption(self, absorption):
self._seawater_absorption.values = absorption
@property
def sound_speed(self):
return self._sound_speed
@sound_speed.setter
def sound_speed(self, ss):
if isinstance(self._sound_speed, xr.DataArray):
self._sound_speed.values = ss
else:
self._sound_speed = ss
@property
def file_path(self):
return self._file_path
@file_path.setter
def file_path(self, p):
self._file_path = p
# Load netCDF groups if file format is correct
pp = os.path.basename(p)
_, ext = os.path.splitext(pp)
supported_ext_list = ['.raw', '.01A']
if ext in supported_ext_list:
print('Data file in manufacturer format, please convert to .nc first.')
elif ext == '.nc':
self.toplevel = xr.open_dataset(self.file_path)
# Get .nc filenames for storing processed data if computation is performed
self.Sv_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_Sv.nc')
self.Sv_clean_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_Sv_clean.nc')
self.TS_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_TS.nc')
self.MVBS_path = os.path.join(os.path.dirname(self.file_path),
os.path.splitext(os.path.basename(self.file_path))[0] + '_MVBS.nc')
# Raise error if the file format convention does not match
if self.toplevel.sonar_convention_name != 'SONAR-netCDF4':
raise ValueError('netCDF file convention not recognized.')
self.toplevel.close()
else:
raise ValueError('Data file format not recognized.')
def calc_sound_speed(self, src='file'):
"""Base method to be overridden for calculating sound_speed for different sonar models
"""
# issue warning when subclass methods not available
print("Sound speed calculation has not been implemented for this sonar model!")
def calc_seawater_absorption(self, src='file'):
"""Base method to be overridden for calculating seawater_absorption for different sonar models
"""
# issue warning when subclass methods not available
print("Seawater absorption calculation has not been implemented for this sonar model!")
def calc_sample_thickness(self):
"""Base method to be overridden for calculating sample_thickness for different sonar models.
"""
# issue warning when subclass methods not available
print('Sample thickness calculation has not been implemented for this sonar model!')
def calc_range(self):
"""Base method to be overridden for calculating range for different sonar models.
"""
# issue warning when subclass methods not available
print('Range calculation has not been implemented for this sonar model!')
def recalculate_environment(self, ss=True, sa=True, st=True, r=True):
""" Recalculates sound speed, seawater absorption, sample thickness, and range using
salinity, temperature, and pressure
Parameters
----------
        ss : bool
            Whether to calculate sound speed. Defaults to `True`
        sa : bool
            Whether to calculate seawater absorption. Defaults to `True`
        st : bool
            Whether to calculate sample thickness. Defaults to `True`
        r : bool
            Whether to calculate range. Defaults to `True`
"""
s, t, p = self.salinity, self.temperature, self.pressure
if s is not None and t is not None and p is not None:
if ss:
self.sound_speed = self.calc_sound_speed(src='user')
if sa:
self.seawater_absorption = self.calc_seawater_absorption(src='user')
if st:
self.sample_thickness = self.calc_sample_thickness()
if r:
self.range = self.calc_range()
elif s is None:
print("Salinity was not provided. Environment was not recalculated")
elif t is None:
print("Temperature was not provided. Environment was not recalculated")
else:
print("Pressure was not provided. Environment was not recalculated")
def calibrate(self):
"""Base method to be overridden for volume backscatter calibration and echo-integration for different sonar models.
"""
# issue warning when subclass methods not available
print('Calibration has not been implemented for this sonar model!')
def calibrate_TS(self):
"""Base method to be overridden for target strength calibration and echo-integration for different sonar models.
"""
# issue warning when subclass methods not available
print('Target strength calibration has not been implemented for this sonar model!')
def validate_path(self, save_path, save_postfix):
"""Creates a directory if it doesnt exist. Returns a valid save path.
"""
def _assemble_path():
file_in = os.path.basename(self.file_path)
file_name, file_ext = os.path.splitext(file_in)
return file_name + save_postfix + file_ext
if save_path is None:
save_dir = os.path.dirname(self.file_path)
file_out = _assemble_path()
else:
path_ext = os.path.splitext(save_path)[1]
# If given save_path is file, split into directory and file
if path_ext != '':
save_dir, file_out = os.path.split(save_path)
if save_dir == '': # save_path is only a filename without directory
save_dir = os.path.dirname(self.file_path) # use directory from input file
# If given save_path is a directory, get a filename from input .nc file
else:
save_dir = save_path
file_out = _assemble_path()
# Create folder if not already exists
if save_dir == '':
# TODO: should we use '.' instead of os.getcwd()?
save_dir = os.getcwd() # explicit about path to current directory
if not os.path.exists(save_dir):
os.mkdir(save_dir)
return os.path.join(save_dir, file_out)
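        # Illustrative examples (not part of the original code), assuming
        # self.file_path == '/data/cruise/raw1.nc':
        #   validate_path(None, '_Sv')             -> '/data/cruise/raw1_Sv.nc'
        #   validate_path('/out', '_Sv')           -> '/out/raw1_Sv.nc'
        #   validate_path('/out/custom.nc', '_Sv') -> '/out/custom.nc'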
@staticmethod
def get_tile_params(r_data_sz, p_data_sz, r_tile_sz, p_tile_sz, sample_thickness):
"""Obtain ping_time and range_bin parameters associated with groupby and groupby_bins operations.
These parameters are used in methods remove_noise(), noise_estimates(), get_MVBS().
Parameters
----------
r_data_sz : int
number of range_bin entries in data
p_data_sz : int
number of ping_time entries in data
r_tile_sz : float
tile size along the range_bin dimension [m]
p_tile_sz : int
tile size along the ping_time dimension [number of pings]
sample_thickness : float
thickness of each data sample, determined by sound speed and pulse duration
Returns
-------
r_tile_sz : int
modified tile size along the range dimension [m], determined by sample_thickness
r_tile_bin_edge : list of int
bin edges along the range_bin dimension for :py:func:`xarray.DataArray.groupby_bins` operation
p_tile_bin_edge : list of int
bin edges along the ping_time dimension for :py:func:`xarray.DataArray.groupby_bins` operation
"""
# Adjust noise_est_range_bin_size because range_bin_size may be an inconvenient value
num_r_per_tile = np.round(r_tile_sz / sample_thickness).astype(int) # num of range_bin per tile
r_tile_sz = num_r_per_tile * sample_thickness
# Total number of range_bin and ping tiles
num_tile_range_bin = np.ceil(r_data_sz / num_r_per_tile).astype(int)
if np.mod(p_data_sz, p_tile_sz) == 0:
num_tile_ping = np.ceil(p_data_sz / p_tile_sz).astype(int) + 1
else:
num_tile_ping = np.ceil(p_data_sz / p_tile_sz).astype(int)
# Tile bin edges along range
# ... -1 to make sure each bin has the same size because of the right-inclusive and left-exclusive bins
r_tile_bin_edge = [np.arange(x.values + 1) * y.values - 1 for x, y in zip(num_tile_range_bin, num_r_per_tile)]
p_tile_bin_edge = np.arange(num_tile_ping + 1) * p_tile_sz - 1
return r_tile_sz, r_tile_bin_edge, p_tile_bin_edge
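    # Worked example for get_tile_params (illustrative numbers, not from the
    # original code), per frequency channel: with sample_thickness = 0.2 m and
    # r_tile_sz = 5 m there are 25 range_bin samples per tile, so for
    # r_data_sz = 1000 the range bin edges are [-1, 24, 49, ..., 999]; with
    # p_tile_sz = 30 and p_data_sz = 300 (an exact multiple) the ping bin
    # edges are [-1, 29, 59, ..., 329].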
def _get_proc_Sv(self, source_path=None, source_postfix='_Sv'):
"""Private method to return calibrated Sv either from memory or _Sv.nc file.
This method is called by remove_noise(), noise_estimates() and get_MVBS().
"""
if self.Sv is None: # calibration not yet performed
Sv_path = self.validate_path(save_path=source_path, # wrangle _Sv path
save_postfix=source_postfix)
if os.path.exists(Sv_path): # _Sv exists
self.Sv = xr.open_dataset(Sv_path) # load _Sv file
else:
# if path specification given but file do not exist:
if (source_path is not None) or (source_postfix != '_Sv'):
print('%s no calibrated data found in specified path: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), Sv_path))
else:
print('%s data has not been calibrated. ' % dt.datetime.now().strftime('%H:%M:%S'))
print(' performing calibration now and operate from Sv in memory.')
self.calibrate() # calibrate, have Sv in memory
return self.Sv
def remove_noise(self, source_postfix='_Sv', source_path=None,
noise_est_range_bin_size=None, noise_est_ping_size=None,
SNR=0, Sv_threshold=None,
save=False, save_postfix='_Sv_clean', save_path=None):
"""Remove noise by using noise estimates obtained from the minimum mean calibrated power level
along each column of tiles.
See method noise_estimates() for details of noise estimation.
Reference: <NAME> & Higginbottom, 2017, ICES Journal of Marine Sciences
Parameters
----------
source_postfix : str
postfix of the Sv file used to remove noise from, default to '_Sv'
source_path : str
path of Sv file used to remove noise from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
                or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulting self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float, optional
Meters per tile for noise estimation [m]
noise_est_ping_size : int, optional
Number of pings per tile for noise estimation
SNR : int, optional
Minimum signal-to-noise ratio (remove values below this after general noise removal).
Sv_threshold : int, optional
Minimum Sv threshold [dB] (remove values below this after general noise removal)
save : bool, optional
Whether to save the denoised Sv (``Sv_clean``) into a new .nc file.
Default to ``False``.
save_postfix : str
Filename postfix, default to '_Sv_clean'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_Sv_clean.nc default
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Get calibrated Sv
if self.Sv is not None:
print('%s Remove noise from Sv stored in memory.' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
print('%s Remove noise from Sv stored in: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Get TVG and ABS for compensating for transmission loss
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Function for use with apply
def remove_n(x, rr):
p_c_lin = 10 ** ((x.Sv - x.ABS - x.TVG) / 10)
nn = 10 * np.log10(p_c_lin.mean(dim='ping_time').groupby_bins('range_bin', rr).mean().min(
dim='range_bin_bins')) + x.ABS + x.TVG
# Return values where signal is [SNR] dB above noise and at least [Sv_threshold] dB
if not Sv_threshold:
return x.Sv.where(x.Sv > (nn + SNR), other=np.nan)
else:
return x.Sv.where((x.Sv > (nn + SNR)) & (x > Sv_threshold), other=np.nan)
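        # In formula form (restating the code above, not adding new logic):
        #   noise = 10 * log10( min over range tiles of
        #                       mean over pings of 10**((Sv - ABS - TVG) / 10) ) + ABS + TVG
        # and a sample is kept only where Sv > noise + SNR (and, when a
        # threshold is given, where it also exceeds Sv_threshold).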
# Groupby noise removal operation
proc_data.coords['ping_idx'] = ('ping_time', np.arange(proc_data.Sv['ping_time'].size))
ABS.name = 'ABS'
TVG.name = 'TVG'
pp = xr.merge([proc_data, ABS])
pp = xr.merge([pp, TVG])
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
Sv_clean = pp.groupby_bins('ping_idx', ping_tile_bin_edge).\
map(remove_n, rr=range_bin_tile_bin_edge[0])
Sv_clean = Sv_clean.drop_vars(['ping_idx'])
else:
tmp_clean = []
cnt = 0
for key, val in pp.groupby('frequency'): # iterate over different frequency channel
tmp = val.groupby_bins('ping_idx', ping_tile_bin_edge). \
map(remove_n, rr=range_bin_tile_bin_edge[cnt])
cnt += 1
tmp_clean.append(tmp)
clean_val = np.array([zz.values for zz in xr.align(*tmp_clean, join='outer')])
Sv_clean = xr.DataArray(clean_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_clean[0]['ping_time'].values,
'range_bin': tmp_clean[0]['range_bin'].values},
dims=['frequency', 'ping_time', 'range_bin'])
# Set up DataSet
Sv_clean.name = 'Sv'
Sv_clean = Sv_clean.to_dataset()
Sv_clean['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
Sv_clean.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Attach calculated range into data set
Sv_clean['range'] = (('frequency', 'range_bin'), self.range.T)
        # Save as an object attribute and, if requested below, as a netCDF file
self.Sv_clean = Sv_clean
# TODO: now adding the below so that MVBS can be calculated directly
# from the cleaned Sv without saving and loading Sv_clean from disk.
# However this is not explicit to the user. A better way to do this
# is to change get_MVBS() to first check existence of self.Sv_clean
# when `_Sv_clean` is specified as the source_postfix.
if not print_src: # remove noise from Sv stored in memory
self.Sv = Sv_clean.copy()
if save:
self.Sv_clean_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving denoised Sv to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.Sv_clean_path))
Sv_clean.to_netcdf(self.Sv_clean_path)
# Close opened resources
proc_data.close()
def noise_estimates(self, source_postfix='_Sv', source_path=None,
noise_est_range_bin_size=None, noise_est_ping_size=None):
"""Obtain noise estimates from the minimum mean calibrated power level along each column of tiles.
The tiles here are defined by class attributes noise_est_range_bin_size and noise_est_ping_size.
This method contains redundant pieces of code that also appear in method remove_noise(),
but this method can be used separately to determine the exact tile size for noise removal before
noise removal is actually performed.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate noise estimates from, default to '_Sv'
source_path : str
path of Sv file used to calculate noise estimates from, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
                or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulting self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
noise_est_range_bin_size : float
meters per tile for noise estimation [m]
noise_est_ping_size : int
number of pings per tile for noise estimation
Returns
-------
noise_est : xarray DataSet
noise estimates as a DataArray with dimension [ping_time x range_bin]
ping_time and range_bin are taken from the first element of each tile along each of the dimensions
"""
# Check params
if (noise_est_range_bin_size is not None) and (self.noise_est_range_bin_size != noise_est_range_bin_size):
self.noise_est_range_bin_size = noise_est_range_bin_size
if (noise_est_ping_size is not None) and (self.noise_est_ping_size != noise_est_ping_size):
self.noise_est_ping_size = noise_est_ping_size
# Use calibrated data to calculate noise removal
proc_data = self._get_proc_Sv()
# Get tile indexing parameters
self.noise_est_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.noise_est_range_bin_size,
p_tile_sz=self.noise_est_ping_size,
sample_thickness=self.sample_thickness)
# Values for noise estimates
range_meter = self.range
TVG = np.real(20 * np.log10(range_meter.where(range_meter >= 1, other=1)))
ABS = 2 * self.seawater_absorption * range_meter
# Noise estimates
proc_data['power_cal'] = 10 ** ((proc_data.Sv - ABS - TVG) / 10)
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
noise_est = 10 * np.log10(proc_data['power_cal'].coarsen(
ping_time=self.noise_est_ping_size,
range_bin=int(np.unique(self.noise_est_range_bin_size / self.sample_thickness)),
boundary='pad').mean().min(dim='range_bin'))
else:
range_bin_coarsen_idx = (self.noise_est_range_bin_size / self.sample_thickness).astype(int)
tmp_noise = []
for r_bin in range_bin_coarsen_idx:
freq = r_bin.frequency.values
tmp_da = 10 * np.log10(proc_data['power_cal'].sel(frequency=freq).coarsen(
ping_time=self.noise_est_ping_size,
range_bin=r_bin.values,
boundary='pad').mean().min(dim='range_bin'))
tmp_da.name = 'noise_est'
tmp_noise.append(tmp_da)
# Construct a dataArray TODO: this can probably be done smarter using xarray native functions
noise_val = np.array([zz.values for zz in xr.align(*tmp_noise, join='outer')])
noise_est = xr.DataArray(noise_val,
coords={'frequency': proc_data['frequency'].values,
'ping_time': tmp_noise[0]['ping_time'].values},
dims=['frequency', 'ping_time'])
noise_est = noise_est.to_dataset(name='noise_est')
noise_est['noise_est_range_bin_size'] = ('frequency', self.noise_est_range_bin_size)
noise_est.attrs['noise_est_ping_size'] = self.noise_est_ping_size
# Close opened resources
proc_data.close()
return noise_est
def get_MVBS(self, source_postfix='_Sv', source_path=None,
MVBS_range_bin_size=None, MVBS_ping_size=None,
save=False, save_postfix='_MVBS', save_path=None):
"""Calculate Mean Volume Backscattering Strength (MVBS).
The calculation uses class attributes MVBS_ping_size and MVBS_range_bin_size to
calculate and save MVBS as a new attribute to the calling EchoData instance.
MVBS is an xarray DataArray with dimensions ``ping_time`` and ``range_bin``
that are from the first elements of each tile along the corresponding dimensions
in the original Sv or Sv_clean DataArray.
Parameters
----------
source_postfix : str
postfix of the Sv file used to calculate MVBS, default to '_Sv'
source_path : str
path of Sv file used to calculate MVBS, can be one of the following:
- None (default):
use Sv in RAWFILENAME_Sv.nc in the same folder as the raw data file,
                or when RAWFILENAME_Sv.nc doesn't exist, perform self.calibrate() and use the resulting self.Sv
- path to a directory: RAWFILENAME_Sv.nc in the specified directory
- path to a specific file: the specified file, e.g., ./another_directory/some_other_filename.nc
MVBS_range_bin_size : float, optional
meters per tile for calculating MVBS [m]
MVBS_ping_size : int, optional
number of pings per tile for calculating MVBS
save : bool, optional
whether to save the calculated MVBS into a new .nc file, default to ``False``
save_postfix : str
Filename postfix, default to '_MVBS'
save_path : str
Full filename to save to, overwriting the RAWFILENAME_MVBS.nc default
"""
# Check params
if (MVBS_range_bin_size is not None) and (self.MVBS_range_bin_size != MVBS_range_bin_size):
self.MVBS_range_bin_size = MVBS_range_bin_size
if (MVBS_ping_size is not None) and (self.MVBS_ping_size != MVBS_ping_size):
self.MVBS_ping_size = MVBS_ping_size
# Get Sv by validating path and calibrate if not already done
if self.Sv is not None:
print('%s use Sv stored in memory to calculate MVBS' % dt.datetime.now().strftime('%H:%M:%S'))
print_src = False
else:
print_src = True
proc_data = self._get_proc_Sv(source_path=source_path, source_postfix=source_postfix)
if print_src:
if self.Sv_path is not None:
print('%s Sv source used to calculate MVBS: %s' %
(dt.datetime.now().strftime('%H:%M:%S'), self.Sv_path))
else:
print('%s Sv source used to calculate MVBS: memory' %
dt.datetime.now().strftime('%H:%M:%S'))
# Get tile indexing parameters
self.MVBS_range_bin_size, range_bin_tile_bin_edge, ping_tile_bin_edge = \
self.get_tile_params(r_data_sz=proc_data.range_bin.size,
p_data_sz=proc_data.ping_time.size,
r_tile_sz=self.MVBS_range_bin_size,
p_tile_sz=self.MVBS_ping_size,
sample_thickness=self.sample_thickness)
# Calculate MVBS
Sv_linear = 10 ** (proc_data.Sv / 10) # convert to linear domain before averaging
# check if number of range_bin per tile the same for all freq channels
if np.unique([np.array(x).size for x in range_bin_tile_bin_edge]).size == 1:
MVBS = 10 * np.log10(Sv_linear.coarsen(
ping_time=self.MVBS_ping_size,
range_bin=int(np.unique(self.MVBS_range_bin_size / self.sample_thickness)),
boundary='pad').mean())
MVBS.coords['range_bin'] = ('range_bin', np.arange(MVBS['range_bin'].size))
else:
range_bin_coarsen_idx = (self.MVBS_range_bin_size / self.sample_thickness).astype(int)
tmp_MVBS = []
for r_bin in range_bin_coarsen_idx:
freq = r_bin.frequency.values
tmp_da = 10 * np.log10(Sv_linear.sel(frequency=freq).coarsen(
ping_time=self.MVBS_ping_size,
range_bin=r_bin.values,
boundary='pad').mean())
tmp_da.coords['range_bin'] = ('range_bin', np.arange(tmp_da['range_bin'].size))
tmp_da.name = 'MVBS'
tmp_MVBS.append(tmp_da)
# Construct a dataArray TODO: this can probably be done smarter using xarray native functions
MVBS_val = np.array([zz.values for zz in xr.align(*tmp_MVBS, join='outer')])
MVBS = xr.DataArray(MVBS_val,
coords={'frequency': Sv_linear['frequency'].values,
'ping_time': tmp_MVBS[0]['ping_time'].values,
'range_bin': np.arange(MVBS_val.shape[2])},
dims=['frequency', 'ping_time', 'range_bin']).dropna(dim='range_bin', how='all')
# Set MVBS attributes
MVBS.name = 'MVBS'
MVBS = MVBS.to_dataset()
MVBS['MVBS_range_bin_size'] = ('frequency', self.MVBS_range_bin_size)
MVBS.attrs['MVBS_ping_size'] = self.MVBS_ping_size
# Save results in object and as a netCDF file
self.MVBS = MVBS
if save:
self.MVBS_path = self.validate_path(save_path=save_path, save_postfix=save_postfix)
print('%s saving MVBS to %s' % (dt.datetime.now().strftime('%H:%M:%S'), self.MVBS_path))
MVBS.to_netcdf(self.MVBS_path)
# Close opened resources
proc_data.close()
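# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). In practice a
# sonar-specific subclass that implements calibrate(), calc_range(), etc. is
# used instead of ModelBase, and the .nc file name below is a placeholder.
if __name__ == '__main__':
    ed = ModelBase(file_path='converted_data.nc')   # hypothetical converted file
    ed.calibrate()                                  # implemented by subclasses
    ed.remove_noise(noise_est_range_bin_size=5, noise_est_ping_size=30)
    ed.get_MVBS(MVBS_range_bin_size=5, MVBS_ping_size=30, save=False)
    print(ed.MVBS)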
|
[
"numpy.array",
"xarray.align",
"numpy.mod",
"numpy.arange",
"os.path.exists",
"xarray.merge",
"os.path.split",
"os.mkdir",
"numpy.round",
"numpy.ceil",
"os.path.splitext",
"os.path.dirname",
"xarray.open_dataset",
"numpy.unique",
"os.path.join",
"os.getcwd",
"datetime.datetime.now",
"os.path.basename",
"xarray.DataArray"
] |
[((2910, 2929), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (2926, 2929), False, 'import os\n'), ((2947, 2967), 'os.path.splitext', 'os.path.splitext', (['pp'], {}), '(pp)\n', (2963, 2967), False, 'import os\n'), ((8945, 8977), 'os.path.join', 'os.path.join', (['save_dir', 'file_out'], {}), '(save_dir, file_out)\n', (8957, 8977), False, 'import os\n'), ((16839, 16865), 'xarray.merge', 'xr.merge', (['[proc_data, ABS]'], {}), '([proc_data, ABS])\n', (16847, 16865), True, 'import xarray as xr\n'), ((16879, 16898), 'xarray.merge', 'xr.merge', (['[pp, TVG]'], {}), '([pp, TVG])\n', (16887, 16898), True, 'import xarray as xr\n'), ((7771, 7803), 'os.path.basename', 'os.path.basename', (['self.file_path'], {}), '(self.file_path)\n', (7787, 7803), False, 'import os\n'), ((7838, 7863), 'os.path.splitext', 'os.path.splitext', (['file_in'], {}), '(file_in)\n', (7854, 7863), False, 'import os\n'), ((7973, 8004), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (7988, 8004), False, 'import os\n'), ((8801, 8812), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8810, 8812), False, 'import os\n'), ((8872, 8896), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (8886, 8896), False, 'import os\n'), ((8910, 8928), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (8918, 8928), False, 'import os\n'), ((10609, 10637), 'numpy.mod', 'np.mod', (['p_data_sz', 'p_tile_sz'], {}), '(p_data_sz, p_tile_sz)\n', (10615, 10637), True, 'import numpy as np\n'), ((11689, 11712), 'os.path.exists', 'os.path.exists', (['Sv_path'], {}), '(Sv_path)\n', (11703, 11712), False, 'import os\n'), ((16733, 16774), 'numpy.arange', 'np.arange', (["proc_data.Sv['ping_time'].size"], {}), "(proc_data.Sv['ping_time'].size)\n", (16742, 16774), True, 'import numpy as np\n'), ((17741, 17966), 'xarray.DataArray', 'xr.DataArray', (['clean_val'], {'coords': "{'frequency': proc_data['frequency'].values, 'ping_time': tmp_clean[0][\n 'ping_time'].values, 'range_bin': tmp_clean[0]['range_bin'].values}", 'dims': "['frequency', 'ping_time', 'range_bin']"}), "(clean_val, coords={'frequency': proc_data['frequency'].values,\n 'ping_time': tmp_clean[0]['ping_time'].values, 'range_bin': tmp_clean[0\n ]['range_bin'].values}, dims=['frequency', 'ping_time', 'range_bin'])\n", (17753, 17966), True, 'import xarray as xr\n'), ((23685, 23849), 'xarray.DataArray', 'xr.DataArray', (['noise_val'], {'coords': "{'frequency': proc_data['frequency'].values, 'ping_time': tmp_noise[0][\n 'ping_time'].values}", 'dims': "['frequency', 'ping_time']"}), "(noise_val, coords={'frequency': proc_data['frequency'].values,\n 'ping_time': tmp_noise[0]['ping_time'].values}, dims=['frequency',\n 'ping_time'])\n", (23697, 23849), True, 'import xarray as xr\n'), ((3192, 3223), 'xarray.open_dataset', 'xr.open_dataset', (['self.file_path'], {}), '(self.file_path)\n', (3207, 3223), True, 'import xarray as xr\n'), ((8082, 8109), 'os.path.splitext', 'os.path.splitext', (['save_path'], {}), '(save_path)\n', (8098, 8109), False, 'import os\n'), ((8253, 8277), 'os.path.split', 'os.path.split', (['save_path'], {}), '(save_path)\n', (8266, 8277), False, 'import os\n'), ((10335, 10373), 'numpy.round', 'np.round', (['(r_tile_sz / sample_thickness)'], {}), '(r_tile_sz / sample_thickness)\n', (10343, 10373), True, 'import numpy as np\n'), ((10550, 10585), 'numpy.ceil', 'np.ceil', (['(r_data_sz / num_r_per_tile)'], {}), '(r_data_sz / num_r_per_tile)\n', (10557, 10585), True, 'import numpy as np\n'), ((11099, 11127), 
'numpy.arange', 'np.arange', (['(num_tile_ping + 1)'], {}), '(num_tile_ping + 1)\n', (11108, 11127), True, 'import numpy as np\n'), ((11754, 11778), 'xarray.open_dataset', 'xr.open_dataset', (['Sv_path'], {}), '(Sv_path)\n', (11769, 11778), True, 'import xarray as xr\n'), ((28184, 28217), 'numpy.arange', 'np.arange', (["MVBS['range_bin'].size"], {}), "(MVBS['range_bin'].size)\n", (28193, 28217), True, 'import numpy as np\n'), ((3352, 3383), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (3367, 3383), False, 'import os\n'), ((3537, 3568), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (3552, 3568), False, 'import os\n'), ((3728, 3759), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (3743, 3759), False, 'import os\n'), ((3909, 3940), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (3924, 3940), False, 'import os\n'), ((8394, 8425), 'os.path.dirname', 'os.path.dirname', (['self.file_path'], {}), '(self.file_path)\n', (8409, 8425), False, 'import os\n'), ((10761, 10791), 'numpy.ceil', 'np.ceil', (['(p_data_sz / p_tile_sz)'], {}), '(p_data_sz / p_tile_sz)\n', (10768, 10791), True, 'import numpy as np\n'), ((10981, 11004), 'numpy.arange', 'np.arange', (['(x.values + 1)'], {}), '(x.values + 1)\n', (10990, 11004), True, 'import numpy as np\n'), ((28740, 28775), 'numpy.arange', 'np.arange', (["tmp_da['range_bin'].size"], {}), "(tmp_da['range_bin'].size)\n", (28749, 28775), True, 'import numpy as np\n'), ((10672, 10702), 'numpy.ceil', 'np.ceil', (['(p_data_sz / p_tile_sz)'], {}), '(p_data_sz / p_tile_sz)\n', (10679, 10702), True, 'import numpy as np\n'), ((17681, 17715), 'xarray.align', 'xr.align', (['*tmp_clean'], {'join': '"""outer"""'}), "(*tmp_clean, join='outer')\n", (17689, 17715), True, 'import xarray as xr\n'), ((23624, 23658), 'xarray.align', 'xr.align', (['*tmp_noise'], {'join': '"""outer"""'}), "(*tmp_noise, join='outer')\n", (23632, 23658), True, 'import xarray as xr\n'), ((29015, 29048), 'xarray.align', 'xr.align', (['*tmp_MVBS'], {'join': '"""outer"""'}), "(*tmp_MVBS, join='outer')\n", (29023, 29048), True, 'import xarray as xr\n'), ((14963, 14980), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (14978, 14980), True, 'import datetime as dt\n'), ((17000, 17011), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (17008, 17011), True, 'import numpy as np\n'), ((22541, 22552), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (22549, 22552), True, 'import numpy as np\n'), ((26586, 26603), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (26601, 26603), True, 'import datetime as dt\n'), ((27837, 27848), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (27845, 27848), True, 'import numpy as np\n'), ((3442, 3474), 'os.path.basename', 'os.path.basename', (['self.file_path'], {}), '(self.file_path)\n', (3458, 3474), False, 'import os\n'), ((3633, 3665), 'os.path.basename', 'os.path.basename', (['self.file_path'], {}), '(self.file_path)\n', (3649, 3665), False, 'import os\n'), ((3818, 3850), 'os.path.basename', 'os.path.basename', (['self.file_path'], {}), '(self.file_path)\n', (3834, 3850), False, 'import os\n'), ((4001, 4033), 'os.path.basename', 'os.path.basename', (['self.file_path'], {}), '(self.file_path)\n', (4017, 4033), False, 'import os\n'), ((15274, 15291), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (15289, 15291), True, 'import datetime as dt\n'), ((19234, 19251), 
'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (19249, 19251), True, 'import datetime as dt\n'), ((27114, 27131), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (27129, 27131), True, 'import datetime as dt\n'), ((29316, 29344), 'numpy.arange', 'np.arange', (['MVBS_val.shape[2]'], {}), '(MVBS_val.shape[2])\n', (29325, 29344), True, 'import numpy as np\n'), ((29926, 29943), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (29941, 29943), True, 'import datetime as dt\n'), ((12203, 12220), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (12218, 12220), True, 'import datetime as dt\n'), ((26948, 26965), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (26963, 26965), True, 'import datetime as dt\n'), ((12066, 12083), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (12081, 12083), True, 'import datetime as dt\n'), ((28029, 28088), 'numpy.unique', 'np.unique', (['(self.MVBS_range_bin_size / self.sample_thickness)'], {}), '(self.MVBS_range_bin_size / self.sample_thickness)\n', (28038, 28088), True, 'import numpy as np\n'), ((22756, 22820), 'numpy.unique', 'np.unique', (['(self.noise_est_range_bin_size / self.sample_thickness)'], {}), '(self.noise_est_range_bin_size / self.sample_thickness)\n', (22765, 22820), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
import time
class CaptureManager(object):
def __init__(self, capture, preview_window_manager=None, should_mirror_preview = False):
self.preview_window_manager = preview_window_manager
self.should_mirror_preview = should_mirror_preview
self._capture = capture
self._channel = 0
self._entered_frame = False
self._frame = None
        self._frames_elapsed = 0  # plain int; Python 3 has no long()
self._fps_est = None
@property
def channel(self):
return self._channel
@channel.setter
    def channel(self, value):
        self._channel = value
@property
def frame(self):
if self._entered_frame and self._frame is None:
            _, self._frame = self._capture.retrieve(flag=self.channel)  # parameter is named 'flag' in OpenCV 3+
return self._frame
def enter_frame(self):
# capture the next frame
assert not self._entered_frame, 'previous enter_frame() had no matching exit_frame()'
if self._capture is not None:
self._entered_frame = self._capture.grab()
def exit_frame(self):
# draw to window, write to files, release the frame
        # check whether a frame was actually grabbed and is retrievable
if self.frame is None:
self._entered_frame = False
return
if self._frames_elapsed == 0:
self._start_time = time.time()
else:
time_elapsed = time.time() - self._start_time
self._fps_est = self._frames_elapsed / time_elapsed
self._frames_elapsed += 1
# draw
if self.preview_window_manager is not None:
if self.should_mirror_preview:
mirrored_frame = np.fliplr(self._frame).copy()
self.preview_window_manager.show(mirrored_frame)
else:
self.preview_window_manager.show(self._frame)
# release the frame
self._frame = None
self._entered_frame = False
class WindowManager(object):
def __init__(self, window_name, keypress_callback = None):
self.keypress_callback = keypress_callback
self._window_name = window_name
self._is_window_created = False
@property
def is_window_created(self):
return self._is_window_created
def create_window(self):
cv2.namedWindow(self._window_name)
self._is_window_created = True
def show(self, frame):
cv2.imshow(self._window_name, frame)
def destroy_window(self):
cv2.destroyWindow(self._window_name)
self._is_window_created = False
def process_events(self):
        keycode = cv2.waitKey(1)
        if self.keypress_callback is not None and keycode != -1:
            keycode &= 0xFF
            self.keypress_callback(keycode)
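# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): wire the two managers
# together in the usual capture/preview loop. The window name, camera index 0
# and the ESC key code (27) are assumptions for illustration.
if __name__ == '__main__':
    window = WindowManager('Preview')

    def on_keypress(keycode):
        if keycode == 27:  # ESC closes the window and ends the loop
            window.destroy_window()

    window.keypress_callback = on_keypress
    window.create_window()
    capture = CaptureManager(cv2.VideoCapture(0), window, True)
    while window.is_window_created:
        capture.enter_frame()
        _ = capture.frame        # force retrieval of the grabbed frame
        capture.exit_frame()     # shows the (mirrored) frame in the window
        window.process_events()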
|
[
"cv2.destroyWindow",
"numpy.fliplr",
"cv2.imshow",
"cv2.waitKey",
"time.time",
"cv2.namedWindow"
] |
[((2291, 2325), 'cv2.namedWindow', 'cv2.namedWindow', (['self._window_name'], {}), '(self._window_name)\n', (2306, 2325), False, 'import cv2\n'), ((2401, 2437), 'cv2.imshow', 'cv2.imshow', (['self._window_name', 'frame'], {}), '(self._window_name, frame)\n', (2411, 2437), False, 'import cv2\n'), ((2477, 2513), 'cv2.destroyWindow', 'cv2.destroyWindow', (['self._window_name'], {}), '(self._window_name)\n', (2494, 2513), False, 'import cv2\n'), ((2603, 2617), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2614, 2617), False, 'import cv2\n'), ((1346, 1357), 'time.time', 'time.time', ([], {}), '()\n', (1355, 1357), False, 'import time\n'), ((1399, 1410), 'time.time', 'time.time', ([], {}), '()\n', (1408, 1410), False, 'import time\n'), ((1672, 1694), 'numpy.fliplr', 'np.fliplr', (['self._frame'], {}), '(self._frame)\n', (1681, 1694), True, 'import numpy as np\n')]
|
"""
Unit tests for SNIa truth catalog code.
"""
import os
import unittest
import sqlite3
import numpy as np
import pandas as pd
from desc.sims_truthcatalog import SNeTruthWriter, SNSynthPhotFactory
class SNSynthPhotFactoryTestCase(unittest.TestCase):
"""
Test case class for SNIa synthetic photometry factory class.
"""
    def test_SNSynthPhotFactory(self):
"""
Test some flux calculations using the underlying SNObject
and SyntheticPhotometry classes.
"""
sp_factory = SNSynthPhotFactory(z=0.6322702169418335,
t0=61719.9950436545,
x0=4.2832710977804034e-06,
x1=-1.207738485943195,
c=-0.0069750402968899936,
snra=55.26407314527358,
sndec=-40.81575605788344)
mjds = (61689.150791, 61697.354470, 61712.258685)
bands = ('z', 'i', 'r')
fluxes = (2.6401569864737633, 71.18561504923377, 1048.0327802379868)
for mjd, band, flux in zip(mjds, bands, fluxes):
sp = sp_factory.create(mjd)
self.assertAlmostEqual(sp.calcFlux(band), flux)
class SNeTruthWriterTestCase(unittest.TestCase):
"""
Test case class for SNIa truth catalog generation class.
"""
def setUp(self):
self.outfile = 'test_sne_truth_cat.db'
self.data_dir = os.path.join(os.environ['SIMS_TRUTHCATALOG_DIR'],
'data')
sn_db_file = os.path.join(self.data_dir,
'sne_cosmoDC2_v1.1.4_MS_DDF_small.db')
self.sne_truth_writer = SNeTruthWriter(self.outfile, sn_db_file)
def tearDown(self):
if os.path.isfile(self.outfile):
os.remove(self.outfile)
def test_truth_summary(self):
"""Test that the truth_summary columns are filled out as expected."""
self.sne_truth_writer.write()
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from truth_summary', conn)
zeros = np.zeros(len(df))
ones = np.ones(len(df))
np.testing.assert_equal(df['is_variable'], ones)
np.testing.assert_equal(df['is_pointsource'], ones)
for band in 'ugrizy':
flux_col = f'flux_{band}'
np.testing.assert_equal(df[flux_col], zeros)
flux_col += '_noMW'
np.testing.assert_equal(df[flux_col], zeros)
def test_auxiliary_truth(self):
"""
Test that the columns from the sne_params table are transcribed
correctly.
"""
self.sne_truth_writer.write_auxiliary_truth()
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from sn_auxiliary_info', conn)
np.testing.assert_equal(self.sne_truth_writer.sne_df['snid_in'],
df['id'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['galaxy_id'],
df['host_galaxy'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['snra_in'],
df['ra'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['t0_in'],
df['t0'].to_numpy())
np.testing.assert_equal(self.sne_truth_writer.sne_df['z_in'],
df['redshift'].to_numpy())
def test_variability_truth(self):
"""
Test some expected values for a SNIa in the test SNe catalog
using a small opsim db table.
"""
opsim_db_file = os.path.join(self.data_dir,
'minion_1016_desc_dithered_v4_small.db')
self.sne_truth_writer.write_variability_truth(opsim_db_file,
max_rows=60)
with sqlite3.connect(self.outfile) as conn:
df = pd.read_sql('select * from sn_variability_truth', conn)
my_object = 'MS_10195_1375'
self.assertIn(my_object, df['id'].to_list())
my_df = df.query(f'id == "{my_object}"')
for visit in (1425850, 1433860, 1495410):
self.assertIn(visit, my_df['obsHistID'].to_list())
if __name__ == '__main__':
unittest.main()
|
[
"numpy.testing.assert_equal",
"sqlite3.connect",
"desc.sims_truthcatalog.SNSynthPhotFactory",
"os.path.join",
"os.path.isfile",
"unittest.main",
"pandas.read_sql",
"desc.sims_truthcatalog.SNeTruthWriter",
"os.remove"
] |
[((4369, 4384), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4382, 4384), False, 'import unittest\n'), ((524, 720), 'desc.sims_truthcatalog.SNSynthPhotFactory', 'SNSynthPhotFactory', ([], {'z': '(0.6322702169418335)', 't0': '(61719.9950436545)', 'x0': '(4.2832710977804034e-06)', 'x1': '(-1.207738485943195)', 'c': '(-0.0069750402968899936)', 'snra': '(55.26407314527358)', 'sndec': '(-40.81575605788344)'}), '(z=0.6322702169418335, t0=61719.9950436545, x0=\n 4.2832710977804034e-06, x1=-1.207738485943195, c=-0.0069750402968899936,\n snra=55.26407314527358, sndec=-40.81575605788344)\n', (542, 720), False, 'from desc.sims_truthcatalog import SNeTruthWriter, SNSynthPhotFactory\n'), ((1496, 1553), 'os.path.join', 'os.path.join', (["os.environ['SIMS_TRUTHCATALOG_DIR']", '"""data"""'], {}), "(os.environ['SIMS_TRUTHCATALOG_DIR'], 'data')\n", (1508, 1553), False, 'import os\n'), ((1612, 1678), 'os.path.join', 'os.path.join', (['self.data_dir', '"""sne_cosmoDC2_v1.1.4_MS_DDF_small.db"""'], {}), "(self.data_dir, 'sne_cosmoDC2_v1.1.4_MS_DDF_small.db')\n", (1624, 1678), False, 'import os\n'), ((1745, 1785), 'desc.sims_truthcatalog.SNeTruthWriter', 'SNeTruthWriter', (['self.outfile', 'sn_db_file'], {}), '(self.outfile, sn_db_file)\n', (1759, 1785), False, 'from desc.sims_truthcatalog import SNeTruthWriter, SNSynthPhotFactory\n'), ((1822, 1850), 'os.path.isfile', 'os.path.isfile', (['self.outfile'], {}), '(self.outfile)\n', (1836, 1850), False, 'import os\n'), ((2231, 2279), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["df['is_variable']", 'ones'], {}), "(df['is_variable'], ones)\n", (2254, 2279), True, 'import numpy as np\n'), ((2288, 2339), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (["df['is_pointsource']", 'ones'], {}), "(df['is_pointsource'], ones)\n", (2311, 2339), True, 'import numpy as np\n'), ((3718, 3786), 'os.path.join', 'os.path.join', (['self.data_dir', '"""minion_1016_desc_dithered_v4_small.db"""'], {}), "(self.data_dir, 'minion_1016_desc_dithered_v4_small.db')\n", (3730, 3786), False, 'import os\n'), ((1864, 1887), 'os.remove', 'os.remove', (['self.outfile'], {}), '(self.outfile)\n', (1873, 1887), False, 'import os\n'), ((2052, 2081), 'sqlite3.connect', 'sqlite3.connect', (['self.outfile'], {}), '(self.outfile)\n', (2067, 2081), False, 'import sqlite3\n'), ((2108, 2156), 'pandas.read_sql', 'pd.read_sql', (['"""select * from truth_summary"""', 'conn'], {}), "('select * from truth_summary', conn)\n", (2119, 2156), True, 'import pandas as pd\n'), ((2420, 2464), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['df[flux_col]', 'zeros'], {}), '(df[flux_col], zeros)\n', (2443, 2464), True, 'import numpy as np\n'), ((2509, 2553), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['df[flux_col]', 'zeros'], {}), '(df[flux_col], zeros)\n', (2532, 2553), True, 'import numpy as np\n'), ((2773, 2802), 'sqlite3.connect', 'sqlite3.connect', (['self.outfile'], {}), '(self.outfile)\n', (2788, 2802), False, 'import sqlite3\n'), ((2829, 2881), 'pandas.read_sql', 'pd.read_sql', (['"""select * from sn_auxiliary_info"""', 'conn'], {}), "('select * from sn_auxiliary_info', conn)\n", (2840, 2881), True, 'import pandas as pd\n'), ((3973, 4002), 'sqlite3.connect', 'sqlite3.connect', (['self.outfile'], {}), '(self.outfile)\n', (3988, 4002), False, 'import sqlite3\n'), ((4029, 4084), 'pandas.read_sql', 'pd.read_sql', (['"""select * from sn_variability_truth"""', 'conn'], {}), "('select * from sn_variability_truth', conn)\n", (4040, 4084), True, 'import pandas as 
pd\n')]
|
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import os
import argparse
import logging
import numpy as np
from PIL import Image
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torchvision import transforms
import cv2
import tqdm
from net.pspnet import PSPNet
models = {
'squeezenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='squeezenet'),
'densenet': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=512, backend='densenet'),
'resnet18': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet18'),
'resnet34': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend='resnet34'),
'resnet50': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet50'),
'resnet101': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet101'),
'resnet152': lambda: PSPNet(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend='resnet152')
}
parser = argparse.ArgumentParser(description="Pyramid Scene Parsing Network")
parser.add_argument('--models-path', type=str, default='./checkpoints',
help='Path for storing model snapshots')
parser.add_argument('--backend', type=str,
default='densenet', help='Feature extractor')
parser.add_argument('--num-classes', type=int,
default=20, help="Number of classes.")
args = parser.parse_args()
def build_network(snapshot, backend):
epoch = 0
backend = backend.lower()
net = models[backend]()
net = nn.DataParallel(net)
if snapshot is not None:
_, epoch = os.path.basename(snapshot).split('_')
if not epoch == 'last':
epoch = int(epoch)
net.load_state_dict(torch.load(
snapshot, map_location=torch.device('cpu')))
logging.info(
"Snapshot for epoch {} loaded from {}".format(epoch, snapshot))
if torch.cuda.is_available():
net = net.cuda()
return net, epoch
def get_transform():
transform_image_list = [
# transforms.Resize((192, 256), 3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
return transforms.Compose(transform_image_list)
def show_image(img, pred):
fig, axes = plt.subplots(1, 2)
ax0, ax1 = axes
ax0.get_xaxis().set_ticks([])
ax0.get_yaxis().set_ticks([])
ax1.get_xaxis().set_ticks([])
ax1.get_yaxis().set_ticks([])
classes = np.array(('Background', # always index 0
'Hat', 'Hair', 'Glove', 'Sunglasses',
'UpperClothes', 'Dress', 'Coat', 'Socks',
'Pants', 'Jumpsuits', 'Scarf', 'Skirt',
'Face', 'Left-arm', 'Right-arm', 'Left-leg',
'Right-leg', 'Left-shoe', 'Right-shoe',))
colormap = [(0, 0, 0),
(1, 0.25, 0), (0, 0.25, 0), (0.5, 0, 0.25), (1, 1, 1),
(1, 0.75, 0), (0, 0, 0.5), (0.5, 0.25, 0), (0.75, 0, 0.25),
(1, 0, 0.25), (0, 0.5, 0), (0.5, 0.5, 0), (0.25, 0, 0.5),
(1, 0, 0.75), (0, 0.5, 0.5), (0.25, 0.5, 0.5), (1, 0, 0),
(1, 0.25, 0), (0, 0.75, 0), (0.5, 0.75, 0), ]
cmap = matplotlib.colors.ListedColormap(colormap)
bounds = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
h, w, _ = pred.shape
def denormalize(img, mean, std):
c, _, _ = img.shape
for idx in range(c):
img[idx, :, :] = img[idx, :, :] * std[idx] + mean[idx]
return img
img = denormalize(img.cpu().numpy(), [0.485, 0.456, 0.406], [
0.229, 0.224, 0.225])
img = img.transpose(1, 2, 0).reshape((h, w, 3))
pred = pred.reshape((h, w))
# show image
ax0.set_title('img')
ax0.imshow(img)
ax1.set_title('pred')
mappable = ax1.imshow(pred, cmap=cmap, norm=norm)
# colorbar legend
cbar = plt.colorbar(mappable, ax=axes, shrink=0.7, )
cbar.ax.get_yaxis().set_ticks([])
for j, lab in enumerate(classes):
cbar.ax.text(2.3, (j + 0.45) / 20.0, lab, ha='left', va='center', )
plt.savefig(fname="./result.jpg")
print('result saved to ./result.jpg')
plt.show()
def main():
# --------------- model --------------- #
snapshot = os.path.join(args.models_path, args.backend, 'PSPNet_last')
net, starting_epoch = build_network(snapshot, args.backend)
net.eval()
# ------------ load image ------------ #
data_transform = get_transform()
imgfolder = 'ACGPN/ACGPN_testdata/test_img/'
savefolder = 'ACGPN/ACGPN_testdata/test_humanparse/'
if not os.path.exists(savefolder):
os.mkdir(savefolder)
imglist = os.listdir(imgfolder)
for imgname in tqdm.tqdm(imglist):
imgpath = os.path.join(imgfolder, imgname)
print(imgpath)
img = Image.open(imgpath)
img = data_transform(img)
if torch.cuda.is_available():
img = img.cuda()
with torch.no_grad():
pred, _ = net(img.unsqueeze(dim=0))
pred = pred.squeeze(dim=0)
pred = pred.cpu().numpy().transpose(1, 2, 0)
pred = np.asarray(np.argmax(pred, axis=2),
dtype=np.uint8).reshape((256, 192, 1))
pred_3 = np.repeat(pred, 3, axis = 2)
savepath = os.path.join(savefolder, imgname)
cv2.imwrite(savepath, pred_3)
if __name__ == '__main__':
main()
|
[
"numpy.array",
"torch.cuda.is_available",
"os.path.exists",
"os.listdir",
"numpy.repeat",
"argparse.ArgumentParser",
"matplotlib.colors.ListedColormap",
"os.mkdir",
"torchvision.transforms.ToTensor",
"matplotlib.pyplot.savefig",
"numpy.argmax",
"torchvision.transforms.Normalize",
"torchvision.transforms.Compose",
"matplotlib.pyplot.show",
"torch.device",
"cv2.imwrite",
"PIL.Image.open",
"matplotlib.pyplot.colorbar",
"os.path.join",
"torch.nn.DataParallel",
"tqdm.tqdm",
"os.path.basename",
"net.pspnet.PSPNet",
"torch.no_grad",
"matplotlib.colors.BoundaryNorm",
"matplotlib.pyplot.subplots"
] |
[((1113, 1181), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pyramid Scene Parsing Network"""'}), "(description='Pyramid Scene Parsing Network')\n", (1136, 1181), False, 'import argparse\n'), ((1679, 1699), 'torch.nn.DataParallel', 'nn.DataParallel', (['net'], {}), '(net)\n', (1694, 1699), True, 'import torch.nn as nn\n'), ((2051, 2076), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2074, 2076), False, 'import torch\n'), ((2345, 2385), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_image_list'], {}), '(transform_image_list)\n', (2363, 2385), False, 'from torchvision import transforms\n'), ((2431, 2449), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (2443, 2449), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2862), 'numpy.array', 'np.array', (["('Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'UpperClothes',\n 'Dress', 'Coat', 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt',\n 'Face', 'Left-arm', 'Right-arm', 'Left-leg', 'Right-leg', 'Left-shoe',\n 'Right-shoe')"], {}), "(('Background', 'Hat', 'Hair', 'Glove', 'Sunglasses',\n 'UpperClothes', 'Dress', 'Coat', 'Socks', 'Pants', 'Jumpsuits', 'Scarf',\n 'Skirt', 'Face', 'Left-arm', 'Right-arm', 'Left-leg', 'Right-leg',\n 'Left-shoe', 'Right-shoe'))\n", (2629, 2862), True, 'import numpy as np\n'), ((3385, 3427), 'matplotlib.colors.ListedColormap', 'matplotlib.colors.ListedColormap', (['colormap'], {}), '(colormap)\n', (3417, 3427), False, 'import matplotlib\n'), ((3541, 3587), 'matplotlib.colors.BoundaryNorm', 'matplotlib.colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (3571, 3587), False, 'import matplotlib\n'), ((4166, 4209), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['mappable'], {'ax': 'axes', 'shrink': '(0.7)'}), '(mappable, ax=axes, shrink=0.7)\n', (4178, 4209), True, 'import matplotlib.pyplot as plt\n'), ((4369, 4402), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': '"""./result.jpg"""'}), "(fname='./result.jpg')\n", (4380, 4402), True, 'import matplotlib.pyplot as plt\n'), ((4449, 4459), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4457, 4459), True, 'import matplotlib.pyplot as plt\n'), ((4535, 4594), 'os.path.join', 'os.path.join', (['args.models_path', 'args.backend', '"""PSPNet_last"""'], {}), "(args.models_path, args.backend, 'PSPNet_last')\n", (4547, 4594), False, 'import os\n'), ((4945, 4966), 'os.listdir', 'os.listdir', (['imgfolder'], {}), '(imgfolder)\n', (4955, 4966), False, 'import os\n'), ((4986, 5004), 'tqdm.tqdm', 'tqdm.tqdm', (['imglist'], {}), '(imglist)\n', (4995, 5004), False, 'import tqdm\n'), ((343, 434), 'net.pspnet.PSPNet', 'PSPNet', ([], {'sizes': '(1, 2, 3, 6)', 'psp_size': '(512)', 'deep_features_size': '(256)', 'backend': '"""squeezenet"""'}), "(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=\n 'squeezenet')\n", (349, 434), False, 'from net.pspnet import PSPNet\n'), ((455, 545), 'net.pspnet.PSPNet', 'PSPNet', ([], {'sizes': '(1, 2, 3, 6)', 'psp_size': '(1024)', 'deep_features_size': '(512)', 'backend': '"""densenet"""'}), "(sizes=(1, 2, 3, 6), psp_size=1024, deep_features_size=512, backend=\n 'densenet')\n", (461, 545), False, 'from net.pspnet import PSPNet\n'), ((566, 655), 'net.pspnet.PSPNet', 'PSPNet', ([], {'sizes': '(1, 2, 3, 6)', 'psp_size': '(512)', 'deep_features_size': '(256)', 'backend': '"""resnet18"""'}), "(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=\n 'resnet18')\n", (572, 655), 
False, 'from net.pspnet import PSPNet\n'), ((676, 765), 'net.pspnet.PSPNet', 'PSPNet', ([], {'sizes': '(1, 2, 3, 6)', 'psp_size': '(512)', 'deep_features_size': '(256)', 'backend': '"""resnet34"""'}), "(sizes=(1, 2, 3, 6), psp_size=512, deep_features_size=256, backend=\n 'resnet34')\n", (682, 765), False, 'from net.pspnet import PSPNet\n'), ((786, 877), 'net.pspnet.PSPNet', 'PSPNet', ([], {'sizes': '(1, 2, 3, 6)', 'psp_size': '(2048)', 'deep_features_size': '(1024)', 'backend': '"""resnet50"""'}), "(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=\n 'resnet50')\n", (792, 877), False, 'from net.pspnet import PSPNet\n'), ((899, 991), 'net.pspnet.PSPNet', 'PSPNet', ([], {'sizes': '(1, 2, 3, 6)', 'psp_size': '(2048)', 'deep_features_size': '(1024)', 'backend': '"""resnet101"""'}), "(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=\n 'resnet101')\n", (905, 991), False, 'from net.pspnet import PSPNet\n'), ((1013, 1105), 'net.pspnet.PSPNet', 'PSPNet', ([], {'sizes': '(1, 2, 3, 6)', 'psp_size': '(2048)', 'deep_features_size': '(1024)', 'backend': '"""resnet152"""'}), "(sizes=(1, 2, 3, 6), psp_size=2048, deep_features_size=1024, backend=\n 'resnet152')\n", (1019, 1105), False, 'from net.pspnet import PSPNet\n'), ((2229, 2250), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2248, 2250), False, 'from torchvision import transforms\n'), ((2260, 2326), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2280, 2326), False, 'from torchvision import transforms\n'), ((4874, 4900), 'os.path.exists', 'os.path.exists', (['savefolder'], {}), '(savefolder)\n', (4888, 4900), False, 'import os\n'), ((4910, 4930), 'os.mkdir', 'os.mkdir', (['savefolder'], {}), '(savefolder)\n', (4918, 4930), False, 'import os\n'), ((5024, 5056), 'os.path.join', 'os.path.join', (['imgfolder', 'imgname'], {}), '(imgfolder, imgname)\n', (5036, 5056), False, 'import os\n'), ((5094, 5113), 'PIL.Image.open', 'Image.open', (['imgpath'], {}), '(imgpath)\n', (5104, 5113), False, 'from PIL import Image\n'), ((5159, 5184), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5182, 5184), False, 'import torch\n'), ((5229, 5244), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5242, 5244), False, 'import torch\n'), ((5546, 5572), 'numpy.repeat', 'np.repeat', (['pred', '(3)'], {'axis': '(2)'}), '(pred, 3, axis=2)\n', (5555, 5572), True, 'import numpy as np\n'), ((5599, 5632), 'os.path.join', 'os.path.join', (['savefolder', 'imgname'], {}), '(savefolder, imgname)\n', (5611, 5632), False, 'import os\n'), ((5645, 5674), 'cv2.imwrite', 'cv2.imwrite', (['savepath', 'pred_3'], {}), '(savepath, pred_3)\n', (5656, 5674), False, 'import cv2\n'), ((1748, 1774), 'os.path.basename', 'os.path.basename', (['snapshot'], {}), '(snapshot)\n', (1764, 1774), False, 'import os\n'), ((1924, 1943), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1936, 1943), False, 'import torch\n'), ((5420, 5443), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(2)'}), '(pred, axis=2)\n', (5429, 5443), True, 'import numpy as np\n')]
|
import cv2
import os
import numpy as np
# This module contains all common functions that are called in tester.py file
# Given an image, the function below returns rectangles for the detected faces along with the grayscale image
def faceDetection(test_img):
gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY) # convert color image to grayscale
face_haar_cascade = cv2.CascadeClassifier('HaarCascade/haarcascade_frontalface_default.xml') # Load haar classifier
faces = face_haar_cascade.detectMultiScale(gray_img, scaleFactor=1.32,
minNeighbors=5) # detectMultiScale returns rectangles
return faces, gray_img
# Given a directory, the function below returns the face region of gray_img along with its label/ID
def labels_for_training_data(directory):
faces = []
faceID = []
for path, subdirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.startswith("."):
print("Skipping system file") # Skipping files that startwith .
continue
id = os.path.basename(path) # fetching subdirectory names
img_path = os.path.join(path, filename) # fetching image path
print("img_path:", img_path)
print("id:", id)
test_img = cv2.imread(img_path) # loading each image one by one
if test_img is None:
print("Image not loaded properly")
continue
faces_rect, gray_img = faceDetection(
test_img) # Calling faceDetection function to return faces detected in particular image
if len(faces_rect) != 1:
continue # Since we are assuming only single person images are being fed to classifier
(x, y, w, h) = faces_rect[0]
            roi_gray = gray_img[y:y + h, x:x + w]  # cropping region of interest i.e. face area from grayscale image
faces.append(roi_gray)
faceID.append(int(id))
return faces, faceID
# Below function trains haar classifier and takes faces,faceID returned by previous function as its arguments
def train_classifier(faces, faceID):
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces, np.array(faceID))
return face_recognizer
# Below function draws bounding boxes around detected face in image
def draw_rect(test_img, face):
(x, y, w, h) = face
cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=5)
# Below function writes name of person for detected label
def put_text(test_img, text, x, y):
cv2.putText(test_img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 4)
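# ---------------------------------------------------------------------------
# Illustrative end-to-end sketch (not part of the original module): how a
# driver such as tester.py could wire these helpers together. The folder
# "trainingImages" and the name mapping are hypothetical.
#
#   faces, faceID = labels_for_training_data('trainingImages')
#   face_recognizer = train_classifier(faces, faceID)
#   name = {0: 'Alice', 1: 'Bob'}                      # label -> display name
#   test_img = cv2.imread('test.jpg')
#   faces_detected, gray_img = faceDetection(test_img)
#   for (x, y, w, h) in faces_detected:
#       label, confidence = face_recognizer.predict(gray_img[y:y + h, x:x + w])
#       draw_rect(test_img, (x, y, w, h))              # modifies test_img in place
#       put_text(test_img, name[label], x, y)
# ---------------------------------------------------------------------------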
|
[
"cv2.rectangle",
"os.path.join",
"cv2.face.LBPHFaceRecognizer_create",
"cv2.putText",
"numpy.array",
"os.path.basename",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.imread",
"os.walk"
] |
[((261, 303), 'cv2.cvtColor', 'cv2.cvtColor', (['test_img', 'cv2.COLOR_BGR2GRAY'], {}), '(test_img, cv2.COLOR_BGR2GRAY)\n', (273, 303), False, 'import cv2\n'), ((364, 436), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""HaarCascade/haarcascade_frontalface_default.xml"""'], {}), "('HaarCascade/haarcascade_frontalface_default.xml')\n", (385, 436), False, 'import cv2\n'), ((878, 896), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (885, 896), False, 'import os\n'), ((2203, 2239), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (2237, 2239), False, 'import cv2\n'), ((2447, 2520), 'cv2.rectangle', 'cv2.rectangle', (['test_img', '(x, y)', '(x + w, y + h)', '(255, 0, 0)'], {'thickness': '(5)'}), '(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=5)\n', (2460, 2520), False, 'import cv2\n'), ((2621, 2700), 'cv2.putText', 'cv2.putText', (['test_img', 'text', '(x, y)', 'cv2.FONT_HERSHEY_DUPLEX', '(2)', '(255, 0, 0)', '(4)'], {}), '(test_img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 4)\n', (2632, 2700), False, 'import cv2\n'), ((2273, 2289), 'numpy.array', 'np.array', (['faceID'], {}), '(faceID)\n', (2281, 2289), True, 'import numpy as np\n'), ((1098, 1120), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1114, 1120), False, 'import os\n'), ((1175, 1203), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1187, 1203), False, 'import os\n'), ((1320, 1340), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1330, 1340), False, 'import cv2\n')]
|
import dask
import numpy as np
import pandas as pd
from epimargin.models import Age_SIRVD
from epimargin.utils import annually, normalize, percent, years
from studies.vaccine_allocation.commons import *
from tqdm import tqdm
import warnings
warnings.filterwarnings("error")
num_sims = 1000
simulation_range = 1 * years
phi_points = [_ * percent * annually for _ in (25, 50, 100, 200)]
simulation_initial_conditions = pd.read_csv(data/f"all_india_coalesced_scaling_Apr15.csv")\
.drop(columns = ["Unnamed: 0"])\
.set_index(["state", "district"])
rerun_states = ["Telangana", "Uttarakhand", "Jharkhand", "Arunachal Pradesh", "Nagaland", "Sikkim"] + coalesce_states
districts_to_run = simulation_initial_conditions
num_age_bins = 7
seed = 0
# prioritization orderings over the 7 age bins; prioritize() fills bins in list order (first element is vaccinated first)
MORTALITY = [6, 5, 4, 3, 2, 1, 0]
CONTACT = [1, 2, 3, 4, 0, 5, 6]
CONSUMPTION = [4, 5, 6, 3, 2, 1, 0]
def save_metrics(tag, policy, dst = tev_src):
np.savez_compressed(dst/f"{tag}.npz",
dT = policy.dT_total,
dD = policy.dD_total,
pi = policy.pi,
q0 = policy.q0,
q1 = policy.q1,
Dj = policy.D
)
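# The arrays written above can be reloaded later with np.load; for example
# (illustrative sketch, the file name below is hypothetical):
#   with np.load(tev_src / "TG_district_phi25_mortality.npz") as run:
#       daily_cases, daily_deaths = run["dT"], run["dD"]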
def prioritize(num_doses, S, prioritization):
Sp = S[:, prioritization]
dV = np.where(Sp.cumsum(axis = 1) <= num_doses, Sp, 0)
dV[np.arange(len(dV)), (Sp.cumsum(axis = 1) > dV.cumsum(axis = 1)).argmax(axis = 1)] = num_doses - dV.sum(axis = 1)
return dV[:, sorted(range(len(prioritization)), key = prioritization.__getitem__)].clip(0, S)
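# Worked example (illustrative only): with S = np.array([[10, 20, 30]]),
# prioritization = [2, 0, 1] and num_doses = 35, the highest-priority bin
# (index 2, population 30) is covered first and the remaining 5 doses go to
# the next bin in the ordering, so prioritize(35, S, [2, 0, 1]) returns
# np.array([[5, 0, 30]]).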
def process(district_data):
(
(state, district), state_code,
sero_0, N_0, sero_1, N_1, sero_2, N_2, sero_3, N_3, sero_4, N_4, sero_5, N_5, sero_6, N_6, N_tot,
Rt, Rt_upper, Rt_lower, S0, I0, R0, D0, dT0, dD0, V0, T_ratio, R_ratio
) = district_data
try:
S0 = int(S0)
except ValueError as e:
print (state, district, e)
return
Sj0 = np.array([(1 - sj) * Nj for (sj, Nj) in zip([sero_0, sero_1, sero_2, sero_3, sero_4, sero_5, sero_6], [N_0, N_1, N_2, N_3, N_4, N_5, N_6])])
# distribute historical doses assuming mortality prioritization
Sj0 = prioritize(V0, Sj0.copy()[None, :], MORTALITY)[0]
def get_model(seed = 0):
model = Age_SIRVD(
name = state_code + "_" + district,
population = N_tot - D0,
dT0 = (np.ones(num_sims) * dT0).astype(int),
Rt0 = 0 if S0 == 0 else Rt * N_tot / S0,
S0 = np.tile( Sj0, num_sims).reshape((num_sims, -1)),
I0 = np.tile((fI * I0).T, num_sims).reshape((num_sims, -1)),
R0 = np.tile((fR * R0).T, num_sims).reshape((num_sims, -1)),
D0 = np.tile((fD * D0).T, num_sims).reshape((num_sims, -1)),
mortality = np.array(list(OD_IFRs.values())),
infectious_period = infectious_period,
random_seed = seed,
)
model.dD_total[0] = np.ones(num_sims) * dD0
model.dT_total[0] = np.ones(num_sims) * dT0
return model
for phi in phi_points:
num_doses = phi * (S0 + I0 + R0)
sim_tag = f"{state_code}_{district}_phi{int(phi * 365 * 100)}_"
random_model, mortality_model, contact_model, no_vax_model = [get_model(seed) for _ in range(4)]
for t in range(simulation_range):
if t <= 1/phi:
dV_random = num_doses * normalize(random_model.N[-1], axis = 1).clip(0)
dV_mortality = prioritize(num_doses, mortality_model.N[-1], MORTALITY ).clip(0)
dV_contact = prioritize(num_doses, contact_model.N[-1], CONTACT ).clip(0)
else:
dV_random, dV_mortality, dV_contact = np.zeros((num_sims, 7)), np.zeros((num_sims, 7)), np.zeros((num_sims, 7))
random_model .parallel_forward_epi_step(dV_random, num_sims = num_sims)
mortality_model.parallel_forward_epi_step(dV_mortality, num_sims = num_sims)
contact_model .parallel_forward_epi_step(dV_contact, num_sims = num_sims)
no_vax_model .parallel_forward_epi_step(dV = np.zeros((7, num_sims))[:, 0], num_sims = num_sims)
if phi == phi_points[0]:
save_metrics(sim_tag + "novax", no_vax_model )
save_metrics(sim_tag + "random", random_model )
save_metrics(sim_tag + "mortality", mortality_model)
save_metrics(sim_tag + "contact", contact_model )
if __name__ == "__main__":
distribute = False
if distribute:
with dask.config.set({"scheduler.allowed-failures": 1}):
client = dask.distributed.Client(n_workers = 1, threads_per_worker = 1)
print(client.dashboard_link)
with dask.distributed.get_task_stream(client) as ts:
futures = []
for district in districts_to_run.itertuples():
futures.append(client.submit(process, district, key = ":".join(district[0])))
dask.distributed.progress(futures)
else:
failures = []
for t in tqdm(districts_to_run.itertuples(), total = len(districts_to_run)):
process(t)
# try:
# process(t)
# except Exception as e:
# failures.append((e, t))
for failure in failures:
print(failure)
|
[
"dask.config.set",
"dask.distributed.progress",
"numpy.tile",
"numpy.ones",
"pandas.read_csv",
"dask.distributed.get_task_stream",
"epimargin.utils.normalize",
"dask.distributed.Client",
"numpy.zeros",
"numpy.savez_compressed",
"warnings.filterwarnings"
] |
[((242, 274), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (265, 274), False, 'import warnings\n'), ((937, 1076), 'numpy.savez_compressed', 'np.savez_compressed', (["(dst / f'{tag}.npz')"], {'dT': 'policy.dT_total', 'dD': 'policy.dD_total', 'pi': 'policy.pi', 'q0': 'policy.q0', 'q1': 'policy.q1', 'Dj': 'policy.D'}), "(dst / f'{tag}.npz', dT=policy.dT_total, dD=policy.\n dD_total, pi=policy.pi, q0=policy.q0, q1=policy.q1, Dj=policy.D)\n", (956, 1076), True, 'import numpy as np\n'), ((2950, 2967), 'numpy.ones', 'np.ones', (['num_sims'], {}), '(num_sims)\n', (2957, 2967), True, 'import numpy as np\n'), ((3002, 3019), 'numpy.ones', 'np.ones', (['num_sims'], {}), '(num_sims)\n', (3009, 3019), True, 'import numpy as np\n'), ((4548, 4598), 'dask.config.set', 'dask.config.set', (["{'scheduler.allowed-failures': 1}"], {}), "({'scheduler.allowed-failures': 1})\n", (4563, 4598), False, 'import dask\n'), ((4621, 4679), 'dask.distributed.Client', 'dask.distributed.Client', ([], {'n_workers': '(1)', 'threads_per_worker': '(1)'}), '(n_workers=1, threads_per_worker=1)\n', (4644, 4679), False, 'import dask\n'), ((4992, 5026), 'dask.distributed.progress', 'dask.distributed.progress', (['futures'], {}), '(futures)\n', (5017, 5026), False, 'import dask\n'), ((433, 493), 'pandas.read_csv', 'pd.read_csv', (["(data / f'all_india_coalesced_scaling_Apr15.csv')"], {}), "(data / f'all_india_coalesced_scaling_Apr15.csv')\n", (444, 493), True, 'import pandas as pd\n'), ((4742, 4782), 'dask.distributed.get_task_stream', 'dask.distributed.get_task_stream', (['client'], {}), '(client)\n', (4774, 4782), False, 'import dask\n'), ((3722, 3745), 'numpy.zeros', 'np.zeros', (['(num_sims, 7)'], {}), '((num_sims, 7))\n', (3730, 3745), True, 'import numpy as np\n'), ((3747, 3770), 'numpy.zeros', 'np.zeros', (['(num_sims, 7)'], {}), '((num_sims, 7))\n', (3755, 3770), True, 'import numpy as np\n'), ((3772, 3795), 'numpy.zeros', 'np.zeros', (['(num_sims, 7)'], {}), '((num_sims, 7))\n', (3780, 3795), True, 'import numpy as np\n'), ((2467, 2489), 'numpy.tile', 'np.tile', (['Sj0', 'num_sims'], {}), '(Sj0, num_sims)\n', (2474, 2489), True, 'import numpy as np\n'), ((2549, 2579), 'numpy.tile', 'np.tile', (['(fI * I0).T', 'num_sims'], {}), '((fI * I0).T, num_sims)\n', (2556, 2579), True, 'import numpy as np\n'), ((2631, 2661), 'numpy.tile', 'np.tile', (['(fR * R0).T', 'num_sims'], {}), '((fR * R0).T, num_sims)\n', (2638, 2661), True, 'import numpy as np\n'), ((2713, 2743), 'numpy.tile', 'np.tile', (['(fD * D0).T', 'num_sims'], {}), '((fD * D0).T, num_sims)\n', (2720, 2743), True, 'import numpy as np\n'), ((4135, 4158), 'numpy.zeros', 'np.zeros', (['(7, num_sims)'], {}), '((7, num_sims))\n', (4143, 4158), True, 'import numpy as np\n'), ((2341, 2358), 'numpy.ones', 'np.ones', (['num_sims'], {}), '(num_sims)\n', (2348, 2358), True, 'import numpy as np\n'), ((3405, 3442), 'epimargin.utils.normalize', 'normalize', (['random_model.N[-1]'], {'axis': '(1)'}), '(random_model.N[-1], axis=1)\n', (3414, 3442), False, 'from epimargin.utils import annually, normalize, percent, years\n')]
|
'''Every agent has an agent state, which is its local view of the world'''
import numpy as np
import itertools
class AgentState:
def __init__(self, name, agt, seed=1234):
self.name = name
self.prng = np.random.RandomState(seed)
        # contains the variable assignments (explored) for this agent and its neighbors
self.variables_assignments = {var.name: var.value for var in agt.variables}
self.this_agt = agt
## Data structures to explore assignment local to an agent
self.my_vars = [var.name for var in agt.variables]
# the iterator to all possible assignment for this agent
self.assignment_it = 0
# All possible assignments for the variables of this agent
domains = [var.domain for var in agt.variables]
self.agt_assignments_list = list(itertools.product(*domains))
def addNeighborsVariables(self, neighbor):
for var in neighbor.variables:
self.variables_assignments[var.name] = var.value
def recvNeighborsValues(self, neighbor):
for var in neighbor.variables:
self.variables_assignments[var.name] = var.value
def copyAgtAssignmentToState(self):
for var in self.this_agt.variables:
self.variables_assignments[var.name] = var.value
def nextAssignment(self):
        '''
        If a next assignment for the agent's local variables exists, write it
        to self.variables_assignments and return True. Otherwise return False.
        '''
if self.assignment_it < len(self.agt_assignments_list):
self.setAssignmentIt(self.assignment_it)
self.assignment_it += 1
return True
else:
# Reset iterator
self.assignment_it = 0
return False
def setAssignmentIt(self, it):
for i, var_name in enumerate(self.my_vars):
self.variables_assignments[var_name] = self.agt_assignments_list[it][i]
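# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the assignment
# iterator is typically driven. `Var` and `Agt` are hypothetical stand-ins for
# the real variable/agent classes, which must expose .name, .value and .domain.
#
#   class Var:
#       def __init__(self, name, value, domain):
#           self.name, self.value, self.domain = name, value, domain
#   class Agt:
#       def __init__(self, variables):
#           self.variables = variables
#
#   agt = Agt([Var('x1', 0, [0, 1]), Var('x2', 0, [0, 1])])
#   state = AgentState('a1', agt)
#   while state.nextAssignment():      # enumerates (0,0), (0,1), (1,0), (1,1)
#       print(state.variables_assignments)
# ---------------------------------------------------------------------------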
|
[
"itertools.product",
"numpy.random.RandomState"
] |
[((221, 248), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (242, 248), True, 'import numpy as np\n'), ((837, 864), 'itertools.product', 'itertools.product', (['*domains'], {}), '(*domains)\n', (854, 864), False, 'import itertools\n')]
|
from typing import List, Union
import numpy as np
import pandas_datareader as pdr
import pandas as pd
import matplotlib.pyplot as plt
def rsi(symbol :str ,name :str, date :str) -> None :
"""
Calculates and visualises the Relative Stock Index on a Stock of the company.
Parameters:
symbol(str) : Symbol of the company from https://in.finance.yahoo.com/
name(str) : Name of the company
date(str) : start date of historical data in the format (YYYY,M,D)
Returns:
Return type: void
Example:
rsi('GOOG','Google','2020,01,01')
"""
    ticker : pd.DataFrame = pdr.get_data_yahoo(symbol, date)
    delta : pd.Series = ticker['Close'].diff()
    up : pd.Series = delta.clip(lower=0)
    down : pd.Series = -1*delta.clip(upper=0)
    ema_up : pd.Series = up.ewm(com=13, adjust=False).mean()
    ema_down : pd.Series = down.ewm(com=13, adjust=False).mean()
    rs : pd.Series = ema_up/ema_down
    ticker['RSI'] = 100 - (100/(1 + rs))
    ticker = ticker.iloc[14:]  # drop the first 14 rows, where the RSI window is not yet full
print(ticker)
fig, (ax1, ax2) = plt.subplots(2)
ax1.get_xaxis().set_visible(False)
fig.suptitle(name)
ticker['Close'].plot(ax=ax1)
ax1.set_ylabel('Price ($)')
ticker['RSI'].plot(ax=ax2)
ax2.set_ylim(0,100)
ax2.axhline(30, color='r', linestyle='--')
ax2.axhline(70, color='r', linestyle='--')
ax2.set_ylabel('RSI')
plt.show()
def volatility(symbol :str, date :str) ->None:
"""
Measures and visualizes the Volatility of a Stock by calculating the Average True Range(ATR)
Parameters:
symbol(str) : Symbol of the company from https://in.finance.yahoo.com/
date(str) : start date of historical data in the format (YYYY,M,D)
Returns:
Return type: void
Example:
volatility('GOOG','2020,01,01')
"""
    data : pd.DataFrame = pdr.get_data_yahoo(symbol, date)
    data.head()
    high_low : pd.Series = data['High'] - data['Low']
    high_cp : pd.Series = np.abs(data['High'] - data['Close'].shift())
    low_cp : pd.Series = np.abs(data['Low'] - data['Close'].shift())
    df : pd.DataFrame = pd.concat([high_low, high_cp, low_cp], axis=1)
    true_range : pd.Series = np.max(df, axis=1)
    average_true_range : pd.Series = true_range.rolling(14).mean()
average_true_range
true_range.rolling(14).sum()/14
fig, ax = plt.subplots()
average_true_range.plot(ax=ax)
ax2 : Union[bool,float]= data['Close'].plot(ax=ax, secondary_y=True, alpha=.3)
ax.set_ylabel("ATR")
ax2.set_ylabel("Price")
plt.show()
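# Illustrative usage (sketch, not part of the original module); both helpers
# download data from Yahoo Finance, so running them requires network access:
#   rsi('GOOG', 'Google', '2020,01,01')
#   volatility('GOOG', '2020,01,01')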
|
[
"pandas_datareader.get_data_yahoo",
"numpy.max",
"pandas.concat",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((620, 652), 'pandas_datareader.get_data_yahoo', 'pdr.get_data_yahoo', (['symbol', 'date'], {}), '(symbol, date)\n', (638, 652), True, 'import pandas_datareader as pdr\n'), ((1072, 1087), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1084, 1087), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1404), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1402, 1404), True, 'import matplotlib.pyplot as plt\n'), ((1865, 1897), 'pandas_datareader.get_data_yahoo', 'pdr.get_data_yahoo', (['symbol', 'date'], {}), '(symbol, date)\n', (1883, 1897), True, 'import pandas_datareader as pdr\n'), ((2137, 2183), 'pandas.concat', 'pd.concat', (['[high_low, high_cp, low_cp]'], {'axis': '(1)'}), '([high_low, high_cp, low_cp], axis=1)\n', (2146, 2183), True, 'import pandas as pd\n'), ((2208, 2226), 'numpy.max', 'np.max', (['df'], {'axis': '(1)'}), '(df, axis=1)\n', (2214, 2226), True, 'import numpy as np\n'), ((2367, 2381), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2379, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2557, 2567), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2565, 2567), True, 'import matplotlib.pyplot as plt\n')]
|
"""
These classes are a collection of the needed tools to read external data.
The External type objects created by these classes are initialized before
the Stateful objects by functions.Model.initialize.
"""
import re
import os
import warnings
import pandas as pd # TODO move to openpyxl
import numpy as np
import xarray as xr
from openpyxl import load_workbook
from . import utils
class Excels():
"""
Class to save the read Excel files and thus avoid double reading
"""
_Excels, _Excels_opyxl = {}, {}
@classmethod
def read(cls, file_name, sheet_name):
"""
Read the Excel file or return the previously read one
"""
if file_name + sheet_name in cls._Excels:
return cls._Excels[file_name + sheet_name]
else:
excel = np.array([
pd.to_numeric(ex, errors='coerce')
for ex in
pd.read_excel(file_name, sheet_name, header=None).values
])
cls._Excels[file_name + sheet_name] = excel
return excel
@classmethod
def read_opyxl(cls, file_name):
"""
Read the Excel file using OpenPyXL or return the previously read one
"""
if file_name in cls._Excels_opyxl:
return cls._Excels_opyxl[file_name]
else:
excel = load_workbook(file_name, read_only=True, data_only=True)
cls._Excels_opyxl[file_name] = excel
return excel
@classmethod
def clean(cls):
"""
Clean the dictionary of read files
"""
cls._Excels, cls._Excels_opyxl = {}, {}
class External(object):
"""
Main class of external objects
Attributes
----------
py_name: str
The python name of the object
    missing: str ("warning", "raise", "ignore", "keep")
What to do with missing values. If "warning" (default)
shows a warning message and interpolates the values.
If "raise" raises an error. If "ignore" interpolates
the values without showing anything. If "keep" it will keep
the missing values, this option may cause the integration to
fail, but it may be used to check the quality of the data.
file: str
File name from which the data is read.
sheet: str
Sheet name from which the data is read.
"""
missing = "warning"
def __init__(self, py_name):
self.py_name = py_name
self.file = None
self.sheet = None
def __str__(self):
return self.py_name
def _get_data_from_file(self, rows, cols):
"""
Function to read data from excel file using rows and columns
Parameters
----------
rows: list of len 2
first row and last row+1 to be read, starting from 0
cols: list of len 2
first col and last col+1 to be read, starting from 0
Returns
-------
data: pandas.DataFrame, pandas.Series or float
depending on the shape of the requested data
"""
# TODO move to openpyxl to avoid pandas dependency in this file.
ext = os.path.splitext(self.file)[1].lower()
if ext in ['.xls', '.xlsx']:
# read data
data = Excels.read(
self.file,
self.sheet)[rows[0]:rows[1], cols[0]:cols[1]].copy()
shape = data.shape
# if it is a single row remove its dimension
if shape[1] == 1:
data = data[:, 0]
if shape[0] == 1:
data = data[0]
return data
raise NotImplementedError(self.py_name + "\n"
+ "The files with extension "
+ ext + " are not implemented")
def _get_data_from_file_opyxl(self, cellname):
"""
Function to read data from excel file using cell range name
Parameters
----------
cellname: str
the cell range name
Returns
-------
data: numpy.ndarray or float
depending on the shape of the requested data
"""
# read data
excel = Excels.read_opyxl(self.file)
try:
# Get the local id of the sheet
# needed for searching in locals names
# need to lower the sheetnames as Vensim has no case sensitivity
sheetId = [sheetname_wb.lower() for sheetname_wb
in excel.sheetnames].index(self.sheet.lower())
except ValueError:
# Error if it is not able to get the localSheetId
raise ValueError(self.py_name + "\n"
+ "The sheet doesn't exist...\n"
+ self._file_sheet)
try:
# Search for local and global names
cellrange = excel.defined_names.get(cellname, sheetId)\
or excel.defined_names.get(cellname)
coordinates = cellrange.destinations
for sheet, cells in coordinates:
if sheet.lower() == self.sheet.lower():
values = excel[sheet][cells]
try:
return np.array(
[[i.value if not isinstance(i.value, str)
else np.nan for i in j] for j in values],
dtype=float)
except TypeError:
return float(values.value)
raise AttributeError
except (KeyError, AttributeError):
# key error if the cellrange doesn't exist in the file or sheet
raise AttributeError(
self.py_name + "\n"
+ "The cell range name:\t {}\n".format(cellname)
+ "Doesn't exist in:\n" + self._file_sheet
)
def _get_series_data(self, series_across, series_row_or_col, cell, size):
"""
        Function that reads series and data from excel file for
DATA and LOOKUPS.
Parameters
----------
series_across: "row", "column" or "name"
The way to read series file.
series_row_or_col: int or str
If series_across is "row" the row number where the series data is.
If series_across is "column" the column name where
the series data is.
If series_across is "name" the cell range name where
the series data is.
cell:
            If series_across is not "name", the top left cell where
the data table starts.
Else the name of the cell range where the data is.
size:
The size of the 2nd dimension of the data.
Returns
-------
series, data: ndarray (1D), ndarray(1D/2D)
The values of the series and data.
"""
if series_across == "row":
# Horizontal data (dimension values in a row)
# get the dimension values
first_row, first_col = self._split_excel_cell(cell)
series = self._get_data_from_file(
rows=[int(series_row_or_col)-1, int(series_row_or_col)],
cols=[first_col, None])
# read data
data = self._get_data_from_file(
rows=[first_row, first_row + size],
cols=[first_col, None]).transpose()
elif series_across == "column":
# Vertical data (dimension values in a column)
# get the dimension values
first_row, first_col = self._split_excel_cell(cell)
series_col = self._col_to_num(series_row_or_col)
series = self._get_data_from_file(
rows=[first_row, None],
cols=[series_col, series_col+1])
# read data
data = self._get_data_from_file(
rows=[first_row, None],
cols=[first_col, first_col + size])
else:
# get series data
series = self._get_data_from_file_opyxl(series_row_or_col)
if isinstance(series, float):
series = np.array([[series]])
series_shape = series.shape
if series_shape[0] == 1:
# horizontal definition of lookup/time dimension
series = series[0]
transpose = True
elif series_shape[1] == 1:
# vertical definition of lookup/time dimension
series = series[:, 0]
transpose = False
else:
# Error if the lookup/time dimension is 2D
raise ValueError(
self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
                    + "\tDimension name:"
+ "\t{}\n".format(series_row_or_col)
+ " is a table and not a vector"
)
# get data
data = self._get_data_from_file_opyxl(cell)
if isinstance(data, float):
data = np.array([[data]])
if transpose:
# transpose for horizontal definition of dimension
data = data.transpose()
if data.shape[0] != len(series):
raise ValueError(
self.py_name + "\n"
+ "Dimension and data given in:\n"
+ self._file_sheet
+ "\tDimension name:\t{}\n".format(series_row_or_col)
+ "\tData name:\t{}\n".format(cell)
+ " don't have the same length in the 1st dimension"
)
if data.shape[1] != size:
            # Given coordinates length is different from
            # the length of the 2nd dimension
raise ValueError(
self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
                + " does not have the same size as the given coordinates"
)
if data.shape[1] == 1:
# remove second dimension of data if its shape is (N, 1)
data = data[:, 0]
return series, data
def _resolve_file(self, root=None, possible_ext=None):
possible_ext = possible_ext or\
['', '.xls', '.xlsx', '.odt', '.txt', '.tab']
if self.file[0] == '?':
self.file = os.path.join(root, self.file[1:])
if not os.path.isfile(self.file):
for ext in possible_ext:
if os.path.isfile(self.file + ext):
self.file = self.file + ext
return
# raise FileNotFoundError(self.file)
# python2 compatibility
raise IOError("File Not Found: " + self.file)
else:
return
def _initialize_data(self, element_type):
"""
Initialize one element of DATA or LOOKUPS
Parameters
----------
element_type: str
"lookup" for LOOKUPS, "data" for data.
Returns
-------
data: xarray.DataArray
Dataarray with the time or interpolation dimension
as first dimension.
"""
self._resolve_file(root=self.root)
series_across = self._series_selector(self.x_row_or_col, self.cell)
size = utils.compute_shape(self.coords, reshape_len=1,
py_name=self.py_name)[0]
series, data = self._get_series_data(
series_across=series_across,
series_row_or_col=self.x_row_or_col,
cell=self.cell, size=size
)
# remove nan or missing values from dimension
if series_across != "name":
# Remove last nans only if the method is to read by row or col
i = 0
try:
while np.isnan(series[i-1]):
i -= 1
except IndexError:
# series has len 0
raise ValueError(
self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " has length 0"
)
if i != 0:
series = series[:i]
data = data[:i]
# warning/error if missing data in the series
if any(np.isnan(series)) and self.missing != "keep":
valid_values = ~np.isnan(series)
series = series[valid_values]
data = data[valid_values]
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Dimension value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " the corresponding data value(s) to the "
+ "missing/non-valid value(s) will be ignored\n\n"
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Dimension value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
)
# Check if the lookup/time dimension is strictly monotonous
if np.any(np.diff(series) <= 0) and self.missing != "keep":
raise ValueError(self.py_name + "\n"
+ "Dimension given in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(series_across, self.x_row_or_col)
+ " is not strictly monotonous")
# Check for missing values in data
if np.any(np.isnan(data)) and self.missing != "keep":
if series_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
# Fill missing values with the chosen interpolation method
# what Vensim does during running for DATA
warnings.warn(
self.py_name + "\n"
+ "Data value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
+ " the corresponding value will be filled "
+ "with the interpolation method of the object.\n\n"
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Data value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# fill values
self._fill_missing(series, data)
reshape_dims = tuple([len(series)] + utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
if element_type == "lookup":
dim_name = "lookup_dim"
else:
dim_name = "time"
data = xr.DataArray(
data=data,
coords={dim_name: series, **self.coords},
dims=[dim_name] + list(self.coords)
)
return data
def _fill_missing(self, series, data):
"""
Fills missing values in excel read data. Mutates the values in data.
Parameters
----------
series:
the time series without missing values
data:
the data with missing values
Returns
-------
None
"""
# if data is 2dims we need to interpolate
datanan = np.isnan(data)
if len(data.shape) == 1:
data[datanan] = self._interpolate_missing(
series[datanan],
series[~datanan],
data[~datanan])
else:
for i, nanlist in enumerate(list(datanan.transpose())):
data[nanlist, i] = self._interpolate_missing(
series[nanlist],
series[~nanlist],
data[~nanlist][:, i])
def _interpolate_missing(self, x, xr, yr):
"""
Interpolates a list of missing values from _fill_missing
Parameters
----------
x:
            list of missing x values to interpolate
xr:
non-missing x values
yr:
non-missing y values
Returns
-------
y:
Result after interpolating x with self.interp method
"""
y = np.empty_like(x, dtype=float)
for i, value in enumerate(x):
if self.interp == "raw":
y[i] = np.nan
elif value >= xr[-1]:
y[i] = yr[-1]
elif value <= xr[0]:
y[i] = yr[0]
elif self.interp == 'look forward':
y[i] = yr[xr >= value][0]
elif self.interp == 'hold backward':
y[i] = yr[xr <= value][-1]
else:
y[i] = np.interp(value, xr, yr)
return y
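    # Illustrative behaviour of the method above (sketch): with xr = [0, 1, 2]
    # and yr = [10, 20, 30],
    #   interp == 'interpolate'   -> x = 0.5 gives 15   (np.interp)
    #   interp == 'look forward'  -> x = 0.5 gives 20   (first known value at or after x)
    #   interp == 'hold backward' -> x = 0.5 gives 10   (last known value at or before x)
    #   interp == 'raw'           -> x = 0.5 gives nan  (missing values are kept)
    # For every method except 'raw', values outside [xr[0], xr[-1]] are clamped
    # to the first/last known value.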
@property
def _file_sheet(self):
"""
Returns file and sheet name in a string
"""
return "\tFile name:\t{}\n".format(self.file)\
+ "\tSheet name:\t{}\n".format(self.sheet)
@staticmethod
def _col_to_num(col):
"""
Transforms the column name to int
Parameters
----------
col: str
Column name
Returns
-------
int
Column number
"""
if len(col) == 1:
return ord(col.upper()) - ord('A')
elif len(col) == 2:
left = ord(col[0].upper()) - ord('A') + 1
right = ord(col[1].upper()) - ord('A')
return left * (ord('Z')-ord('A')+1) + right
else:
left = ord(col[0].upper()) - ord('A') + 1
center = ord(col[1].upper()) - ord('A') + 1
right = ord(col[2].upper()) - ord('A')
return left * ((ord('Z')-ord('A')+1)**2)\
+ center * (ord('Z')-ord('A')+1)\
+ right
def _split_excel_cell(self, cell):
"""
Splits a cell value given in a string.
Returns None for non-valid cell formats.
Parameters
----------
cell: str
Cell like string, such as "A1", "b16", "AC19"...
If it is not a cell like string will return None.
Returns
-------
row number, column number: int, int
If the cell input is valid. Both numbers are given in Python
enumeration, i.e., first row and first column are 0.
"""
split = re.findall(r'\d+|\D+', cell)
try:
# check that we only have two values [column, row]
assert len(split) == 2
# check that the column name has no special characters
assert not re.compile('[^a-zA-Z]+').search(split[0])
# check that row number is not 0
assert int(split[1]) != 0
# the column name has as maximum 3 letters
assert len(split[0]) <= 3
return int(split[1])-1, self._col_to_num(split[0])
except AssertionError:
return
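    # Illustrative sketch of the two cell helpers above (not part of the class):
    #   External._col_to_num('A')  -> 0,  'Z' -> 25,  'AA' -> 26,  'AB' -> 27
    #   self._split_excel_cell('B3')       -> (2, 1)   # zero-based (row, column)
    #   self._split_excel_cell('notacell') -> None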
@staticmethod
def _reshape(data, dims):
"""
        Reshapes a pandas.DataFrame, pandas.Series, xarray.DataArray
or np.ndarray in the given dimensions.
Parameters
----------
data: xarray.DataArray/numpy.ndarray
Data to be reshaped
dims: tuple
The dimensions to reshape.
Returns
-------
numpy.ndarray
reshaped array
"""
try:
data = data.values
except AttributeError:
pass
return data.reshape(dims)
def _series_selector(self, x_row_or_col, cell):
"""
        Selects whether series data (DATA/LOOKUPS) should be read by columns,
rows or cellrange name.
Based on the input format of x_row_or_col and cell.
The format of the 2 variables must be consistent.
Parameters
----------
x_row_or_col: str
String of a number if series is given in a row, letter if series is
given in a column or name if the series is given by cellrange name.
cell: str
            Cell identifier, such as "A1", or name if the data is given
by cellrange name.
Returns
-------
series_across: str
"row" if series is given in a row
"column" if series is given in a column
"name" if series and data are given by range name
"""
try:
# if x_row_or_col is numeric the series must be a row
int(x_row_or_col)
return "row"
except ValueError:
if self._split_excel_cell(cell):
                # if the cell can be split, the format is "A1"-like,
                # so the series must be a column
return "column"
else:
return "name"
class ExtData(External):
"""
Class for Vensim GET XLS DATA/GET DIRECT DATA
"""
def __init__(self, file_name, sheet, time_row_or_col, cell,
interp, coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.time_row_or_cols = [time_row_or_col]
self.cells = [cell]
self.coordss = [coords]
self.root = root
self.interp = interp
# check if the interpolation method is valid
if not interp:
self.interp = "interpolate"
if self.interp not in ["interpolate", "raw",
"look forward", "hold backward"]:
raise ValueError(self.py_name + "\n"
+ " The interpolation method (interp) must be "
+ "'raw', 'interpolate', "
                             + "'look forward' or 'hold backward'")
def add(self, file_name, sheet, time_row_or_col, cell,
interp, coords):
"""
Add information to retrieve new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.time_row_or_cols.append(time_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if not interp:
interp = "interpolate"
if interp != self.interp:
raise ValueError(self.py_name + "\n"
+ "Error matching interpolation method with "
+ "previously defined one")
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.time_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("data"))
self.data = utils.xrmerge(data)
def __call__(self, time):
if time in self.data['time'].values:
outdata = self.data.sel(time=time)
elif self.interp == "raw":
return np.nan
elif time > self.data['time'].values[-1]:
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the time")
outdata = self.data[-1]
elif time < self.data['time'].values[0]:
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the time")
outdata = self.data[0]
elif self.interp == "interpolate":
outdata = self.data.interp(time=time)
elif self.interp == 'look forward':
outdata = self.data.sel(time=time, method="backfill")
elif self.interp == 'hold backward':
outdata = self.data.sel(time=time, method="pad")
if self.coordss[0]:
# Remove time coord from the DataArray
return outdata.reset_coords('time', drop=True)
else:
# if data has no-coords return a float
return float(outdata)
class ExtLookup(External):
"""
Class for Vensim GET XLS LOOKUPS/GET DIRECT LOOKUPS
"""
def __init__(self, file_name, sheet, x_row_or_col, cell,
coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.x_row_or_cols = [x_row_or_col]
self.cells = [cell]
self.root = root
self.coordss = [coords]
self.interp = "interpolate"
def add(self, file_name, sheet, x_row_or_col, cell, coords):
"""
Add information to retrieve new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.x_row_or_cols.append(x_row_or_col)
self.cells.append(cell)
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.x_row_or_cols,
self.cells, self.coordss)
for (self.file, self.sheet, self.x_row_or_col,
self.cell, self.coords) in zipped:
data.append(self._initialize_data("lookup"))
self.data = utils.xrmerge(data)
def __call__(self, x):
return self._call(self.data, x)
def _call(self, data, x):
if isinstance(x, xr.DataArray):
if not x.dims:
# shape 0 xarrays
return self._call(data, float(x))
if np.all(x > data['lookup_dim'].values[-1]):
outdata, _ = xr.broadcast(data[-1], x)
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the series")
elif np.all(x < data['lookup_dim'].values[0]):
outdata, _ = xr.broadcast(data[0], x)
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the series")
else:
data, _ = xr.broadcast(data, x)
outdata = data[0].copy()
for a in utils.xrsplit(x):
outdata.loc[a.coords] = self._call(data.loc[a.coords],
float(a))
# the output will be always an xarray
return outdata.reset_coords('lookup_dim', drop=True)
else:
if x in data['lookup_dim'].values:
outdata = data.sel(lookup_dim=x)
elif x > data['lookup_dim'].values[-1]:
outdata = data[-1]
warnings.warn(
self.py_name + "\n"
+ "extrapolating data above the maximum value of the series")
elif x < data['lookup_dim'].values[0]:
outdata = data[0]
warnings.warn(
self.py_name + "\n"
+ "extrapolating data below the minimum value of the series")
else:
outdata = data.interp(lookup_dim=x)
# the output could be a float or an xarray
if self.coordss[0]:
# Remove lookup dimension coord from the DataArray
return outdata.reset_coords('lookup_dim', drop=True)
else:
# if lookup has no-coords return a float
return float(outdata)
class ExtConstant(External):
"""
Class for Vensim GET XLS CONSTANTS/GET DIRECT CONSTANTS
"""
def __init__(self, file_name, sheet, cell, coords, root, py_name):
super().__init__(py_name)
self.files = [file_name]
self.sheets = [sheet]
self.transposes = [cell[-1] == '*']
self.cells = [cell.strip('*')]
self.root = root
self.coordss = [coords]
def add(self, file_name, sheet, cell, coords):
"""
Add information to retrieve new dimension in an already declared object
"""
self.files.append(file_name)
self.sheets.append(sheet)
self.transposes.append(cell[-1] == '*')
self.cells.append(cell.strip('*'))
self.coordss.append(coords)
if list(coords) != list(self.coordss[0]):
raise ValueError(self.py_name + "\n"
+ "Error matching dimensions with previous data")
def initialize(self):
"""
Initialize all elements and create the self.data xarray.DataArray
"""
data = []
zipped = zip(self.files, self.sheets, self.transposes,
self.cells, self.coordss)
for (self.file, self.sheet, self.transpose,
self.cell, self.coords) in zipped:
data.append(self._initialize())
self.data = utils.xrmerge(data)
def _initialize(self):
"""
Initialize one element
"""
self._resolve_file(root=self.root)
split = self._split_excel_cell(self.cell)
if split:
data_across = "cell"
cell = split
else:
data_across = "name"
cell = self.cell
shape = utils.compute_shape(self.coords, reshape_len=2,
py_name=self.py_name)
if self.transpose:
shape.reverse()
data = self._get_constant_data(data_across, cell, shape)
if self.transpose:
data = data.transpose()
if np.any(np.isnan(data)):
# nan values in data
if data_across == "name":
cell_type = "Cellrange"
else:
cell_type = "Reference cell"
if self.missing == "warning":
warnings.warn(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
elif self.missing == "raise":
raise ValueError(
self.py_name + "\n"
+ "Constant value missing or non-valid in:\n"
+ self._file_sheet
+ "\t{}:\t{}\n".format(cell_type, self.cell)
)
# Create only an xarray if the data is not 0 dimensional
if len(self.coords) > 0:
reshape_dims = tuple(utils.compute_shape(self.coords))
if len(reshape_dims) > 1:
data = self._reshape(data, reshape_dims)
data = xr.DataArray(
data=data, coords=self.coords, dims=list(self.coords)
)
return data
def _get_constant_data(self, data_across, cell, shape):
"""
        Function that reads data from excel file for CONSTANT
Parameters
----------
data_across: "cell" or "name"
The way to read data file.
cell: int or str
            If data_across is "cell", the top-left split cell value where
the data is.
If data_across is "name" the cell range name where the data is.
shape:
The shape of the data in 2D.
Returns
-------
data: float/ndarray(1D/2D)
The values of the data.
"""
if data_across == "cell":
# read data from topleft cell name using pandas
start_row, start_col = cell
return self._get_data_from_file(
rows=[start_row, start_row + shape[0]],
cols=[start_col, start_col + shape[1]])
else:
# read data from cell range name using OpenPyXL
data = self._get_data_from_file_opyxl(cell)
try:
# Remove length=1 axis
data_shape = data.shape
if data_shape[1] == 1:
data = data[:, 0]
if data_shape[0] == 1:
data = data[0]
except AttributeError:
# Data is a float, nothing to do
pass
# Check data dims
try:
if shape[0] == 1 and shape[1] != 1:
assert shape[1] == len(data)
elif shape[0] != 1 and shape[1] == 1:
assert shape[0] == len(data)
elif shape[0] == 1 and shape[1] == 1:
assert isinstance(data, float)
else:
assert tuple(shape) == data.shape
except AssertionError:
raise ValueError(self.py_name + "\n"
+ "Data given in:\n"
+ self._file_sheet
+ "\tData name:\t{}\n".format(cell)
                                 + " does not have the same shape as the"
+ " given coordinates")
return data
def __call__(self):
return self.data
class ExtSubscript(External):
"""
Class for Vensim GET XLS SUBSCRIPT/GET DIRECT SUBSCRIPT
"""
def __init__(self, file_name, sheet, firstcell, lastcell, prefix, root):
super().__init__("Hardcoded external subscript")
self.file = file_name
self.sheet = sheet
self._resolve_file(root=root)
row_first, col_first = self._split_excel_cell(firstcell)
row_last, col_last = self._split_excel_cell(lastcell)
data = pd.read_excel(
self.file, sheet,
skiprows=row_first-1,
nrows=row_last-row_first+1,
usecols=np.arange(col_first, col_last+1)
)
self.subscript = [prefix + str(d) for d in data.values.flatten()]
|
[
"numpy.all",
"re.compile",
"openpyxl.load_workbook",
"xarray.broadcast",
"os.path.join",
"os.path.splitext",
"numpy.diff",
"os.path.isfile",
"numpy.array",
"numpy.empty_like",
"numpy.isnan",
"pandas.to_numeric",
"pandas.read_excel",
"warnings.warn",
"numpy.interp",
"re.findall",
"numpy.arange"
] |
[((16001, 16015), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (16009, 16015), True, 'import numpy as np\n'), ((16902, 16931), 'numpy.empty_like', 'np.empty_like', (['x'], {'dtype': 'float'}), '(x, dtype=float)\n', (16915, 16931), True, 'import numpy as np\n'), ((19042, 19071), 're.findall', 're.findall', (['"""\\\\d+|\\\\D+"""', 'cell'], {}), "('\\\\d+|\\\\D+', cell)\n", (19052, 19071), False, 'import re\n'), ((1349, 1405), 'openpyxl.load_workbook', 'load_workbook', (['file_name'], {'read_only': '(True)', 'data_only': '(True)'}), '(file_name, read_only=True, data_only=True)\n', (1362, 1405), False, 'from openpyxl import load_workbook\n'), ((10559, 10592), 'os.path.join', 'os.path.join', (['root', 'self.file[1:]'], {}), '(root, self.file[1:])\n', (10571, 10592), False, 'import os\n'), ((10609, 10634), 'os.path.isfile', 'os.path.isfile', (['self.file'], {}), '(self.file)\n', (10623, 10634), False, 'import os\n'), ((26592, 26633), 'numpy.all', 'np.all', (["(x > data['lookup_dim'].values[-1])"], {}), "(x > data['lookup_dim'].values[-1])\n", (26598, 26633), True, 'import numpy as np\n'), ((30529, 30543), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (30537, 30543), True, 'import numpy as np\n'), ((10692, 10723), 'os.path.isfile', 'os.path.isfile', (['(self.file + ext)'], {}), '(self.file + ext)\n', (10706, 10723), False, 'import os\n'), ((12025, 12048), 'numpy.isnan', 'np.isnan', (['series[i - 1]'], {}), '(series[i - 1])\n', (12033, 12048), True, 'import numpy as np\n'), ((12600, 12616), 'numpy.isnan', 'np.isnan', (['series'], {}), '(series)\n', (12608, 12616), True, 'import numpy as np\n'), ((12674, 12690), 'numpy.isnan', 'np.isnan', (['series'], {}), '(series)\n', (12682, 12690), True, 'import numpy as np\n'), ((13977, 13991), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (13985, 13991), True, 'import numpy as np\n'), ((26664, 26689), 'xarray.broadcast', 'xr.broadcast', (['data[-1]', 'x'], {}), '(data[-1], x)\n', (26676, 26689), True, 'import xarray as xr\n'), ((26706, 26805), 'warnings.warn', 'warnings.warn', (["(self.py_name + '\\n' +\n 'extrapolating data above the maximum value of the series')"], {}), "(self.py_name + '\\n' +\n 'extrapolating data above the maximum value of the series')\n", (26719, 26805), False, 'import warnings\n'), ((26856, 26896), 'numpy.all', 'np.all', (["(x < data['lookup_dim'].values[0])"], {}), "(x < data['lookup_dim'].values[0])\n", (26862, 26896), True, 'import numpy as np\n'), ((34649, 34683), 'numpy.arange', 'np.arange', (['col_first', '(col_last + 1)'], {}), '(col_first, col_last + 1)\n', (34658, 34683), True, 'import numpy as np\n'), ((835, 869), 'pandas.to_numeric', 'pd.to_numeric', (['ex'], {'errors': '"""coerce"""'}), "(ex, errors='coerce')\n", (848, 869), True, 'import pandas as pd\n'), ((3158, 3185), 'os.path.splitext', 'os.path.splitext', (['self.file'], {}), '(self.file)\n', (3174, 3185), False, 'import os\n'), ((8183, 8203), 'numpy.array', 'np.array', (['[[series]]'], {}), '([[series]])\n', (8191, 8203), True, 'import numpy as np\n'), ((9144, 9162), 'numpy.array', 'np.array', (['[[data]]'], {}), '([[data]])\n', (9152, 9162), True, 'import numpy as np\n'), ((13609, 13624), 'numpy.diff', 'np.diff', (['series'], {}), '(series)\n', (13616, 13624), True, 'import numpy as np\n'), ((23945, 24042), 'warnings.warn', 'warnings.warn', (["(self.py_name + '\\n' + 'extrapolating data above the maximum value of the time'\n )"], {}), "(self.py_name + '\\n' +\n 'extrapolating data above the maximum value of the time')\n", (23958, 24042), 
False, 'import warnings\n'), ((26927, 26951), 'xarray.broadcast', 'xr.broadcast', (['data[0]', 'x'], {}), '(data[0], x)\n', (26939, 26951), True, 'import xarray as xr\n'), ((26968, 27067), 'warnings.warn', 'warnings.warn', (["(self.py_name + '\\n' +\n 'extrapolating data below the minimum value of the series')"], {}), "(self.py_name + '\\n' +\n 'extrapolating data below the minimum value of the series')\n", (26981, 27067), False, 'import warnings\n'), ((27145, 27166), 'xarray.broadcast', 'xr.broadcast', (['data', 'x'], {}), '(data, x)\n', (27157, 27166), True, 'import xarray as xr\n'), ((27713, 27812), 'warnings.warn', 'warnings.warn', (["(self.py_name + '\\n' +\n 'extrapolating data above the maximum value of the series')"], {}), "(self.py_name + '\\n' +\n 'extrapolating data above the maximum value of the series')\n", (27726, 27812), False, 'import warnings\n'), ((19272, 19296), 're.compile', 're.compile', (['"""[^a-zA-Z]+"""'], {}), "('[^a-zA-Z]+')\n", (19282, 19296), False, 'import re\n'), ((24165, 24262), 'warnings.warn', 'warnings.warn', (["(self.py_name + '\\n' + 'extrapolating data below the minimum value of the time'\n )"], {}), "(self.py_name + '\\n' +\n 'extrapolating data below the minimum value of the time')\n", (24178, 24262), False, 'import warnings\n'), ((27947, 28046), 'warnings.warn', 'warnings.warn', (["(self.py_name + '\\n' +\n 'extrapolating data below the minimum value of the series')"], {}), "(self.py_name + '\\n' +\n 'extrapolating data below the minimum value of the series')\n", (27960, 28046), False, 'import warnings\n'), ((912, 961), 'pandas.read_excel', 'pd.read_excel', (['file_name', 'sheet_name'], {'header': 'None'}), '(file_name, sheet_name, header=None)\n', (925, 961), True, 'import pandas as pd\n'), ((17386, 17410), 'numpy.interp', 'np.interp', (['value', 'xr', 'yr'], {}), '(value, xr, yr)\n', (17395, 17410), True, 'import numpy as np\n')]
|
import random
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from torch_geometric.utils import coalesce, degree, remove_self_loops
from .num_nodes import maybe_num_nodes
def negative_sampling(edge_index: Tensor,
num_nodes: Optional[Union[int, Tuple[int, int]]] = None,
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False) -> Tensor:
r"""Samples random negative edges of a graph given by :attr:`edge_index`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int or Tuple[int, int], optional): The number of nodes,
*i.e.* :obj:`max_val + 1` of :attr:`edge_index`.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`.
(default: :obj:`None`)
num_neg_samples (int, optional): The (approximate) number of negative
samples to return.
If set to :obj:`None`, will try to return a negative edge for every
positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
assert method in ['sparse', 'dense']
size = num_nodes
bipartite = isinstance(size, (tuple, list))
size = maybe_num_nodes(edge_index) if size is None else size
size = (size, size) if not bipartite else size
force_undirected = False if bipartite else force_undirected
idx, population = edge_index_to_vector(edge_index, size, bipartite,
force_undirected)
if idx.numel() >= population:
return edge_index.new_empty((2, 0))
if num_neg_samples is None:
num_neg_samples = edge_index.size(1)
if force_undirected:
num_neg_samples = num_neg_samples // 2
prob = 1. - idx.numel() / population # Probability to sample a negative.
sample_size = int(1.1 * num_neg_samples / prob) # (Over)-sample size.
neg_idx = None
if method == 'dense':
# The dense version creates a mask of shape `population` to check for
# invalid samples.
mask = idx.new_ones(population, dtype=torch.bool)
mask[idx] = False
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, idx.device)
rnd = rnd[mask[rnd]] # Filter true negatives.
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
mask[neg_idx] = False
else: # 'sparse'
# The sparse version checks for invalid samples via `np.isin`.
idx = idx.to('cpu')
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, device='cpu')
mask = np.isin(rnd, idx)
if neg_idx is not None:
mask |= np.isin(rnd, neg_idx.to('cpu'))
mask = torch.from_numpy(mask).to(torch.bool)
rnd = rnd[~mask].to(edge_index.device)
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
return vector_to_edge_index(neg_idx, size, bipartite, force_undirected)
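# Illustrative usage sketch (not part of the module): sample roughly as many
# negative edges as there are positive ones in a small 4-node cycle graph.
#
#   edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])
#   neg = negative_sampling(edge_index, num_nodes=4)            # shape [2, ~4]
#   neg_undirected = negative_sampling(edge_index, num_nodes=4,
#                                      force_undirected=True)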
def batched_negative_sampling(
edge_index: Tensor,
batch: Union[Tensor, Tuple[Tensor, Tensor]],
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False,
) -> Tensor:
r"""Samples random negative edges of multiple graphs given by
:attr:`edge_index` and :attr:`batch`.
Args:
edge_index (LongTensor): The edge indices.
batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph connecting two different node types.
num_neg_samples (int, optional): The number of negative samples to
return. If set to :obj:`None`, will try to return a negative edge
for every positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
if isinstance(batch, Tensor):
src_batch, dst_batch = batch, batch
else:
src_batch, dst_batch = batch[0], batch[1]
split = degree(src_batch[edge_index[0]], dtype=torch.long).tolist()
edge_indices = torch.split(edge_index, split, dim=1)
num_src = degree(src_batch, dtype=torch.long)
cum_src = torch.cat([src_batch.new_zeros(1), num_src.cumsum(0)[:-1]])
if isinstance(batch, Tensor):
num_nodes = num_src.tolist()
cumsum = cum_src
else:
num_dst = degree(dst_batch, dtype=torch.long)
cum_dst = torch.cat([dst_batch.new_zeros(1), num_dst.cumsum(0)[:-1]])
num_nodes = torch.stack([num_src, num_dst], dim=1).tolist()
cumsum = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1)
neg_edge_indices = []
for i, edge_index in enumerate(edge_indices):
edge_index = edge_index - cumsum[i]
neg_edge_index = negative_sampling(edge_index, num_nodes[i],
num_neg_samples, method,
force_undirected)
neg_edge_index += cumsum[i]
neg_edge_indices.append(neg_edge_index)
return torch.cat(neg_edge_indices, dim=1)
def structured_negative_sampling(edge_index, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True):
r"""Samples a negative edge :obj:`(i,k)` for every positive edge
:obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a
tuple of the form :obj:`(i,j,k)`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: (LongTensor, LongTensor, LongTensor)
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
row, col = edge_index.cpu()
pos_idx = row * num_nodes + col
if not contains_neg_self_loops:
loop_idx = torch.arange(num_nodes) * (num_nodes + 1)
pos_idx = torch.cat([pos_idx, loop_idx], dim=0)
rand = torch.randint(num_nodes, (row.size(0), ), dtype=torch.long)
neg_idx = row * num_nodes + rand
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = mask.nonzero(as_tuple=False).view(-1)
while rest.numel() > 0: # pragma: no cover
tmp = torch.randint(num_nodes, (rest.size(0), ), dtype=torch.long)
rand[rest] = tmp
neg_idx = row[rest] * num_nodes + tmp
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = rest[mask]
return edge_index[0], edge_index[1], rand.to(edge_index.device)
def structured_negative_sampling_feasible(
edge_index: Tensor, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True) -> bool:
r"""Returns :obj:`True` if
:meth:`~torch_geometric.utils.structured_negative_sampling` is feasible
on the graph given by :obj:`edge_index`.
:obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible
    if at least one node is connected to all other nodes.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: bool
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
max_num_neighbors = num_nodes
edge_index = coalesce(edge_index, num_nodes=num_nodes)
if not contains_neg_self_loops:
edge_index, _ = remove_self_loops(edge_index)
max_num_neighbors -= 1 # Reduce number of valid neighbors
deg = degree(edge_index[0], num_nodes)
# True if there exists no node that is connected to all other nodes.
return bool(torch.all(deg < max_num_neighbors))
###############################################################################
def sample(population: int, k: int, device=None) -> Tensor:
if population <= k:
return torch.arange(population, device=device)
else:
return torch.tensor(random.sample(range(population), k), device=device)
def edge_index_to_vector(
edge_index: Tensor,
size: Tuple[int, int],
bipartite: bool,
force_undirected: bool = False,
) -> Tuple[Tensor, int]:
row, col = edge_index
if bipartite: # No need to account for self-loops.
idx = (row * size[1]).add_(col)
population = size[0] * size[1]
return idx, population
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
# We only operate on the upper triangular matrix:
mask = row < col
row, col = row[mask], col[mask]
offset = torch.arange(1, num_nodes, device=row.device).cumsum(0)[row]
idx = row.mul_(num_nodes).add_(col).sub_(offset)
population = (num_nodes * (num_nodes + 1)) // 2 - num_nodes
return idx, population
else:
assert size[0] == size[1]
num_nodes = size[0]
# We remove self-loops as we do not want to take them into account
# when sampling negative values.
mask = row != col
row, col = row[mask], col[mask]
col[row < col] -= 1
idx = row.mul_(num_nodes - 1).add_(col)
population = num_nodes * num_nodes - num_nodes
return idx, population
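# Worked example for the branch above (sketch): for a non-bipartite graph with
# size = (3, 3) and force_undirected=False, self-loops are excluded, so
# population = 3*3 - 3 = 6 and each edge maps to
# row * (num_nodes - 1) + adjusted col, e.g.
#   (0, 2) -> 1,   (1, 2) -> 3,   (2, 0) -> 4
# vector_to_edge_index() below inverts this mapping.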
def vector_to_edge_index(idx: Tensor, size: Tuple[int, int], bipartite: bool,
force_undirected: bool = False) -> Tensor:
if bipartite: # No need to account for self-loops.
row = idx.div(size[1], rounding_mode='floor')
col = idx % size[1]
return torch.stack([row, col], dim=0)
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
offset = torch.arange(1, num_nodes, device=idx.device).cumsum(0)
end = torch.arange(num_nodes, num_nodes * num_nodes, num_nodes,
device=idx.device)
row = torch.bucketize(idx, end.sub_(offset), right=True)
col = offset[row].add_(idx) % num_nodes
return torch.stack([torch.cat([row, col]), torch.cat([col, row])], 0)
else:
assert size[0] == size[1]
num_nodes = size[0]
row = idx.div(num_nodes - 1, rounding_mode='floor')
col = idx % (num_nodes - 1)
col[row <= col] += 1
return torch.stack([row, col], dim=0)
|
[
"torch.split",
"torch_geometric.utils.degree",
"torch.all",
"torch.stack",
"numpy.isin",
"torch.from_numpy",
"torch_geometric.utils.remove_self_loops",
"torch.arange",
"torch_geometric.utils.coalesce",
"torch.cat"
] |
[((5711, 5748), 'torch.split', 'torch.split', (['edge_index', 'split'], {'dim': '(1)'}), '(edge_index, split, dim=1)\n', (5722, 5748), False, 'import torch\n'), ((5764, 5799), 'torch_geometric.utils.degree', 'degree', (['src_batch'], {'dtype': 'torch.long'}), '(src_batch, dtype=torch.long)\n', (5770, 5799), False, 'from torch_geometric.utils import coalesce, degree, remove_self_loops\n'), ((6667, 6701), 'torch.cat', 'torch.cat', (['neg_edge_indices'], {'dim': '(1)'}), '(neg_edge_indices, dim=1)\n', (6676, 6701), False, 'import torch\n'), ((9298, 9339), 'torch_geometric.utils.coalesce', 'coalesce', (['edge_index'], {'num_nodes': 'num_nodes'}), '(edge_index, num_nodes=num_nodes)\n', (9306, 9339), False, 'from torch_geometric.utils import coalesce, degree, remove_self_loops\n'), ((9509, 9541), 'torch_geometric.utils.degree', 'degree', (['edge_index[0]', 'num_nodes'], {}), '(edge_index[0], num_nodes)\n', (9515, 9541), False, 'from torch_geometric.utils import coalesce, degree, remove_self_loops\n'), ((5999, 6034), 'torch_geometric.utils.degree', 'degree', (['dst_batch'], {'dtype': 'torch.long'}), '(dst_batch, dtype=torch.long)\n', (6005, 6034), False, 'from torch_geometric.utils import coalesce, degree, remove_self_loops\n'), ((7712, 7749), 'torch.cat', 'torch.cat', (['[pos_idx, loop_idx]'], {'dim': '(0)'}), '([pos_idx, loop_idx], dim=0)\n', (7721, 7749), False, 'import torch\n'), ((9401, 9430), 'torch_geometric.utils.remove_self_loops', 'remove_self_loops', (['edge_index'], {}), '(edge_index)\n', (9418, 9430), False, 'from torch_geometric.utils import coalesce, degree, remove_self_loops\n'), ((9631, 9665), 'torch.all', 'torch.all', (['(deg < max_num_neighbors)'], {}), '(deg < max_num_neighbors)\n', (9640, 9665), False, 'import torch\n'), ((9850, 9889), 'torch.arange', 'torch.arange', (['population'], {'device': 'device'}), '(population, device=device)\n', (9862, 9889), False, 'import torch\n'), ((11503, 11533), 'torch.stack', 'torch.stack', (['[row, col]'], {'dim': '(0)'}), '([row, col], dim=0)\n', (11514, 11533), False, 'import torch\n'), ((3509, 3526), 'numpy.isin', 'np.isin', (['rnd', 'idx'], {}), '(rnd, idx)\n', (3516, 3526), True, 'import numpy as np\n'), ((5632, 5682), 'torch_geometric.utils.degree', 'degree', (['src_batch[edge_index[0]]'], {'dtype': 'torch.long'}), '(src_batch[edge_index[0]], dtype=torch.long)\n', (5638, 5682), False, 'from torch_geometric.utils import coalesce, degree, remove_self_loops\n'), ((7652, 7675), 'torch.arange', 'torch.arange', (['num_nodes'], {}), '(num_nodes)\n', (7664, 7675), False, 'import torch\n'), ((11712, 11788), 'torch.arange', 'torch.arange', (['num_nodes', '(num_nodes * num_nodes)', 'num_nodes'], {'device': 'idx.device'}), '(num_nodes, num_nodes * num_nodes, num_nodes, device=idx.device)\n', (11724, 11788), False, 'import torch\n'), ((12221, 12251), 'torch.stack', 'torch.stack', (['[row, col]'], {'dim': '(0)'}), '([row, col], dim=0)\n', (12232, 12251), False, 'import torch\n'), ((3045, 3070), 'torch.cat', 'torch.cat', (['[neg_idx, rnd]'], {}), '([neg_idx, rnd])\n', (3054, 3070), False, 'import torch\n'), ((3777, 3802), 'torch.cat', 'torch.cat', (['[neg_idx, rnd]'], {}), '([neg_idx, rnd])\n', (3786, 3802), False, 'import torch\n'), ((6134, 6172), 'torch.stack', 'torch.stack', (['[num_src, num_dst]'], {'dim': '(1)'}), '([num_src, num_dst], dim=1)\n', (6145, 6172), False, 'import torch\n'), ((6199, 6237), 'torch.stack', 'torch.stack', (['[cum_src, cum_dst]'], {'dim': '(1)'}), '([cum_src, cum_dst], dim=1)\n', (6210, 6237), False, 'import torch\n'), 
((7888, 7913), 'numpy.isin', 'np.isin', (['neg_idx', 'pos_idx'], {}), '(neg_idx, pos_idx)\n', (7895, 7913), True, 'import numpy as np\n'), ((3638, 3660), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (3654, 3660), False, 'import torch\n'), ((8206, 8231), 'numpy.isin', 'np.isin', (['neg_idx', 'pos_idx'], {}), '(neg_idx, pos_idx)\n', (8213, 8231), True, 'import numpy as np\n'), ((11642, 11687), 'torch.arange', 'torch.arange', (['(1)', 'num_nodes'], {'device': 'idx.device'}), '(1, num_nodes, device=idx.device)\n', (11654, 11687), False, 'import torch\n'), ((11957, 11978), 'torch.cat', 'torch.cat', (['[row, col]'], {}), '([row, col])\n', (11966, 11978), False, 'import torch\n'), ((11980, 12001), 'torch.cat', 'torch.cat', (['[col, row]'], {}), '([col, row])\n', (11989, 12001), False, 'import torch\n'), ((10566, 10611), 'torch.arange', 'torch.arange', (['(1)', 'num_nodes'], {'device': 'row.device'}), '(1, num_nodes, device=row.device)\n', (10578, 10611), False, 'import torch\n')]
|
from tkinter import *
from PIL import ImageGrab
import numpy as np
import cv2
import time
import pyautogui as pg
import DirectInputRoutines as DIR
from LogKey import key_check
last_time = time.time()
one_hot = [0, 0, 0, 0, 0, 0]
hash_dict = {'w':0, 's':1, 'a':2, 'd':3, 'c':4, 'v':5}
X = []
y = []
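# X collects the processed screen captures and y the matching one-hot key-state
# vectors (order w, s, a, d, c, v, as defined in hash_dict above).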
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def process_img(original_image):
processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
#processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
], np.int32)
processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
processed_img = roi(processed_img, [vertices])
# more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
# edges rho theta thresh # min length, max gap:
#lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, 20, 15)
#draw_lines(processed_img,lines)
return processed_img
def roi(img, vertices):
#blank mask:
mask = np.zeros_like(img)
# fill the mask
cv2.fillPoly(mask, vertices, 255)
# now only show the area that is the mask
masked = cv2.bitwise_and(img, mask)
return masked
def draw_lines(img,lines):
for line in lines:
coords = line[0]
cv2.line(img, (coords[0], coords[1]), (coords[2], coords[3]), [255,255,255], 3)
def change_tab():
pg.hotkey("alt","tab")
def send_key(e):
hash = {"w":DIR.W, "a":DIR.A, "s":DIR.S, "d":DIR.D}
return hash[e.keysym]
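# keyup/keydown forward the event to the game window (via alt-tab and DirectInput),
# update the one-hot label for the key, grab and preprocess a screenshot, and append
# the (frame, label) pair to the training buffers X and y.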
def keyup(e):
if(e.keysym == "Alt_L" or e.keysym == "Tab"):
return
    #print('up', e.keysym)
change_tab()
DIR.ReleaseKey(send_key(e))
change_tab()
global last_time
one_hot[hash_dict[e.keysym]] = 0
temp = list(one_hot)
printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
printscreen = process_img(printscreen)
print('loop took {} seconds'.format(time.time()-last_time))
print([printscreen, temp])
last_time = time.time()
X.append(printscreen)
y.append(temp)
#cv2.imshow("image", printscreen)
def keydown(e):
    #print('down', e.keysym)
    if(e.keysym == "Alt_L" or e.keysym == "Tab"):
        return
    change_tab()
    # A key press should be forwarded as a press, not a release (this assumes
    # DirectInputRoutines exposes PressKey alongside ReleaseKey).
    DIR.PressKey(send_key(e))
change_tab()
global last_time
one_hot[hash_dict[e.keysym]] = 1
temp = list(one_hot)
    # Use the same capture region as keyup so every stored frame has identical dimensions.
    printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
printscreen = process_img(printscreen)
print('loop took {} seconds'.format(time.time()-last_time))
print([printscreen,temp])
last_time = time.time()
X.append(printscreen)
y.append(temp)
root = Tk()
frame = Frame(root, width=100, height=100)
frame.bind("<KeyPress>", keydown)
frame.bind("<KeyRelease>", keyup)
frame.pack()
frame.focus_set()
root.mainloop()
np.save("X.npy", X)
np.save("y.npy", y)
|
[
"cv2.fillPoly",
"pyautogui.hotkey",
"numpy.median",
"cv2.GaussianBlur",
"PIL.ImageGrab.grab",
"cv2.line",
"numpy.zeros_like",
"cv2.bitwise_and",
"numpy.array",
"cv2.cvtColor",
"cv2.Canny",
"time.time",
"numpy.save"
] |
[((196, 207), 'time.time', 'time.time', ([], {}), '()\n', (205, 207), False, 'import time\n'), ((3513, 3532), 'numpy.save', 'np.save', (['"""X.npy"""', 'X'], {}), "('X.npy', X)\n", (3520, 3532), True, 'import numpy as np\n'), ((3534, 3553), 'numpy.save', 'np.save', (['"""y.npy"""', 'y'], {}), "('y.npy', y)\n", (3541, 3553), True, 'import numpy as np\n'), ((419, 435), 'numpy.median', 'np.median', (['image'], {}), '(image)\n', (428, 435), True, 'import numpy as np\n'), ((597, 627), 'cv2.Canny', 'cv2.Canny', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (606, 627), False, 'import cv2\n'), ((725, 773), 'cv2.cvtColor', 'cv2.cvtColor', (['original_image', 'cv2.COLOR_BGR2GRAY'], {}), '(original_image, cv2.COLOR_BGR2GRAY)\n', (737, 773), False, 'import cv2\n'), ((795, 851), 'cv2.Canny', 'cv2.Canny', (['processed_img'], {'threshold1': '(200)', 'threshold2': '(300)'}), '(processed_img, threshold1=200, threshold2=300)\n', (804, 851), False, 'import cv2\n'), ((873, 929), 'cv2.Canny', 'cv2.Canny', (['processed_img'], {'threshold1': '(200)', 'threshold2': '(300)'}), '(processed_img, threshold1=200, threshold2=300)\n', (882, 929), False, 'import cv2\n'), ((1031, 1126), 'numpy.array', 'np.array', (['[[10, 500], [10, 300], [300, 200], [500, 200], [800, 300], [800, 500]]', 'np.int32'], {}), '([[10, 500], [10, 300], [300, 200], [500, 200], [800, 300], [800, \n 500]], np.int32)\n', (1039, 1126), True, 'import numpy as np\n'), ((1162, 1204), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['processed_img', '(5, 5)', '(0)'], {}), '(processed_img, (5, 5), 0)\n', (1178, 1204), False, 'import cv2\n'), ((1679, 1697), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (1692, 1697), True, 'import numpy as np\n'), ((1724, 1757), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'vertices', '(255)'], {}), '(mask, vertices, 255)\n', (1736, 1757), False, 'import cv2\n'), ((1819, 1845), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'mask'], {}), '(img, mask)\n', (1834, 1845), False, 'import cv2\n'), ((2062, 2085), 'pyautogui.hotkey', 'pg.hotkey', (['"""alt"""', '"""tab"""'], {}), "('alt', 'tab')\n", (2071, 2085), True, 'import pyautogui as pg\n'), ((2684, 2695), 'time.time', 'time.time', ([], {}), '()\n', (2693, 2695), False, 'import time\n'), ((3274, 3285), 'time.time', 'time.time', ([], {}), '()\n', (3283, 3285), False, 'import time\n'), ((1956, 2042), 'cv2.line', 'cv2.line', (['img', '(coords[0], coords[1])', '(coords[2], coords[3])', '[255, 255, 255]', '(3)'], {}), '(img, (coords[0], coords[1]), (coords[2], coords[3]), [255, 255, \n 255], 3)\n', (1964, 2042), False, 'import cv2\n'), ((2489, 2527), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {'bbox': '(0, 40, 800, 640)'}), '(bbox=(0, 40, 800, 640))\n', (2503, 2527), False, 'from PIL import ImageGrab\n'), ((3080, 3118), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {'bbox': '(0, 40, 800, 680)'}), '(bbox=(0, 40, 800, 680))\n', (3094, 3118), False, 'from PIL import ImageGrab\n'), ((2611, 2622), 'time.time', 'time.time', ([], {}), '()\n', (2620, 2622), False, 'import time\n'), ((3202, 3213), 'time.time', 'time.time', ([], {}), '()\n', (3211, 3213), False, 'import time\n')]
|
import quandl
import math
import numpy as np
from sklearn import preprocessing, cross_validation, svm
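# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20; on newer versions
# import train_test_split from sklearn.model_selection instead.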
from sklearn.linear_model import LinearRegression
import pickle
import datetime
from matplotlib import style
import matplotlib.pyplot as plot
# Config
isLoadFromLocal = True
quandl.ApiConfig.api_key = '<KEY>'
style.use('ggplot')
# Loading data
if isLoadFromLocal:
df = pickle.load(open("DataFromQuandl_Stock_Chap2.pickle", "rb"))
else:
df = quandl.get('WIKI/GOOGL')
pickle.dump(df, open("DataFromQuandl_Stock_Chap2.pickle", "wb+"))
# Data pre-processing
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close']
df['PCT_Change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open']
df = df[['Adj. Close', 'HL_PCT', 'PCT_Change', 'Adj. Volume']]
forecastCol = 'Adj. Close'
df.fillna(-99999, inplace=True)  # numeric placeholder keeps the feature columns numeric
forecastOut = int(math.ceil(0.01*len(df)))
df['label'] = df[forecastCol].shift(-forecastOut)
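# The label is the adjusted close shifted forecastOut rows (about 1% of the dataset)
# into the future; the last forecastOut rows have no label and are dropped below.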
# df['label'].plot()
# df[forecastCol].plot()
# plot.legend(loc = 4)
# plot.show()
x = np.array(df.drop(['label'], 1))
print(x)
x = preprocessing.scale(x)
print(x)
xLately = x[-forecastOut:]
x = x[:-forecastOut]
df.dropna(inplace = True)
y = np.array(df['label'])
# Regression
x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.1)
# classifier = svm.SVR(kernel='linear') # SVM SVR
classifier = LinearRegression(n_jobs=3) # Linear Regression
classifier.fit(x_train, y_train)
accuracy = classifier.score(x_test, y_test)
forecastSet = classifier.predict(xLately)
print('Accuracy is ', accuracy, '\nForecasted values are ', forecastSet, '\nNumber of values is ', forecastOut)
df['Forecast'] = np.nan
lastDate = df.iloc[-1].name
print(lastDate)
lastTime = lastDate.timestamp()
print(lastTime)
oneDay = 24 * 60 * 60 # seconds in a day
nextTime = lastTime + oneDay
for forecastValue in forecastSet:  # renamed from "iter" to avoid shadowing the built-in
    nextDate = datetime.datetime.fromtimestamp(nextTime)
    nextTime += oneDay
    df.loc[nextDate] = [np.nan for _ in range(len(df.columns) - 1)] + [forecastValue]
df['Adj. Close'].plot()
df['Forecast'].plot()
plot.legend(loc = 4)
plot.xlabel('Date')
plot.ylabel('Price')
plot.show()
|
[
"datetime.datetime.fromtimestamp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.style.use",
"quandl.get",
"sklearn.cross_validation.train_test_split",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.scale",
"matplotlib.pyplot.show"
] |
[((312, 331), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (321, 331), False, 'from matplotlib import style\n'), ((1072, 1094), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['x'], {}), '(x)\n', (1091, 1094), False, 'from sklearn import preprocessing, cross_validation, svm\n'), ((1182, 1203), 'numpy.array', 'np.array', (["df['label']"], {}), "(df['label'])\n", (1190, 1203), True, 'import numpy as np\n'), ((1254, 1308), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['x', 'y'], {'test_size': '(0.1)'}), '(x, y, test_size=0.1)\n', (1287, 1308), False, 'from sklearn import preprocessing, cross_validation, svm\n'), ((1374, 1400), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'n_jobs': '(3)'}), '(n_jobs=3)\n', (1390, 1400), False, 'from sklearn.linear_model import LinearRegression\n'), ((2070, 2088), 'matplotlib.pyplot.legend', 'plot.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (2081, 2088), True, 'import matplotlib.pyplot as plot\n'), ((2091, 2110), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""Date"""'], {}), "('Date')\n", (2102, 2110), True, 'import matplotlib.pyplot as plot\n'), ((2111, 2131), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Price"""'], {}), "('Price')\n", (2122, 2131), True, 'import matplotlib.pyplot as plot\n'), ((2132, 2143), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (2141, 2143), True, 'import matplotlib.pyplot as plot\n'), ((454, 478), 'quandl.get', 'quandl.get', (['"""WIKI/GOOGL"""'], {}), "('WIKI/GOOGL')\n", (464, 478), False, 'import quandl\n'), ((1881, 1922), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['nextTime'], {}), '(nextTime)\n', (1912, 1922), False, 'import datetime\n')]
|
import tkinter.messagebox
from tkinter import *
import tkinter as tk
from tkinter import filedialog
import numpy
import pytesseract #Python wrapper for the Google-sponsored open-source OCR engine Tesseract.
import cv2
from PIL import Image, ImageTk
import os
root = tk.Tk()
root.title("Object Character Recognizer")
root.geometry("1280x720")
test_image = None
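# Holds the most recently loaded PIL image; the OCR handlers below read this global.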
def browse_image():
fin = filedialog.askopenfilename(initialdir=os.getcwd(), title="Select Image File", filetypes=(("PNG Files", "*.png"), ("JPG Files", "*.jpg"), ("All Files", "*.*")))
global test_image
image = Image.open(fin)
test_image = image
    img = ImageTk.PhotoImage(image.resize((650, 400)))
    lb = tk.Label(image=img)
    lb.image = img  # keep a reference so the PhotoImage is not garbage-collected
    lb.place(x=25, y=50)
    # No nested root.mainloop() here: the main loop started at the bottom of the script
    # is already running when this callback fires.
def use_ocr_default():
try:
global test_image
messge = None
#OEM stands for OCR Engine Mode and PSM stands for Page Segmentation Mode.
#OEM defines what kind of OCR engine is to be used (this defines the dataset that would be used to cross-match
#the available data with the testing data).
#PSM defines how Tesseract will treat the image that supposedly contains characters and how it will extract the
#data from the image.
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
        display_message = Text(messge, width=46, height=15)
        display_message.insert(END, str(tess))
        # Disable only after inserting; programmatic edits to a disabled Text widget are ignored.
        display_message.config(state=DISABLED)
        display_message.place(x=890, y=330)
except: #Print a error message when the user inputs an incompatible image.
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_handwriting():
try:
global test_image
opencv_img = numpy.array(test_image)
opencv_img = opencv_img[:, :, ::-1].copy() #This line is used to convert RGB PIL image file to BGR cv2 image file.
blurred_img = cv2.medianBlur(opencv_img, 5)
gray_img = cv2.cvtColor(blurred_img, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(gray_img, 122, 255, cv2.THRESH_BINARY)
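        # Median blur, grayscale conversion and a fixed threshold tend to give Tesseract
        # a cleaner input for handwriting than the raw photograph.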
messge = None
tess = pytesseract.image_to_string(binary, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
        display_message = Text(messge, width=46, height=15)
        display_message.insert(END, str(tess))
        # Disable only after inserting; programmatic edits to a disabled Text widget are ignored.
        display_message.config(state=DISABLED)
        display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_singletext():
try:
global test_image
messge = None
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 7')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
        display_message = Text(messge, width=46, height=15)
        display_message.insert(END, str(tess))
        # Disable only after inserting; programmatic edits to a disabled Text widget are ignored.
        display_message.config(state=DISABLED)
        display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
w = tk.LabelFrame(root, text="Image:", width=768, height=600)
w.place(x=20, y=10)
w.pack_propagate(0)
w1 = tk.LabelFrame(root, text="Extracted Text:", width=500, height=310)
w1.place(x=800, y=300)
w2 = tk.LabelFrame(root, text="Operations:", width=350, height=280)
w2.place(x=800, y=10)
btn1 = tk.Button(w2, text="Load Image", padx=40, pady=10, command=browse_image)
btn1.place(x=22, y=20)
btn1 = tk.Button(w2, text="Run Handwritten OCR", padx=40, pady=10, command=use_ocr_handwriting)
btn1.place(x=22, y=80)
btn1 = tk.Button(w2, text="Run Default OCR", padx=40, pady=10, command=use_ocr_default)
btn1.place(x=22, y=140)
btn1 = tk.Button(w2, text="Run Single Text OCR", padx=40, pady=10, command=use_ocr_singletext)
btn1.place(x=22, y=200)
root.mainloop()
|
[
"tkinter.LabelFrame",
"PIL.Image.open",
"cv2.threshold",
"cv2.medianBlur",
"tkinter.Button",
"os.getcwd",
"numpy.array",
"tkinter.Tk",
"tkinter.Label",
"pytesseract.image_to_string",
"cv2.cvtColor"
] |
[((268, 275), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (273, 275), True, 'import tkinter as tk\n'), ((3726, 3783), 'tkinter.LabelFrame', 'tk.LabelFrame', (['root'], {'text': '"""Image:"""', 'width': '(768)', 'height': '(600)'}), "(root, text='Image:', width=768, height=600)\n", (3739, 3783), True, 'import tkinter as tk\n'), ((3829, 3895), 'tkinter.LabelFrame', 'tk.LabelFrame', (['root'], {'text': '"""Extracted Text:"""', 'width': '(500)', 'height': '(310)'}), "(root, text='Extracted Text:', width=500, height=310)\n", (3842, 3895), True, 'import tkinter as tk\n'), ((3924, 3986), 'tkinter.LabelFrame', 'tk.LabelFrame', (['root'], {'text': '"""Operations:"""', 'width': '(350)', 'height': '(280)'}), "(root, text='Operations:', width=350, height=280)\n", (3937, 3986), True, 'import tkinter as tk\n'), ((4016, 4088), 'tkinter.Button', 'tk.Button', (['w2'], {'text': '"""Load Image"""', 'padx': '(40)', 'pady': '(10)', 'command': 'browse_image'}), "(w2, text='Load Image', padx=40, pady=10, command=browse_image)\n", (4025, 4088), True, 'import tkinter as tk\n'), ((4119, 4212), 'tkinter.Button', 'tk.Button', (['w2'], {'text': '"""Run Handwritten OCR"""', 'padx': '(40)', 'pady': '(10)', 'command': 'use_ocr_handwriting'}), "(w2, text='Run Handwritten OCR', padx=40, pady=10, command=\n use_ocr_handwriting)\n", (4128, 4212), True, 'import tkinter as tk\n'), ((4238, 4323), 'tkinter.Button', 'tk.Button', (['w2'], {'text': '"""Run Default OCR"""', 'padx': '(40)', 'pady': '(10)', 'command': 'use_ocr_default'}), "(w2, text='Run Default OCR', padx=40, pady=10, command=use_ocr_default\n )\n", (4247, 4323), True, 'import tkinter as tk\n'), ((4350, 4442), 'tkinter.Button', 'tk.Button', (['w2'], {'text': '"""Run Single Text OCR"""', 'padx': '(40)', 'pady': '(10)', 'command': 'use_ocr_singletext'}), "(w2, text='Run Single Text OCR', padx=40, pady=10, command=\n use_ocr_singletext)\n", (4359, 4442), True, 'import tkinter as tk\n'), ((589, 604), 'PIL.Image.open', 'Image.open', (['fin'], {}), '(fin)\n', (599, 604), False, 'from PIL import Image, ImageTk\n'), ((692, 711), 'tkinter.Label', 'tk.Label', ([], {'image': 'img'}), '(image=img)\n', (700, 711), True, 'import tkinter as tk\n'), ((1257, 1329), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['test_image'], {'config': '"""-l eng --oem 1 --psm 3"""'}), "(test_image, config='-l eng --oem 1 --psm 3')\n", (1284, 1329), False, 'import pytesseract\n'), ((2021, 2044), 'numpy.array', 'numpy.array', (['test_image'], {}), '(test_image)\n', (2032, 2044), False, 'import numpy\n'), ((2190, 2219), 'cv2.medianBlur', 'cv2.medianBlur', (['opencv_img', '(5)'], {}), '(opencv_img, 5)\n', (2204, 2219), False, 'import cv2\n'), ((2239, 2284), 'cv2.cvtColor', 'cv2.cvtColor', (['blurred_img', 'cv2.COLOR_BGR2GRAY'], {}), '(blurred_img, cv2.COLOR_BGR2GRAY)\n', (2251, 2284), False, 'import cv2\n'), ((2310, 2362), 'cv2.threshold', 'cv2.threshold', (['gray_img', '(122)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray_img, 122, 255, cv2.THRESH_BINARY)\n', (2323, 2362), False, 'import cv2\n'), ((2400, 2468), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['binary'], {'config': '"""-l eng --oem 1 --psm 3"""'}), "(binary, config='-l eng --oem 1 --psm 3')\n", (2427, 2468), False, 'import pytesseract\n'), ((3108, 3180), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['test_image'], {'config': '"""-l eng --oem 1 --psm 7"""'}), "(test_image, config='-l eng --oem 1 --psm 7')\n", (3135, 3180), False, 'import pytesseract\n'), ((432, 443), 'os.getcwd', 'os.getcwd', 
([], {}), '()\n', (441, 443), False, 'import os\n')]
|
# Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed Vector functionality
import os
import libceed
import numpy as np
import check
TOL = libceed.EPSILON * 256
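# Tolerance scaled from the machine epsilon of the scalar type libCEED was built with
# (FP32 or FP64), so the tests work in either precision.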
# -------------------------------------------------------------------------------
# Utility
# -------------------------------------------------------------------------------
def check_values(ceed, x, value):
with x.array_read() as b:
for i in range(len(b)):
assert b[i] == value
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector
# -------------------------------------------------------------------------------
def test_100(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test setValue
# -------------------------------------------------------------------------------
def test_101(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
value = 1
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
for i in range(len(b)):
assert b[i] == 10 + i
x.set_value(3.0)
check_values(ceed, x, 3.0)
del x
x = ceed.Vector(n)
# Set value before setting or getting the array
x.set_value(5.0)
check_values(ceed, x, 5.0)
# -------------------------------------------------------------------------------
# Test getArrayRead state counter
# -------------------------------------------------------------------------------
def test_102(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
x.set_value(0)
# Two read accesses should not generate an error
a = x.get_array_read()
b = x.get_array_read()
x.restore_array_read()
x.restore_array_read()
# -------------------------------------------------------------------------------
# Test setting one vector from array of another vector
# -------------------------------------------------------------------------------
def test_103(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as x_array:
y.set_array(x_array, cmode=libceed.USE_POINTER)
with y.array_read() as y_array:
for i in range(n):
assert y_array[i] == 10 + i
# -------------------------------------------------------------------------------
# Test getArray to modify array
# -------------------------------------------------------------------------------
def test_104(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.zeros(n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
b[3] = -3.14
if libceed.lib.CEED_SCALAR_TYPE == libceed.SCALAR_FP32:
assert a[3] == np.float32(-3.14)
else:
assert a[3] == -3.14
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector using
# CEED_MEM_DEVICE
# -------------------------------------------------------------------------------
def test_105(ceed_resource):
# Skip test for non-GPU backend
if 'gpu' in ceed_resource:
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
arr = x.get_array_read(memtype=libceed.MEM_DEVICE)
y.set_array(arr, memtype=libceed.MEM_DEVICE)
x.restore_array_read()
with y.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test view
# -------------------------------------------------------------------------------
def test_107(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
print(x)
stdout, stderr, ref_stdout = check.output(capsys)
assert not stderr
assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test norms
# -------------------------------------------------------------------------------
def test_108(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
for i in range(n):
if (i % 2 == 0):
a[i] *= -1
x.set_array(a, cmode=libceed.USE_POINTER)
norm = x.norm(normtype=libceed.NORM_1)
assert abs(norm - 45.) < TOL
norm = x.norm()
assert abs(norm - np.sqrt(285.)) < TOL
norm = x.norm(normtype=libceed.NORM_MAX)
assert abs(norm - 9.) < TOL
# -------------------------------------------------------------------------------
# Test taking the reciprocal of a vector
# -------------------------------------------------------------------------------
def test_119(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
x.reciprocal()
with x.array_read() as b:
for i in range(n):
assert abs(b[i] - 1. / (10 + i)) < TOL
# -------------------------------------------------------------------------------
# Test AXPY
# -------------------------------------------------------------------------------
def test_121(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
y.axpy(-0.5, x)
with y.array() as b:
assert np.allclose(.5 * a, b)
# -------------------------------------------------------------------------------
# Test pointwise multiplication
# -------------------------------------------------------------------------------
def test_122(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
w = ceed.Vector(n)
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
w.set_array(a, cmode=libceed.COPY_VALUES)
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
w.pointwise_mult(x, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
w.pointwise_mult(w, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i) < 1e-14
w.pointwise_mult(x, w)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i * i) < 1e-14
y.pointwise_mult(y, y)
with y.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
# -------------------------------------------------------------------------------
# Test Scale
# -------------------------------------------------------------------------------
def test_123(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
x.scale(-0.5)
with x.array() as b:
assert np.allclose(-.5 * a, b)
# -------------------------------------------------------------------------------
# Test getArrayWrite to modify array
# -------------------------------------------------------------------------------
def test_124(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
with x.array_write() as a:
for i in range(len(a)):
a[i] = 3 * i
with x.array_read() as a:
for i in range(len(a)):
assert a[i] == 3 * i
# -------------------------------------------------------------------------------
# Test modification of reshaped array
# -------------------------------------------------------------------------------
def test_199(ceed_resource):
"""Modification of reshaped array"""
ceed = libceed.Ceed(ceed_resource)
vec = ceed.Vector(12)
vec.set_value(0.0)
with vec.array(4, 3) as x:
x[...] = np.eye(4, 3)
with vec.array_read(3, 4) as x:
assert np.all(x == np.eye(4, 3).reshape(3, 4))
# -------------------------------------------------------------------------------
|
[
"numpy.eye",
"libceed.Ceed",
"numpy.allclose",
"numpy.sqrt",
"check.output",
"numpy.float32"
] |
[((1674, 1701), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (1686, 1701), False, 'import libceed\n'), ((2155, 2182), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (2167, 2182), False, 'import libceed\n'), ((2857, 2884), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (2869, 2884), False, 'import libceed\n'), ((3364, 3391), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (3376, 3391), False, 'import libceed\n'), ((3985, 4012), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (3997, 4012), False, 'import libceed\n'), ((5390, 5417), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (5402, 5417), False, 'import libceed\n'), ((5604, 5624), 'check.output', 'check.output', (['capsys'], {}), '(capsys)\n', (5616, 5624), False, 'import check\n'), ((5907, 5934), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (5919, 5934), False, 'import libceed\n'), ((6608, 6635), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (6620, 6635), False, 'import libceed\n'), ((7129, 7156), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (7141, 7156), False, 'import libceed\n'), ((7695, 7722), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (7707, 7722), False, 'import libceed\n'), ((8753, 8780), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (8765, 8780), False, 'import libceed\n'), ((9246, 9273), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (9258, 9273), False, 'import libceed\n'), ((9781, 9808), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (9793, 9808), False, 'import libceed\n'), ((4706, 4733), 'libceed.Ceed', 'libceed.Ceed', (['ceed_resource'], {}), '(ceed_resource)\n', (4718, 4733), False, 'import libceed\n'), ((7425, 7448), 'numpy.allclose', 'np.allclose', (['(0.5 * a)', 'b'], {}), '(0.5 * a, b)\n', (7436, 7448), True, 'import numpy as np\n'), ((8978, 9002), 'numpy.allclose', 'np.allclose', (['(-0.5 * a)', 'b'], {}), '(-0.5 * a, b)\n', (8989, 9002), True, 'import numpy as np\n'), ((9907, 9919), 'numpy.eye', 'np.eye', (['(4)', '(3)'], {}), '(4, 3)\n', (9913, 9919), True, 'import numpy as np\n'), ((4272, 4289), 'numpy.float32', 'np.float32', (['(-3.14)'], {}), '(-3.14)\n', (4282, 4289), True, 'import numpy as np\n'), ((6260, 6274), 'numpy.sqrt', 'np.sqrt', (['(285.0)'], {}), '(285.0)\n', (6267, 6274), True, 'import numpy as np\n'), ((9984, 9996), 'numpy.eye', 'np.eye', (['(4)', '(3)'], {}), '(4, 3)\n', (9990, 9996), True, 'import numpy as np\n')]
|
import cv2
import ezdxf
import numpy as np
def draw_hatch(img, entity, color, mask):
for poly_path in entity.paths.paths:
# print(poly_path.path_type_flags)
polygon = np.array([vertex[:-1] for vertex in poly_path.vertices]).astype(int)
if poly_path.path_type_flags & 1 == 1:
cv2.fillPoly(img, [polygon], color)
cv2.fillPoly(mask, [polygon], (255, 255, 255))
else:
cv2.fillPoly(img, [polygon], (255, 255, 255))
return color
def draw_line(img, entity, color, mask):
p1 = entity.dxf.start[:-1]
p2 = entity.dxf.end[:-1]
cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), color, 1)
cv2.line(mask, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (255, 255, 255), 2)
return color
def draw_lwpolyline(img, entity, color, mask):
polyline = []
a = np.array(entity.lwpoints.values).astype(int)
while len(a) > 0:
polyline.append((a[0], a[1]))
a = a[5:]
cv2.polylines(img, [np.array(polyline)], entity.closed, color, 1)
cv2.polylines(mask, [np.array(polyline)], entity.closed, (255, 255, 255), 2)
return color
def draw_arc(img, entity, color, mask):
s = entity.dxf.start_angle * np.pi / 180
e = entity.dxf.end_angle * np.pi / 180
if s > e:
s -= 2 * np.pi
d = (e - s) / (int((e - s) * 180 / np.pi) + 1)
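    # Sample the arc in roughly one-degree steps between the start and end angles.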
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
angles = np.arange(s, e + d / 2, d)
x = cx + r * np.cos(angles)
y = cy + r * np.sin(angles)
points = np.column_stack((x, y)).astype(int)
cv2.polylines(img, [points], abs(s - e) < 1e-9, color, 1)
cv2.polylines(mask, [points], abs(s - e) < 1e-9, (255, 255, 255), 2)
return color
def draw_circle(img, entity, color, mask):
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), int(r), color, 1)
cv2.circle(mask, (int(cx), int(cy)), int(r), (255, 255, 255), -1)
return color
def draw_ellipse(img, entity, color, mask):
cx, cy = entity.dxf.center.xyz[:-1]
ma = entity.dxf.major_axis.magnitude
angle = entity.dxf.major_axis.angle_deg
mi = ma * entity.dxf.ratio
s = entity.dxf.start_param * 180 / np.pi
e = entity.dxf.end_param * 180 / np.pi
if entity.dxf.extrusion.z == -1:
s = 360 - s
e = 360 - e
cv2.ellipse(img, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, color, 1)
cv2.ellipse(mask, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, (255, 255, 255), 1)
return color
def draw_point(img, entity, color, mask):
cx, cy = entity.dxf.location.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), 0, color, 1)
cv2.circle(mask, (int(cx), int(cy)), 0, (255, 255, 255), -1)
return color
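# Dispatch table mapping DXF entity type names to the drawing helpers above.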
draw_map = {
'HATCH': draw_hatch,
'LINE': draw_line,
'LWPOLYLINE': draw_lwpolyline,
'ARC': draw_arc,
'CIRCLE': draw_circle,
'ELLIPSE': draw_ellipse,
'POINT': draw_point,
}
def paint(in_path, out_path, config):
doc = ezdxf.readfile(in_path)
extmax, extmin = doc.header['$EXTMAX'], doc.header['$EXTMIN']
xmin, ymin = np.floor(extmin[:-1]).astype(int)
xmax, ymax = np.ceil(extmax[:-1]).astype(int)
img = np.ones((ymax + ymin, xmax + xmin, 3), np.uint8) * 255
mask = np.zeros_like(img)
msp = doc.modelspace()
layers = config.get('layers', {})
colors = config.get('colors', {})
# print(doc.layers.entries.keys())
for layer_name, names in layers.items():
color = tuple(colors.get(layer_name, [0, 0, 0]))
for name in names:
if name not in doc.layers:
continue
entities = msp.query('*[layer=="%s"]' % name)
tmp = np.zeros((ymax + ymin, xmax + xmin), np.uint8)
for entity in entities:
if entity.DXFTYPE in draw_map:
draw_map[entity.DXFTYPE](img, entity, color, tmp)
else:
print("%s: %s" % (name, entity.DXFTYPE))
contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(mask, contours, -1, color, -1)
res, img_png = cv2.imencode('.png', cv2.flip(img, 0))
res, mask_png = cv2.imencode('.png', cv2.flip(mask, 0))
with open(out_path, 'wb') as f:
f.write(img_png.tobytes())
with open(out_path[:-4] + "_mask.png", 'wb') as f:
f.write(mask_png.tobytes())
|
[
"cv2.fillPoly",
"numpy.ceil",
"cv2.drawContours",
"cv2.flip",
"numpy.ones",
"numpy.floor",
"numpy.column_stack",
"cv2.findContours",
"ezdxf.readfile",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"numpy.zeros_like",
"numpy.arange"
] |
[((1455, 1481), 'numpy.arange', 'np.arange', (['s', '(e + d / 2)', 'd'], {}), '(s, e + d / 2, d)\n', (1464, 1481), True, 'import numpy as np\n'), ((3039, 3062), 'ezdxf.readfile', 'ezdxf.readfile', (['in_path'], {}), '(in_path)\n', (3053, 3062), False, 'import ezdxf\n'), ((3306, 3324), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (3319, 3324), True, 'import numpy as np\n'), ((3240, 3288), 'numpy.ones', 'np.ones', (['(ymax + ymin, xmax + xmin, 3)', 'np.uint8'], {}), '((ymax + ymin, xmax + xmin, 3), np.uint8)\n', (3247, 3288), True, 'import numpy as np\n'), ((4220, 4236), 'cv2.flip', 'cv2.flip', (['img', '(0)'], {}), '(img, 0)\n', (4228, 4236), False, 'import cv2\n'), ((4279, 4296), 'cv2.flip', 'cv2.flip', (['mask', '(0)'], {}), '(mask, 0)\n', (4287, 4296), False, 'import cv2\n'), ((317, 352), 'cv2.fillPoly', 'cv2.fillPoly', (['img', '[polygon]', 'color'], {}), '(img, [polygon], color)\n', (329, 352), False, 'import cv2\n'), ((365, 411), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', '[polygon]', '(255, 255, 255)'], {}), '(mask, [polygon], (255, 255, 255))\n', (377, 411), False, 'import cv2\n'), ((438, 483), 'cv2.fillPoly', 'cv2.fillPoly', (['img', '[polygon]', '(255, 255, 255)'], {}), '(img, [polygon], (255, 255, 255))\n', (450, 483), False, 'import cv2\n'), ((867, 899), 'numpy.array', 'np.array', (['entity.lwpoints.values'], {}), '(entity.lwpoints.values)\n', (875, 899), True, 'import numpy as np\n'), ((1014, 1032), 'numpy.array', 'np.array', (['polyline'], {}), '(polyline)\n', (1022, 1032), True, 'import numpy as np\n'), ((1085, 1103), 'numpy.array', 'np.array', (['polyline'], {}), '(polyline)\n', (1093, 1103), True, 'import numpy as np\n'), ((1499, 1513), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (1505, 1513), True, 'import numpy as np\n'), ((1531, 1545), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (1537, 1545), True, 'import numpy as np\n'), ((1559, 1582), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (1574, 1582), True, 'import numpy as np\n'), ((3146, 3167), 'numpy.floor', 'np.floor', (['extmin[:-1]'], {}), '(extmin[:-1])\n', (3154, 3167), True, 'import numpy as np\n'), ((3197, 3217), 'numpy.ceil', 'np.ceil', (['extmax[:-1]'], {}), '(extmax[:-1])\n', (3204, 3217), True, 'import numpy as np\n'), ((3736, 3782), 'numpy.zeros', 'np.zeros', (['(ymax + ymin, xmax + xmin)', 'np.uint8'], {}), '((ymax + ymin, xmax + xmin), np.uint8)\n', (3744, 3782), True, 'import numpy as np\n'), ((4053, 4118), 'cv2.findContours', 'cv2.findContours', (['tmp', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (4069, 4118), False, 'import cv2\n'), ((4131, 4178), 'cv2.drawContours', 'cv2.drawContours', (['mask', 'contours', '(-1)', 'color', '(-1)'], {}), '(mask, contours, -1, color, -1)\n', (4147, 4178), False, 'import cv2\n'), ((189, 245), 'numpy.array', 'np.array', (['[vertex[:-1] for vertex in poly_path.vertices]'], {}), '([vertex[:-1] for vertex in poly_path.vertices])\n', (197, 245), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
"""Load the iris dataset
Returns
-------
iris : NpzFile
data['data'] : a (150, 4) NumPy array with the iris' features
data['group'] : a (150,) NumPy array with the iris' group
"""
return np.load(load_data_file('iris/iris.npz',
force_download='2014-09-04'))
def load_crate():
"""Load an image of a crate
Returns
-------
crate : array
256x256x3 crate image.
"""
return np.load(load_data_file('orig/crate.npz'))['crate']
def pack_unit(value):
"""Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
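    # Each pass scales by 256 and stores the integer part in one byte while the fractional
    # part carries into the next iteration (np.modf returns (fractional, integral)).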
for i in range(4):
value, pack[..., i] = np.modf(value * 256.)
return pack
def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
    # np.frombuffer is the non-deprecated equivalent of np.fromstring for binary input.
    return np.frombuffer(value.tobytes(),
                  np.ubyte).reshape((value.shape + (4,)))
def load_spatial_filters(packed=True):
"""Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
"""
names = ("Bilinear", "Hanning", "Hamming", "Hermite",
"Kaiser", "Quadric", "Bicubic", "CatRom",
"Mitchell", "Spline16", "Spline36", "Gaussian",
"Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
if packed:
# convert the kernel to a packed representation
kernel = pack_unit(kernel)
return kernel, names
|
[
"os.path.dirname",
"numpy.zeros",
"os.path.join",
"numpy.modf"
] |
[((321, 341), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (331, 341), True, 'from os import path as op\n'), ((1080, 1124), 'numpy.zeros', 'np.zeros', (['(value.shape + (4,))'], {'dtype': 'np.ubyte'}), '(value.shape + (4,), dtype=np.ubyte)\n', (1088, 1124), True, 'import numpy as np\n'), ((1178, 1200), 'numpy.modf', 'np.modf', (['(value * 256.0)'], {}), '(value * 256.0)\n', (1185, 1200), True, 'import numpy as np\n'), ((2315, 2355), 'os.path.join', 'op.join', (['DATA_DIR', '"""spatial-filters.npy"""'], {}), "(DATA_DIR, 'spatial-filters.npy')\n", (2322, 2355), True, 'from os import path as op\n')]
|
import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, \
convert_examples_to_hierarchical_features
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
from utils.emotion import Emotion
# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
class BertEvaluator(object):
def __init__(self, model, processor, args, split='dev'):
self.args = args
self.model = model
self.processor = processor
self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
self.emotioner = Emotion(args.nrc_path, args.max_em_len, args.emotion_filters)
if split == 'test':
self.eval_examples = self.processor.get_test_examples(args.data_dir, args.test_name)
elif split == 'dev':
self.eval_examples = self.processor.get_dev_examples(args.data_dir, args.dev_name)
else:
self.eval_examples = self.processor.get_any_examples(args.data_dir, split)
def get_scores(self, silent=False, return_indices=False):
all_indices = []
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
unpadded_emotion_scores = [f.sentiment_scores for f in eval_features]
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_scores, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
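        # SequentialSampler keeps predictions aligned with the original example order.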
self.model.eval()
total_loss = 0
nb_eval_steps, nb_eval_examples = 0, 0
predicted_labels, target_labels = list(), list()
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
if return_indices:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, return_indices=return_indices)
else:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids)
if isinstance(outs, tuple):
outs, _ = outs
if return_indices:
logits, indices = outs
all_indices.extend(indices.cpu().detach().numpy())
else:
logits = outs
if self.args.is_multilabel:
predicted_labels.extend(F.sigmoid(logits).round().long().cpu().detach().numpy())
target_labels.extend(label_ids.cpu().detach().numpy())
loss = F.binary_cross_entropy_with_logits(logits, label_ids.float(), size_average=False)
average, average_mac = 'micro', 'macro'
else:
predicted_labels.extend(torch.argmax(logits, dim=1).cpu().detach().numpy())
target_labels.extend(torch.argmax(label_ids, dim=1).cpu().detach().numpy())
loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1))
average, average_mac = 'binary', 'binary'
if self.args.n_gpu > 1:
loss = loss.mean()
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
total_loss += loss.item()
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels)
accuracy = metrics.accuracy_score(target_labels, predicted_labels)
precision = metrics.precision_score(target_labels, predicted_labels, average=average)
recall = metrics.recall_score(target_labels, predicted_labels, average=average)
avg_loss = total_loss / nb_eval_steps
hamming_loss = metrics.hamming_loss(target_labels, predicted_labels)
jaccard_score = metrics.jaccard_score(target_labels, predicted_labels, average=average)
f1_micro = metrics.f1_score(target_labels, predicted_labels, average=average)
f1_macro = metrics.f1_score(target_labels, predicted_labels, average=average_mac)
if return_indices:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels, all_indices],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels', 'all_indices']
else:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels']
def get_bert_layers(self, silent=False, last_bert_layers=-1):
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
        unpadded_emotion_ids = [f.sentiment_scores for f in eval_features]  # same attribute read in get_scores() above
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_ids, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
bert_layers_l, label_ids_l = [], []
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
bert_layers = self.model.get_bert_embedding(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, last_bert_layers=last_bert_layers)
label_ids = torch.argmax(label_ids, dim=1).cpu().detach().numpy()
bert_layers_l.extend(bert_layers)
label_ids_l.extend(label_ids)
bert_layers_l = torch.stack(bert_layers_l, dim=0)
return bert_layers_l, label_ids_l
|
[
"datasets.bert_processors.abstract_processor.convert_examples_to_hierarchical_features",
"torch.nn.functional.sigmoid",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"sklearn.metrics.jaccard_score",
"utils.emotion.Emotion",
"sklearn.metrics.hamming_loss",
"datasets.bert_processors.abstract_processor.convert_examples_to_features_with_emotion",
"torch.argmax",
"torch.utils.data.SequentialSampler",
"torch.utils.data.TensorDataset",
"sklearn.metrics.accuracy_score",
"warnings.filterwarnings",
"utils.preprocessing.pad_input_matrix",
"sklearn.metrics.f1_score",
"tqdm.tqdm",
"torch.stack",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.no_grad",
"utils.tokenization.BertTokenizer.from_pretrained"
] |
[((539, 572), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (562, 572), False, 'import warnings\n'), ((785, 858), 'utils.tokenization.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.model'], {'is_lowercase': 'args.is_lowercase'}), '(args.model, is_lowercase=args.is_lowercase)\n', (814, 858), False, 'from utils.tokenization import BertTokenizer\n'), ((885, 946), 'utils.emotion.Emotion', 'Emotion', (['args.nrc_path', 'args.max_em_len', 'args.emotion_filters'], {}), '(args.nrc_path, args.max_em_len, args.emotion_filters)\n', (892, 946), False, 'from utils.emotion import Emotion\n'), ((2358, 2408), 'torch.tensor', 'torch.tensor', (['unpadded_input_ids'], {'dtype': 'torch.long'}), '(unpadded_input_ids, dtype=torch.long)\n', (2370, 2408), False, 'import torch\n'), ((2438, 2489), 'torch.tensor', 'torch.tensor', (['unpadded_input_mask'], {'dtype': 'torch.long'}), '(unpadded_input_mask, dtype=torch.long)\n', (2450, 2489), False, 'import torch\n'), ((2520, 2572), 'torch.tensor', 'torch.tensor', (['unpadded_segment_ids'], {'dtype': 'torch.long'}), '(unpadded_segment_ids, dtype=torch.long)\n', (2532, 2572), False, 'import torch\n'), ((2603, 2658), 'torch.tensor', 'torch.tensor', (['unpadded_emotion_scores'], {'dtype': 'torch.long'}), '(unpadded_emotion_scores, dtype=torch.long)\n', (2615, 2658), False, 'import torch\n'), ((2680, 2747), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in eval_features]'], {'dtype': 'torch.long'}), '([f.label_id for f in eval_features], dtype=torch.long)\n', (2692, 2747), False, 'import torch\n'), ((2771, 2876), 'torch.utils.data.TensorDataset', 'TensorDataset', (['padded_input_ids', 'padded_input_mask', 'padded_segment_ids', 'padded_emotion_ids', 'label_ids'], {}), '(padded_input_ids, padded_input_mask, padded_segment_ids,\n padded_emotion_ids, label_ids)\n', (2784, 2876), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((2897, 2925), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (2914, 2925), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((2953, 3029), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'self.args.batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)\n', (2963, 3029), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((3268, 3324), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""', 'disable': 'silent'}), "(eval_dataloader, desc='Evaluating', disable=silent)\n", (3272, 3324), False, 'from tqdm import tqdm\n'), ((5390, 5445), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['target_labels', 'predicted_labels'], {}), '(target_labels, predicted_labels)\n', (5412, 5445), False, 'from sklearn import metrics\n'), ((5467, 5540), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['target_labels', 'predicted_labels'], {'average': 'average'}), '(target_labels, predicted_labels, average=average)\n', (5490, 5540), False, 'from sklearn import metrics\n'), ((5559, 5629), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['target_labels', 'predicted_labels'], {'average': 'average'}), '(target_labels, predicted_labels, average=average)\n', (5579, 5629), False, 'from sklearn import metrics\n'), ((5703, 5756), 'sklearn.metrics.hamming_loss', 'metrics.hamming_loss', (['target_labels', 
'predicted_labels'], {}), '(target_labels, predicted_labels)\n', (5723, 5756), False, 'from sklearn import metrics\n'), ((5782, 5853), 'sklearn.metrics.jaccard_score', 'metrics.jaccard_score', (['target_labels', 'predicted_labels'], {'average': 'average'}), '(target_labels, predicted_labels, average=average)\n', (5803, 5853), False, 'from sklearn import metrics\n'), ((5874, 5940), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['target_labels', 'predicted_labels'], {'average': 'average'}), '(target_labels, predicted_labels, average=average)\n', (5890, 5940), False, 'from sklearn import metrics\n'), ((5961, 6031), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['target_labels', 'predicted_labels'], {'average': 'average_mac'}), '(target_labels, predicted_labels, average=average_mac)\n', (5977, 6031), False, 'from sklearn import metrics\n'), ((7734, 7784), 'torch.tensor', 'torch.tensor', (['unpadded_input_ids'], {'dtype': 'torch.long'}), '(unpadded_input_ids, dtype=torch.long)\n', (7746, 7784), False, 'import torch\n'), ((7814, 7865), 'torch.tensor', 'torch.tensor', (['unpadded_input_mask'], {'dtype': 'torch.long'}), '(unpadded_input_mask, dtype=torch.long)\n', (7826, 7865), False, 'import torch\n'), ((7896, 7948), 'torch.tensor', 'torch.tensor', (['unpadded_segment_ids'], {'dtype': 'torch.long'}), '(unpadded_segment_ids, dtype=torch.long)\n', (7908, 7948), False, 'import torch\n'), ((7979, 8031), 'torch.tensor', 'torch.tensor', (['unpadded_emotion_ids'], {'dtype': 'torch.long'}), '(unpadded_emotion_ids, dtype=torch.long)\n', (7991, 8031), False, 'import torch\n'), ((8053, 8120), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in eval_features]'], {'dtype': 'torch.long'}), '([f.label_id for f in eval_features], dtype=torch.long)\n', (8065, 8120), False, 'import torch\n'), ((8144, 8249), 'torch.utils.data.TensorDataset', 'TensorDataset', (['padded_input_ids', 'padded_input_mask', 'padded_segment_ids', 'padded_emotion_ids', 'label_ids'], {}), '(padded_input_ids, padded_input_mask, padded_segment_ids,\n padded_emotion_ids, label_ids)\n', (8157, 8249), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((8270, 8298), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (8287, 8298), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((8326, 8402), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'self.args.batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)\n', (8336, 8402), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((8556, 8612), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""', 'disable': 'silent'}), "(eval_dataloader, desc='Evaluating', disable=silent)\n", (8560, 8612), False, 'from tqdm import tqdm\n'), ((9304, 9337), 'torch.stack', 'torch.stack', (['bert_layers_l'], {'dim': '(0)'}), '(bert_layers_l, dim=0)\n', (9315, 9337), False, 'import torch\n'), ((1462, 1570), 'datasets.bert_processors.abstract_processor.convert_examples_to_hierarchical_features', 'convert_examples_to_hierarchical_features', (['self.eval_examples', 'self.args.max_seq_length', 'self.tokenizer'], {}), '(self.eval_examples, self.args.\n max_seq_length, self.tokenizer)\n', (1503, 1570), False, 'from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, convert_examples_to_hierarchical_features\n'), ((1628, 1752), 
'datasets.bert_processors.abstract_processor.convert_examples_to_features_with_emotion', 'convert_examples_to_features_with_emotion', (['self.eval_examples', 'self.args.max_seq_length', 'self.tokenizer', 'self.emotioner'], {}), '(self.eval_examples, self.args.\n max_seq_length, self.tokenizer, self.emotioner)\n', (1669, 1752), False, 'from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, convert_examples_to_hierarchical_features\n'), ((2110, 2172), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', (['unpadded_input_ids', 'self.args.max_doc_length'], {}), '(unpadded_input_ids, self.args.max_doc_length)\n', (2126, 2172), False, 'from utils.preprocessing import pad_input_matrix\n'), ((2186, 2249), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', (['unpadded_input_mask', 'self.args.max_doc_length'], {}), '(unpadded_input_mask, self.args.max_doc_length)\n', (2202, 2249), False, 'from utils.preprocessing import pad_input_matrix\n'), ((2263, 2327), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', (['unpadded_segment_ids', 'self.args.max_doc_length'], {}), '(unpadded_segment_ids, self.args.max_doc_length)\n', (2279, 2327), False, 'from utils.preprocessing import pad_input_matrix\n'), ((5318, 5344), 'numpy.array', 'np.array', (['predicted_labels'], {}), '(predicted_labels)\n', (5326, 5344), True, 'import numpy as np\n'), ((5346, 5369), 'numpy.array', 'np.array', (['target_labels'], {}), '(target_labels)\n', (5354, 5369), True, 'import numpy as np\n'), ((6838, 6946), 'datasets.bert_processors.abstract_processor.convert_examples_to_hierarchical_features', 'convert_examples_to_hierarchical_features', (['self.eval_examples', 'self.args.max_seq_length', 'self.tokenizer'], {}), '(self.eval_examples, self.args.\n max_seq_length, self.tokenizer)\n', (6879, 6946), False, 'from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, convert_examples_to_hierarchical_features\n'), ((7004, 7128), 'datasets.bert_processors.abstract_processor.convert_examples_to_features_with_emotion', 'convert_examples_to_features_with_emotion', (['self.eval_examples', 'self.args.max_seq_length', 'self.tokenizer', 'self.emotioner'], {}), '(self.eval_examples, self.args.\n max_seq_length, self.tokenizer, self.emotioner)\n', (7045, 7128), False, 'from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, convert_examples_to_hierarchical_features\n'), ((7486, 7548), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', (['unpadded_input_ids', 'self.args.max_doc_length'], {}), '(unpadded_input_ids, self.args.max_doc_length)\n', (7502, 7548), False, 'from utils.preprocessing import pad_input_matrix\n'), ((7562, 7625), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', (['unpadded_input_mask', 'self.args.max_doc_length'], {}), '(unpadded_input_mask, self.args.max_doc_length)\n', (7578, 7625), False, 'from utils.preprocessing import pad_input_matrix\n'), ((7639, 7703), 'utils.preprocessing.pad_input_matrix', 'pad_input_matrix', (['unpadded_segment_ids', 'self.args.max_doc_length'], {}), '(unpadded_segment_ids, self.args.max_doc_length)\n', (7655, 7703), False, 'from utils.preprocessing import pad_input_matrix\n'), ((3636, 3651), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3649, 3651), False, 'import torch\n'), ((8924, 8939), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8937, 8939), False, 'import torch\n'), ((4855, 4885), 'torch.argmax', 
'torch.argmax', (['label_ids'], {'dim': '(1)'}), '(label_ids, dim=1)\n', (4867, 4885), False, 'import torch\n'), ((9127, 9157), 'torch.argmax', 'torch.argmax', (['label_ids'], {'dim': '(1)'}), '(label_ids, dim=1)\n', (9139, 9157), False, 'import torch\n'), ((4662, 4689), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (4674, 4689), False, 'import torch\n'), ((4752, 4782), 'torch.argmax', 'torch.argmax', (['label_ids'], {'dim': '(1)'}), '(label_ids, dim=1)\n', (4764, 4782), False, 'import torch\n'), ((4308, 4325), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['logits'], {}), '(logits)\n', (4317, 4325), True, 'import torch.nn.functional as F\n')]
|
#!/usr/bin/env python
# coding: utf-8
""" Learning Koopman Invariant Subspace
(c) <NAME>, 2017.
<EMAIL>
"""
import numpy as np
np.random.seed(1234567890)
from argparse import ArgumentParser
from os import path
import time
from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner
from losses import combined_loss
from torch import device, save, manual_seed
from torch.optim import SGD
import matplotlib.pyplot as plt
import seaborn as sns
# -- Parse arguments
t = time.time()
parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)')
parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment")
parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model")
parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batchs for break the data up into")
parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or no")
parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space")
parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl")
parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="where to save plotting")
parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag")
parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space")
parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by")
parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate")
parser.add_argument("--validation-data-path", "-v", type=str, default="")
#ToDo: Implement
parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set")
if __name__ == "__main__":
# grab the command line arguments
cli_args = parser.parse_args()
manual_seed(216)
# find and load the training data
data_path = cli_args.data_path
print(f"Loading training data from {data_path}")
data_train = np.load(data_path)
if len(data_train.shape) == 1:
data_train = data_train.reshape(-1, 1)
print(f"Loaded a dataset with dimension: {data_train.shape}")
validate = cli_args.validation_data_path != ""
data_val = None
if validate:
data_path = cli_args.validation_data_path
print(f"Loading validation data from {data_path}")
data_val = np.load(data_path)
# process the delay either set by the user or is set to one 10th of the data
delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10)
# based on the number of batches, delay, and size of the data compute the samples per batch
samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches
# construct the data preparer
batch_iterator = TimeSeriesBatchMaker(
y=data_train,
batch_size=samples_per_batch,
max_lag=delay
)
if validate:
val_batch_iterator = TimeSeriesBatchMaker(
y=data_val,
max_lag=delay
)
# construct the end-to-end model
lkis = KoopmanInvariantSubspaceLearner(
observable_dim=data_train.shape[1],
latent_dim=cli_args.state_space,
intermediate_observable=cli_args.intermediate_observable,
delay=delay
)
    if cli_args.gpu:
        device = device("cuda")
        lkis = lkis.to(device)  # the model must be moved to the GPU along with the batches
# initialize the optimizer
optimizer = SGD(lkis.parameters(), lr=cli_args.learning_rate)
losses = []
val_losses = []
for epoch in range(cli_args.epochs):
loss = 0
for b in range(cli_args.num_batches):
optimizer.zero_grad()
time_delayed_ys, y_true = next(batch_iterator)
            if cli_args.gpu:
                # Tensor.to() is not in-place; keep the returned copies
                time_delayed_ys = time_delayed_ys.to(device)
                y_true = y_true.to(device)
g_pred, y_pred = lkis(time_delayed_ys)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
batch_loss.backward()
optimizer.step()
loss += batch_loss.item()
# display the epoch training loss
print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}")
losses.append(loss)
if validate:
y_time_delayed_val, y_true = next(val_batch_iterator)
        if cli_args.gpu:
            # Tensor.to() is not in-place; keep the returned copies
            y_time_delayed_val = y_time_delayed_val.to(device)
            y_true = y_true.to(device)
g_pred, y_pred = lkis(y_time_delayed_val)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
val_loss = batch_loss.item()
print(f"\tval-loss = {val_loss:.6f}")
val_losses.append(val_loss)
if cli_args.save_model:
save(lkis, f"{cli_args.name}.torch.mdl")
if cli_args.save_training_plot:
sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss")
if validate:
sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss")
plt.xlabel("Epochs")
plt.ylabel("Combined Reconstruction and DMD Loss")
plt.title(f"Training Loss for {cli_args.name}")
plt.savefig(f"{cli_args.name}-training-loss.png")
|
[
"torch.manual_seed",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"lkis.TimeSeriesBatchMaker",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"lkis.KoopmanInvariantSubspaceLearner",
"losses.combined_loss",
"numpy.random.seed",
"torch.save",
"matplotlib.pyplot.title",
"numpy.load",
"time.time",
"torch.device"
] |
[((131, 157), 'numpy.random.seed', 'np.random.seed', (['(1234567890)'], {}), '(1234567890)\n', (145, 157), True, 'import numpy as np\n'), ((485, 496), 'time.time', 'time.time', ([], {}), '()\n', (494, 496), False, 'import time\n'), ((506, 596), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Learning Koopman Invariant Subspace (Now with PyTorch!)"""'}), "(description=\n 'Learning Koopman Invariant Subspace (Now with PyTorch!)')\n", (520, 596), False, 'from argparse import ArgumentParser\n'), ((2227, 2243), 'torch.manual_seed', 'manual_seed', (['(216)'], {}), '(216)\n', (2238, 2243), False, 'from torch import device, save, manual_seed\n'), ((2388, 2406), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (2395, 2406), True, 'import numpy as np\n'), ((3189, 3268), 'lkis.TimeSeriesBatchMaker', 'TimeSeriesBatchMaker', ([], {'y': 'data_train', 'batch_size': 'samples_per_batch', 'max_lag': 'delay'}), '(y=data_train, batch_size=samples_per_batch, max_lag=delay)\n', (3209, 3268), False, 'from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner\n'), ((3476, 3656), 'lkis.KoopmanInvariantSubspaceLearner', 'KoopmanInvariantSubspaceLearner', ([], {'observable_dim': 'data_train.shape[1]', 'latent_dim': 'cli_args.state_space', 'intermediate_observable': 'cli_args.intermediate_observable', 'delay': 'delay'}), '(observable_dim=data_train.shape[1],\n latent_dim=cli_args.state_space, intermediate_observable=cli_args.\n intermediate_observable, delay=delay)\n', (3507, 3656), False, 'from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner\n'), ((2771, 2789), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (2778, 2789), True, 'import numpy as np\n'), ((3345, 3392), 'lkis.TimeSeriesBatchMaker', 'TimeSeriesBatchMaker', ([], {'y': 'data_val', 'max_lag': 'delay'}), '(y=data_val, max_lag=delay)\n', (3365, 3392), False, 'from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner\n'), ((3725, 3739), 'torch.device', 'device', (['"""cuda"""'], {}), "('cuda')\n", (3731, 3739), False, 'from torch import device, save, manual_seed\n'), ((5195, 5235), 'torch.save', 'save', (['lkis', 'f"""{cli_args.name}.torch.mdl"""'], {}), "(lkis, f'{cli_args.name}.torch.mdl')\n", (5199, 5235), False, 'from torch import device, save, manual_seed\n'), ((5484, 5504), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (5494, 5504), True, 'import matplotlib.pyplot as plt\n'), ((5513, 5563), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Combined Reconstruction and DMD Loss"""'], {}), "('Combined Reconstruction and DMD Loss')\n", (5523, 5563), True, 'import matplotlib.pyplot as plt\n'), ((5572, 5619), 'matplotlib.pyplot.title', 'plt.title', (['f"""Training Loss for {cli_args.name}"""'], {}), "(f'Training Loss for {cli_args.name}')\n", (5581, 5619), True, 'import matplotlib.pyplot as plt\n'), ((5628, 5677), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{cli_args.name}-training-loss.png"""'], {}), "(f'{cli_args.name}-training-loss.png')\n", (5639, 5677), True, 'import matplotlib.pyplot as plt\n'), ((4315, 4376), 'losses.combined_loss', 'combined_loss', ([], {'y_pred': 'y_pred', 'y_true': 'y_true', 'g_0': 'g_0', 'g_1': 'g_1'}), '(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)\n', (4328, 4376), False, 'from losses import combined_loss\n'), ((4964, 5025), 'losses.combined_loss', 'combined_loss', ([], {'y_pred': 'y_pred', 'y_true': 'y_true', 'g_0': 'g_0', 'g_1': 'g_1'}), '(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)\n', 
(4977, 5025), False, 'from losses import combined_loss\n'), ((658, 669), 'time.time', 'time.time', ([], {}), '()\n', (667, 669), False, 'import time\n')]
|
import numpy as np
from math import pi,exp
def static_stability(height,area,theta,s_et=None,n_et=None):
"""
The function "static_stability" computes the vertical gradient (z-derivative)
of hemispheric-averaged potential temperature, i.e. d\tilde{theta}/dz in the def-
inition of QGPV in eq.(3) of Huang and Nakamura (2016), by central differencing.
At the boundary, the static stability is estimated by forward/backward differen-
cing involving two adjacent z-grid points:
i.e. stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0])
stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])
Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues
Parameters
----------
height : sequence or array_like
Array of z-coordinate [in meters] with dimension = (kmax), equally spaced
area : ndarray
Two-dimension numpy array specifying differential areal element of each grid point;
dimension = (nlat, nlon).
theta : ndarray
Matrix of potential temperature [K] with dimension (kmax,nlat,nlon) or (kmax,nlat)
s_et : int, optional
Index of the latitude that defines the boundary of the Southern hemispheric domain;
initialized as nlat/2 if not input
n_et : int, optional
        Index of the latitude that defines the boundary of the Northern hemispheric domain;
initialized as nlat/2 if not input
Returns
-------
t0_n : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Northern hemispheric domain with dimension = (kmax)
t0_s : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Southern hemispheric domain with dimension = (kmax)
stat_n : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric
domain with dimension = (kmax)
stat_s : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric
domain with dimension = (kmax)
"""
nlat = theta.shape[1]
if s_et==None:
s_et = nlat//2
if n_et==None:
n_et = nlat//2
stat_n = np.zeros(theta.shape[0])
stat_s = np.zeros(theta.shape[0])
if theta.ndim==3:
zonal_mean = np.mean(theta,axis=-1)
elif theta.ndim==2:
zonal_mean = theta
if area.ndim==2:
area_zonal_mean = np.mean(area,axis=-1)
elif area.ndim==1:
area_zonal_mean = area
csm_n_et = np.sum(area_zonal_mean[-n_et:])
csm_s_et = np.sum(area_zonal_mean[:s_et])
t0_n = np.sum(zonal_mean[:,-n_et:]*area_zonal_mean[np.newaxis,-n_et:],axis=-1)/csm_n_et
t0_s = np.sum(zonal_mean[:,:s_et]*area_zonal_mean[np.newaxis,:s_et],axis=-1)/csm_s_et
stat_n[1:-1] = (t0_n[2:]-t0_n[:-2])/(height[2:]-height[:-2])
stat_s[1:-1] = (t0_s[2:]-t0_s[:-2])/(height[2:]-height[:-2])
stat_n[0] = (t0_n[1]-t0_n[0])/(height[1]-height[0])
stat_n[-1] = (t0_n[-2]-t0_n[-1])/(height[-2]-height[-1])
stat_s[0] = (t0_s[1]-t0_s[0])/(height[1]-height[0])
stat_s[-1] = (t0_s[-2]-t0_s[-1])/(height[-2]-height[-1])
return t0_n,t0_s,stat_n,stat_s
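# Illustrative usage sketch (not part of the original module; all grids below are
# hypothetical): with a potential temperature field that increases linearly with height,
# the returned static stability is just the constant lapse rate.
def _example_static_stability():
    height = np.linspace(0., 48000., 49)            # kmax = 49 levels, dz = 1000 m
    area = np.ones((121, 240))                      # dummy areal elements, (nlat, nlon)
    theta = 300. + 0.01 * height[:, np.newaxis, np.newaxis] * np.ones((49, 121, 240))
    t0_n, t0_s, stat_n, stat_s = static_stability(height, area, theta)
    return stat_n, stat_s                         # ~0.01 K/m at every level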
def compute_qgpv_givenvort(omega,nlat,nlon,kmax,unih,ylat,avort,potential_temp,
t0_cn,t0_cs,stat_cn,stat_cs,nlat_s=None,scale_height=7000.):
"""
The function "compute_qgpv_givenvort" computes the quasi-geostrophic potential
vorticity based on the absolute vorticity, potential temperature and static
stability given.
Please make inquiries and report issues via Github: https://github.com/csyhuang/hn2016_falwa/issues
Parameters
----------
omega : float, optional
Rotation rate of the planet.
nlat : int
Latitudinal dimension of the latitude grid.
nlon : int
Longitudinal dimension of the longitude grid.
kmax : int
Vertical dimension of the height grid.
unih : sequence or array_like
Numpy array of height in [meters]; dimension = (kmax)
ylat : sequence or array_like
Numpy array of latitudes in [degrees]; dimension = (nlat)
avort : ndarray
Three-dimension numpy array of absolute vorticity (i.e. relative vorticity
+ 2*Omega*sin(lat)) in [1/s]; dimension = (kmax x nlat x nlon)
potential_temp : ndarray
Three-dimension numpy array of potential temperature in [K];
dimension = (kmax x nlat x nlon)
t0_cn : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Northern hemispheric domain with dimension = (kmax)
t0_cs : sequence or array_like
Area-weighted average of potential temperature (\tilde{\theta} in HN16)
in the Southern hemispheric domain with dimension = (kmax)
stat_cn : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Northern hemispheric
domain with dimension = (kmax)
stat_cs : sequence or array_like
Static stability (d\tilde{\theta}/dz in HN16) in the Southern hemispheric
domain with dimension = (kmax)
    nlat_s : int, optional
        Index of the latitude that separates the Northern and Southern hemispheric domains;
        initialized as nlat/2 if not input
    scale_height : float
        Scale height of the atmosphere in [m] with default value 7000.
Returns
-------
QGPV : ndarray
Three-dimension numpy array of quasi-geostrophic potential vorticity;
dimension = (kmax x nlat x nlon)
dzdiv : ndarray
Three-dimension numpy array of the stretching term in QGPV;
dimension = (kmax x nlat x nlon)
"""
if nlat_s==None:
nlat_s=nlat//2
clat = np.cos(ylat*pi/180.)
clat = np.abs(clat) # Just to avoid the negative value at poles
# --- Next, calculate PV ---
av2 = np.empty_like(potential_temp) # dv/d(lon)
av3 = np.empty_like(potential_temp) # du/d(lat)
qgpv = np.empty_like(potential_temp) # av1+av2+av3+dzdiv
av1 = np.ones((kmax,nlat,nlon)) * 2*omega*np.sin(ylat[np.newaxis,:,np.newaxis]*pi/180.)
# Calculate the z-divergence term
zdiv = np.empty_like(potential_temp)
dzdiv = np.empty_like(potential_temp)
for kk in range(kmax): # This is more efficient
zdiv[kk,:nlat_s,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,:nlat_s,:]-t0_cs[kk])/stat_cs[kk]
zdiv[kk,-nlat_s:,:] = exp(-unih[kk]/scale_height)*(potential_temp[kk,-nlat_s:,:]-t0_cn[kk])/stat_cn[kk]
dzdiv[1:kmax-1,:,:] = np.exp(unih[1:kmax-1,np.newaxis,np.newaxis]/scale_height)* \
(zdiv[2:kmax,:,:]-zdiv[0:kmax-2,:,:]) \
/(unih[2:kmax,np.newaxis,np.newaxis]-unih[0:kmax-2,np.newaxis,np.newaxis])
dzdiv[0,:,:] = exp(unih[0]/scale_height)*(zdiv[1,:,:]-zdiv[0,:,:])/ \
(unih[1,np.newaxis,np.newaxis]-unih[0,np.newaxis,np.newaxis])
dzdiv[kmax-1,:,:] = exp(unih[kmax-1]/scale_height)*(zdiv[kmax-1,:,:]-zdiv[kmax-2,:,:])/ \
(unih[kmax-1,np.newaxis,np.newaxis]-unih[kmax-2,np.newaxis,np.newaxis])
qgpv = avort+dzdiv * av1
return qgpv, dzdiv
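# Illustrative driver (not part of the original module; all fields below are synthetic and
# hypothetical) showing how static_stability feeds compute_qgpv_givenvort and which array
# shapes the two routines expect.
def _example_qgpv(kmax=10, nlat=24, nlon=48):
    omega = 2. * pi / 86400.
    height = np.linspace(0., 45000., kmax)
    ylat = np.linspace(-90., 90., nlat)
    area = np.ones((nlat, nlon))
    theta = 300. + 0.01 * height[:, np.newaxis, np.newaxis] + np.zeros((kmax, nlat, nlon))
    avort = (2. * omega * np.sin(ylat * pi / 180.)[np.newaxis, :, np.newaxis]
             * np.ones((kmax, nlat, nlon)))
    t0_n, t0_s, stat_n, stat_s = static_stability(height, area, theta)
    qgpv, dzdiv = compute_qgpv_givenvort(omega, nlat, nlon, kmax, height, ylat,
                                          avort, theta, t0_n, t0_s, stat_n, stat_s)
    return qgpv.shape  # (kmax, nlat, nlon)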
|
[
"numpy.abs",
"numpy.mean",
"numpy.ones",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.empty_like",
"numpy.cos",
"numpy.sin",
"math.exp"
] |
[((2300, 2324), 'numpy.zeros', 'np.zeros', (['theta.shape[0]'], {}), '(theta.shape[0])\n', (2308, 2324), True, 'import numpy as np\n'), ((2338, 2362), 'numpy.zeros', 'np.zeros', (['theta.shape[0]'], {}), '(theta.shape[0])\n', (2346, 2362), True, 'import numpy as np\n'), ((2621, 2652), 'numpy.sum', 'np.sum', (['area_zonal_mean[-n_et:]'], {}), '(area_zonal_mean[-n_et:])\n', (2627, 2652), True, 'import numpy as np\n'), ((2668, 2698), 'numpy.sum', 'np.sum', (['area_zonal_mean[:s_et]'], {}), '(area_zonal_mean[:s_et])\n', (2674, 2698), True, 'import numpy as np\n'), ((5689, 5714), 'numpy.cos', 'np.cos', (['(ylat * pi / 180.0)'], {}), '(ylat * pi / 180.0)\n', (5695, 5714), True, 'import numpy as np\n'), ((5721, 5733), 'numpy.abs', 'np.abs', (['clat'], {}), '(clat)\n', (5727, 5733), True, 'import numpy as np\n'), ((5822, 5851), 'numpy.empty_like', 'np.empty_like', (['potential_temp'], {}), '(potential_temp)\n', (5835, 5851), True, 'import numpy as np\n'), ((5874, 5903), 'numpy.empty_like', 'np.empty_like', (['potential_temp'], {}), '(potential_temp)\n', (5887, 5903), True, 'import numpy as np\n'), ((5927, 5956), 'numpy.empty_like', 'np.empty_like', (['potential_temp'], {}), '(potential_temp)\n', (5940, 5956), True, 'import numpy as np\n'), ((6120, 6149), 'numpy.empty_like', 'np.empty_like', (['potential_temp'], {}), '(potential_temp)\n', (6133, 6149), True, 'import numpy as np\n'), ((6162, 6191), 'numpy.empty_like', 'np.empty_like', (['potential_temp'], {}), '(potential_temp)\n', (6175, 6191), True, 'import numpy as np\n'), ((2407, 2430), 'numpy.mean', 'np.mean', (['theta'], {'axis': '(-1)'}), '(theta, axis=-1)\n', (2414, 2430), True, 'import numpy as np\n'), ((2529, 2551), 'numpy.mean', 'np.mean', (['area'], {'axis': '(-1)'}), '(area, axis=-1)\n', (2536, 2551), True, 'import numpy as np\n'), ((2711, 2787), 'numpy.sum', 'np.sum', (['(zonal_mean[:, -n_et:] * area_zonal_mean[np.newaxis, -n_et:])'], {'axis': '(-1)'}), '(zonal_mean[:, -n_et:] * area_zonal_mean[np.newaxis, -n_et:], axis=-1)\n', (2717, 2787), True, 'import numpy as np\n'), ((2803, 2877), 'numpy.sum', 'np.sum', (['(zonal_mean[:, :s_et] * area_zonal_mean[np.newaxis, :s_et])'], {'axis': '(-1)'}), '(zonal_mean[:, :s_et] * area_zonal_mean[np.newaxis, :s_et], axis=-1)\n', (2809, 2877), True, 'import numpy as np\n'), ((6024, 6076), 'numpy.sin', 'np.sin', (['(ylat[np.newaxis, :, np.newaxis] * pi / 180.0)'], {}), '(ylat[np.newaxis, :, np.newaxis] * pi / 180.0)\n', (6030, 6076), True, 'import numpy as np\n'), ((6493, 6556), 'numpy.exp', 'np.exp', (['(unih[1:kmax - 1, np.newaxis, np.newaxis] / scale_height)'], {}), '(unih[1:kmax - 1, np.newaxis, np.newaxis] / scale_height)\n', (6499, 6556), True, 'import numpy as np\n'), ((6697, 6724), 'math.exp', 'exp', (['(unih[0] / scale_height)'], {}), '(unih[0] / scale_height)\n', (6700, 6724), False, 'from math import pi, exp\n'), ((6842, 6876), 'math.exp', 'exp', (['(unih[kmax - 1] / scale_height)'], {}), '(unih[kmax - 1] / scale_height)\n', (6845, 6876), False, 'from math import pi, exp\n'), ((5988, 6015), 'numpy.ones', 'np.ones', (['(kmax, nlat, nlon)'], {}), '((kmax, nlat, nlon))\n', (5995, 6015), True, 'import numpy as np\n'), ((6273, 6302), 'math.exp', 'exp', (['(-unih[kk] / scale_height)'], {}), '(-unih[kk] / scale_height)\n', (6276, 6302), False, 'from math import pi, exp\n'), ((6384, 6413), 'math.exp', 'exp', (['(-unih[kk] / scale_height)'], {}), '(-unih[kk] / scale_height)\n', (6387, 6413), False, 'from math import pi, exp\n')]
|
import numpy as np
import scipy.interpolate
import scipy.ndimage
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
def _calc_patch_grid_dims(shape, patch_size, patch_stride):
x_w, x_h, x_c = shape
num_rows = 1 + (x_h - patch_size) // patch_stride
num_cols = 1 + (x_w - patch_size) // patch_stride
return num_rows, num_cols
def make_patch_grid(x, patch_size, patch_stride=1):
'''x shape: (num_channels, rows, cols)'''
x = x.transpose(2, 1, 0)
patches = extract_patches_2d(x, (patch_size, patch_size))
x_w, x_h, x_c = x.shape
num_rows, num_cols = _calc_patch_grid_dims(x.shape, patch_size, patch_stride)
patches = patches.reshape((num_rows, num_cols, patch_size, patch_size, x_c))
patches = patches.transpose((0, 1, 4, 2, 3))
#patches = np.rollaxis(patches, -1, 2)
return patches
def combine_patches_grid(in_patches, out_shape):
'''Reconstruct an image from these `patches`
input shape: (rows, cols, channels, patch_row, patch_col)
'''
num_rows, num_cols = in_patches.shape[:2]
num_channels = in_patches.shape[-3]
patch_size = in_patches.shape[-1]
num_patches = num_rows * num_cols
in_patches = np.reshape(in_patches, (num_patches, num_channels, patch_size, patch_size)) # (patches, channels, pr, pc)
in_patches = np.transpose(in_patches, (0, 2, 3, 1)) # (patches, p, p, channels)
recon = reconstruct_from_patches_2d(in_patches, out_shape)
return recon.transpose(2, 1, 0).astype(np.float32)
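# Quick round-trip check (illustrative, not part of the original module): with patch_size=1
# the patch grid tiles the image exactly, so make_patch_grid followed by
# combine_patches_grid should reproduce the input up to dtype.
def _example_patch_roundtrip():
    img = np.random.uniform(0., 1., (3, 8, 10)).astype(np.float32)  # (channels, rows, cols)
    grid = make_patch_grid(img, patch_size=1)
    recon = combine_patches_grid(grid, img.shape[::-1])
    return np.allclose(recon, img)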
class PatchMatcher(object):
'''A matcher of image patches inspired by the PatchMatch algorithm.
image shape: (width, height, channels)
'''
def __init__(self, input_shape, target_img, patch_size=1, patch_stride=1, jump_size=0.5,
num_propagation_steps=5, num_random_steps=5, random_max_radius=1.0, random_scale=0.5):
self.input_shape = input_shape
self.patch_size = patch_size
self.patch_stride = patch_stride
self.jump_size = jump_size
self.num_propagation_steps = num_propagation_steps
self.num_random_steps = num_random_steps
self.random_max_radius = random_max_radius
self.random_scale = random_scale
self.num_input_rows, self.num_input_cols = _calc_patch_grid_dims(input_shape, patch_size, patch_stride)
self.target_patches = make_patch_grid(target_img, patch_size)
self.target_patches_normed = self.normalize_patches(self.target_patches)
self.coords = np.random.uniform(0.0, 1.0, # TODO: switch to pixels
(2, self.num_input_rows, self.num_input_cols))# * [[[self.num_input_rows]],[[self.num_input_cols]]]
self.similarity = np.zeros(input_shape[:2:-1], dtype=np.float32)
self.min_propagration_row = 1.0 / self.num_input_rows
self.min_propagration_col = 1.0 / self.num_input_cols
self.delta_row = np.array([[[self.min_propagration_row]], [[0.0]]])
self.delta_col = np.array([[[0.0]], [[self.min_propagration_col]]])
def update(self, input_img, reverse_propagation=False):
input_patches = self.get_patches_for(input_img)
self.update_with_patches(self.normalize_patches(input_patches), reverse_propagation=reverse_propagation)
def update_with_patches(self, input_patches, reverse_propagation=False):
self._propagate(input_patches, reverse_propagation=reverse_propagation)
self._random_update(input_patches)
def get_patches_for(self, img):
        return make_patch_grid(img, self.patch_size)
def normalize_patches(self, patches):
norm = np.sqrt(np.sum(np.square(patches), axis=(2, 3, 4), keepdims=True))
return patches / norm
def _propagate(self, input_patches, reverse_propagation=False):
if reverse_propagation:
roll_direction = 1
else:
roll_direction = -1
sign = float(roll_direction)
for step_i in range(self.num_propagation_steps):
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 1) + self.delta_row * sign)
coords_row, similarity_row = self.eval_state(new_coords, input_patches)
new_coords = self.clip_coords(np.roll(self.coords, roll_direction, 2) + self.delta_col * sign)
coords_col, similarity_col = self.eval_state(new_coords, input_patches)
self.coords, self.similarity = self.take_best(coords_row, similarity_row, coords_col, similarity_col)
def _random_update(self, input_patches):
for alpha in range(1, self.num_random_steps + 1): # NOTE this should actually stop when the move is < 1
new_coords = self.clip_coords(self.coords + np.random.uniform(-self.random_max_radius, self.random_max_radius, self.coords.shape) * self.random_scale ** alpha)
self.coords, self.similarity = self.eval_state(new_coords, input_patches)
def eval_state(self, new_coords, input_patches):
new_similarity = self.patch_similarity(input_patches, new_coords)
delta_similarity = new_similarity - self.similarity
coords = np.where(delta_similarity > 0, new_coords, self.coords)
best_similarity = np.where(delta_similarity > 0, new_similarity, self.similarity)
return coords, best_similarity
def take_best(self, coords_a, similarity_a, coords_b, similarity_b):
delta_similarity = similarity_a - similarity_b
best_coords = np.where(delta_similarity > 0, coords_a, coords_b)
best_similarity = np.where(delta_similarity > 0, similarity_a, similarity_b)
return best_coords, best_similarity
def patch_similarity(self, source, coords):
'''Check the similarity of the patches specified in coords.'''
target_vals = self.lookup_coords(self.target_patches_normed, coords)
err = source * target_vals
return np.sum(err, axis=(2, 3, 4))
def clip_coords(self, coords):
# TODO: should this all be in pixel space?
coords = np.clip(coords, 0.0, 1.0)
return coords
def lookup_coords(self, x, coords):
x_shape = np.expand_dims(np.expand_dims(x.shape, -1), -1)
i_coords = np.round(coords * (x_shape[:2] - 1)).astype('int32')
return x[i_coords[0], i_coords[1]]
def get_reconstruction(self, patches=None, combined=None):
if combined is not None:
patches = make_patch_grid(combined, self.patch_size)
if patches is None:
patches = self.target_patches
patches = self.lookup_coords(patches, self.coords)
recon = combine_patches_grid(patches, self.input_shape)
return recon
def scale(self, new_shape, new_target_img):
'''Create a new matcher of the given shape and replace its
state with a scaled up version of the current matcher's state.
'''
new_matcher = PatchMatcher(new_shape, new_target_img, patch_size=self.patch_size,
patch_stride=self.patch_stride, jump_size=self.jump_size,
num_propagation_steps=self.num_propagation_steps,
num_random_steps=self.num_random_steps,
random_max_radius=self.random_max_radius,
random_scale=self.random_scale)
new_matcher.coords = congrid(self.coords, new_matcher.coords.shape, method='neighbour')
new_matcher.similarity = congrid(self.similarity, new_matcher.coords.shape, method='neighbour')
return new_matcher
def congrid(a, newdims, method='linear', centre=False, minusone=False):
'''Arbitrary resampling of source array to new dimension sizes.
Currently only supports maintaining the same number of dimensions.
To use 1-D arrays, first promote them to shape (x,1).
Uses the same parameters and creates the same co-ordinate lookup points
    as IDL's congrid routine, which apparently originally came from a VAX/VMS
routine of the same name.
method:
neighbour - closest value from original data
nearest and linear - uses n x 1-D interpolations using
scipy.interpolate.interp1d
(see Numerical Recipes for validity of use of n 1-D interpolations)
spline - uses ndimage.map_coordinates
centre:
True - interpolation points are at the centres of the bins
False - points are at the front edge of the bin
minusone:
For example- inarray.shape = (i,j) & new dimensions = (x,y)
False - inarray is resampled by factors of (i/x) * (j/y)
True - inarray is resampled by(i-1)/(x-1) * (j-1)/(y-1)
This prevents extrapolation one element beyond bounds of input array.
'''
if not a.dtype in [np.float64, np.float32]:
        a = a.astype(float)
    m1 = int(minusone)
    ofs = int(centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print("[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
            base = np.indices(newdims.astype(int))[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
        newa = a[tuple( cd )]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
        olddims = [np.arange(i, dtype=float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
        trorder = [ndims - 1] + list(range( ndims - 1 ))
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
        newcoords_dims = list(range(newcoords.ndim))
        #make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported.")
return None
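# Illustrative congrid call (hypothetical array; not part of the original module): upsample
# a small grid with nearest-neighbour lookups, the same method PatchMatcher.scale uses.
def _example_congrid():
    small = np.arange(6, dtype=np.float32).reshape((2, 3))
    big = congrid(small, (4, 6), method='neighbour', minusone=True)
    return big.shape  # (4, 6)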
if __name__ == '__main__':
import sys
import time
from scipy.misc import imsave
from image_analogy.img_utils import load_image, preprocess_image, deprocess_image
content_image_path, style_image_path, output_prefix = sys.argv[1:]
jump_size = 1.0
num_steps = 7
patch_size = 1
patch_stride = 1
feat_chans = 512
feat_style_shape = (feat_chans, 12, 18)
feat_style = np.random.uniform(0.0, 1.0, feat_style_shape)
feat_in_shape = (feat_chans, 17, 10)
feat_in = np.random.uniform(0.0, 1.0, feat_in_shape)
matcher = PatchMatcher(feat_in_shape[::-1], feat_style, patch_size=patch_size)
feat_in_normed = matcher.normalize_patches(matcher.get_patches_for(feat_in))
for i in range(num_steps):
matcher.update_with_patches(feat_in_normed)
r = matcher.get_reconstruction()
content_img_img = load_image(content_image_path)
content_n_channels, content_n_rows, content_n_cols = content_img_img.shape[::-1]
content_img = preprocess_image(content_img_img, content_n_cols, content_n_rows)[0]#.transpose((2,1,0))
style_img = load_image(style_image_path)
style_n_channels, style_n_rows, style_n_cols = content_img_img.shape[::-1]
style_img = preprocess_image(
load_image(style_image_path), style_n_cols, style_n_rows)[0]#.transpose((2,1,0))
pg = make_patch_grid(content_img, patch_size)
result = combine_patches_grid(pg, content_img.shape[::-1])
outimg = deprocess_image(result, contrast_percent=0)
imsave(output_prefix + '_bestre.png', outimg)
# # #
matcher = PatchMatcher((content_n_cols, content_n_rows, content_n_channels), style_img, patch_size=patch_size)
for i in range(num_steps):
start = time.time()
matcher.update(content_img, reverse_propagation=bool(i % 2))
print(matcher.similarity.min(), matcher.similarity.max(), matcher.similarity.mean())
end = time.time()
#print end-start
start = time.time()
result = matcher.get_reconstruction(patches=matcher.target_patches)
print(result.shape)
end = time.time()
print(end-start)
outimg = deprocess_image(result, contrast_percent=0)
# # imsave takes (rows, cols, channels)
imsave(output_prefix + '_best.png', outimg)
|
[
"numpy.clip",
"sklearn.feature_extraction.image.extract_patches_2d",
"numpy.array",
"image_analogy.img_utils.preprocess_image",
"numpy.arange",
"numpy.rank",
"numpy.reshape",
"numpy.where",
"scipy.misc.imsave",
"numpy.asarray",
"image_analogy.img_utils.load_image",
"image_analogy.img_utils.deprocess_image",
"numpy.round",
"numpy.indices",
"numpy.square",
"numpy.transpose",
"time.time",
"numpy.roll",
"sklearn.feature_extraction.image.reconstruct_from_patches_2d",
"numpy.sum",
"numpy.zeros",
"numpy.expand_dims",
"numpy.random.uniform"
] |
[((527, 574), 'sklearn.feature_extraction.image.extract_patches_2d', 'extract_patches_2d', (['x', '(patch_size, patch_size)'], {}), '(x, (patch_size, patch_size))\n', (545, 574), False, 'from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d\n'), ((1228, 1303), 'numpy.reshape', 'np.reshape', (['in_patches', '(num_patches, num_channels, patch_size, patch_size)'], {}), '(in_patches, (num_patches, num_channels, patch_size, patch_size))\n', (1238, 1303), True, 'import numpy as np\n'), ((1352, 1390), 'numpy.transpose', 'np.transpose', (['in_patches', '(0, 2, 3, 1)'], {}), '(in_patches, (0, 2, 3, 1))\n', (1364, 1390), True, 'import numpy as np\n'), ((1431, 1481), 'sklearn.feature_extraction.image.reconstruct_from_patches_2d', 'reconstruct_from_patches_2d', (['in_patches', 'out_shape'], {}), '(in_patches, out_shape)\n', (1458, 1481), False, 'from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d\n'), ((8773, 8790), 'numpy.array', 'np.array', (['a.shape'], {}), '(a.shape)\n', (8781, 8790), True, 'import numpy as np\n'), ((9049, 9081), 'numpy.asarray', 'np.asarray', (['newdims'], {'dtype': 'float'}), '(newdims, dtype=float)\n', (9059, 9081), True, 'import numpy as np\n'), ((11676, 11721), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', 'feat_style_shape'], {}), '(0.0, 1.0, feat_style_shape)\n', (11693, 11721), True, 'import numpy as np\n'), ((11777, 11819), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', 'feat_in_shape'], {}), '(0.0, 1.0, feat_in_shape)\n', (11794, 11819), True, 'import numpy as np\n'), ((12127, 12157), 'image_analogy.img_utils.load_image', 'load_image', (['content_image_path'], {}), '(content_image_path)\n', (12137, 12157), False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((12366, 12394), 'image_analogy.img_utils.load_image', 'load_image', (['style_image_path'], {}), '(style_image_path)\n', (12376, 12394), False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((12724, 12767), 'image_analogy.img_utils.deprocess_image', 'deprocess_image', (['result'], {'contrast_percent': '(0)'}), '(result, contrast_percent=0)\n', (12739, 12767), False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((12772, 12817), 'scipy.misc.imsave', 'imsave', (["(output_prefix + '_bestre.png')", 'outimg'], {}), "(output_prefix + '_bestre.png', outimg)\n", (12778, 12817), False, 'from scipy.misc import imsave\n'), ((13228, 13239), 'time.time', 'time.time', ([], {}), '()\n', (13237, 13239), False, 'import time\n'), ((13346, 13357), 'time.time', 'time.time', ([], {}), '()\n', (13355, 13357), False, 'import time\n'), ((13392, 13435), 'image_analogy.img_utils.deprocess_image', 'deprocess_image', (['result'], {'contrast_percent': '(0)'}), '(result, contrast_percent=0)\n', (13407, 13435), False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((13484, 13527), 'scipy.misc.imsave', 'imsave', (["(output_prefix + '_best.png')", 'outimg'], {}), "(output_prefix + '_best.png', outimg)\n", (13490, 13527), False, 'from scipy.misc import imsave\n'), ((2520, 2594), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(2, self.num_input_rows, self.num_input_cols)'], {}), '(0.0, 1.0, (2, self.num_input_rows, self.num_input_cols))\n', (2537, 2594), True, 'import numpy as np\n'), ((2712, 2758), 'numpy.zeros', 'np.zeros', (['input_shape[:2:-1]'], 
{'dtype': 'np.float32'}), '(input_shape[:2:-1], dtype=np.float32)\n', (2720, 2758), True, 'import numpy as np\n'), ((2908, 2958), 'numpy.array', 'np.array', (['[[[self.min_propagration_row]], [[0.0]]]'], {}), '([[[self.min_propagration_row]], [[0.0]]])\n', (2916, 2958), True, 'import numpy as np\n'), ((2984, 3034), 'numpy.array', 'np.array', (['[[[0.0]], [[self.min_propagration_col]]]'], {}), '([[[0.0]], [[self.min_propagration_col]]])\n', (2992, 3034), True, 'import numpy as np\n'), ((5102, 5157), 'numpy.where', 'np.where', (['(delta_similarity > 0)', 'new_coords', 'self.coords'], {}), '(delta_similarity > 0, new_coords, self.coords)\n', (5110, 5157), True, 'import numpy as np\n'), ((5184, 5247), 'numpy.where', 'np.where', (['(delta_similarity > 0)', 'new_similarity', 'self.similarity'], {}), '(delta_similarity > 0, new_similarity, self.similarity)\n', (5192, 5247), True, 'import numpy as np\n'), ((5438, 5488), 'numpy.where', 'np.where', (['(delta_similarity > 0)', 'coords_a', 'coords_b'], {}), '(delta_similarity > 0, coords_a, coords_b)\n', (5446, 5488), True, 'import numpy as np\n'), ((5515, 5573), 'numpy.where', 'np.where', (['(delta_similarity > 0)', 'similarity_a', 'similarity_b'], {}), '(delta_similarity > 0, similarity_a, similarity_b)\n', (5523, 5573), True, 'import numpy as np\n'), ((5865, 5892), 'numpy.sum', 'np.sum', (['err'], {'axis': '(2, 3, 4)'}), '(err, axis=(2, 3, 4))\n', (5871, 5892), True, 'import numpy as np\n'), ((5997, 6022), 'numpy.clip', 'np.clip', (['coords', '(0.0)', '(1.0)'], {}), '(coords, 0.0, 1.0)\n', (6004, 6022), True, 'import numpy as np\n'), ((12261, 12326), 'image_analogy.img_utils.preprocess_image', 'preprocess_image', (['content_img_img', 'content_n_cols', 'content_n_rows'], {}), '(content_img_img, content_n_cols, content_n_rows)\n', (12277, 12326), False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((12991, 13002), 'time.time', 'time.time', ([], {}), '()\n', (13000, 13002), False, 'import time\n'), ((13179, 13190), 'time.time', 'time.time', ([], {}), '()\n', (13188, 13190), False, 'import time\n'), ((6119, 6146), 'numpy.expand_dims', 'np.expand_dims', (['x.shape', '(-1)'], {}), '(x.shape, -1)\n', (6133, 6146), True, 'import numpy as np\n'), ((12517, 12545), 'image_analogy.img_utils.load_image', 'load_image', (['style_image_path'], {}), '(style_image_path)\n', (12527, 12545), False, 'from image_analogy.img_utils import load_image, preprocess_image, deprocess_image\n'), ((3630, 3648), 'numpy.square', 'np.square', (['patches'], {}), '(patches)\n', (3639, 3648), True, 'import numpy as np\n'), ((6171, 6207), 'numpy.round', 'np.round', (['(coords * (x_shape[:2] - 1))'], {}), '(coords * (x_shape[:2] - 1))\n', (6179, 6207), True, 'import numpy as np\n'), ((9184, 9203), 'numpy.indices', 'np.indices', (['newdims'], {}), '(newdims)\n', (9194, 9203), True, 'import numpy as np\n'), ((9547, 9568), 'numpy.arange', 'np.arange', (['newdims[i]'], {}), '(newdims[i])\n', (9556, 9568), True, 'import numpy as np\n'), ((9732, 9760), 'numpy.arange', 'np.arange', (['i'], {'dtype': 'np.float'}), '(i, dtype=np.float)\n', (9741, 9760), True, 'import numpy as np\n'), ((4026, 4065), 'numpy.roll', 'np.roll', (['self.coords', 'roll_direction', '(1)'], {}), '(self.coords, roll_direction, 1)\n', (4033, 4065), True, 'import numpy as np\n'), ((4217, 4256), 'numpy.roll', 'np.roll', (['self.coords', 'roll_direction', '(2)'], {}), '(self.coords, roll_direction, 2)\n', (4224, 4256), True, 'import numpy as np\n'), ((10607, 10625), 'numpy.rank', 
'np.rank', (['newcoords'], {}), '(newcoords)\n', (10614, 10625), True, 'import numpy as np\n'), ((4695, 4785), 'numpy.random.uniform', 'np.random.uniform', (['(-self.random_max_radius)', 'self.random_max_radius', 'self.coords.shape'], {}), '(-self.random_max_radius, self.random_max_radius, self.\n coords.shape)\n', (4712, 4785), True, 'import numpy as np\n'), ((9335, 9352), 'numpy.array', 'np.array', (['dimlist'], {}), '(dimlist)\n', (9343, 9352), True, 'import numpy as np\n'), ((10864, 10879), 'numpy.asarray', 'np.asarray', (['old'], {}), '(old)\n', (10874, 10879), True, 'import numpy as np\n')]
|
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
from scipy.spatial.distance import pdist, squareform
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family' : 'sans-serif',
'weight' : 'bold',
'size' : 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.fig = None
self.line1 = None
self.filter_len = int(config['filter_length'])
self.nx_system = 4
self.n_nodes = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max # 0.5 * self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
self.pooling = []
if config.getboolean('sum_pooling'):
self.pooling.append(np.nansum)
if config.getboolean('min_pooling'):
self.pooling.append(np.nanmin)
if config.getboolean('max_pooling'):
self.pooling.append(np.nanmax)
self.n_pools = len(self.pooling)
# number of features and outputs
self.n_features = int(config['N_features'])
self.nx = int(self.n_features / self.n_pools / self.filter_len)
self.nu = int(config['N_outputs']) # outputs
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x = np.zeros((self.n_nodes, self.nx_system))
self.u = np.zeros((self.n_nodes, self.nu))
self.mean_vel = np.zeros((self.n_nodes, self.nu))
# TODO
self.max_accel = 40
self.max_z = 200
# self.b = np.ones((self.n_nodes,1))
# self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(self.n_nodes, 2), dtype=np.float32 )
# self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(
# self.n_nodes, self.nx * self.filter_len * self.n_pools) , dtype=np.float32)
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2,) , dtype=np.float32 )
self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(self.n_features, ), dtype=np.float32)
self.seed()
def render(self, mode='human'):
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
x = self.x
x_ = np.zeros((self.n_nodes, self.nx_system))
#u = np.vstack((np.zeros((self.n_leaders, 2)), u))
# x position
x_[:, 0] = x[:, 0] + x[:, 2] * self.dt
# y position
x_[:, 1] = x[:, 1] + x[:, 3] * self.dt
# x velocity
x_[:, 2] = x[:, 2] + 0.1 * u[:, 0] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# y velocity
x_[:, 3] = x[:, 3] + 0.1 * u[:, 1] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,))
# TODO - check the 0.1
self.x = x_
self.x_agg = self.aggregate(self.x, self.x_agg)
self.u = u
return self._get_obs(), -self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
return np.sum(np.var(self.x[:, 2:4], axis=0)) #+ np.sum(np.square(self.u)) * 0.00001
#return np.sum(np.square(self.x[:,2:4] - self.mean_vel))
def _get_obs(self):
reshaped = self.x_agg.reshape((self.n_nodes, self.n_features))
clipped = np.clip(reshaped, a_min=-self.max_z, a_max=self.max_z)
return clipped #[self.n_leaders:, :]
def reset(self):
x = np.zeros((self.n_nodes, self.nx_system))
degree = 0
min_dist = 0
while degree < 2 or min_dist < 0.1: # < 0.25: # 0.25: #0.5: #min_dist < 0.25:
# randomly initialize the state of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_nodes,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_nodes,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[1]
# compute distances between agents
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
# no self loops
a_net = a_net + 2 * self.comm_radius * np.eye(self.n_nodes)
# compute minimum distance between agents and degree of network
min_dist = np.min(np.min(a_net))
a_net = a_net < self.comm_radius
degree = np.min(np.sum(a_net.astype(int), axis=1))
self.mean_vel = np.mean(x[:,2:4],axis=0)
self.x = x
self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools))
self.x_agg = self.aggregate(self.x, self.x_agg)
return self._get_obs()
# def render(self, mode='human'):
# pass
def close(self):
pass
def aggregate(self, xt, x_agg):
"""
        Perform the aggregation operation for all possible pooling operations using helper functions get_pool and get_comms
Args:
x_agg (): Last time step's aggregated info
xt (): Current state of all agents
Returns:
Aggregated state values
"""
x_features = self.get_x_features(xt)
a_net = self.get_connectivity(xt)
for k in range(0, self.n_pools):
comm_data = self.get_comms(np.dstack((x_features, self.get_features(x_agg[:, :, k]))), a_net)
x_agg[:, :, k] = self.get_pool(comm_data, self.pooling[k])
return x_agg
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
x (): current states of all agents
Returns: adjacency matrix of network
"""
x_t_loc = x[:, 0:2] # x,y location determines connectivity
a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean'))
a_net = (a_net < self.comm_radius).astype(float)
np.fill_diagonal(a_net, 0)
return a_net
def get_x_features(self, xt): # TODO
"""
Compute the non-linear features necessary for implementing Turner 2003
Args:
xt (): current state of all agents
Returns: matrix of features for each agent
"""
diff = xt.reshape((self.n_nodes, 1, self.nx_system)) - xt.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(diff[:, :, 0], diff[:, :, 0]) + np.multiply(diff[:, :, 1], diff[:, :, 1]) + np.eye(
self.n_nodes)
return np.dstack((diff[:, :, 2], np.divide(diff[:, :, 0], np.multiply(r2, r2)), np.divide(diff[:, :, 0], r2),
diff[:, :, 3], np.divide(diff[:, :, 1], np.multiply(r2, r2)), np.divide(diff[:, :, 1], r2)))
def get_features(self, agg):
"""
Matrix of
Args:
agg (): the aggregated matrix from the last time step
Returns: matrix of aggregated features from all nodes at current time
"""
return np.tile(agg[:, :-self.nx].reshape((self.n_nodes, 1, -1)), (1, self.n_nodes, 1)) # TODO check indexing
def get_comms(self, mat, a_net):
"""
Enforces that agents who are not connected in the network cannot observe each others' states
Args:
mat (): matrix of state information for the whole graph
a_net (): adjacency matrix for flock network (weighted networks unsupported for now)
Returns:
mat (): sparse matrix with NaN values where agents can't communicate
"""
a_net[a_net == 0] = np.nan
return mat * a_net.reshape(self.n_nodes, self.n_nodes, 1)
def get_pool(self, mat, func):
"""
Perform pooling operations on the matrix of state information. The replacement of values with NaNs for agents who
can't communicate must already be enforced.
Args:
mat (): matrix of state information
func (): pooling function (np.nansum(), np.nanmin() or np.nanmax()). Must ignore NaNs.
Returns:
information pooled from neighbors for each agent
"""
return func(mat, axis=1).reshape((self.n_nodes, self.n_features)) # TODO check this axis = 1
def controller(self):
"""
The controller for flocking from Turner 2003.
Args:
x (): the current state
Returns: the optimal action
"""
x = self.x
s_diff = x.reshape((self.n_nodes, 1, self.nx_system)) - x.reshape((1, self.n_nodes, self.nx_system))
r2 = np.multiply(s_diff[:, :, 0], s_diff[:, :, 0]) + np.multiply(s_diff[:, :, 1], s_diff[:, :, 1]) + np.eye(
self.n_nodes)
p = np.dstack((s_diff, self.potential_grad(s_diff[:, :, 0], r2), self.potential_grad(s_diff[:, :, 1], r2)))
p_sum = np.nansum(p, axis=1).reshape((self.n_nodes, self.nx_system + 2))
return np.hstack(((- p_sum[:, 4] - p_sum[:, 2]).reshape((-1, 1)), (- p_sum[:, 3] - p_sum[:, 5]).reshape(-1, 1)))
def potential_grad(self, pos_diff, r2):
"""
Computes the gradient of the potential function for flocking proposed in Turner 2003.
Args:
pos_diff (): difference in a component of position among all agents
r2 (): distance squared between agents
Returns: corresponding component of the gradient of the potential
"""
grad = -2.0 * np.divide(pos_diff, np.multiply(r2, r2)) + 2 * np.divide(pos_diff, r2)
grad[r2 > self.comm_radius] = 0
return grad
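# Illustrative rollout (not part of the original environment): drive the env with its own
# hand-crafted controller. Assumes params_flock.cfg is available next to this module, as
# required by __init__ above.
def _example_rollout(num_steps=10):
    env = FlockingEnv()
    obs = env.reset()
    total_reward = 0.0
    for _ in range(num_steps):
        action = env.controller()              # (n_nodes, 2) accelerations
        obs, reward, done, info = env.step(action)
        total_reward += reward
    return total_reward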
|
[
"numpy.clip",
"configparser.ConfigParser",
"numpy.sin",
"numpy.divide",
"gym.utils.seeding.np_random",
"numpy.mean",
"numpy.multiply",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.random.normal",
"numpy.eye",
"matplotlib.pyplot.gca",
"numpy.fill_diagonal",
"os.path.dirname",
"numpy.cos",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.nansum",
"gym.spaces.Box",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.uniform",
"numpy.var"
] |
[((488, 515), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (513, 515), False, 'import configparser\n'), ((1655, 1720), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx * self.filter_len, self.n_pools)'], {}), '((self.n_nodes, self.nx * self.filter_len, self.n_pools))\n', (1663, 1720), True, 'import numpy as np\n'), ((1738, 1778), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx_system)'], {}), '((self.n_nodes, self.nx_system))\n', (1746, 1778), True, 'import numpy as np\n'), ((1796, 1829), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nu)'], {}), '((self.n_nodes, self.nu))\n', (1804, 1829), True, 'import numpy as np\n'), ((1854, 1887), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nu)'], {}), '((self.n_nodes, self.nu))\n', (1862, 1887), True, 'import numpy as np\n'), ((2336, 2423), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-self.max_accel)', 'high': 'self.max_accel', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-self.max_accel, high=self.max_accel, shape=(2,), dtype=np.\n float32)\n', (2346, 2423), False, 'from gym import spaces, error, utils\n'), ((2454, 2546), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-self.max_z)', 'high': 'self.max_z', 'shape': '(self.n_features,)', 'dtype': 'np.float32'}), '(low=-self.max_z, high=self.max_z, shape=(self.n_features,),\n dtype=np.float32)\n', (2464, 2546), False, 'from gym import spaces, error, utils\n'), ((3434, 3457), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (3451, 3457), False, 'from gym.utils import seeding\n'), ((3536, 3576), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx_system)'], {}), '((self.n_nodes, self.nx_system))\n', (3544, 3576), True, 'import numpy as np\n'), ((4556, 4610), 'numpy.clip', 'np.clip', (['reshaped'], {'a_min': '(-self.max_z)', 'a_max': 'self.max_z'}), '(reshaped, a_min=-self.max_z, a_max=self.max_z)\n', (4563, 4610), True, 'import numpy as np\n'), ((4690, 4730), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx_system)'], {}), '((self.n_nodes, self.nx_system))\n', (4698, 4730), True, 'import numpy as np\n'), ((6098, 6163), 'numpy.zeros', 'np.zeros', (['(self.n_nodes, self.nx * self.filter_len, self.n_pools)'], {}), '((self.n_nodes, self.nx * self.filter_len, self.n_pools))\n', (6106, 6163), True, 'import numpy as np\n'), ((7520, 7546), 'numpy.fill_diagonal', 'np.fill_diagonal', (['a_net', '(0)'], {}), '(a_net, 0)\n', (7536, 7546), True, 'import numpy as np\n'), ((427, 449), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (439, 449), False, 'from os import path\n'), ((2644, 2653), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2651, 2653), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2684), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2682, 2684), True, 'import matplotlib.pyplot as plt\n'), ((2885, 2930), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.0 * self.r_max)', '(1.0 * self.r_max)'], {}), '(-1.0 * self.r_max, 1.0 * self.r_max)\n', (2893, 2930), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2988), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.0 * self.r_max)', '(1.0 * self.r_max)'], {}), '(-1.0 * self.r_max, 1.0 * self.r_max)\n', (2951, 2988), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3010), 'matplotlib.pyplot.gca', 'gca', ([], {}), '()\n', (3008, 3010), False, 'from matplotlib.pyplot import gca\n'), ((3127, 3154), 'matplotlib.pyplot.title', 'plt.title', (['"""GNN Controller"""'], {}), "('GNN Controller')\n", (3136, 3154), True, 'import 
matplotlib.pyplot as plt\n'), ((3850, 3900), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.std_dev', '(self.n_nodes,)'], {}), '(0, self.std_dev, (self.n_nodes,))\n', (3866, 3900), True, 'import numpy as np\n'), ((3977, 4027), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self.std_dev', '(self.n_nodes,)'], {}), '(0, self.std_dev, (self.n_nodes,))\n', (3993, 4027), True, 'import numpy as np\n'), ((4306, 4336), 'numpy.var', 'np.var', (['self.x[:, 2:4]'], {'axis': '(0)'}), '(self.x[:, 2:4], axis=0)\n', (4312, 4336), True, 'import numpy as np\n'), ((5188, 5252), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_bias)', 'high': 'self.v_bias', 'size': '(2,)'}), '(low=-self.v_bias, high=self.v_bias, size=(2,))\n', (5205, 5252), True, 'import numpy as np\n'), ((6032, 6058), 'numpy.mean', 'np.mean', (['x[:, 2:4]'], {'axis': '(0)'}), '(x[:, 2:4], axis=0)\n', (6039, 6058), True, 'import numpy as np\n'), ((8038, 8058), 'numpy.eye', 'np.eye', (['self.n_nodes'], {}), '(self.n_nodes)\n', (8044, 8058), True, 'import numpy as np\n'), ((10212, 10232), 'numpy.eye', 'np.eye', (['self.n_nodes'], {}), '(self.n_nodes)\n', (10218, 10232), True, 'import numpy as np\n'), ((4948, 5002), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.r_max'], {'size': '(self.n_nodes,)'}), '(0, self.r_max, size=(self.n_nodes,))\n', (4965, 5002), True, 'import numpy as np\n'), ((5032, 5077), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {'size': '(self.n_nodes,)'}), '(0, 2, size=(self.n_nodes,))\n', (5049, 5077), True, 'import numpy as np\n'), ((5109, 5122), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5115, 5122), True, 'import numpy as np\n'), ((5154, 5167), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5160, 5167), True, 'import numpy as np\n'), ((5275, 5348), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_nodes,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_nodes,))\n', (5292, 5348), True, 'import numpy as np\n'), ((5381, 5454), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_nodes,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_nodes,))\n', (5398, 5454), True, 'import numpy as np\n'), ((5880, 5893), 'numpy.min', 'np.min', (['a_net'], {}), '(a_net)\n', (5886, 5893), True, 'import numpy as np\n'), ((7950, 7991), 'numpy.multiply', 'np.multiply', (['diff[:, :, 0]', 'diff[:, :, 0]'], {}), '(diff[:, :, 0], diff[:, :, 0])\n', (7961, 7991), True, 'import numpy as np\n'), ((7994, 8035), 'numpy.multiply', 'np.multiply', (['diff[:, :, 1]', 'diff[:, :, 1]'], {}), '(diff[:, :, 1], diff[:, :, 1])\n', (8005, 8035), True, 'import numpy as np\n'), ((8160, 8188), 'numpy.divide', 'np.divide', (['diff[:, :, 0]', 'r2'], {}), '(diff[:, :, 0], r2)\n', (8169, 8188), True, 'import numpy as np\n'), ((8278, 8306), 'numpy.divide', 'np.divide', (['diff[:, :, 1]', 'r2'], {}), '(diff[:, :, 1], r2)\n', (8287, 8306), True, 'import numpy as np\n'), ((10116, 10161), 'numpy.multiply', 'np.multiply', (['s_diff[:, :, 0]', 's_diff[:, :, 0]'], {}), '(s_diff[:, :, 0], s_diff[:, :, 0])\n', (10127, 10161), True, 'import numpy as np\n'), ((10164, 10209), 'numpy.multiply', 'np.multiply', (['s_diff[:, :, 1]', 's_diff[:, :, 1]'], {}), '(s_diff[:, :, 1], s_diff[:, :, 1])\n', (10175, 10209), True, 'import numpy as np\n'), ((10378, 10398), 'numpy.nansum', 'np.nansum', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (10387, 10398), True, 'import numpy as 
np\n'), ((11017, 11040), 'numpy.divide', 'np.divide', (['pos_diff', 'r2'], {}), '(pos_diff, r2)\n', (11026, 11040), True, 'import numpy as np\n'), ((5752, 5772), 'numpy.eye', 'np.eye', (['self.n_nodes'], {}), '(self.n_nodes)\n', (5758, 5772), True, 'import numpy as np\n'), ((8138, 8157), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (8149, 8157), True, 'import numpy as np\n'), ((8256, 8275), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (8267, 8275), True, 'import numpy as np\n'), ((10990, 11009), 'numpy.multiply', 'np.multiply', (['r2', 'r2'], {}), '(r2, r2)\n', (11001, 11009), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import os
import logging
# suppress warnings
import warnings
warnings.filterwarnings('ignore')
from tqdm.autonotebook import tqdm
# register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
tqdm.pandas()
# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options
# adjust pandas display
pd.options.display.max_columns = 30 # default 20
pd.options.display.max_rows = 200 # default 60
pd.options.display.float_format = '{:.2f}'.format
# pd.options.display.precision = 2
pd.options.display.max_colwidth = 200 # default 50; None = all
# Number of array items in summary at beginning and end of each dimension
# np.set_printoptions(edgeitems=3) # default 3
np.set_printoptions(suppress=True) # no scientific notation for small numbers
# IPython (Jupyter) setting:
# Print out every value instead of just "last_expr" (default)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import matplotlib as mpl
from matplotlib import pyplot as plt
# defaults: mpl.rcParamsDefault
rc_params = {'figure.figsize': (8, 4),
'axes.labelsize': 'large',
'axes.titlesize': 'large',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'savefig.dpi': 100,
'figure.dpi': 100 }
# adjust matplotlib defaults
mpl.rcParams.update(rc_params)
import seaborn as sns
sns.set_style("darkgrid")
# sns.set()
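# --- Illustrative usage (editor's addition, not part of the original setup snippet) ---
# A minimal sketch of what the configuration above enables: `tqdm.pandas()` registers
# `progress_apply` on DataFrames/Series, so long apply calls report progress, and the
# pandas display options control how the result is rendered. `demo_df` and the squaring
# lambda are hypothetical, chosen only for illustration.
demo_df = pd.DataFrame({'x': np.arange(10_000)})
demo_df['x_squared'] = demo_df['x'].progress_apply(lambda v: v ** 2)
demo_df.head()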
|
[
"tqdm.autonotebook.tqdm.pandas",
"matplotlib.rcParams.update",
"seaborn.set_style",
"warnings.filterwarnings",
"numpy.set_printoptions"
] |
[((103, 136), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (126, 136), False, 'import warnings\n'), ((252, 265), 'tqdm.autonotebook.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (263, 265), False, 'from tqdm.autonotebook import tqdm\n'), ((746, 780), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (765, 780), True, 'import numpy as np\n'), ((1429, 1459), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['rc_params'], {}), '(rc_params)\n', (1448, 1459), True, 'import matplotlib as mpl\n'), ((1483, 1508), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (1496, 1508), True, 'import seaborn as sns\n')]
|
import sys
import soundcard
import numpy
import pytest
ones = numpy.ones(1024)
signal = numpy.concatenate([[ones], [-ones]]).T
def test_speakers():
for speaker in soundcard.all_speakers():
assert isinstance(speaker.name, str)
assert hasattr(speaker, 'id')
assert isinstance(speaker.channels, int)
assert speaker.channels > 0
def test_microphones():
for microphone in soundcard.all_microphones():
assert isinstance(microphone.name, str)
assert hasattr(microphone, 'id')
assert isinstance(microphone.channels, int)
assert microphone.channels > 0
def test_default_playback():
soundcard.default_speaker().play(signal, 44100, channels=2)
def test_default_record():
recording = soundcard.default_microphone().record(1024, 44100)
    assert len(recording) == 1024
def test_default_blockless_record():
recording = soundcard.default_microphone().record(None, 44100)
@pytest.fixture
def loopback_speaker():
import sys
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_speaker('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_speaker('Soundflower64')
elif sys.platform == 'linux':
# pacmd load-module module-null-sink channels=6 rate=48000
return soundcard.get_speaker('Null')
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_player(loopback_speaker):
with loopback_speaker.player(48000, channels=2, blocksize=512) as player:
yield player
@pytest.fixture
def loopback_microphone():
if sys.platform == 'win32':
# must install https://www.vb-audio.com/Cable/index.htm
return soundcard.get_microphone('Cable')
elif sys.platform == 'darwin':
# must install soundflower
return soundcard.get_microphone('Soundflower64')
elif sys.platform == 'linux':
return soundcard.get_microphone('Null', include_loopback=True)
else:
raise RuntimeError('Unknown platform {}'.format(sys.platform))
@pytest.fixture
def loopback_recorder(loopback_microphone):
with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder:
yield recorder
def test_loopback_playback(loopback_player, loopback_recorder):
loopback_player.play(signal)
recording = loopback_recorder.record(1024*10)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as loopback_player:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert right.mean() > 0
assert left.mean() < 0
assert (right > 0.5).sum() == len(signal)
assert (left < -0.5).sum() == len(signal)
def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder):
with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player:
loopback_player.play(signal[:,0])
recording = loopback_recorder.record(1024*12)
assert recording.shape[1] == 2
left, right = recording.T
assert left.mean() > 0
if sys.platform == 'linux':
# unmapped channels on linux are filled with the mean of other channels
assert right.mean() < left.mean()
else:
assert abs(right.mean()) < 0.01 # something like zero
assert (left > 0.5).sum() == len(signal)
def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone):
with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 1 or recording.shape[1] == 1
assert recording.mean() > 0
assert (recording > 0.5).sum() == len(signal)
def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone):
with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player:
with loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder:
loopback_player.play(signal)
recording = loopback_recorder.record(1024*12)
assert len(recording.shape) == 2
left, right = recording.T
assert left.mean() > 0
assert right.mean() < 0
assert (left > 0.5).sum() == len(signal)
assert (right < -0.5).sum() == len(signal)
|
[
"soundcard.get_microphone",
"soundcard.all_speakers",
"soundcard.all_microphones",
"numpy.ones",
"soundcard.default_microphone",
"soundcard.default_speaker",
"soundcard.get_speaker",
"numpy.concatenate"
] |
[((63, 79), 'numpy.ones', 'numpy.ones', (['(1024)'], {}), '(1024)\n', (73, 79), False, 'import numpy\n'), ((89, 125), 'numpy.concatenate', 'numpy.concatenate', (['[[ones], [-ones]]'], {}), '([[ones], [-ones]])\n', (106, 125), False, 'import numpy\n'), ((169, 193), 'soundcard.all_speakers', 'soundcard.all_speakers', ([], {}), '()\n', (191, 193), False, 'import soundcard\n'), ((410, 437), 'soundcard.all_microphones', 'soundcard.all_microphones', ([], {}), '()\n', (435, 437), False, 'import soundcard\n'), ((1114, 1144), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Cable"""'], {}), "('Cable')\n", (1135, 1144), False, 'import soundcard\n'), ((1806, 1839), 'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Cable"""'], {}), "('Cable')\n", (1830, 1839), False, 'import soundcard\n'), ((653, 680), 'soundcard.default_speaker', 'soundcard.default_speaker', ([], {}), '()\n', (678, 680), False, 'import soundcard\n'), ((757, 787), 'soundcard.default_microphone', 'soundcard.default_microphone', ([], {}), '()\n', (785, 787), False, 'import soundcard\n'), ((896, 926), 'soundcard.default_microphone', 'soundcard.default_microphone', ([], {}), '()\n', (924, 926), False, 'import soundcard\n'), ((1230, 1268), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Soundflower64"""'], {}), "('Soundflower64')\n", (1251, 1268), False, 'import soundcard\n'), ((1925, 1966), 'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Soundflower64"""'], {}), "('Soundflower64')\n", (1949, 1966), False, 'import soundcard\n'), ((1385, 1414), 'soundcard.get_speaker', 'soundcard.get_speaker', (['"""Null"""'], {}), "('Null')\n", (1406, 1414), False, 'import soundcard\n'), ((2016, 2071), 'soundcard.get_microphone', 'soundcard.get_microphone', (['"""Null"""'], {'include_loopback': '(True)'}), "('Null', include_loopback=True)\n", (2040, 2071), False, 'import soundcard\n')]
|
import numpy as np
import h5py
import os
from devito.logger import info
from devito import TimeFunction, clear_cache
from examples.seismic.acoustic import AcousticWaveSolver
from examples.seismic import Model, RickerSource, Receiver, TimeAxis
from math import floor
from scipy.interpolate import griddata
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data_path', dest='data_path', type=str, default='/home/ec2-user/data', help='raw data path')
parser.add_argument('--save_dir', dest='save_dir', type=str, default='/home/ec2-user/data', help='saving directory')
args = parser.parse_args()
data_path = args.data_path
save_dir = args.save_dir
origin = (0., 0.)
spacing=(7.5, 7.5)
tn=1100.
nbpml=40
# Define your vp in km/sec (x, z)
vp = np.fromfile(os.path.join(data_path, 'vp_marmousi_bi'),
dtype='float32', sep="")
vp = np.reshape(vp, (1601, 401))
# vp = vp[400:1401, 0:401]
shape=[401, 301]
values = np.zeros([vp.shape[0]*vp.shape[1], ])
points = np.zeros([vp.shape[0]*vp.shape[1], 2])
k = 0
for indx in range(0, vp.shape[0]):
for indy in range(0, vp.shape[1]):
values[k] = vp[indx, indy]
points[k, 0] = indx
points[k, 1] = indy
k = k + 1
# nx, ny = shape[0], shape[1]
X, Y = np.meshgrid(np.array(np.linspace(1000, 1287, shape[0])), np.array(np.linspace(120, 232, shape[1])))
int_vp = griddata(points, values, (X, Y), method='cubic')
int_vp = np.transpose(int_vp)
vp = int_vp
# create model
model = Model(origin, spacing, shape, 2, vp, nbpml=nbpml)
# Derive timestepping from model spacing
dt = model.critical_dt
t0 = 0.0
nt = int(1 + (tn-t0) / dt) # Number of timesteps
time = np.linspace(t0, tn, nt) # Discretized time axis
datasize0 = int(np.shape(range(0, shape[0], 4))[0])
datasize1 = int(np.shape(range(100, nt, 20))[0])
datasize = datasize0*datasize1
strTrainA = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5')
strTrainB = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5')
dataset_train = "train_dataset"
file_trainA = h5py.File(strTrainA, 'w-')
datasetA = file_trainA.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
file_trainB = h5py.File(strTrainB, 'w-')
datasetB = file_trainB.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml))
num_rec = 601
rec_samp = np.linspace(0., model.domain_size[0], num=num_rec)
rec_samp = rec_samp[1]-rec_samp[0]
time_range = TimeAxis(start=t0, stop=tn, step=dt)
src = RickerSource(name='src', grid=model.grid, f0=0.025, time_range=time_range, space_order=1, npoint=1)
src.coordinates.data[0, :] = np.array([1*spacing[0], 2*spacing[1]]).astype(np.float32)
rec = Receiver(name='rec', grid=model.grid, time_range=time_range, npoint=num_rec)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
solverbad = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
space_order=2, freesurface=False)
solvergood = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True,
space_order=20, freesurface=False)
ulocgood = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=20, save=nt)
ulocbad = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=2, save=nt)
kk = 0
for xsrc in range(0, shape[0], 4):
clear_cache()
ulocgood.data.fill(0.)
ulocbad.data.fill(0.)
src.coordinates.data[0, :] = np.array([xsrc*spacing[0], 2*spacing[1]]).astype(np.float32)
rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec)
rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
_, ulocgood, _ = solvergood.forward(m=model.m, src=src, time=nt-1, save=True)
_, ulocbad, _ = solverbad.forward(m=model.m, src=src, time=nt-1, save=True)
datasetA[kk:(kk+datasize1), :, :] = np.array(ulocgood.data[range(100, nt, 20), :, :])
datasetB[kk:(kk+datasize1), :, :] = np.array(ulocbad.data[range(100, nt, 20), :, :])
kk = kk + datasize1
file_trainA.close()
file_trainB.close()
|
[
"examples.seismic.TimeAxis",
"numpy.reshape",
"argparse.ArgumentParser",
"scipy.interpolate.griddata",
"devito.TimeFunction",
"os.path.join",
"h5py.File",
"examples.seismic.RickerSource",
"numpy.zeros",
"examples.seismic.Model",
"numpy.linspace",
"examples.seismic.Receiver",
"devito.clear_cache",
"numpy.array",
"examples.seismic.acoustic.AcousticWaveSolver",
"numpy.transpose"
] |
[((331, 370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (354, 370), False, 'import argparse\n'), ((877, 904), 'numpy.reshape', 'np.reshape', (['vp', '(1601, 401)'], {}), '(vp, (1601, 401))\n', (887, 904), True, 'import numpy as np\n'), ((959, 996), 'numpy.zeros', 'np.zeros', (['[vp.shape[0] * vp.shape[1]]'], {}), '([vp.shape[0] * vp.shape[1]])\n', (967, 996), True, 'import numpy as np\n'), ((1006, 1046), 'numpy.zeros', 'np.zeros', (['[vp.shape[0] * vp.shape[1], 2]'], {}), '([vp.shape[0] * vp.shape[1], 2])\n', (1014, 1046), True, 'import numpy as np\n'), ((1385, 1433), 'scipy.interpolate.griddata', 'griddata', (['points', 'values', '(X, Y)'], {'method': '"""cubic"""'}), "(points, values, (X, Y), method='cubic')\n", (1393, 1433), False, 'from scipy.interpolate import griddata\n'), ((1443, 1463), 'numpy.transpose', 'np.transpose', (['int_vp'], {}), '(int_vp)\n', (1455, 1463), True, 'import numpy as np\n'), ((1500, 1549), 'examples.seismic.Model', 'Model', (['origin', 'spacing', 'shape', '(2)', 'vp'], {'nbpml': 'nbpml'}), '(origin, spacing, shape, 2, vp, nbpml=nbpml)\n', (1505, 1549), False, 'from examples.seismic import Model, RickerSource, Receiver, TimeAxis\n'), ((1680, 1703), 'numpy.linspace', 'np.linspace', (['t0', 'tn', 'nt'], {}), '(t0, tn, nt)\n', (1691, 1703), True, 'import numpy as np\n'), ((1877, 1979), 'os.path.join', 'os.path.join', (['save_dir', '"""Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5"""'], {}), "(save_dir,\n 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5')\n", (1889, 1979), False, 'import os\n'), ((1988, 2090), 'os.path.join', 'os.path.join', (['save_dir', '"""Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5"""'], {}), "(save_dir,\n 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5')\n", (2000, 2090), False, 'import os\n'), ((2134, 2160), 'h5py.File', 'h5py.File', (['strTrainA', '"""w-"""'], {}), "(strTrainA, 'w-')\n", (2143, 2160), False, 'import h5py\n'), ((2277, 2303), 'h5py.File', 'h5py.File', (['strTrainB', '"""w-"""'], {}), "(strTrainB, 'w-')\n", (2286, 2303), False, 'import h5py\n'), ((2431, 2482), 'numpy.linspace', 'np.linspace', (['(0.0)', 'model.domain_size[0]'], {'num': 'num_rec'}), '(0.0, model.domain_size[0], num=num_rec)\n', (2442, 2482), True, 'import numpy as np\n'), ((2533, 2569), 'examples.seismic.TimeAxis', 'TimeAxis', ([], {'start': 't0', 'stop': 'tn', 'step': 'dt'}), '(start=t0, stop=tn, step=dt)\n', (2541, 2569), False, 'from examples.seismic import Model, RickerSource, Receiver, TimeAxis\n'), ((2576, 2679), 'examples.seismic.RickerSource', 'RickerSource', ([], {'name': '"""src"""', 'grid': 'model.grid', 'f0': '(0.025)', 'time_range': 'time_range', 'space_order': '(1)', 'npoint': '(1)'}), "(name='src', grid=model.grid, f0=0.025, time_range=time_range,\n space_order=1, npoint=1)\n", (2588, 2679), False, 'from examples.seismic import Model, RickerSource, Receiver, TimeAxis\n'), ((2770, 2846), 'examples.seismic.Receiver', 'Receiver', ([], {'name': '"""rec"""', 'grid': 'model.grid', 'time_range': 'time_range', 'npoint': 'num_rec'}), "(name='rec', grid=model.grid, time_range=time_range, npoint=num_rec)\n", (2778, 2846), False, 'from examples.seismic import Model, RickerSource, Receiver, TimeAxis\n'), ((2876, 2927), 'numpy.linspace', 'np.linspace', (['(0.0)', 'model.domain_size[0]'], {'num': 'num_rec'}), '(0.0, model.domain_size[0], num=num_rec)\n', (2887, 2927), True, 'import numpy as 
np\n'), ((2998, 3112), 'examples.seismic.acoustic.AcousticWaveSolver', 'AcousticWaveSolver', (['model'], {'source': 'src', 'receiver': 'rec', 'kernel': '"""OT2"""', 'isic': '(True)', 'space_order': '(2)', 'freesurface': '(False)'}), "(model, source=src, receiver=rec, kernel='OT2', isic=True,\n space_order=2, freesurface=False)\n", (3016, 3112), False, 'from examples.seismic.acoustic import AcousticWaveSolver\n'), ((3130, 3245), 'examples.seismic.acoustic.AcousticWaveSolver', 'AcousticWaveSolver', (['model'], {'source': 'src', 'receiver': 'rec', 'kernel': '"""OT2"""', 'isic': '(True)', 'space_order': '(20)', 'freesurface': '(False)'}), "(model, source=src, receiver=rec, kernel='OT2', isic=True,\n space_order=20, freesurface=False)\n", (3148, 3245), False, 'from examples.seismic.acoustic import AcousticWaveSolver\n'), ((3262, 3340), 'devito.TimeFunction', 'TimeFunction', ([], {'name': '"""u"""', 'grid': 'model.grid', 'time_order': '(2)', 'space_order': '(20)', 'save': 'nt'}), "(name='u', grid=model.grid, time_order=2, space_order=20, save=nt)\n", (3274, 3340), False, 'from devito import TimeFunction, clear_cache\n'), ((3351, 3428), 'devito.TimeFunction', 'TimeFunction', ([], {'name': '"""u"""', 'grid': 'model.grid', 'time_order': '(2)', 'space_order': '(2)', 'save': 'nt'}), "(name='u', grid=model.grid, time_order=2, space_order=2, save=nt)\n", (3363, 3428), False, 'from devito import TimeFunction, clear_cache\n'), ((792, 833), 'os.path.join', 'os.path.join', (['data_path', '"""vp_marmousi_bi"""'], {}), "(data_path, 'vp_marmousi_bi')\n", (804, 833), False, 'import os\n'), ((3478, 3491), 'devito.clear_cache', 'clear_cache', ([], {}), '()\n', (3489, 3491), False, 'from devito import TimeFunction, clear_cache\n'), ((3674, 3725), 'numpy.linspace', 'np.linspace', (['(0.0)', 'model.domain_size[0]'], {'num': 'num_rec'}), '(0.0, model.domain_size[0], num=num_rec)\n', (3685, 3725), True, 'import numpy as np\n'), ((1296, 1329), 'numpy.linspace', 'np.linspace', (['(1000)', '(1287)', 'shape[0]'], {}), '(1000, 1287, shape[0])\n', (1307, 1329), True, 'import numpy as np\n'), ((1341, 1372), 'numpy.linspace', 'np.linspace', (['(120)', '(232)', 'shape[1]'], {}), '(120, 232, shape[1])\n', (1352, 1372), True, 'import numpy as np\n'), ((2705, 2747), 'numpy.array', 'np.array', (['[1 * spacing[0], 2 * spacing[1]]'], {}), '([1 * spacing[0], 2 * spacing[1]])\n', (2713, 2747), True, 'import numpy as np\n'), ((3580, 3625), 'numpy.array', 'np.array', (['[xsrc * spacing[0], 2 * spacing[1]]'], {}), '([xsrc * spacing[0], 2 * spacing[1]])\n', (3588, 3625), True, 'import numpy as np\n')]
|
"""
Functions for reading Magritek Spinsolve binary (dx/1d) files and
parameter (acqu.par/proc.par) files.
"""
import os
from warnings import warn
import numpy as np
from . import fileiobase
from . import jcampdx
__developer_info__ = """
Spinsolve is the software used on the Magritek benchtop NMR devices.
A spectrum is saved in a folder with several files. The spectral data is
stored in these files: 'data.1d' (FID), 'spectrum.1d' (Fourier transformed)
and 'spectrum_processed.1d' (FT + processed by spinsolve)
Optional spectral data (System->Prefs->Setup->Global data storage):
'nmr_fid.dx' (FID stored in `JCAMP-DX standard <http://www.jcamp-dx.org/>`),
'spectrum.csv' and 'spectrum_processed.csv' (FT + processed by Spinsolve with ppm for each
point and intensity delimited by ';')
Other files:
'acqu.par' - all parameters that are used for acquisition
'Protocol.par' - text file used to reload data back into the Spinsolve software
'processing.script' - text file to transfer Spinsolve software protocol settings
into MNOVA
The Spinsolve Expert software has a slightly different output:
[Needs to be double checked as I do not have access to this software -LCageman]
- Output into JCAMP-DX is not possible
- 'spectrum_processed.1d' is not generated
- (new) 'fid.1d' - seems to be the same as 'data.1d'
- (new) 'proc.par' - contains processing parameters in the same style as 'acqu.par'
- (new) .pt1 files - seem to be plot files specific for the expert software, cannot
be read by NMRglue
"""
def read(dir='.', specfile=None, acqupar="acqu.par", procpar="proc.par"):
"""
Reads spinsolve files from a directory
When no spectrum filename is given (specfile), the following list is tried, in
that specific order
["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
To use the resolution enhanced spectrum use the './Enhanced' folder as input.
Note that spectrum.1d and spectrum_processed.1d contain only data in the
frequency domain, so no Fourier transformation is needed. Also, use
dic["spectrum"]["xaxis"] to plot the x-axis
Parameters
----------
dir : str
Directory to read from
specfile : str, optional
Filename to import spectral data from. None uses standard filename from:
["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"]
acqupar : str, optional
Filename for acquisition parameters. None uses standard name.
procpar : str, optional
Filename for processing parameters. None uses standard name.
Returns
-------
dic : dict
All parameters that can be present in the data folder:
dic["spectrum"] - First bytes of spectrum(_processed).1d
dic["acqu"] - Parameters present in acqu.par
dic["proc"] - Parameters present in proc.par
dic["dx"] - - Parameters present in the header of nmr_fid.dx
data : ndarray
Array of NMR data
"""
if os.path.isdir(dir) is not True:
raise IOError("directory %s does not exist" % (dir))
# Create empty dic
dic = {"spectrum": {}, "acqu": {}, "proc":{}, "dx":{}}
# Read in acqu.par and write to dic
acqupar = os.path.join(dir, acqupar)
if os.path.isfile(acqupar):
with open(acqupar, "r") as f:
info = f.readlines()
for line in info:
line = line.replace("\n", "")
k, v = line.split("=")
dic["acqu"][k.strip()] = v.strip()
# Read in proc.par and write to dic
procpar = os.path.join(dir,procpar)
if os.path.isfile(procpar):
with open(procpar, "r") as f:
info = f.readlines()
for line in info:
line = line.replace("\n", "")
k, v = line.split("=")
dic["proc"][k.strip()] = v.strip()
# Define which spectrumfile to take, using 'specfile' when defined, otherwise
# the files in 'priority_list' are tried, in that particular order
priority_list = ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d", None]
if specfile:
inputfile = os.path.join(dir, specfile)
if not os.path.isfile(inputfile):
raise IOError("File %s does not exist" % (inputfile))
else:
for priority in priority_list:
            if priority is None:
raise IOError("directory %s does not contain spectral data" % (dir))
inputfile = os.path.join(dir, priority)
if os.path.isfile(inputfile):
break
# Detect which file we are dealing with from the extension and read in the spectral data
# Reading .dx file using existing nmrglue.fileio.jcampdx module
if inputfile.split('.')[-1] == "dx":
dic["dx"], raw_data = jcampdx.read(inputfile)
data = np.empty((int(dic["dx"]["$TD"][0]), ), dtype='complex128')
data = raw_data[0][:] + 1j * raw_data[1][:]
# Reading .1d files
elif inputfile.split('.')[-1] == "1d":
with open(inputfile, "rb") as f:
raw_data = f.read()
# Write out parameters from the first 32 bytes into dic["spectrum"]
keys = ["owner", "format", "version", "dataType", "xDim", "yDim", "zDim", "qDim"]
for i, k in enumerate(keys):
start = i * 4
end = start + 4
value = int.from_bytes( raw_data[start:end], "little")
dic["spectrum"][k] = value
data = np.frombuffer(raw_data[end:], "<f")
# The first 1/3 of the file is xaxis data (s or ppm)
split = data.shape[-1] // 3
xscale = data[0 : split]
dic["spectrum"]["xaxis"] = xscale
# The rest is real and imaginary data points interleaved
data = data[split : : 2] + 1j * data[split + 1 : : 2]
else:
raise IOError("File %s cannot be interpreted, use .dx or .1d instead" % (inputfile))
return dic,data
def guess_udic(dic,data):
"""
Guess parameters of universal dictionary from dic, data pair.
Parameters
----------
dic : dict
Dictionary of JCAMP-DX, acqu, proc and spectrum parameters.
data : ndarray
Array of NMR data.
Returns
-------
udic : dict
Universal dictionary of spectral parameters.
"""
# Create an empty universal dictionary
udic = fileiobase.create_blank_udic(1)
    # Update default parameters, first acqu.par parameters in dic are tried, then JCAMP-DX header parameters
# size
if data is not None:
udic[0]["size"] = len(data)
else:
warn('No data, cannot set udic size')
# sw
try:
udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000
except KeyError:
try:
udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0])
except KeyError:
try:
if dic["spectrum"]["freqdata"]:
udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0]
elif data is not None:
udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1]
else:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
except KeyError:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
# obs
try:
udic[0]['obs'] = float(dic['acqu']['b1Freq'])
except KeyError:
try:
udic[0]['obs'] = float(dic['dx']['$BF1'][0])
except KeyError:
warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz")
# car
try:
udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000
except KeyError:
warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm")
# label
try:
udic[0]['label'] = dic['acqu']['rxChannel']
except KeyError:
try:
label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "")
udic[0]["label"] = label_value
except KeyError:
warn("Cannot set observed nucleus label")
#keys left to default
# udic[0]['complex']
# udic[0]['encoding']
# udic[0]['time'] = True
# udic[0]['freq'] = False
return udic
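# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch of how the two functions above are combined. The folder name is a
# placeholder for a directory written by the Spinsolve software, and the example assumes
# this module is importable as `nmrglue.spinsolve` (as in recent nmrglue releases):
#
#     import nmrglue as ng
#     dic, data = ng.spinsolve.read("path/to/spinsolve_folder")
#     udic = ng.spinsolve.guess_udic(dic, data)   # universal dictionary (sw, obs, car, label)
#     print(udic[0]["sw"], udic[0]["obs"], udic[0]["label"])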
|
[
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"warnings.warn",
"numpy.frombuffer"
] |
[((3216, 3242), 'os.path.join', 'os.path.join', (['dir', 'acqupar'], {}), '(dir, acqupar)\n', (3228, 3242), False, 'import os\n'), ((3250, 3273), 'os.path.isfile', 'os.path.isfile', (['acqupar'], {}), '(acqupar)\n', (3264, 3273), False, 'import os\n'), ((3551, 3577), 'os.path.join', 'os.path.join', (['dir', 'procpar'], {}), '(dir, procpar)\n', (3563, 3577), False, 'import os\n'), ((3584, 3607), 'os.path.isfile', 'os.path.isfile', (['procpar'], {}), '(procpar)\n', (3598, 3607), False, 'import os\n'), ((2980, 2998), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (2993, 2998), False, 'import os\n'), ((4125, 4152), 'os.path.join', 'os.path.join', (['dir', 'specfile'], {}), '(dir, specfile)\n', (4137, 4152), False, 'import os\n'), ((6583, 6620), 'warnings.warn', 'warn', (['"""No data, cannot set udic size"""'], {}), "('No data, cannot set udic size')\n", (6587, 6620), False, 'from warnings import warn\n'), ((4168, 4193), 'os.path.isfile', 'os.path.isfile', (['inputfile'], {}), '(inputfile)\n', (4182, 4193), False, 'import os\n'), ((4452, 4479), 'os.path.join', 'os.path.join', (['dir', 'priority'], {}), '(dir, priority)\n', (4464, 4479), False, 'import os\n'), ((4495, 4520), 'os.path.isfile', 'os.path.isfile', (['inputfile'], {}), '(inputfile)\n', (4509, 4520), False, 'import os\n'), ((5466, 5501), 'numpy.frombuffer', 'np.frombuffer', (['raw_data[end:]', '"""<f"""'], {}), "(raw_data[end:], '<f')\n", (5479, 5501), True, 'import numpy as np\n'), ((7644, 7766), 'warnings.warn', 'warn', (['"""Cannot set observe frequency - set manually using: \'udic[0][\'obs\'] = x\' where x is magnetic field in MHz"""'], {}), '(\n "Cannot set observe frequency - set manually using: \'udic[0][\'obs\'] = x\' where x is magnetic field in MHz"\n )\n', (7648, 7766), False, 'from warnings import warn\n'), ((8622, 8663), 'warnings.warn', 'warn', (['"""Cannot set observed nucleus label"""'], {}), "('Cannot set observed nucleus label')\n", (8626, 8663), False, 'from warnings import warn\n'), ((7326, 7447), 'warnings.warn', 'warn', (['"""Cannot set spectral width - set manually using: \'udic[0][\'sw\'] = x\' where x is the spectral width in Hz"""'], {}), '(\n "Cannot set spectral width - set manually using: \'udic[0][\'sw\'] = x\' where x is the spectral width in Hz"\n )\n', (7330, 7447), False, 'from warnings import warn\n'), ((8242, 8368), 'warnings.warn', 'warn', (['"""Cannot set carrier - try: \'udic[0][\'car\'] = x * udic[0][\'obs\']\' where x is the center of the spectrum in ppm"""'], {}), '(\n "Cannot set carrier - try: \'udic[0][\'car\'] = x * udic[0][\'obs\']\' where x is the center of the spectrum in ppm"\n )\n', (8246, 8368), False, 'from warnings import warn\n'), ((7169, 7290), 'warnings.warn', 'warn', (['"""Cannot set spectral width - set manually using: \'udic[0][\'sw\'] = x\' where x is the spectral width in Hz"""'], {}), '(\n "Cannot set spectral width - set manually using: \'udic[0][\'sw\'] = x\' where x is the spectral width in Hz"\n )\n', (7173, 7290), False, 'from warnings import warn\n')]
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Module houses class that implements ``PandasOnRayDataframe`` class using cuDF."""
import numpy as np
import ray
from ..partitioning.partition import cuDFOnRayDataframePartition
from ..partitioning.partition_manager import cuDFOnRayDataframePartitionManager
from modin.core.execution.ray.implementations.pandas_on_ray.dataframe.dataframe import (
PandasOnRayDataframe,
)
from modin.error_message import ErrorMessage
class cuDFOnRayDataframe(PandasOnRayDataframe):
"""
The class implements the interface in ``PandasOnRayDataframe`` using cuDF.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
"""
_partition_mgr_cls = cuDFOnRayDataframePartitionManager
def synchronize_labels(self, axis=None):
"""
Synchronize labels by applying the index object (Index or Columns) to the partitions eagerly.
Parameters
----------
axis : {0, 1, None}, default: None
The axis to apply to. If None, it applies to both axes.
"""
ErrorMessage.catch_bugs_and_request_email(
axis is not None and axis not in [0, 1]
)
cum_row_lengths = np.cumsum([0] + self._row_lengths)
cum_col_widths = np.cumsum([0] + self._column_widths)
def apply_idx_objs(df, idx, cols, axis):
            # cudf does not support set_axis. It only supports rename with a 1-to-1 mapping.
            # Therefore, we need to build dictionaries that map the current index and
            # column labels to the new ones.
            idx = {df.index[i]: idx[i] for i in range(len(idx))}
            cols = {df.columns[i]: cols[i] for i in range(len(cols))}
if axis == 0:
return df.rename(index=idx)
elif axis == 1:
return df.rename(columns=cols)
else:
return df.rename(index=idx, columns=cols)
keys = np.array(
[
[
self._partitions[i][j].apply(
apply_idx_objs,
idx=self.index[
slice(cum_row_lengths[i], cum_row_lengths[i + 1])
],
cols=self.columns[
slice(cum_col_widths[j], cum_col_widths[j + 1])
],
axis=axis,
)
for j in range(len(self._partitions[i]))
]
for i in range(len(self._partitions))
]
)
self._partitions = np.array(
[
[
cuDFOnRayDataframePartition(
self._partitions[i][j].get_gpu_manager(),
keys[i][j],
self._partitions[i][j]._length_cache,
self._partitions[i][j]._width_cache,
)
for j in range(len(keys[i]))
]
for i in range(len(keys))
]
)
def mask(
self,
row_indices=None,
row_numeric_idx=None,
col_indices=None,
col_numeric_idx=None,
):
"""
Lazily select columns or rows from given indices.
Parameters
----------
row_indices : list of hashable, optional
The row labels to extract.
row_numeric_idx : list of int, optional
The row indices to extract.
col_indices : list of hashable, optional
The column labels to extract.
col_numeric_idx : list of int, optional
The column indices to extract.
Returns
-------
cuDFOnRayDataframe
A new ``cuDFOnRayDataframe`` from the mask provided.
Notes
-----
If both `row_indices` and `row_numeric_idx` are set, `row_indices` will be used.
The same rule applied to `col_indices` and `col_numeric_idx`.
"""
if isinstance(row_numeric_idx, slice) and (
row_numeric_idx == slice(None) or row_numeric_idx == slice(0, None)
):
row_numeric_idx = None
if isinstance(col_numeric_idx, slice) and (
col_numeric_idx == slice(None) or col_numeric_idx == slice(0, None)
):
col_numeric_idx = None
if (
row_indices is None
and row_numeric_idx is None
and col_indices is None
and col_numeric_idx is None
):
return self.copy()
if row_indices is not None:
row_numeric_idx = self.index.get_indexer_for(row_indices)
if row_numeric_idx is not None:
row_partitions_list = self._get_dict_of_block_index(0, row_numeric_idx)
if isinstance(row_numeric_idx, slice):
# Row lengths for slice are calculated as the length of the slice
# on the partition. Often this will be the same length as the current
# length, but sometimes it is different, thus the extra calculation.
new_row_lengths = [
len(range(*idx.indices(self._row_lengths[p])))
for p, idx in row_partitions_list.items()
]
# Use the slice to calculate the new row index
new_index = self.index[row_numeric_idx]
else:
new_row_lengths = [len(idx) for _, idx in row_partitions_list.items()]
new_index = self.index[sorted(row_numeric_idx)]
else:
row_partitions_list = {
i: slice(None) for i in range(len(self._row_lengths))
}
new_row_lengths = self._row_lengths
new_index = self.index
if col_indices is not None:
col_numeric_idx = self.columns.get_indexer_for(col_indices)
if col_numeric_idx is not None:
col_partitions_list = self._get_dict_of_block_index(1, col_numeric_idx)
if isinstance(col_numeric_idx, slice):
# Column widths for slice are calculated as the length of the slice
# on the partition. Often this will be the same length as the current
# length, but sometimes it is different, thus the extra calculation.
new_col_widths = [
len(range(*idx.indices(self._column_widths[p])))
for p, idx in col_partitions_list.items()
]
# Use the slice to calculate the new columns
new_columns = self.columns[col_numeric_idx]
assert sum(new_col_widths) == len(
new_columns
), "{} != {}.\n{}\n{}\n{}".format(
sum(new_col_widths),
len(new_columns),
col_numeric_idx,
self._column_widths,
col_partitions_list,
)
if self._dtypes is not None:
new_dtypes = self.dtypes[col_numeric_idx]
else:
new_dtypes = None
else:
new_col_widths = [len(idx) for _, idx in col_partitions_list.items()]
new_columns = self.columns[sorted(col_numeric_idx)]
if self._dtypes is not None:
new_dtypes = self.dtypes.iloc[sorted(col_numeric_idx)]
else:
new_dtypes = None
else:
col_partitions_list = {
i: slice(None) for i in range(len(self._column_widths))
}
new_col_widths = self._column_widths
new_columns = self.columns
if self._dtypes is not None:
new_dtypes = self.dtypes
else:
new_dtypes = None
key_and_gpus = np.array(
[
[
[
self._partitions[row_idx][col_idx].mask(
row_internal_indices, col_internal_indices
),
self._partitions[row_idx][col_idx].get_gpu_manager(),
]
for col_idx, col_internal_indices in col_partitions_list.items()
if isinstance(col_internal_indices, slice)
or len(col_internal_indices) > 0
]
for row_idx, row_internal_indices in row_partitions_list.items()
if isinstance(row_internal_indices, slice)
or len(row_internal_indices) > 0
]
)
shape = key_and_gpus.shape[:2]
keys = ray.get(key_and_gpus[:, :, 0].flatten().tolist())
gpu_managers = key_and_gpus[:, :, 1].flatten().tolist()
new_partitions = self._partition_mgr_cls._create_partitions(
keys, gpu_managers
).reshape(shape)
intermediate = self.__constructor__(
new_partitions,
new_index,
new_columns,
new_row_lengths,
new_col_widths,
new_dtypes,
)
# Check if monotonically increasing, return if it is. Fast track code path for
# common case to keep it fast.
if (
row_numeric_idx is None
or isinstance(row_numeric_idx, slice)
or len(row_numeric_idx) == 1
or np.all(row_numeric_idx[1:] >= row_numeric_idx[:-1])
) and (
col_numeric_idx is None
or isinstance(col_numeric_idx, slice)
or len(col_numeric_idx) == 1
or np.all(col_numeric_idx[1:] >= col_numeric_idx[:-1])
):
return intermediate
# The new labels are often smaller than the old labels, so we can't reuse the
# original order values because those were mapped to the original data. We have
# to reorder here based on the expected order from within the data.
# We create a dictionary mapping the position of the numeric index with respect
# to all others, then recreate that order by mapping the new order values from
# the old. This information is sent to `_reorder_labels`.
if row_numeric_idx is not None:
row_order_mapping = dict(
zip(sorted(row_numeric_idx), range(len(row_numeric_idx)))
)
new_row_order = [row_order_mapping[idx] for idx in row_numeric_idx]
else:
new_row_order = None
if col_numeric_idx is not None:
col_order_mapping = dict(
zip(sorted(col_numeric_idx), range(len(col_numeric_idx)))
)
new_col_order = [col_order_mapping[idx] for idx in col_numeric_idx]
else:
new_col_order = None
return intermediate._reorder_labels(
row_numeric_idx=new_row_order, col_numeric_idx=new_col_order
)
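# --- Illustrative sketch (editor's addition, not part of the Modin source) ---
# `apply_idx_objs` above works around cuDF's lack of `set_axis` by building 1-to-1
# rename mappings from the current labels to the new ones. The stand-alone sketch below
# reproduces that idea on a plain pandas DataFrame (cuDF mirrors this part of the pandas
# API); the frame and the replacement labels are hypothetical.
if __name__ == "__main__":
    import pandas

    df = pandas.DataFrame([[1, 2], [3, 4]], index=["r0", "r1"], columns=["c0", "c1"])
    new_index, new_columns = ["row_a", "row_b"], ["col_a", "col_b"]
    index_map = {df.index[i]: new_index[i] for i in range(len(new_index))}
    column_map = {df.columns[i]: new_columns[i] for i in range(len(new_columns))}
    print(df.rename(index=index_map, columns=column_map))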
|
[
"numpy.cumsum",
"modin.error_message.ErrorMessage.catch_bugs_and_request_email",
"numpy.all"
] |
[((2458, 2544), 'modin.error_message.ErrorMessage.catch_bugs_and_request_email', 'ErrorMessage.catch_bugs_and_request_email', (['(axis is not None and axis not in [0, 1])'], {}), '(axis is not None and axis not in\n [0, 1])\n', (2499, 2544), False, 'from modin.error_message import ErrorMessage\n'), ((2590, 2624), 'numpy.cumsum', 'np.cumsum', (['([0] + self._row_lengths)'], {}), '([0] + self._row_lengths)\n', (2599, 2624), True, 'import numpy as np\n'), ((2650, 2686), 'numpy.cumsum', 'np.cumsum', (['([0] + self._column_widths)'], {}), '([0] + self._column_widths)\n', (2659, 2686), True, 'import numpy as np\n'), ((10833, 10884), 'numpy.all', 'np.all', (['(row_numeric_idx[1:] >= row_numeric_idx[:-1])'], {}), '(row_numeric_idx[1:] >= row_numeric_idx[:-1])\n', (10839, 10884), True, 'import numpy as np\n'), ((11043, 11094), 'numpy.all', 'np.all', (['(col_numeric_idx[1:] >= col_numeric_idx[:-1])'], {}), '(col_numeric_idx[1:] >= col_numeric_idx[:-1])\n', (11049, 11094), True, 'import numpy as np\n')]
|
# Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import uuid
import numpy as np
import pydicom
from pymedphys._dicom.create import dicom_dataset_from_dict
from pymedphys._dicom.header import (
RED_adjustment_map_from_structure_names,
adjust_machine_name,
adjust_RED_by_structure_name,
adjust_rel_elec_density,
)
from pymedphys._dicom.utilities import remove_file
HERE = os.path.dirname(__file__)
ORIGINAL_DICOM_FILENAME = os.path.join(
HERE, "scratch", "original-{}.dcm".format(str(uuid.uuid4()))
)
ADJUSTED_DICOM_FILENAME = os.path.join(
HERE, "scratch", "adjusted-{}.dcm".format(str(uuid.uuid4()))
)
def compare_dicom_cli(command, original, expected):
pydicom.write_file(ORIGINAL_DICOM_FILENAME, original)
try:
subprocess.check_call(command)
cli_adjusted_ds = pydicom.read_file(ADJUSTED_DICOM_FILENAME, force=True)
assert str(cli_adjusted_ds) == str(expected)
finally:
remove_file(ORIGINAL_DICOM_FILENAME)
remove_file(ADJUSTED_DICOM_FILENAME)
def test_adjust_machine_name():
new_name = "new_name"
original_ds = dicom_dataset_from_dict(
{
"BeamSequence": [
{"TreatmentMachineName": "hello"},
{"TreatmentMachineName": "george"},
]
}
)
expected_ds = dicom_dataset_from_dict(
{
"BeamSequence": [
{"TreatmentMachineName": new_name},
{"TreatmentMachineName": new_name},
]
}
)
adjusted_ds = adjust_machine_name(original_ds, new_name)
assert adjusted_ds != original_ds
assert adjusted_ds == expected_ds
command = "pymedphys dicom adjust-machine-name".split() + [
ORIGINAL_DICOM_FILENAME,
ADJUSTED_DICOM_FILENAME,
new_name,
]
compare_dicom_cli(command, original_ds, expected_ds)
def test_electron_density_append():
adjustment_map = {
"to_be_changed 1": 1.0,
"to_be_changed 2": 0.5,
"to_be_changed 3": 1.5,
}
excess_adjustment_map = {**adjustment_map, **{"this_structure_doesnt_exist": 1.0}}
original_ds = dicom_dataset_from_dict(
{
"StructureSetROISequence": [
{"ROINumber": 1, "ROIName": "to_be_changed 1"},
{"ROINumber": 2, "ROIName": "dont_change_me"},
{"ROINumber": 10, "ROIName": "to_be_changed 2"},
{"ROINumber": 99, "ROIName": "to_be_changed 3"},
],
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "EFFECTIVE_Z",
"ROIPhysicalPropertyValue": 6,
}
],
},
{"ReferencedROINumber": 2},
{"ReferencedROINumber": 10},
{
"ReferencedROINumber": 99,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": 0,
}
],
},
],
}
)
expected_ds = dicom_dataset_from_dict(
{
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "EFFECTIVE_Z",
"ROIPhysicalPropertyValue": 6,
},
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 1"
],
},
],
},
{"ReferencedROINumber": 2},
{
"ReferencedROINumber": 10,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 2"
],
}
],
},
{
"ReferencedROINumber": 99,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": adjustment_map[
"to_be_changed 3"
],
}
],
},
]
},
template_ds=original_ds,
)
adjusted_ds = adjust_rel_elec_density(original_ds, adjustment_map)
assert adjusted_ds != original_ds
assert str(expected_ds) == str(adjusted_ds)
adjusted_with_excess_ds = adjust_rel_elec_density(
original_ds, excess_adjustment_map, ignore_missing_structure=True
)
assert adjusted_with_excess_ds != original_ds
assert str(expected_ds) == str(adjusted_with_excess_ds)
excess_adjustment_map_as_list = [
["{}".format(key), item] for key, item in excess_adjustment_map.items()
]
excess_adjustment_map_flat = np.concatenate(excess_adjustment_map_as_list).tolist()
command = (
"pymedphys dicom adjust-RED -i ".split()
+ [ORIGINAL_DICOM_FILENAME, ADJUSTED_DICOM_FILENAME]
+ excess_adjustment_map_flat
)
compare_dicom_cli(command, original_ds, expected_ds)
def test_structure_name_parse():
structure_names = [
"a RED=1",
"b",
"c",
"d RED=2.2",
"e red = 3",
"f",
"g Red: 4.7",
"h RED=0.5 ",
]
expected_adjustment_map = {
"a RED=1": 1,
"d RED=2.2": 2.2,
"e red = 3": 3,
"g Red: 4.7": 4.7,
"h RED=0.5 ": 0.5,
}
adjustment_map = RED_adjustment_map_from_structure_names(structure_names)
assert expected_adjustment_map == adjustment_map
def test_structure_name_based_RED_append():
electron_density_to_use = 0.5
original_ds = dicom_dataset_from_dict(
{
"StructureSetROISequence": [
{
"ROINumber": 1,
"ROIName": "a_structure RED={}".format(electron_density_to_use),
},
{"ROINumber": 2, "ROIName": "dont_change_me"},
],
"RTROIObservationsSequence": [
{"ReferencedROINumber": 1},
{"ReferencedROINumber": 2},
],
}
)
expected_ds = dicom_dataset_from_dict(
{
"RTROIObservationsSequence": [
{
"ReferencedROINumber": 1,
"ROIPhysicalPropertiesSequence": [
{
"ROIPhysicalProperty": "REL_ELEC_DENSITY",
"ROIPhysicalPropertyValue": electron_density_to_use,
}
],
},
{"ReferencedROINumber": 2},
]
},
template_ds=original_ds,
)
adjusted_ds = adjust_RED_by_structure_name(original_ds)
assert adjusted_ds != original_ds
assert str(expected_ds) == str(adjusted_ds)
command = "pymedphys dicom adjust-RED-by-structure-name".split() + [
ORIGINAL_DICOM_FILENAME,
ADJUSTED_DICOM_FILENAME,
]
compare_dicom_cli(command, original_ds, expected_ds)
|
[
"pymedphys._dicom.header.adjust_RED_by_structure_name",
"pymedphys._dicom.header.adjust_machine_name",
"pymedphys._dicom.header.RED_adjustment_map_from_structure_names",
"subprocess.check_call",
"pymedphys._dicom.utilities.remove_file",
"uuid.uuid4",
"os.path.dirname",
"pymedphys._dicom.create.dicom_dataset_from_dict",
"pydicom.read_file",
"numpy.concatenate",
"pydicom.write_file",
"pymedphys._dicom.header.adjust_rel_elec_density"
] |
[((958, 983), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (973, 983), False, 'import os\n'), ((1256, 1309), 'pydicom.write_file', 'pydicom.write_file', (['ORIGINAL_DICOM_FILENAME', 'original'], {}), '(ORIGINAL_DICOM_FILENAME, original)\n', (1274, 1309), False, 'import pydicom\n'), ((1676, 1794), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'BeamSequence': [{'TreatmentMachineName': 'hello'}, {\n 'TreatmentMachineName': 'george'}]}"], {}), "({'BeamSequence': [{'TreatmentMachineName': 'hello'},\n {'TreatmentMachineName': 'george'}]})\n", (1699, 1794), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((1893, 2013), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'BeamSequence': [{'TreatmentMachineName': new_name}, {\n 'TreatmentMachineName': new_name}]}"], {}), "({'BeamSequence': [{'TreatmentMachineName': new_name\n }, {'TreatmentMachineName': new_name}]})\n", (1916, 2013), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((2111, 2153), 'pymedphys._dicom.header.adjust_machine_name', 'adjust_machine_name', (['original_ds', 'new_name'], {}), '(original_ds, new_name)\n', (2130, 2153), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((2714, 3361), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'StructureSetROISequence': [{'ROINumber': 1, 'ROIName': 'to_be_changed 1'},\n {'ROINumber': 2, 'ROIName': 'dont_change_me'}, {'ROINumber': 10,\n 'ROIName': 'to_be_changed 2'}, {'ROINumber': 99, 'ROIName':\n 'to_be_changed 3'}], 'RTROIObservationsSequence': [{\n 'ReferencedROINumber': 1, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'EFFECTIVE_Z', 'ROIPhysicalPropertyValue': 6}]},\n {'ReferencedROINumber': 2}, {'ReferencedROINumber': 10}, {\n 'ReferencedROINumber': 99, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': \n 0}]}]}"], {}), "({'StructureSetROISequence': [{'ROINumber': 1,\n 'ROIName': 'to_be_changed 1'}, {'ROINumber': 2, 'ROIName':\n 'dont_change_me'}, {'ROINumber': 10, 'ROIName': 'to_be_changed 2'}, {\n 'ROINumber': 99, 'ROIName': 'to_be_changed 3'}],\n 'RTROIObservationsSequence': [{'ReferencedROINumber': 1,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty': 'EFFECTIVE_Z',\n 'ROIPhysicalPropertyValue': 6}]}, {'ReferencedROINumber': 2}, {\n 'ReferencedROINumber': 10}, {'ReferencedROINumber': 99,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': 0}]}]})\n", (2737, 3361), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((3926, 4667), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'RTROIObservationsSequence': [{'ReferencedROINumber': 1,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty': 'EFFECTIVE_Z',\n 'ROIPhysicalPropertyValue': 6}, {'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': adjustment_map[\n 'to_be_changed 1']}]}, {'ReferencedROINumber': 2}, {\n 'ReferencedROINumber': 10, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue':\n adjustment_map['to_be_changed 2']}]}, {'ReferencedROINumber': 99,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': 
adjustment_map[\n 'to_be_changed 3']}]}]}"], {'template_ds': 'original_ds'}), "({'RTROIObservationsSequence': [{\n 'ReferencedROINumber': 1, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'EFFECTIVE_Z', 'ROIPhysicalPropertyValue': 6}, {\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue':\n adjustment_map['to_be_changed 1']}]}, {'ReferencedROINumber': 2}, {\n 'ReferencedROINumber': 10, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue':\n adjustment_map['to_be_changed 2']}]}, {'ReferencedROINumber': 99,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': adjustment_map[\n 'to_be_changed 3']}]}]}, template_ds=original_ds)\n", (3949, 4667), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((5623, 5675), 'pymedphys._dicom.header.adjust_rel_elec_density', 'adjust_rel_elec_density', (['original_ds', 'adjustment_map'], {}), '(original_ds, adjustment_map)\n', (5646, 5675), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((5794, 5888), 'pymedphys._dicom.header.adjust_rel_elec_density', 'adjust_rel_elec_density', (['original_ds', 'excess_adjustment_map'], {'ignore_missing_structure': '(True)'}), '(original_ds, excess_adjustment_map,\n ignore_missing_structure=True)\n', (5817, 5888), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((6850, 6906), 'pymedphys._dicom.header.RED_adjustment_map_from_structure_names', 'RED_adjustment_map_from_structure_names', (['structure_names'], {}), '(structure_names)\n', (6889, 6906), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((7553, 7842), 'pymedphys._dicom.create.dicom_dataset_from_dict', 'dicom_dataset_from_dict', (["{'RTROIObservationsSequence': [{'ReferencedROINumber': 1,\n 'ROIPhysicalPropertiesSequence': [{'ROIPhysicalProperty':\n 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue': electron_density_to_use\n }]}, {'ReferencedROINumber': 2}]}"], {'template_ds': 'original_ds'}), "({'RTROIObservationsSequence': [{\n 'ReferencedROINumber': 1, 'ROIPhysicalPropertiesSequence': [{\n 'ROIPhysicalProperty': 'REL_ELEC_DENSITY', 'ROIPhysicalPropertyValue':\n electron_density_to_use}]}, {'ReferencedROINumber': 2}]}, template_ds=\n original_ds)\n", (7576, 7842), False, 'from pymedphys._dicom.create import dicom_dataset_from_dict\n'), ((8123, 8164), 'pymedphys._dicom.header.adjust_RED_by_structure_name', 'adjust_RED_by_structure_name', (['original_ds'], {}), '(original_ds)\n', (8151, 8164), False, 'from pymedphys._dicom.header import RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density\n'), ((1328, 1358), 'subprocess.check_call', 'subprocess.check_call', (['command'], {}), '(command)\n', (1349, 1358), False, 'import subprocess\n'), ((1385, 1439), 'pydicom.read_file', 'pydicom.read_file', (['ADJUSTED_DICOM_FILENAME'], {'force': '(True)'}), '(ADJUSTED_DICOM_FILENAME, force=True)\n', (1402, 1439), False, 'import pydicom\n'), ((1515, 1551), 'pymedphys._dicom.utilities.remove_file', 'remove_file', (['ORIGINAL_DICOM_FILENAME'], {}), '(ORIGINAL_DICOM_FILENAME)\n', (1526, 1551), False, 'from 
pymedphys._dicom.utilities import remove_file\n'), ((1560, 1596), 'pymedphys._dicom.utilities.remove_file', 'remove_file', (['ADJUSTED_DICOM_FILENAME'], {}), '(ADJUSTED_DICOM_FILENAME)\n', (1571, 1596), False, 'from pymedphys._dicom.utilities import remove_file\n'), ((1074, 1086), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1084, 1086), False, 'import uuid\n'), ((1181, 1193), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1191, 1193), False, 'import uuid\n'), ((6168, 6213), 'numpy.concatenate', 'np.concatenate', (['excess_adjustment_map_as_list'], {}), '(excess_adjustment_map_as_list)\n', (6182, 6213), True, 'import numpy as np\n')]
|
""" Simple Example using coreali to access a register model. Needs no h^ardware"""
# Import dependencies and compile register model with systemrdl-compiler
from systemrdl import RDLCompiler
import coreali
import numpy as np
import os
from coreali import RegisterModel
rdlc = RDLCompiler()
rdlc.compile_file(os.path.dirname(__file__)+"/../systemrdl/logger.rdl")
root = rdlc.elaborate()
# Generate hierarchical register model
rio = coreali.registerio.RegIoNoHW(np.zeros([256], np.uint8()))
logger = RegisterModel(root, rio)
# Use the generated register model
logger.Ctrl.read()
logger.LogMem.write(0,[1,2,3])
logger.LogMem.read()
logger.LogMem[1].write(0,[11,12,13])
print(logger)
|
[
"os.path.dirname",
"systemrdl.RDLCompiler",
"coreali.RegisterModel",
"numpy.uint8"
] |
[((278, 291), 'systemrdl.RDLCompiler', 'RDLCompiler', ([], {}), '()\n', (289, 291), False, 'from systemrdl import RDLCompiler\n'), ((502, 526), 'coreali.RegisterModel', 'RegisterModel', (['root', 'rio'], {}), '(root, rio)\n', (515, 526), False, 'from coreali import RegisterModel\n'), ((310, 335), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (325, 335), False, 'import os\n'), ((480, 490), 'numpy.uint8', 'np.uint8', ([], {}), '()\n', (488, 490), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file contains the generators and their inverses for common archimedean copulas.
"""
import numpy as np
def boundsConditions(x):
if x < 0 or x > 1:
raise ValueError("Unable to compute generator for x equals to {}".format(x))
def claytonGenerator(x, theta):
boundsConditions(x)
if theta == 0:
raise ValueError("The parameter of a Clayton copula must not be equal to 0.")
if theta < -1:
raise ValueError("The parameter of a Clayton copula must be greater than -1 and different from 0.")
return (1. / theta) * (x**(-theta) - 1.)
def claytonGeneratorInvert(x, theta):
if theta == 0:
raise ValueError("The parameter of a Clayton copula must not be equal to 0.")
if theta < -1:
raise ValueError("The parameter of a Clayton copula must be greater than -1 and different from 0.")
return (1. + theta * x)**(-1. / max(theta,1e-6))
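# Quick check (illustrative, not part of the original file): for theta > 0 the two
# functions above are mutual inverses, e.g.
#     claytonGeneratorInvert(claytonGenerator(0.4, 2.0), 2.0)   # -> 0.4 (up to rounding)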
def gumbelGenerator(x, theta):
boundsConditions(x)
if theta < 1:
raise ValueError("The parameter of a Gumbel copula must be greater than 1.")
return (-np.log(x))**theta
def gumbelGeneratorInvert(x, theta):
if len(theta) > 1:
theta = theta[0]
if theta < 1:
raise ValueError("The parameter of a Gumbel copula must be greater than 1.")
if (x < 1 and theta != 1):
raise(ValueError("The inverse Gumbel generator cannot be evaluated for negative input and theta > 1"))
return np.exp(-np.power(x,np.divide(1, theta)))
def frankGenerator(x, theta):
boundsConditions(x)
if theta == 0:
raise ValueError("The parameter of a Frank copula must not be equal to 0.")
return -np.log((np.exp(-theta[0] * x) - 1) / (np.exp(-theta[0]) - 1))
def frankGeneratorInvert(x, theta):
if theta == 0:
raise ValueError("The parameter of a Frank copula must not be equal to 0.")
return -1. / theta * np.log(1. + np.exp(-x) * (np.exp(-theta) - 1.))
def joeGenerator(x, theta):
boundsConditions(x)
if theta < 1:
raise ValueError("The parameter of a Joe copula must be greater than 1.")
return -np.log(1. - (1. - x)**theta)
def joeGeneratorInvert(x, theta):
if theta < 1:
raise ValueError("The parameter of a Joe copula must be greater than 1.")
return 1. - (1. - np.exp(-x))**(1. / max(theta,1e-6))
def aliMikhailHaqGenerator(x, theta):
boundsConditions(x)
if theta < -1 or theta >= 1:
raise ValueError("The parameter of an Ali-Mikhail-Haq copula must be between -1 included and 1 excluded.")
return np.log((1. - theta * (1. - x)) / x)
def aliMikhailHaqGeneratorInvert(x, theta):
if theta < -1 or theta >= 1:
raise ValueError("The parameter of an Ali-Mikhail-Haq copula must be between -1 included and 1 excluded.")
return (1. - theta) / (np.exp(x) - theta)
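# --- Illustrative sketch, not part of the original file -----------------------------
# An Archimedean copula is recovered from its generator phi and inverse psi through
# C(u, v) = psi(phi(u) + phi(v)). The helper below demonstrates the construction with
# the Clayton pair defined above; for theta > 0 it matches the closed form
# (u**-theta + v**-theta - 1)**(-1/theta).
def _clayton_copula_from_generator(u, v, theta):
    """Evaluate the bivariate Clayton copula C(u, v) via its generator pair (sketch)."""
    return claytonGeneratorInvert(
        claytonGenerator(u, theta) + claytonGenerator(v, theta), theta)
# Example: _clayton_copula_from_generator(0.3, 0.7, 2.0) is roughly 0.287.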
|
[
"numpy.exp",
"numpy.log",
"numpy.divide"
] |
[((2585, 2622), 'numpy.log', 'np.log', (['((1.0 - theta * (1.0 - x)) / x)'], {}), '((1.0 - theta * (1.0 - x)) / x)\n', (2591, 2622), True, 'import numpy as np\n'), ((2141, 2173), 'numpy.log', 'np.log', (['(1.0 - (1.0 - x) ** theta)'], {}), '(1.0 - (1.0 - x) ** theta)\n', (2147, 2173), True, 'import numpy as np\n'), ((1128, 1137), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1134, 1137), True, 'import numpy as np\n'), ((2841, 2850), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2847, 2850), True, 'import numpy as np\n'), ((1508, 1527), 'numpy.divide', 'np.divide', (['(1)', 'theta'], {}), '(1, theta)\n', (1517, 1527), True, 'import numpy as np\n'), ((2327, 2337), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2333, 2337), True, 'import numpy as np\n'), ((1709, 1730), 'numpy.exp', 'np.exp', (['(-theta[0] * x)'], {}), '(-theta[0] * x)\n', (1715, 1730), True, 'import numpy as np\n'), ((1739, 1756), 'numpy.exp', 'np.exp', (['(-theta[0])'], {}), '(-theta[0])\n', (1745, 1756), True, 'import numpy as np\n'), ((1940, 1950), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1946, 1950), True, 'import numpy as np\n'), ((1954, 1968), 'numpy.exp', 'np.exp', (['(-theta)'], {}), '(-theta)\n', (1960, 1968), True, 'import numpy as np\n')]
|
"""Implementations of algorithms for continuous control."""
import functools
from typing import Optional, Sequence, Tuple
import jax
import jax.numpy as jnp
import numpy as np
import optax
from jaxrl.agents.sac import temperature
from jaxrl.agents.sac.actor import update as update_actor
from jaxrl.agents.sac.critic import target_update
from jaxrl.agents.sac_v1.critic import update_q, update_v
from jaxrl.datasets import Batch
from jaxrl.networks import critic_net, policies
from jaxrl.networks.common import InfoDict, Model, PRNGKey
@functools.partial(jax.jit, static_argnames=('update_target'))
def _update_jit(
rng: PRNGKey, actor: Model, critic: Model, value: Model,
target_value: Model, temp: Model, batch: Batch, discount: float,
tau: float, target_entropy: float, update_target: bool
) -> Tuple[PRNGKey, Model, Model, Model, Model, Model, InfoDict]:
new_critic, critic_info = update_q(critic, target_value, batch, discount)
rng, key = jax.random.split(rng)
new_actor, actor_info = update_actor(key, actor, new_critic, temp, batch)
rng, key = jax.random.split(rng)
new_value, value_info = update_v(key, new_actor, new_critic, value, temp,
batch, True)
if update_target:
new_target_value = target_update(new_value, target_value, tau)
else:
new_target_value = target_value
new_temp, alpha_info = temperature.update(temp, actor_info['entropy'],
target_entropy)
return rng, new_actor, new_critic, new_value, new_target_value, new_temp, {
**critic_info,
**value_info,
**actor_info,
**alpha_info
}
class SACV1Learner(object):
def __init__(self,
seed: int,
observations: jnp.ndarray,
actions: jnp.ndarray,
actor_lr: float = 3e-4,
value_lr: float = 3e-4,
critic_lr: float = 3e-4,
temp_lr: float = 3e-4,
hidden_dims: Sequence[int] = (256, 256),
discount: float = 0.99,
tau: float = 0.005,
target_update_period: int = 1,
target_entropy: Optional[float] = None,
init_temperature: float = 1.0):
"""
        An implementation of SAC-V1, the Soft Actor-Critic variant with a separate state-value function, as described in https://arxiv.org/abs/1801.01290
"""
action_dim = actions.shape[-1]
if target_entropy is None:
self.target_entropy = -action_dim / 2
else:
self.target_entropy = target_entropy
self.tau = tau
self.target_update_period = target_update_period
self.discount = discount
rng = jax.random.PRNGKey(seed)
rng, actor_key, critic_key, temp_key = jax.random.split(rng, 4)
actor_def = policies.NormalTanhPolicy(hidden_dims, action_dim)
actor = Model.create(actor_def,
inputs=[actor_key, observations],
tx=optax.adam(learning_rate=actor_lr))
critic_def = critic_net.DoubleCritic(hidden_dims)
critic = Model.create(critic_def,
inputs=[critic_key, observations, actions],
tx=optax.adam(learning_rate=critic_lr))
value_def = critic_net.ValueCritic(hidden_dims)
value = Model.create(value_def,
inputs=[critic_key, observations],
tx=optax.adam(learning_rate=value_lr))
target_value = Model.create(value_def,
inputs=[critic_key, observations])
temp = Model.create(temperature.Temperature(init_temperature),
inputs=[temp_key],
tx=optax.adam(learning_rate=temp_lr))
self.actor = actor
self.critic = critic
self.value = value
self.target_value = target_value
self.temp = temp
self.rng = rng
self.step = 1
def sample_actions(self,
observations: np.ndarray,
temperature: float = 1.0) -> jnp.ndarray:
rng, actions = policies.sample_actions(self.rng, self.actor.apply_fn,
self.actor.params, observations,
temperature)
self.rng = rng
actions = np.asarray(actions)
return np.clip(actions, -1, 1)
def update(self, batch: Batch) -> InfoDict:
self.step += 1
new_rng, new_actor, new_critic, new_value, new_target_value, new_temp, info = _update_jit(
self.rng, self.actor, self.critic, self.value, self.target_value,
self.temp, batch, self.discount, self.tau, self.target_entropy,
self.step % self.target_update_period == 0)
self.rng = new_rng
self.actor = new_actor
self.critic = new_critic
self.value = new_value
self.target_value = new_target_value
self.temp = new_temp
return info
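# Usage sketch (not part of the original module): assuming `Batch` is the jaxrl
# namedtuple of (observations, actions, rewards, masks, next_observations) and that
# `env` and `replay_buffer` are the caller's (hypothetical) environment and buffer:
#
#     learner = SACV1Learner(seed=0,
#                            observations=env.observation_space.sample()[np.newaxis],
#                            actions=env.action_space.sample()[np.newaxis])
#     action = learner.sample_actions(observation)
#     info = learner.update(replay_buffer.sample(batch_size))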
|
[
"jaxrl.networks.critic_net.ValueCritic",
"numpy.clip",
"optax.adam",
"jax.random.PRNGKey",
"jaxrl.agents.sac.temperature.update",
"jaxrl.networks.policies.sample_actions",
"jaxrl.networks.common.Model.create",
"jaxrl.agents.sac_v1.critic.update_q",
"numpy.asarray",
"jaxrl.agents.sac.actor.update",
"jaxrl.agents.sac.critic.target_update",
"jaxrl.agents.sac.temperature.Temperature",
"jaxrl.networks.policies.NormalTanhPolicy",
"functools.partial",
"jaxrl.agents.sac_v1.critic.update_v",
"jaxrl.networks.critic_net.DoubleCritic",
"jax.random.split"
] |
[((542, 601), 'functools.partial', 'functools.partial', (['jax.jit'], {'static_argnames': '"""update_target"""'}), "(jax.jit, static_argnames='update_target')\n", (559, 601), False, 'import functools\n'), ((907, 954), 'jaxrl.agents.sac_v1.critic.update_q', 'update_q', (['critic', 'target_value', 'batch', 'discount'], {}), '(critic, target_value, batch, discount)\n', (915, 954), False, 'from jaxrl.agents.sac_v1.critic import update_q, update_v\n'), ((971, 992), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (987, 992), False, 'import jax\n'), ((1021, 1070), 'jaxrl.agents.sac.actor.update', 'update_actor', (['key', 'actor', 'new_critic', 'temp', 'batch'], {}), '(key, actor, new_critic, temp, batch)\n', (1033, 1070), True, 'from jaxrl.agents.sac.actor import update as update_actor\n'), ((1087, 1108), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (1103, 1108), False, 'import jax\n'), ((1137, 1199), 'jaxrl.agents.sac_v1.critic.update_v', 'update_v', (['key', 'new_actor', 'new_critic', 'value', 'temp', 'batch', '(True)'], {}), '(key, new_actor, new_critic, value, temp, batch, True)\n', (1145, 1199), False, 'from jaxrl.agents.sac_v1.critic import update_q, update_v\n'), ((1409, 1472), 'jaxrl.agents.sac.temperature.update', 'temperature.update', (['temp', "actor_info['entropy']", 'target_entropy'], {}), "(temp, actor_info['entropy'], target_entropy)\n", (1427, 1472), False, 'from jaxrl.agents.sac import temperature\n'), ((1287, 1330), 'jaxrl.agents.sac.critic.target_update', 'target_update', (['new_value', 'target_value', 'tau'], {}), '(new_value, target_value, tau)\n', (1300, 1330), False, 'from jaxrl.agents.sac.critic import target_update\n'), ((2763, 2787), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (2781, 2787), False, 'import jax\n'), ((2835, 2859), 'jax.random.split', 'jax.random.split', (['rng', '(4)'], {}), '(rng, 4)\n', (2851, 2859), False, 'import jax\n'), ((2881, 2931), 'jaxrl.networks.policies.NormalTanhPolicy', 'policies.NormalTanhPolicy', (['hidden_dims', 'action_dim'], {}), '(hidden_dims, action_dim)\n', (2906, 2931), False, 'from jaxrl.networks import critic_net, policies\n'), ((3125, 3161), 'jaxrl.networks.critic_net.DoubleCritic', 'critic_net.DoubleCritic', (['hidden_dims'], {}), '(hidden_dims)\n', (3148, 3161), False, 'from jaxrl.networks import critic_net, policies\n'), ((3369, 3404), 'jaxrl.networks.critic_net.ValueCritic', 'critic_net.ValueCritic', (['hidden_dims'], {}), '(hidden_dims)\n', (3391, 3404), False, 'from jaxrl.networks import critic_net, policies\n'), ((3601, 3659), 'jaxrl.networks.common.Model.create', 'Model.create', (['value_def'], {'inputs': '[critic_key, observations]'}), '(value_def, inputs=[critic_key, observations])\n', (3613, 3659), False, 'from jaxrl.networks.common import InfoDict, Model, PRNGKey\n'), ((4244, 4348), 'jaxrl.networks.policies.sample_actions', 'policies.sample_actions', (['self.rng', 'self.actor.apply_fn', 'self.actor.params', 'observations', 'temperature'], {}), '(self.rng, self.actor.apply_fn, self.actor.params,\n observations, temperature)\n', (4267, 4348), False, 'from jaxrl.networks import critic_net, policies\n'), ((4481, 4500), 'numpy.asarray', 'np.asarray', (['actions'], {}), '(actions)\n', (4491, 4500), True, 'import numpy as np\n'), ((4516, 4539), 'numpy.clip', 'np.clip', (['actions', '(-1)', '(1)'], {}), '(actions, -1, 1)\n', (4523, 4539), True, 'import numpy as np\n'), ((3725, 3766), 'jaxrl.agents.sac.temperature.Temperature', 'temperature.Temperature', 
(['init_temperature'], {}), '(init_temperature)\n', (3748, 3766), False, 'from jaxrl.agents.sac import temperature\n'), ((3067, 3101), 'optax.adam', 'optax.adam', ([], {'learning_rate': 'actor_lr'}), '(learning_rate=actor_lr)\n', (3077, 3101), False, 'import optax\n'), ((3311, 3346), 'optax.adam', 'optax.adam', ([], {'learning_rate': 'critic_lr'}), '(learning_rate=critic_lr)\n', (3321, 3346), False, 'import optax\n'), ((3541, 3575), 'optax.adam', 'optax.adam', ([], {'learning_rate': 'value_lr'}), '(learning_rate=value_lr)\n', (3551, 3575), False, 'import optax\n'), ((3846, 3879), 'optax.adam', 'optax.adam', ([], {'learning_rate': 'temp_lr'}), '(learning_rate=temp_lr)\n', (3856, 3879), False, 'import optax\n')]
|
#! /usr/bin/env python
import cv2
import matplotlib.pyplot as plt
import skimage
import skimage.io
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.pyplot import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, \
fromstring, ceil, dtype, float32, sqrt, dot, zeros
from misc import WithTimer
def norm01(arr):
arr = arr.copy()
arr -= arr.min()
arr /= arr.max() + 1e-10
return arr
def norm01c(arr, center):
'''Maps the input range to [0,1] such that the center value maps to .5'''
arr = arr.copy()
arr -= center
arr /= max(2 * arr.max(), -2 * arr.min()) + 1e-10
arr += .5
assert arr.min() >= 0
assert arr.max() <= 1
return arr
def norm0255(arr):
'''Maps the input range to [0,255] as dtype uint8'''
arr = arr.copy()
arr -= arr.min()
arr *= 255.0 / (arr.max() + 1e-10)
arr = array(arr, 'uint8')
return arr
def cv2_read_cap_rgb(cap, saveto=None):
rval, frame = cap.read()
if saveto:
cv2.imwrite(saveto, frame)
if len(frame.shape) == 2:
# Upconvert single channel grayscale to color
frame = frame[:, :, newaxis]
if frame.shape[2] == 1:
frame = tile(frame, (1, 1, 3))
if frame.shape[2] > 3:
# Chop off transparency
frame = frame[:, :, :3]
frame = frame[:, :, ::-1] # Convert native OpenCV BGR -> RGB
return frame
def plt_plot_signal(data, labels, zoom_level=-1, offset=0, markers=None, title=None):
fig = Figure(figsize=(5, 5))
canvas = FigureCanvas(fig)
ax = None
if len(data.shape) == 1:
data = expand_dims(data, axis=1)
if zoom_level == -1:
zoom_level = data.shape[0]
color = iter(cm.rainbow(linspace(0, 1, data.shape[1])))
s = offset
e = s + zoom_level
x = arange(s, e)
for i in range(data.shape[1]):
c = next(color)
label = labels[i] if labels is not None else 'Signal {}'.format(i + 1)
ax = fig.add_subplot(data.shape[1], 1, (i + 1), sharex=ax)
ax.plot(x, data[s:e, i], lw=1, label=label, c=c)
# # ax.set_adjustable('box-forced')
# ax.set_xlim(left=0, right=zoom_level)
# ax.get_xaxis().set_visible(i == data.shape[1] - 1)
# ax.xaxis.set_ticks(arange(s, e + 1, (e - s) / 10.0))
# ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))
ax.legend(loc='lower right')
if markers is not None and i in markers:
for val in markers[i]:
if val >= s and val < e:
ax.axvline(x=val)
if title is not None:
fig.suptitle(title)
fig.tight_layout()
fig.subplots_adjust(hspace=0)
canvas.draw() # draw the canvas, cache the renderer
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
return im
def plt_plot_heatmap(data,
shape,
rows,
cols,
title=None,
x_axis_label=None,
y_axis_label=None,
x_axis_values=None,
y_axis_values=None,
hide_axis=True,
vmin=None,
vmax=None):
res = []
shape = (max(2, ceil(shape[1] / 80 / cols)), max(2, ceil(shape[0] / 80 / rows)))
fig, ax = plt.subplots(1, 1, figsize=shape)
canvas = FigureCanvas(fig)
# for i in xrange(y.shape[0]):
# sns.heatmap(y[i], ax=ax, vmin=minn, vmax=maxx)
# canvas.draw() # draw the canvas, cache the renderer
#
# l, b, w, h = fig.bbox.bounds
# w, h = int(w), int(h)
# im = fromstring(canvas.tostring_rgb(), dtype='uint8')
# im.shape = h, w, 3
# res.append(im)
img = ax.imshow(
zeros((data.shape[1], data.shape[2])),
cmap='viridis',
vmin=vmin if vmin is not None else data.min(),
vmax=vmax if vmax is not None else data.max(),
interpolation='none',
aspect='auto'
)
# get rid of spines and fix range of axes, rotate x-axis labels
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
if hide_axis:
ax.set_xticks([])
ax.set_yticks([])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, hspace=0, wspace=0)
else:
if title is not None:
plt.title(title)
if x_axis_label is not None:
ax.set_xlabel(x_axis_label)
if y_axis_label is not None:
ax.set_ylabel(y_axis_label)
if x_axis_values is not None:
a = arange(0, x_axis_values.shape[0], 3) + 0.5
b = arange(x_axis_values.min(), x_axis_values.max() + 1.5, 1.5)
ax.set_xticks(a)
ax.set_xticklabels(b, rotation=90)
if y_axis_values is not None:
a = arange(0, y_axis_values.shape[0], 3) + 0.5
# c = roundup((y_axis_values.max() - y_axis_values.min()) / 11)
# b = arange(y_axis_values.min(), y_axis_values.max(), c)
b = linspace(y_axis_values.min(), y_axis_values.max(), num=10, dtype=int)
ax.set_yticks(a)
ax.set_yticklabels(b)
# for tick in ax.get_xticklabels():
# tick.set_rotation(90)
if not hide_axis:
divider = make_axes_locatable(ax)
# colorbar on the right of ax. Colorbar width in % of ax and space between them is defined by pad in inches
cax = divider.append_axes('right', size='5%', pad=0.07)
cb = fig.colorbar(img, cax=cax)
# remove colorbar frame/spines
cb.outline.set_visible(False)
# don't stop after each subfigure change
plt.show(block=False)
if not hide_axis:
fig.tight_layout()
canvas.draw() # draw the canvas, cache the renderer
# keep bg in memory
background = fig.canvas.copy_from_bbox(ax.bbox)
# start = time.time()
for i in xrange(data.shape[0]):
img.set_array(data[i])
# restore background
fig.canvas.restore_region(background)
ax.draw_artist(img)
# fill in the axes rectangle
fig.canvas.blit(ax.bbox)
# loop through array
# for i in xrange(data.shape[0]):
# time.sleep(0.005)
# img.set_array(data[i])
# canvas.draw()
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
res.append(im)
fig.clf()
plt.clf()
plt.close()
return array(res)
def plt_plot_filter(x, y, title, x_axis_label, y_axis_label, log_scale):
fig, ax = plt.subplots(1, 1, figsize=(4, 4))
canvas = FigureCanvas(fig)
x = arange(0, y.shape[0]) if x is None else x
if log_scale == 1:
ax.semilogy(x, y, lw=2)
else:
ax.plot(x, y, lw=2)
ax.set(xlabel=x_axis_label, ylabel=y_axis_label, title=title)
fig.tight_layout()
canvas.draw() # draw the canvas, cache the renderer
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
fig.clf()
plt.clf()
plt.close()
return im
def plt_plot_filters_blit(y, x, shape, rows, cols,
title=None,
x_axis_label=None,
y_axis_label=None,
log_scale=0,
hide_axis=False):
res = []
x = arange(0, y.shape[1]) if x is None else x
# if log_scale == 1:
# y = log(y)
# elif log_scale == 2:
# x = log(x)
# elif log_scale == 3:
# x = log(x)
# y = log(y)
shape = (max(2, ceil(shape[1] / 80 / cols)), max(2, ceil(shape[0] / 80 / rows)))
fig, ax = plt.subplots(1, 1, figsize=shape)
canvas = FigureCanvas(fig)
ax.set_xlim(min(x), max(x))
ax.set_ylim(y.min(), y.max())
if hide_axis:
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.95, hspace=0, wspace=0)
else:
if x_axis_label is not None:
ax.set_xlabel(x_axis_label)
if y_axis_label is not None:
ax.set_ylabel(y_axis_label)
if title is not None:
plt.title(title)
line, = ax.plot([], [], lw=2)
if not hide_axis:
fig.tight_layout()
canvas.draw() # draw the canvas, cache the renderer
# keep bg in memory
background = fig.canvas.copy_from_bbox(ax.bbox)
for i in xrange(y.shape[0]):
line.set_data(x, y[i])
# line.set_color()
# restore background
fig.canvas.restore_region(background)
# redraw just the points
ax.draw_artist(line)
# fill in the axes rectangle
fig.canvas.blit(ax.bbox)
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
res.append(im)
fig.clf()
plt.clf()
plt.close()
return array(res)
def plt_plot_filters_fast(y, x, shape, rows, cols,
title=None,
x_axis_label=None,
y_axis_label=None,
share_axes=True,
log_scale=0):
res = []
shape = (ceil(shape[1] / 80 / cols), ceil(shape[0] / 80 / rows))
fig, ax = plt.subplots(1, 1, figsize=shape)
canvas = FigureCanvas(fig)
# ax.set_aspect('equal')
if share_axes:
if x is not None:
min_x, max_x = min(x), max(x)
else:
min_x, max_x = 0, y.shape[1]
min_y, max_y = y.min(), y.max()
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
# ax.hold(True)
plt.subplots_adjust(left=0.185, bottom=0.125, right=0.98, top=0.98)
# plt.show(False)
# plt.draw()
# background = fig.canvas.copy_from_bbox(ax.bbox)
# points = ax.plot(x[0], linewidth=1)[0]
for i in xrange(y.shape[0]):
if x is not None:
if log_scale == 1:
ax.semilogy(x, y[i], linewidth=1)
else:
ax.plot(x, y[i], linewidth=1)
else:
if log_scale == 1:
ax.semilogy(y[i], linewidth=1)
else:
ax.plot(y[i], linewidth=1)
if x_axis_label is not None:
ax.set_xlabel(x_axis_label)
if y_axis_label is not None:
ax.set_ylabel(y_axis_label)
if title is not None:
plt.title(title)
# plt.autoscale(enable=True, axis='y', tight=True)
# plt.tight_layout()
# Turn off axes and set axes limits
# ax.axis('off')
canvas.draw() # draw the canvas, cache the renderer
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
res.append(im)
# ax.cla()
fig.clf()
return array(res)
def plt_plot_filters(x, y, shape, rows, cols,
selected_unit=None,
selected_unit_color=None,
title=None,
x_axis_label=None,
y_axis_label=None,
share_axes=True,
log_scale=0):
shape = (ceil(shape[1] / 80), ceil(shape[0] / 80))
fig = Figure(figsize=shape)
canvas = FigureCanvas(fig)
ax, highlighted_ax, right_ax, bottom_ax, curr, right, bottom = None, None, None, None, None, None, None
if selected_unit is not None:
row = selected_unit / cols
col = selected_unit % cols
curr = selected_unit
bottom = (selected_unit + cols) if row < rows - 1 else None
right = (selected_unit + 1) if col < cols - 1 else None
for i in xrange(x.shape[0]):
if share_axes:
ax = fig.add_subplot(rows, cols, (i + 1), axisbelow=False, sharex=ax, sharey=ax)
else:
ax = fig.add_subplot(rows, cols, (i + 1), axisbelow=False)
if y is not None:
if log_scale == 1:
ax.semilogy(y, x[i], linewidth=1)
else:
ax.plot(y, x[i], linewidth=1)
else:
if log_scale == 1:
ax.semilogy(x[i], linewidth=1)
else:
ax.plot(x[i], linewidth=1)
ax.set_xlim(left=0, right=x.shape[1] - 1)
ax.get_xaxis().set_visible(i >= ((rows - 1) * cols))
ax.get_yaxis().set_visible(i % cols == 0)
if i == curr:
highlighted_ax = ax
if i == bottom:
bottom_ax = ax
if i == right:
right_ax = ax
if x_axis_label is not None:
ax.set_xlabel(x_axis_label)
if y_axis_label is not None:
ax.set_ylabel(y_axis_label)
if highlighted_ax is not None:
for axis in ['top', 'bottom', 'left', 'right']:
highlighted_ax.spines[axis].set_linewidth(2.5)
highlighted_ax.spines[axis].set_color(selected_unit_color)
if bottom_ax is not None:
bottom_ax.spines['top'].set_linewidth(2)
bottom_ax.spines['top'].set_color(selected_unit_color)
if right_ax is not None:
right_ax.spines['left'].set_linewidth(2)
right_ax.spines['left'].set_color(selected_unit_color)
if title is not None:
fig.suptitle(title)
fig.tight_layout()
fig.subplots_adjust(hspace=0, wspace=0)
canvas.draw() # draw the canvas, cache the renderer
l, b, w, h = fig.bbox.bounds
w, h = int(w), int(h)
im = fromstring(canvas.tostring_rgb(), dtype='uint8')
im.shape = h, w, 3
return im
def cv2_read_file_rgb(filename):
'''Reads an image from file. Always returns (x,y,3)'''
im = cv2.imread(filename)
if len(im.shape) == 2:
# Upconvert single channel grayscale to color
im = im[:, :, newaxis]
if im.shape[2] == 1:
im = tile(im, (1, 1, 3))
if im.shape[2] > 3:
# Chop off transparency
im = im[:, :, :3]
return cv2.cvtColor(im, cv2.COLOR_BGR2RGB) # Convert native OpenCV BGR -> RGB
def crop_to_square(frame):
i_size, j_size = frame.shape[0], frame.shape[1]
if j_size > i_size:
# landscape
offset = (j_size - i_size) / 2
return frame[:, offset:offset + i_size, :]
else:
# portrait
offset = (i_size - j_size) / 2
return frame[offset:offset + j_size, :, :]
def cv2_imshow_rgb(window_name, img):
# Convert native OpenCV BGR -> RGB before displaying
cv2.imshow(window_name, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
def caffe_load_image(filename, color=True, as_uint=False):
'''
Copied from Caffe to simplify potential import problems.
Load an image converting from grayscale or alpha as needed.
Take
filename: string
color: flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Give
image: an image with type float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
'''
with WithTimer('imread', quiet=True):
if as_uint:
img = skimage.io.imread(filename)
else:
img = skimage.img_as_float(skimage.io.imread(filename)).astype(float32)
if img.ndim == 2:
img = img[:, :, newaxis]
if color:
img = tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def get_tiles_height_width(n_tiles, desired_width=None):
'''Get a height x width size that will fit n_tiles tiles.'''
if desired_width == None:
# square
width = int(ceil(sqrt(n_tiles)))
height = width
else:
assert isinstance(desired_width, int)
width = desired_width
height = int(ceil(float(n_tiles) / width))
return height, width
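# Illustrative examples (not part of the original file):
#     get_tiles_height_width(10)                   # -> (4, 4): a 4x4 grid fits 10 tiles
#     get_tiles_height_width(10, desired_width=5)  # -> (2, 5)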
def get_tiles_height_width_ratio(n_tiles, width_ratio=1.0):
'''Get a height x width size that will fit n_tiles tiles.'''
width = int(ceil(sqrt(n_tiles * width_ratio)))
return get_tiles_height_width(n_tiles, desired_width=width)
def tile_images_normalize(data, c01=False, boost_indiv=0.0, boost_gamma=1.0, single_tile=False, scale_range=1.0,
neg_pos_colors=None):
data = data.copy()
if single_tile:
# promote 2D image -> 3D batch (01 -> b01) or 3D image -> 4D batch (01c -> b01c OR c01 -> bc01)
data = data[newaxis]
if c01:
# Convert bc01 -> b01c
assert len(data.shape) == 4, 'expected bc01 data'
data = data.transpose(0, 2, 3, 1)
if neg_pos_colors:
neg_clr, pos_clr = neg_pos_colors
neg_clr = array(neg_clr).reshape((1, 3))
pos_clr = array(pos_clr).reshape((1, 3))
# Keep 0 at 0
data /= max(data.max(), -data.min()) + 1e-10 # Map data to [-1, 1]
# data += .5 * scale_range # now in [0, scale_range]
# assert data.min() >= 0
# assert data.max() <= scale_range
if len(data.shape) == 3:
data = data.reshape(data.shape + (1,))
assert data.shape[3] == 1, 'neg_pos_color only makes sense if color data is not provided (channels should be 1)'
data = dot((data > 0) * data, pos_clr) + dot((data < 0) * -data, neg_clr)
data -= data.min()
data *= scale_range / (data.max() + 1e-10)
# sqrt-scale (0->0, .1->.3, 1->1)
assert boost_indiv >= 0 and boost_indiv <= 1, 'boost_indiv out of range'
# print 'using boost_indiv:', boost_indiv
if boost_indiv > 0:
if len(data.shape) == 4:
mm = (data.max(-1).max(-1).max(-1) + 1e-10) ** -boost_indiv
else:
mm = (data.max(-1).max(-1) + 1e-10) ** -boost_indiv
data = (data.T * mm).T
if boost_gamma != 1.0:
data = data ** boost_gamma
# Promote single-channel data to 3 channel color
if len(data.shape) == 3:
# b01 -> b01c
data = tile(data[:, :, :, newaxis], 3)
return data
def tile_images_make_tiles(data, padsize=1, padval=0, hw=None, highlights=None):
if hw:
height, width = hw
else:
height, width = get_tiles_height_width(data.shape[0])
assert height * width >= data.shape[0], '{} rows x {} columns cannot fit {} tiles'.format(height, width,
data.shape[0])
# First iteration: one-way padding, no highlights
# padding = ((0, width*height - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
# data = pad(data, padding, mode='constant', constant_values=(padval, padval))
# Second iteration: padding with highlights
# padding = ((0, width*height - data.shape[0]), (padsize, padsize), (padsize, padsize)) + ((0, 0),) * (data.ndim - 3)
# print 'tile_images: data min,max =', data.min(), data.max()
# padder = SmartPadder()
##data = pad(data, padding, mode=jy_pad_fn)
# data = pad(data, padding, mode=padder.pad_function)
# print 'padder.calls =', padder.calls
# Third iteration: two-way padding with highlights
if highlights is not None:
assert len(highlights) == data.shape[0]
padding = ((0, width * height - data.shape[0]), (padsize, padsize), (padsize, padsize)) + ((0, 0),) * (
data.ndim - 3)
# First pad with constant vals
try:
len(padval)
except:
padval = tuple((padval,))
assert len(padval) in (1, 3), 'padval should be grayscale (len 1) or color (len 3)'
if len(padval) == 1:
data = pad(data, padding, mode='constant', constant_values=(padval, padval))
else:
data = pad(data, padding, mode='constant', constant_values=(0, 0))
for cc in (0, 1, 2):
# Replace 0s with proper color in each channel
data[:padding[0][0], :, :, cc] = padval[cc]
if padding[0][1] > 0:
data[-padding[0][1]:, :, :, cc] = padval[cc]
data[:, :padding[1][0], :, cc] = padval[cc]
if padding[1][1] > 0:
data[:, -padding[1][1]:, :, cc] = padval[cc]
data[:, :, :padding[2][0], cc] = padval[cc]
if padding[2][1] > 0:
data[:, :, -padding[2][1]:, cc] = padval[cc]
if highlights is not None:
# Then highlight if necessary
for ii, highlight in enumerate(highlights):
if highlight is not None:
data[ii, :padding[1][0], :, :] = highlight
if padding[1][1] > 0:
data[ii, -padding[1][1]:, :, :] = highlight
data[ii, :, :padding[2][0], :] = highlight
if padding[2][1] > 0:
data[ii, :, -padding[2][1]:, :] = highlight
# tile the filters into an image
data = data.reshape((height, width) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((height * data.shape[1], width * data.shape[3]) + data.shape[4:])
data = data[0:-padsize, 0:-padsize] # remove excess padding
return (height, width), data
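# Usage sketch (not part of the original file): tiling a hypothetical batch of twelve
# 8x8 grayscale patches into a single mosaic image. `get_some_patches` is a made-up
# stand-in for whatever produces the (12, 8, 8) array:
#
#     batch = get_some_patches()                             # shape (12, 8, 8)
#     batch = tile_images_normalize(batch)                   # -> (12, 8, 8, 3) in [0, 1]
#     (rows, cols), mosaic = tile_images_make_tiles(batch, padsize=1, padval=0.5)
#     saveimagesc('mosaic.png', mosaic)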
def to_255(vals_01):
'''Convert vals in [0,1] to [0,255]'''
try:
ret = [v * 255 for v in vals_01]
if type(vals_01) is tuple:
return tuple(ret)
else:
return ret
except TypeError:
# Not iterable (single int or float)
return vals_01 * 255
def ensure_uint255_and_resize_to_fit(img, out_max_shape,
shrink_interpolation=cv2.INTER_LINEAR,
grow_interpolation=cv2.INTER_NEAREST):
as_uint255 = ensure_uint255(img)
return resize_to_fit(as_uint255, out_max_shape,
dtype_out='uint8',
shrink_interpolation=shrink_interpolation,
grow_interpolation=grow_interpolation)
def ensure_uint255(arr):
'''If data is float, multiply by 255 and convert to uint8. Else leave as uint8.'''
if arr.dtype == 'uint8':
return arr
elif arr.dtype in ('float32', 'float64'):
# print 'extra check...'
# assert arr.max() <= 1.1
return array(arr * 255, dtype='uint8')
else:
raise Exception('ensure_uint255 expects uint8 or float input but got %s with range [%g,%g,].' % (
arr.dtype, arr.min(), arr.max()))
def ensure_float01(arr, dtype_preference='float32'):
'''If data is uint, convert to float and divide by 255. Else leave at float.'''
if arr.dtype == 'uint8':
# print 'extra check...'
# assert arr.max() <= 256
return array(arr, dtype=dtype_preference) / 255
elif arr.dtype in ('float32', 'float64'):
return arr
else:
raise Exception('ensure_float01 expects uint8 or float input but got %s with range [%g,%g,].' % (
arr.dtype, arr.min(), arr.max()))
def resize_to_fit(img, out_max_shape,
dtype_out=None,
shrink_interpolation=cv2.INTER_LINEAR,
grow_interpolation=cv2.INTER_NEAREST):
'''Resizes to fit within out_max_shape. If ratio is different,
returns an image that fits but is smaller along one of the two
dimensions.
If one of the out_max_shape dimensions is None, then use only the other dimension to perform resizing.
Timing info on MBP Retina with OpenBlas:
- conclusion: uint8 is always tied or faster. float64 is slower.
Scaling down:
In [79]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="uint8")').timeit(100)
Out[79]: 0.04950380325317383
In [77]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float32")').timeit(100)
Out[77]: 0.049156904220581055
In [76]: timeit.Timer('resize_to_fit(aa, (200,200))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float64")').timeit(100)
Out[76]: 0.11808204650878906
Scaling up:
In [68]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="uint8")').timeit(100)
Out[68]: 0.4357950687408447
In [70]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float32")').timeit(100)
Out[70]: 1.3411099910736084
In [73]: timeit.Timer('resize_to_fit(aa, (2000,2000))', setup='from kerasvis.app import resize_to_fit; import numpy as np; aa = array(np.random.uniform(0,255,(1000,1000,3)), dtype="float64")').timeit(100)
Out[73]: 2.6078310012817383
'''
if dtype_out is not None and img.dtype != dtype_out:
dtype_in_size = img.dtype.itemsize
dtype_out_size = dtype(dtype_out).itemsize
convert_early = (dtype_out_size < dtype_in_size)
convert_late = not convert_early
else:
convert_early = False
convert_late = False
if img.shape[0] == 0 and img.shape[1] == 0:
scale = 1
elif out_max_shape[0] is None or img.shape[0] == 0:
scale = float(out_max_shape[1]) / img.shape[1]
elif out_max_shape[1] is None or img.shape[1] == 0:
scale = float(out_max_shape[0]) / img.shape[0]
else:
scale = min(float(out_max_shape[0]) / img.shape[0],
float(out_max_shape[1]) / img.shape[1])
if convert_early:
img = array(img, dtype=dtype_out)
out = cv2.resize(img,
(int(img.shape[1] * scale), int(img.shape[0] * scale)), # in (c,r) order
interpolation=grow_interpolation if scale > 1 else shrink_interpolation)
if convert_late:
out = array(out, dtype=dtype_out)
return out
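# Illustrative example (not part of the original file): a 500x1000 RGB image with
# out_max_shape=(200, 200) is scaled by min(200/500., 200/1000.) = 0.2, giving a
# (100, 200, 3) result; out_max_shape=(None, 200) would constrain only the width.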
class FormattedString(object):
def __init__(self, string, defaults, face=None, fsize=None, clr=None, thick=None, align=None, width=None):
self.string = string
self.face = face if face else defaults['face']
self.fsize = fsize if fsize else defaults['fsize']
self.clr = clr if clr else defaults['clr']
self.thick = thick if thick else defaults['thick']
self.width = width # if None: calculate width automatically
self.align = align if align else defaults.get('align', 'left')
def cv2_typeset_text(data, lines, loc, between=' ', string_spacing=0, line_spacing=0, wrap=False):
    '''Typesets multiple strings on multiple lines of text, where each string may have its own formatting.
Given:
data: as in cv2.putText
loc: as in cv2.putText
lines: list of lists of FormattedString objects, may be modified by this function!
between: what to insert between each string on each line, ala str.join
string_spacing: extra spacing to insert between strings on a line
line_spacing: extra spacing to insert between lines
wrap: if true, wraps words to next line
Returns:
locy: new y location = loc[1] + y-offset resulting from lines of text
'''
data_width = data.shape[1]
# lines_modified = False
# lines = lines_in # will be deepcopied if modification is needed later
if isinstance(lines, FormattedString):
lines = [lines]
assert isinstance(lines,
list), 'lines must be a list of lines or list of FormattedString objects or a single FormattedString object'
if len(lines) == 0:
return loc[1]
if not isinstance(lines[0], list):
# If a single line of text is given as a list of strings, convert to multiline format
lines = [lines]
locy = loc[1]
line_num = 0
while line_num < len(lines):
line = lines[line_num]
maxy = 0
locx = loc[0]
for ii, fs in enumerate(line):
last_on_line = (ii == len(line) - 1)
if not last_on_line:
fs.string += between
boxsize, _ = cv2.getTextSize(fs.string, fs.face, fs.fsize, fs.thick)
if fs.width is not None:
if fs.align == 'right':
locx += fs.width - boxsize[0]
elif fs.align == 'center':
locx += (fs.width - boxsize[0]) / 2
# print 'right boundary is', locx + boxsize[0], '(%s)' % fs.string
# print 'HERE'
right_edge = locx + boxsize[0]
if wrap and ii > 0 and right_edge > data_width:
# Wrap rest of line to the next line
# if not lines_modified:
# lines = deepcopy(lines_in)
# lines_modified = True
new_this_line = line[:ii]
new_next_line = line[ii:]
lines[line_num] = new_this_line
lines.insert(line_num + 1, new_next_line)
break
###line_num += 1
###continue
cv2.putText(data, fs.string, (locx, locy), fs.face, fs.fsize, fs.clr, fs.thick)
maxy = max(maxy, boxsize[1])
if fs.width is not None:
if fs.align == 'right':
locx += boxsize[0]
elif fs.align == 'left':
locx += fs.width
elif fs.align == 'center':
locx += fs.width - (fs.width - boxsize[0]) / 2
else:
locx += boxsize[0]
locx += string_spacing
line_num += 1
locy += maxy + line_spacing
return locy
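# Usage sketch (not part of the original file): two differently colored strings typeset
# on one line of an image `frame` (a uint8 BGR array as used elsewhere in this file):
#
#     defaults = {'face': cv2.FONT_HERSHEY_SIMPLEX, 'fsize': 0.5,
#                 'clr': (255, 255, 255), 'thick': 1}
#     line = [FormattedString('fps:', defaults),
#             FormattedString('23.4', defaults, clr=(0, 255, 0))]
#     next_y = cv2_typeset_text(frame, [line], (10, 20), wrap=True)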
def saveimage(filename, im):
'''Saves an image with pixel values in [0,1]'''
# matplotlib.image.imsave(filename, im)
if len(im.shape) == 3:
# Reverse RGB to OpenCV BGR order for color images
cv2.imwrite(filename, 255 * im[:, :, ::-1])
else:
cv2.imwrite(filename, 255 * im)
def saveimagesc(filename, im):
saveimage(filename, norm01(im))
def saveimagescc(filename, im, center):
saveimage(filename, norm01c(im, center))
|
[
"numpy.sqrt",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.dot",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.dtype",
"numpy.tile",
"numpy.ceil",
"cv2.putText",
"skimage.io.imread",
"cv2.cvtColor",
"matplotlib.pyplot.title",
"cv2.getTextSize",
"cv2.imread",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"cv2.imwrite",
"misc.WithTimer",
"matplotlib.figure.Figure",
"matplotlib.pyplot.clf",
"numpy.zeros",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.expand_dims",
"numpy.pad",
"matplotlib.pyplot.subplots"
] |
[((1017, 1036), 'numpy.array', 'array', (['arr', '"""uint8"""'], {}), "(arr, 'uint8')\n", (1022, 1036), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((1633, 1655), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1639, 1655), False, 'from matplotlib.figure import Figure\n'), ((1669, 1686), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (1681, 1686), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((1941, 1953), 'numpy.arange', 'arange', (['s', 'e'], {}), '(s, e)\n', (1947, 1953), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((3556, 3589), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'shape'}), '(1, 1, figsize=shape)\n', (3568, 3589), True, 'import matplotlib.pyplot as plt\n'), ((3603, 3620), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (3615, 3620), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((6141, 6162), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (6149, 6162), True, 'import matplotlib.pyplot as plt\n'), ((6985, 6994), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6992, 6994), True, 'import matplotlib.pyplot as plt\n'), ((6999, 7010), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7008, 7010), True, 'import matplotlib.pyplot as plt\n'), ((7022, 7032), 'numpy.array', 'array', (['res'], {}), '(res)\n', (7027, 7032), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((7122, 7156), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4, 4)'}), '(1, 1, figsize=(4, 4))\n', (7134, 7156), True, 'import matplotlib.pyplot as plt\n'), ((7170, 7187), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (7182, 7187), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((7637, 7646), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7644, 7646), True, 'import matplotlib.pyplot as plt\n'), ((7651, 7662), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7660, 7662), True, 'import matplotlib.pyplot as plt\n'), ((8267, 8300), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'shape'}), '(1, 1, figsize=shape)\n', (8279, 8300), True, 'import matplotlib.pyplot as plt\n'), ((8314, 8331), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (8326, 8331), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((9520, 9529), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9527, 9529), True, 'import matplotlib.pyplot as plt\n'), ((9534, 9545), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9543, 9545), True, 'import matplotlib.pyplot as plt\n'), ((9557, 9567), 'numpy.array', 'array', (['res'], {}), '(res)\n', (9562, 9567), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((9928, 9961), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'shape'}), '(1, 1, figsize=shape)\n', (9940, 9961), True, 
'import matplotlib.pyplot as plt\n'), ((9975, 9992), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (9987, 9992), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((10298, 10365), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.185)', 'bottom': '(0.125)', 'right': '(0.98)', 'top': '(0.98)'}), '(left=0.185, bottom=0.125, right=0.98, top=0.98)\n', (10317, 10365), True, 'import matplotlib.pyplot as plt\n'), ((11525, 11535), 'numpy.array', 'array', (['res'], {}), '(res)\n', (11530, 11535), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((11923, 11944), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': 'shape'}), '(figsize=shape)\n', (11929, 11944), False, 'from matplotlib.figure import Figure\n'), ((11958, 11975), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (11970, 11975), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((14352, 14372), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (14362, 14372), False, 'import cv2\n'), ((14636, 14671), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (14648, 14671), False, 'import cv2\n'), ((1146, 1172), 'cv2.imwrite', 'cv2.imwrite', (['saveto', 'frame'], {}), '(saveto, frame)\n', (1157, 1172), False, 'import cv2\n'), ((1338, 1360), 'numpy.tile', 'tile', (['frame', '(1, 1, 3)'], {}), '(frame, (1, 1, 3))\n', (1342, 1360), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((1746, 1771), 'numpy.expand_dims', 'expand_dims', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (1757, 1771), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((4002, 4039), 'numpy.zeros', 'zeros', (['(data.shape[1], data.shape[2])'], {}), '((data.shape[1], data.shape[2]))\n', (4007, 4039), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((5770, 5793), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (5789, 5793), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((7196, 7217), 'numpy.arange', 'arange', (['(0)', 'y.shape[0]'], {}), '(0, y.shape[0])\n', (7202, 7217), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((7962, 7983), 'numpy.arange', 'arange', (['(0)', 'y.shape[1]'], {}), '(0, y.shape[1])\n', (7968, 7983), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((8783, 8799), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (8792, 8799), True, 'import matplotlib.pyplot as plt\n'), ((9858, 9884), 'numpy.ceil', 'ceil', (['(shape[1] / 80 / cols)'], {}), '(shape[1] / 80 / cols)\n', (9862, 9884), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((9886, 9912), 'numpy.ceil', 'ceil', (['(shape[0] / 80 / rows)'], {}), '(shape[0] / 80 / rows)\n', (9890, 9912), False, 'from numpy import arange, array, newaxis, tile, 
linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((11871, 11890), 'numpy.ceil', 'ceil', (['(shape[1] / 80)'], {}), '(shape[1] / 80)\n', (11875, 11890), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((11892, 11911), 'numpy.ceil', 'ceil', (['(shape[0] / 80)'], {}), '(shape[0] / 80)\n', (11896, 11911), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((14523, 14542), 'numpy.tile', 'tile', (['im', '(1, 1, 3)'], {}), '(im, (1, 1, 3))\n', (14527, 14542), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((15167, 15203), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (15179, 15203), False, 'import cv2\n'), ((15726, 15757), 'misc.WithTimer', 'WithTimer', (['"""imread"""'], {'quiet': '(True)'}), "('imread', quiet=True)\n", (15735, 15757), False, 'from misc import WithTimer\n'), ((18568, 18599), 'numpy.tile', 'tile', (['data[:, :, :, newaxis]', '(3)'], {}), '(data[:, :, :, newaxis], 3)\n', (18572, 18599), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((20200, 20269), 'numpy.pad', 'pad', (['data', 'padding'], {'mode': '"""constant"""', 'constant_values': '(padval, padval)'}), "(data, padding, mode='constant', constant_values=(padval, padval))\n", (20203, 20269), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((20295, 20354), 'numpy.pad', 'pad', (['data', 'padding'], {'mode': '"""constant"""', 'constant_values': '(0, 0)'}), "(data, padding, mode='constant', constant_values=(0, 0))\n", (20298, 20354), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((26336, 26363), 'numpy.array', 'array', (['img'], {'dtype': 'dtype_out'}), '(img, dtype=dtype_out)\n', (26341, 26363), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((26615, 26642), 'numpy.array', 'array', (['out'], {'dtype': 'dtype_out'}), '(out, dtype=dtype_out)\n', (26620, 26642), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((30594, 30637), 'cv2.imwrite', 'cv2.imwrite', (['filename', '(255 * im[:, :, ::-1])'], {}), '(filename, 255 * im[:, :, ::-1])\n', (30605, 30637), False, 'import cv2\n'), ((30656, 30687), 'cv2.imwrite', 'cv2.imwrite', (['filename', '(255 * im)'], {}), '(filename, 255 * im)\n', (30667, 30687), False, 'import cv2\n'), ((1862, 1891), 'numpy.linspace', 'linspace', (['(0)', '(1)', 'data.shape[1]'], {}), '(0, 1, data.shape[1])\n', (1870, 1891), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((3477, 3503), 'numpy.ceil', 'ceil', (['(shape[1] / 80 / cols)'], {}), '(shape[1] / 80 / cols)\n', (3481, 3503), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((3513, 3539), 'numpy.ceil', 'ceil', (['(shape[0] / 80 / rows)'], {}), '(shape[0] / 80 / rows)\n', 
(3517, 3539), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((4840, 4856), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4849, 4856), True, 'import matplotlib.pyplot as plt\n'), ((8188, 8214), 'numpy.ceil', 'ceil', (['(shape[1] / 80 / cols)'], {}), '(shape[1] / 80 / cols)\n', (8192, 8214), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((8224, 8250), 'numpy.ceil', 'ceil', (['(shape[0] / 80 / rows)'], {}), '(shape[0] / 80 / rows)\n', (8228, 8250), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((11062, 11078), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (11071, 11078), True, 'import matplotlib.pyplot as plt\n'), ((15797, 15824), 'skimage.io.imread', 'skimage.io.imread', (['filename'], {}), '(filename)\n', (15814, 15824), False, 'import skimage\n'), ((16014, 16034), 'numpy.tile', 'tile', (['img', '(1, 1, 3)'], {}), '(img, (1, 1, 3))\n', (16018, 16034), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((16651, 16678), 'numpy.sqrt', 'sqrt', (['(n_tiles * width_ratio)'], {}), '(n_tiles * width_ratio)\n', (16655, 16678), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((17848, 17879), 'numpy.dot', 'dot', (['((data > 0) * data)', 'pos_clr'], {}), '((data > 0) * data, pos_clr)\n', (17851, 17879), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((17882, 17914), 'numpy.dot', 'dot', (['((data < 0) * -data)', 'neg_clr'], {}), '((data < 0) * -data, neg_clr)\n', (17885, 17914), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((22799, 22830), 'numpy.array', 'array', (['(arr * 255)'], {'dtype': '"""uint8"""'}), "(arr * 255, dtype='uint8')\n", (22804, 22830), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((23243, 23277), 'numpy.array', 'array', (['arr'], {'dtype': 'dtype_preference'}), '(arr, dtype=dtype_preference)\n', (23248, 23277), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((25687, 25703), 'numpy.dtype', 'dtype', (['dtype_out'], {}), '(dtype_out)\n', (25692, 25703), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((28793, 28848), 'cv2.getTextSize', 'cv2.getTextSize', (['fs.string', 'fs.face', 'fs.fsize', 'fs.thick'], {}), '(fs.string, fs.face, fs.fsize, fs.thick)\n', (28808, 28848), False, 'import cv2\n'), ((29785, 29864), 'cv2.putText', 'cv2.putText', (['data', 'fs.string', '(locx, locy)', 'fs.face', 'fs.fsize', 'fs.clr', 'fs.thick'], {}), '(data, fs.string, (locx, locy), fs.face, fs.fsize, fs.clr, fs.thick)\n', (29796, 29864), False, 'import cv2\n'), ((5068, 5104), 'numpy.arange', 'arange', (['(0)', 'x_axis_values.shape[0]', '(3)'], {}), '(0, x_axis_values.shape[0], 3)\n', (5074, 5104), False, 'from numpy import arange, array, newaxis, 
tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((5318, 5354), 'numpy.arange', 'arange', (['(0)', 'y_axis_values.shape[0]', '(3)'], {}), '(0, y_axis_values.shape[0], 3)\n', (5324, 5354), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((16302, 16315), 'numpy.sqrt', 'sqrt', (['n_tiles'], {}), '(n_tiles)\n', (16306, 16315), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((17311, 17325), 'numpy.array', 'array', (['neg_clr'], {}), '(neg_clr)\n', (17316, 17325), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((17360, 17374), 'numpy.array', 'array', (['pos_clr'], {}), '(pos_clr)\n', (17365, 17374), False, 'from numpy import arange, array, newaxis, tile, linspace, pad, expand_dims, fromstring, ceil, dtype, float32, sqrt, dot, zeros\n'), ((15878, 15905), 'skimage.io.imread', 'skimage.io.imread', (['filename'], {}), '(filename)\n', (15895, 15905), False, 'import skimage\n')]
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains a collection of graph theory routines used mainly
to reorder matrices for iterative steady state solvers.
"""
__all__ = ['graph_degree', 'column_permutation', 'breadth_first_search',
'reverse_cuthill_mckee', 'maximum_bipartite_matching',
'weighted_bipartite_matching']
import numpy as np
import scipy.sparse as sp
from qutip.cy.graph_utils import (
_breadth_first_search, _node_degrees,
_reverse_cuthill_mckee, _maximum_bipartite_matching,
_weighted_bipartite_matching)
def graph_degree(A):
"""
Returns the degree for the nodes (rows) of a symmetric
graph in sparse CSR or CSC format, or a qobj.
Parameters
----------
A : qobj, csr_matrix, csc_matrix
Input quantum object or csr_matrix.
Returns
-------
degree : array
Array of integers giving the degree for each node (row).
"""
if not (sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A)):
raise TypeError('Input must be CSC or CSR sparse matrix.')
return _node_degrees(A.indices, A.indptr, A.shape[0])
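# Illustrative sketch (not part of the original module): for the symmetric 3-node path
# graph 0 - 1 - 2 the expected degrees are [1, 2, 1]:
#
#     A = sp.csr_matrix(np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]))
#     graph_degree(A)          # -> array([1, 2, 1])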
def breadth_first_search(A, start):
"""
Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting
from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs.
This function requires a matrix with symmetric structure.
Use A+trans(A) if original matrix is not symmetric or not sure.
Parameters
----------
A : csc_matrix, csr_matrix
Input graph in CSC or CSR matrix format
start : int
        Starting node for BFS traversal.
Returns
-------
order : array
Order in which nodes are traversed from starting node.
levels : array
Level of the nodes in the order that they are traversed.
"""
if not (sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A)):
raise TypeError('Input must be CSC or CSR sparse matrix.')
num_rows = A.shape[0]
start = int(start)
order, levels = _breadth_first_search(A.indices, A.indptr, num_rows, start)
    # since maybe not all nodes are reached by the search, check for unused
    # entries in the arrays
return order[order != -1], levels[levels != -1]
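# Illustrative sketch (not part of the original module): a breadth-first traversal of
# the same 3-node path graph starting from node 0 should visit the nodes in the order
# [0, 1, 2] at levels [0, 1, 2]:
#
#     A = sp.csr_matrix(np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]))
#     order, levels = breadth_first_search(A, 0)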
def column_permutation(A):
"""
Finds the non-symmetric column permutation of A such that the columns
are given in ascending order according to the number of nonzero entries.
This is sometimes useful for decreasing the fill-in of sparse LU
factorization.
Parameters
----------
A : csc_matrix
Input sparse CSC sparse matrix.
Returns
-------
perm : array
        Array of permuted column indices.
"""
if not sp.isspmatrix_csc(A):
A = sp.csc_matrix(A)
count = np.diff(A.indptr)
perm = np.argsort(count)
return perm
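

# Usage sketch (illustrative; the assumed input is a CSC matrix whose columns
# hold 2, 1 and 1 nonzeros, so the sparser columns are ordered first):
#
#     A = sp.csc_matrix(np.array([[0, 1, 1],
#                                 [1, 0, 0],
#                                 [1, 0, 0]]))
#     column_permutation(A)   # e.g. array([1, 2, 0])

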
def reverse_cuthill_mckee(A, sym=False):
"""
Returns the permutation array that orders a sparse CSR or CSC matrix
in Reverse-Cuthill McKee ordering. Since the input matrix must be
symmetric, this routine works on the matrix A+Trans(A) if the sym flag is
set to False (Default).
It is assumed by default (*sym=False*) that the input matrix is not
symmetric. This is because it is faster to do A+Trans(A) than it is to
check for symmetry for a generic matrix. If you are guaranteed that the
matrix is symmetric in structure (values of matrix element do not matter)
then set *sym=True*
Parameters
----------
A : csc_matrix, csr_matrix
Input sparse CSC or CSR sparse matrix format.
sym : bool {False, True}
Flag to set whether input matrix is symmetric.
Returns
-------
perm : array
Array of permuted row and column indices.
Notes
-----
This routine is used primarily for internal reordering of Lindblad
superoperators for use in iterative solver routines.
References
----------
<NAME> and <NAME>, "Reducing the Bandwidth of Sparse Symmetric
Matrices", ACM '69 Proceedings of the 1969 24th national conference,
(1969).
"""
if not (sp.isspmatrix_csc(A) or sp.isspmatrix_csr(A)):
raise TypeError('Input must be CSC or CSR sparse matrix.')
nrows = A.shape[0]
if not sym:
A = A + A.transpose()
return _reverse_cuthill_mckee(A.indices, A.indptr, nrows)
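

# Usage sketch (illustrative): the returned permutation is typically applied
# symmetrically to rows and columns to reduce the bandwidth, e.g.
#
#     perm = reverse_cuthill_mckee(A)
#     A_rcm = A[perm, :][:, perm]

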
def maximum_bipartite_matching(A, perm_type='row'):
"""
Returns an array of row or column permutations that removes nonzero
elements from the diagonal of a nonsingular square CSC sparse matrix. Such
a permutation is always possible provided that the matrix is nonsingular.
This function looks at the structure of the matrix only.
The input matrix will be converted to CSC matrix format if
necessary.
Parameters
----------
A : sparse matrix
Input matrix
perm_type : str {'row', 'column'}
Type of permutation to generate.
Returns
-------
perm : array
Array of row or column permutations.
Notes
-----
This function relies on a maximum cardinality bipartite matching algorithm
    based on a breadth-first search (BFS) of the underlying graph [1]_.
References
----------
<NAME>, <NAME>, and <NAME>, "Design, Implementation, and
Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw.
38, no. 2, (2011).
"""
nrows = A.shape[0]
if A.shape[0] != A.shape[1]:
raise ValueError(
'Maximum bipartite matching requires a square matrix.')
if sp.isspmatrix_csr(A) or sp.isspmatrix_coo(A):
A = A.tocsc()
elif not sp.isspmatrix_csc(A):
raise TypeError("matrix must be in CSC, CSR, or COO format.")
if perm_type == 'column':
A = A.transpose().tocsc()
perm = _maximum_bipartite_matching(A.indices, A.indptr, nrows)
if np.any(perm == -1):
raise Exception('Possibly singular input matrix.')
return perm
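

# Usage sketch (illustrative; the assumed input is a nonsingular matrix with a
# zero diagonal, for which swapping the two rows yields a zero-free diagonal;
# the shown output is indicative only):
#
#     A = sp.csc_matrix(np.array([[0.0, 1.0],
#                                 [1.0, 0.0]]))
#     maximum_bipartite_matching(A)   # e.g. array([1, 0])

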
def weighted_bipartite_matching(A, perm_type='row'):
"""
Returns an array of row permutations that attempts to maximize
the product of the ABS values of the diagonal elements in
a nonsingular square CSC sparse matrix. Such a permutation is
always possible provided that the matrix is nonsingular.
This function looks at both the structure and ABS values of the
underlying matrix.
Parameters
----------
A : csc_matrix
Input matrix
perm_type : str {'row', 'column'}
Type of permutation to generate.
Returns
-------
perm : array
Array of row or column permutations.
Notes
-----
This function uses a weighted maximum cardinality bipartite matching
algorithm based on breadth-first search (BFS). The columns are weighted
according to the element of max ABS value in the associated rows and
are traversed in descending order by weight. When performing the BFS
traversal, the row associated to a given column is the one with maximum
    weight. Unlike other techniques [1]_, this algorithm does not guarantee the
product of the diagonal is maximized. However, this limitation is offset
by the substantially faster runtime of this method.
References
----------
<NAME> and <NAME>, "The design and use of algorithms for
permuting large entries to the diagonal of sparse matrices", SIAM J.
Matrix Anal. and Applics. 20, no. 4, 889 (1997).
"""
nrows = A.shape[0]
if A.shape[0] != A.shape[1]:
        raise ValueError(
            'weighted_bipartite_matching requires a square matrix.')
if sp.isspmatrix_csr(A) or sp.isspmatrix_coo(A):
A = A.tocsc()
elif not sp.isspmatrix_csc(A):
raise TypeError("matrix must be in CSC, CSR, or COO format.")
if perm_type == 'column':
A = A.transpose().tocsc()
perm = _weighted_bipartite_matching(
np.asarray(np.abs(A.data), dtype=float),
A.indices, A.indptr, nrows)
if np.any(perm == -1):
raise Exception('Possibly singular input matrix.')
return perm
|
[
"scipy.sparse.isspmatrix_csc",
"scipy.sparse.isspmatrix_csr",
"numpy.abs",
"qutip.cy.graph_utils._maximum_bipartite_matching",
"qutip.cy.graph_utils._breadth_first_search",
"qutip.cy.graph_utils._node_degrees",
"numpy.diff",
"qutip.cy.graph_utils._reverse_cuthill_mckee",
"numpy.any",
"numpy.argsort",
"scipy.sparse.csc_matrix",
"scipy.sparse.isspmatrix_coo"
] |
[((2836, 2882), 'qutip.cy.graph_utils._node_degrees', '_node_degrees', (['A.indices', 'A.indptr', 'A.shape[0]'], {}), '(A.indices, A.indptr, A.shape[0])\n', (2849, 2882), False, 'from qutip.cy.graph_utils import _breadth_first_search, _node_degrees, _reverse_cuthill_mckee, _maximum_bipartite_matching, _weighted_bipartite_matching\n'), ((3777, 3836), 'qutip.cy.graph_utils._breadth_first_search', '_breadth_first_search', (['A.indices', 'A.indptr', 'num_rows', 'start'], {}), '(A.indices, A.indptr, num_rows, start)\n', (3798, 3836), False, 'from qutip.cy.graph_utils import _breadth_first_search, _node_degrees, _reverse_cuthill_mckee, _maximum_bipartite_matching, _weighted_bipartite_matching\n'), ((4528, 4545), 'numpy.diff', 'np.diff', (['A.indptr'], {}), '(A.indptr)\n', (4535, 4545), True, 'import numpy as np\n'), ((4557, 4574), 'numpy.argsort', 'np.argsort', (['count'], {}), '(count)\n', (4567, 4574), True, 'import numpy as np\n'), ((6063, 6113), 'qutip.cy.graph_utils._reverse_cuthill_mckee', '_reverse_cuthill_mckee', (['A.indices', 'A.indptr', 'nrows'], {}), '(A.indices, A.indptr, nrows)\n', (6085, 6113), False, 'from qutip.cy.graph_utils import _breadth_first_search, _node_degrees, _reverse_cuthill_mckee, _maximum_bipartite_matching, _weighted_bipartite_matching\n'), ((7566, 7621), 'qutip.cy.graph_utils._maximum_bipartite_matching', '_maximum_bipartite_matching', (['A.indices', 'A.indptr', 'nrows'], {}), '(A.indices, A.indptr, nrows)\n', (7593, 7621), False, 'from qutip.cy.graph_utils import _breadth_first_search, _node_degrees, _reverse_cuthill_mckee, _maximum_bipartite_matching, _weighted_bipartite_matching\n'), ((7630, 7648), 'numpy.any', 'np.any', (['(perm == -1)'], {}), '(perm == -1)\n', (7636, 7648), True, 'import numpy as np\n'), ((9751, 9769), 'numpy.any', 'np.any', (['(perm == -1)'], {}), '(perm == -1)\n', (9757, 9769), True, 'import numpy as np\n'), ((4465, 4485), 'scipy.sparse.isspmatrix_csc', 'sp.isspmatrix_csc', (['A'], {}), '(A)\n', (4482, 4485), True, 'import scipy.sparse as sp\n'), ((4499, 4515), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['A'], {}), '(A)\n', (4512, 4515), True, 'import scipy.sparse as sp\n'), ((7316, 7336), 'scipy.sparse.isspmatrix_csr', 'sp.isspmatrix_csr', (['A'], {}), '(A)\n', (7333, 7336), True, 'import scipy.sparse as sp\n'), ((7340, 7360), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['A'], {}), '(A)\n', (7357, 7360), True, 'import scipy.sparse as sp\n'), ((9353, 9373), 'scipy.sparse.isspmatrix_csr', 'sp.isspmatrix_csr', (['A'], {}), '(A)\n', (9370, 9373), True, 'import scipy.sparse as sp\n'), ((9377, 9397), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['A'], {}), '(A)\n', (9394, 9397), True, 'import scipy.sparse as sp\n'), ((2711, 2731), 'scipy.sparse.isspmatrix_csc', 'sp.isspmatrix_csc', (['A'], {}), '(A)\n', (2728, 2731), True, 'import scipy.sparse as sp\n'), ((2735, 2755), 'scipy.sparse.isspmatrix_csr', 'sp.isspmatrix_csr', (['A'], {}), '(A)\n', (2752, 2755), True, 'import scipy.sparse as sp\n'), ((3593, 3613), 'scipy.sparse.isspmatrix_csc', 'sp.isspmatrix_csc', (['A'], {}), '(A)\n', (3610, 3613), True, 'import scipy.sparse as sp\n'), ((3617, 3637), 'scipy.sparse.isspmatrix_csr', 'sp.isspmatrix_csr', (['A'], {}), '(A)\n', (3634, 3637), True, 'import scipy.sparse as sp\n'), ((5866, 5886), 'scipy.sparse.isspmatrix_csc', 'sp.isspmatrix_csc', (['A'], {}), '(A)\n', (5883, 5886), True, 'import scipy.sparse as sp\n'), ((5890, 5910), 'scipy.sparse.isspmatrix_csr', 'sp.isspmatrix_csr', (['A'], {}), '(A)\n', (5907, 5910), True, 'import 
scipy.sparse as sp\n'), ((7397, 7417), 'scipy.sparse.isspmatrix_csc', 'sp.isspmatrix_csc', (['A'], {}), '(A)\n', (7414, 7417), True, 'import scipy.sparse as sp\n'), ((9434, 9454), 'scipy.sparse.isspmatrix_csc', 'sp.isspmatrix_csc', (['A'], {}), '(A)\n', (9451, 9454), True, 'import scipy.sparse as sp\n'), ((9664, 9678), 'numpy.abs', 'np.abs', (['A.data'], {}), '(A.data)\n', (9670, 9678), True, 'import numpy as np\n')]
|
import click
import pickle
import numpy as np
from collections import defaultdict
from utils import reset_seeds, get_dataset, load_embeddings
from mlp_multilabel_wrapper import PowersetKerasWrapper, MultiOutputKerasWrapper
from mlp_utils import CrossLabelDependencyLoss
def get_random_sample(dataset_name='bbc', train_frac=0.25):
# get model runner specific dataset
_, _, y_train, y_test = get_dataset(dataset_name)
X_train, X_test = load_embeddings(dataset_name)
grps = y_train.apply(lambda v: ''.join(map(str, v)), axis=1).to_frame(0).groupby(0)[0]
train_idx = grps.apply(lambda g: g.sample(frac=train_frac)).index.get_level_values(1)
X_train_sample = X_train.loc[train_idx, :]
y_train_sample = y_train.loc[train_idx, :]
return X_train_sample, X_test, y_train_sample, y_test
def _get_label_set(y):
return set(y.apply(lambda v: ''.join(map(str, v)), axis=1).values)
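

# Illustrative note (assumed example): for a binary label frame whose rows are
# [1, 0, 1] and [0, 1, 1], _get_label_set returns the string-encoded label
# combinations {'101', '011'}.

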
@click.command()
@click.option('--n-samples', default=10)
@click.option('--dataset-name', default='moral-dataset-MeToo')
def run(n_samples, dataset_name):
mlp_cld_bootstrap_results = defaultdict(lambda: defaultdict(list))
mlp_powerset_bootstrap_results = defaultdict(lambda: defaultdict(list))
mlp_labels_bootstrap_results = defaultdict(lambda: defaultdict(list))
reset_seeds()
for i in range(n_samples):
print('Running bootstrap sample: {}'.format(i + 1))
for f in np.arange(0.1, 1.1, 0.1):
X_train, X_test, y_train, y_test = get_random_sample(dataset_name, train_frac=f)
print('Training set size: {}'.format(X_train.shape))
print('Test set size: {}'.format(X_test.shape))
mlp_powerset_model = PowersetKerasWrapper(columns=y_train.columns)
mlp_powerset_model.fit(X_train.values, y_train.values)
y_pred_mlp = mlp_powerset_model.predict(X_test.values)
mlp_powerset_bootstrap_results[i][f].append(y_pred_mlp)
cld_loss = CrossLabelDependencyLoss(alpha=0.2)
mlp_cld_model = MultiOutputKerasWrapper(columns=y_train.columns, loss=cld_loss)
mlp_cld_model.fit(X_train.values, y_train.values)
y_pred_cld = mlp_cld_model.predict(X_test.values)
mlp_cld_bootstrap_results[i][f].append(y_pred_cld)
mlp_labels_bootstrap_results[i][f].append((_get_label_set(y_train), _get_label_set(y_test)))
with open('training_size_bootstrap_{}.pkl'.format(dataset_name), 'wb') as f:
pickle.dump({'cld': dict(mlp_cld_bootstrap_results),
'powerset': dict(mlp_powerset_bootstrap_results),
'labels': dict(mlp_labels_bootstrap_results)}, f)
if __name__ == '__main__':
run()
|
[
"mlp_multilabel_wrapper.MultiOutputKerasWrapper",
"click.option",
"utils.get_dataset",
"mlp_utils.CrossLabelDependencyLoss",
"collections.defaultdict",
"mlp_multilabel_wrapper.PowersetKerasWrapper",
"utils.load_embeddings",
"utils.reset_seeds",
"click.command",
"numpy.arange"
] |
[((912, 927), 'click.command', 'click.command', ([], {}), '()\n', (925, 927), False, 'import click\n'), ((929, 968), 'click.option', 'click.option', (['"""--n-samples"""'], {'default': '(10)'}), "('--n-samples', default=10)\n", (941, 968), False, 'import click\n'), ((970, 1031), 'click.option', 'click.option', (['"""--dataset-name"""'], {'default': '"""moral-dataset-MeToo"""'}), "('--dataset-name', default='moral-dataset-MeToo')\n", (982, 1031), False, 'import click\n'), ((400, 425), 'utils.get_dataset', 'get_dataset', (['dataset_name'], {}), '(dataset_name)\n', (411, 425), False, 'from utils import reset_seeds, get_dataset, load_embeddings\n'), ((448, 477), 'utils.load_embeddings', 'load_embeddings', (['dataset_name'], {}), '(dataset_name)\n', (463, 477), False, 'from utils import reset_seeds, get_dataset, load_embeddings\n'), ((1292, 1305), 'utils.reset_seeds', 'reset_seeds', ([], {}), '()\n', (1303, 1305), False, 'from utils import reset_seeds, get_dataset, load_embeddings\n'), ((1414, 1438), 'numpy.arange', 'np.arange', (['(0.1)', '(1.1)', '(0.1)'], {}), '(0.1, 1.1, 0.1)\n', (1423, 1438), True, 'import numpy as np\n'), ((1118, 1135), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1129, 1135), False, 'from collections import defaultdict\n'), ((1194, 1211), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1205, 1211), False, 'from collections import defaultdict\n'), ((1268, 1285), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1279, 1285), False, 'from collections import defaultdict\n'), ((1693, 1738), 'mlp_multilabel_wrapper.PowersetKerasWrapper', 'PowersetKerasWrapper', ([], {'columns': 'y_train.columns'}), '(columns=y_train.columns)\n', (1713, 1738), False, 'from mlp_multilabel_wrapper import PowersetKerasWrapper, MultiOutputKerasWrapper\n'), ((1965, 2000), 'mlp_utils.CrossLabelDependencyLoss', 'CrossLabelDependencyLoss', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1989, 2000), False, 'from mlp_utils import CrossLabelDependencyLoss\n'), ((2029, 2092), 'mlp_multilabel_wrapper.MultiOutputKerasWrapper', 'MultiOutputKerasWrapper', ([], {'columns': 'y_train.columns', 'loss': 'cld_loss'}), '(columns=y_train.columns, loss=cld_loss)\n', (2052, 2092), False, 'from mlp_multilabel_wrapper import PowersetKerasWrapper, MultiOutputKerasWrapper\n')]
|
from __future__ import division
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import sys
import os
import time
#
# TORCH INSTALLATION: refer to https://pytorch.org/get-started/locally/
#
def update_progress(job_title, progress):
length = 20 # modify this to change the length
block = int(round(length*progress))
msg = "\r{0}: [{1}] {2}%".format(job_title, "#"*block + "-"*(length-block), round(progress*100, 2))
if progress >= 1: msg += " DONE\r\n"
sys.stdout.write(msg)
sys.stdout.flush()
def cls():
os.system('cls' if os.name=='nt' else 'clear')
cls()
################################################################################################################
# Initialize torch tensor for coordinates
coords_data = [[ 0.0 , 0.0 , 0.0 ],
[ 1.0/(2.0**0.5), 0.0 , 1.0/(2.0**0.5)],
[ 1.0/(2.0**0.5), 0.0 ,-1.0/(2.0**0.5)],
[ 2.0**0.5 , 0.0 , 0.0 ],
[ 0.0 , 1.0 , 0.0 ],
[ 1.0/(2.0**0.5), 1.0 , 1.0/(2.0**0.5)],
[ 1.0/(2.0**0.5), 1.0 ,-1.0/(2.0**0.5)],
[ 2.0**0.5 , 1.0 , 0.0 ],
]
coords = torch.tensor(coords_data,requires_grad=True,dtype=torch.float64)
nnodes_r = coords.size(0)
nnodes_ie = 8
nnodes_if = 4
nterms_s = 8
ndirs = 3
coord_sys = 'CARTESIAN'
# Define matrix of polynomial basis terms at support nodes
val_r_data = [[ 1.0,-1.0,-1.0,-1.0, 1.0, 1.0, 1.0,-1.0],
[ 1.0,-1.0,-1.0, 1.0,-1.0,-1.0, 1.0, 1.0],
[ 1.0, 1.0,-1.0,-1.0,-1.0, 1.0,-1.0, 1.0],
[ 1.0, 1.0,-1.0, 1.0, 1.0,-1.0,-1.0,-1.0],
[ 1.0,-1.0, 1.0,-1.0, 1.0,-1.0,-1.0, 1.0],
[ 1.0,-1.0, 1.0, 1.0,-1.0, 1.0,-1.0,-1.0],
[ 1.0, 1.0, 1.0,-1.0,-1.0,-1.0, 1.0,-1.0],
[ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
]
val_r = torch.tensor(val_r_data,requires_grad=False,dtype=torch.float64)
# Define matrices at interpolation nodes (quadrature, level = 1)
val_i_data = [[ 1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0, 1.0/3.0,-1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0, 1.0/3.0, 1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0,-1.0/3.0, 1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0,-1.0/3.0,-1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0,-1.0/3.0, 1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0,-1.0/3.0,-1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0, 1.0/3.0,-1.0/3.0*np.sqrt(1.0/3.0)],
[ 1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0, 1.0/3.0, 1.0/3.0*np.sqrt(1.0/3.0)],
]
val_i = torch.tensor(val_i_data,requires_grad=False,dtype=torch.float64)
ddxi_i_data = [[ 0.0,0.0,0.0,1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),0.0, 1.0/3.0],
[ 0.0,0.0,0.0,1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),0.0, 1.0/3.0],
[ 0.0,0.0,0.0,1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),0.0,-1.0/3.0],
[ 0.0,0.0,0.0,1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),0.0,-1.0/3.0],
[ 0.0,0.0,0.0,1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),0.0,-1.0/3.0],
[ 0.0,0.0,0.0,1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),0.0,-1.0/3.0],
[ 0.0,0.0,0.0,1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),0.0, 1.0/3.0],
[ 0.0,0.0,0.0,1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),0.0, 1.0/3.0],
]
ddxi_i = torch.tensor(ddxi_i_data,requires_grad=False,dtype=torch.float64)
ddeta_i_data = [[ 0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),0.0,-np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),0.0,-np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),0.0,-np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),0.0,-np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),0.0, np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),0.0, np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),0.0, np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),0.0, np.sqrt(1.0/3.0), 1.0/3.0],
]
ddeta_i = torch.tensor(ddeta_i_data,requires_grad=False,dtype=torch.float64)
ddzeta_i_data= [[ 0.0,0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0],
[ 0.0,0.0,1.0,0.0,0.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0],
]
ddzeta_i = torch.tensor(ddzeta_i_data,requires_grad=False,dtype=torch.float64)
# Define element interpolation node weights for a linear element
weights_e_data = [1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]
weights_e = torch.tensor(weights_e_data,requires_grad=False,dtype=torch.float64)
# Define val_f for each face
# Face 1, XI_MIN
val_1_data = [[ 1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0],
[ 1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0, 1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0,-1.0/3.0],
]
val_1 = torch.tensor(val_1_data,requires_grad=False,dtype=torch.float64)
# Face 2, XI_MAX
val_2_data = [[ 1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0],
[ 1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0,-1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0, 1.0/3.0],
]
val_2 = torch.tensor(val_2_data,requires_grad=False,dtype=torch.float64)
# Face 3, ETA_MIN
val_3_data = [[ 1.0,-1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0, np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0,-1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0, np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0,-1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0,-np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0,-1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0,-np.sqrt(1.0/3.0),-1.0/3.0],
]
val_3 = torch.tensor(val_3_data,requires_grad=False,dtype=torch.float64)
# Face 4, ETA_MAX
val_4_data = [[ 1.0,1.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0,-np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0,1.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0,-np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0,1.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0, np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0,1.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0, np.sqrt(1.0/3.0), 1.0/3.0],
]
val_4 = torch.tensor(val_4_data,requires_grad=False,dtype=torch.float64)
# Face 5, ZETA_MIN
val_5_data = [[ 1.0,-np.sqrt(1.0/3.0),-1.0,-np.sqrt(1.0/3.0), 1.0/3.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0,-np.sqrt(1.0/3.0),-1.0, np.sqrt(1.0/3.0),-1.0/3.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),-1.0,-np.sqrt(1.0/3.0),-1.0/3.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),-1.0, np.sqrt(1.0/3.0), 1.0/3.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0],
]
val_5 = torch.tensor(val_5_data,requires_grad=False,dtype=torch.float64)
# Face 6, ZETA_MAX
val_6_data = [[ 1.0,-np.sqrt(1.0/3.0),1.0,-np.sqrt(1.0/3.0), 1.0/3.0,-np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0), 1.0/3.0],
[ 1.0,-np.sqrt(1.0/3.0),1.0, np.sqrt(1.0/3.0),-1.0/3.0, np.sqrt(1.0/3.0),-np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),1.0,-np.sqrt(1.0/3.0),-1.0/3.0,-np.sqrt(1.0/3.0), np.sqrt(1.0/3.0),-1.0/3.0],
[ 1.0, np.sqrt(1.0/3.0),1.0, np.sqrt(1.0/3.0), 1.0/3.0, np.sqrt(1.0/3.0), np.sqrt(1.0/3.0), 1.0/3.0],
]
val_6 = torch.tensor(val_6_data,requires_grad=False,dtype=torch.float64)
#--------------------------------------------------------------------
# Nodes-to-modes matrix (inverse of the modes-to-nodes matrix val_r)
val_r_inv = torch.inverse(val_r)
# Compute coordinate modes
coords_modes = torch.mm(val_r_inv,coords)
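# Sanity check (illustrative): the nodal coordinates should be recovered from
# the modes, i.e. torch.allclose(torch.mm(val_r, coords_modes), coords) is
# expected to hold up to round-off.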
# Interpolate coordinates at the interpolation nodes
interp_coords = torch.mm(val_i,coords_modes)
# Initialize jacobian
jacobian = torch.empty(3,3,nnodes_ie, dtype=torch.float64)
for inode in range(0,nnodes_ie):
jacobian[0,0,inode] = torch.dot(ddxi_i[inode,:] , coords_modes[:,0])
jacobian[0,1,inode] = torch.dot(ddeta_i[inode,:] , coords_modes[:,0])
jacobian[0,2,inode] = torch.dot(ddzeta_i[inode,:] , coords_modes[:,0])
jacobian[1,0,inode] = torch.dot(ddxi_i[inode,:] , coords_modes[:,1])
jacobian[1,1,inode] = torch.dot(ddeta_i[inode,:] , coords_modes[:,1])
jacobian[1,2,inode] = torch.dot(ddzeta_i[inode,:] , coords_modes[:,1])
jacobian[2,0,inode] = torch.dot(ddxi_i[inode,:] , coords_modes[:,2])
jacobian[2,1,inode] = torch.dot(ddeta_i[inode,:] , coords_modes[:,2])
jacobian[2,2,inode] = torch.dot(ddzeta_i[inode,:] , coords_modes[:,2])
update_progress("Computing Jacobian ", inode/(nnodes_ie-1))
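# Note: jacobian[i, j, inode] holds d(x_i)/d(xi_j) at interpolation node inode,
# with rows indexing the physical coordinates (x, y, z) and columns the
# reference coordinates (xi, eta, zeta).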
if coord_sys == 'CYLINDRICAL':
scaling_factor = torch.mm(val_i,coords_modes[:,0])
for inode in range(0,nnodes_ie):
jacobian[1,0,inode] = jacobian[1,0,inode] * scaling_factor[inode]
jacobian[1,1,inode] = jacobian[1,1,inode] * scaling_factor[inode]
jacobian[1,2,inode] = jacobian[1,2,inode] * scaling_factor[inode]
# Metrics and determinant
metrics = torch.empty(3,3,nnodes_ie, dtype=torch.float64)
jinv = torch.empty(nnodes_ie, dtype=torch.float64)
for inode in range(0,nnodes_ie):
ijacobian = torch.empty(3,3, dtype=torch.float64)
imetric = torch.empty(3,3, dtype=torch.float64)
for irow in range(0,3):
for icol in range(0,3):
ijacobian[irow,icol] = jacobian[irow,icol,inode]
    # Compute determinant and inverse metric for the ith node
update_progress("Computing Jinv and Metric ", inode/(nnodes_ie-1))
jinv[inode] = torch.det(ijacobian)
imetric = torch.inverse(ijacobian)
for irow in range(0,3):
for icol in range(0,3):
metrics[irow,icol,inode] = imetric[irow,icol]
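# Sanity check (illustrative): metrics[:, :, inode] is the inverse of
# jacobian[:, :, inode], so torch.mm(metrics[:, :, inode], jacobian[:, :, inode])
# is expected to be close to the 3x3 identity at every interpolation node.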
# Compute inverse Mass matrix
invmass = torch.empty(nterms_s,nterms_s,nnodes_ie, dtype=torch.float64)
mass = torch.empty(nterms_s,nterms_s,nnodes_ie, dtype=torch.float64)
val_tmp = torch.empty(nterms_s,nnodes_ie, dtype=torch.float64)
i = 1
for iterm in range(0,nterms_s):
for inode in range(0,nnodes_ie):
val_tmp[inode,iterm] = val_i[inode,iterm] * weights_e[inode] * jinv[inode]
update_progress("Computing invmass ", i/(nterms_s*nnodes_ie))
i += 1
mass = torch.mm(torch.t(val_tmp),val_i)
invmass = torch.inverse(mass)
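# Sanity check (illustrative): mass and invmass should be numerical inverses,
# e.g. torch.allclose(torch.mm(mass, invmass),
#                     torch.eye(nterms_s, dtype=torch.float64)) is expected to hold.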
# Compute BR2_VOL for each face
br2_vol_face1 = torch.mm(val_i,torch.mm(invmass,torch.t(val_1)))
br2_vol_face2 = torch.mm(val_i,torch.mm(invmass,torch.t(val_2)))
br2_vol_face3 = torch.mm(val_i,torch.mm(invmass,torch.t(val_3)))
br2_vol_face4 = torch.mm(val_i,torch.mm(invmass,torch.t(val_4)))
br2_vol_face5 = torch.mm(val_i,torch.mm(invmass,torch.t(val_5)))
br2_vol_face6 = torch.mm(val_i,torch.mm(invmass,torch.t(val_6)))
update_progress("Computing br2_vol ", 1)
# Compute BR2_FACE for each face
br2_face_face1 = torch.mm(val_1,torch.mm(invmass,torch.t(val_1)))
br2_face_face2 = torch.mm(val_2,torch.mm(invmass,torch.t(val_2)))
br2_face_face3 = torch.mm(val_3,torch.mm(invmass,torch.t(val_3)))
br2_face_face4 = torch.mm(val_4,torch.mm(invmass,torch.t(val_4)))
br2_face_face5 = torch.mm(val_5,torch.mm(invmass,torch.t(val_5)))
br2_face_face6 = torch.mm(val_6,torch.mm(invmass,torch.t(val_6)))
update_progress("Computing br2_face ", 1)
# Grad1, Grad2, and Grad3
grad1 = torch.empty(nnodes_ie,nterms_s, dtype=torch.float64)
grad2 = torch.empty(nnodes_ie,nterms_s, dtype=torch.float64)
grad3 = torch.empty(nnodes_ie,nterms_s, dtype=torch.float64)
i = 1
for iterm in range(0,nterms_s):
for inode in range(0,nnodes_ie):
grad1[inode,iterm] = metrics[0,0,inode] * ddxi_i[inode,iterm] + metrics[1,0,inode] * ddeta_i[inode,iterm] + metrics[2,0,inode] * ddzeta_i[inode,iterm]
grad2[inode,iterm] = metrics[0,1,inode] * ddxi_i[inode,iterm] + metrics[1,1,inode] * ddeta_i[inode,iterm] + metrics[2,1,inode] * ddzeta_i[inode,iterm]
grad3[inode,iterm] = metrics[0,2,inode] * ddxi_i[inode,iterm] + metrics[1,2,inode] * ddeta_i[inode,iterm] + metrics[2,2,inode] * ddzeta_i[inode,iterm]
update_progress("Computing grad1, grad2, grad3 ", i/(nnodes_ie*nterms_s))
i += 1
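# Usage note (illustrative): grad1/grad2/grad3 map modal coefficients to
# physical x/y/z derivatives at the interpolation nodes; for an assumed modal
# vector u_modes of shape (nterms_s, 1), dudx_at_nodes = torch.mm(grad1, u_modes).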
#WRITE_____________________
#
# Metrics
#
f = open("metrics.txt","w")
i = 1
for inode in range (0,nnodes_ie):
f.write("Metric interpolation node %d \n" % (inode+1))
array = np.zeros([3, 3])
for irow in range(0,3):
for icol in range(0,3):
array[irow,icol] = metrics[irow,icol,inode].item()
update_progress("Writing metrics to file ", i/(nnodes_ie*9))
i += 1
np.savetxt(f,array)
f.close()
#
# jinv
#
f = open("jinv.txt","w")
array = np.zeros([1])
i = 1
for inode in range (0,nnodes_ie):
f.write("Jinv interpolation node %d \n" % (inode+1))
array[0] = jinv[inode].item()
np.savetxt(f,array)
update_progress("Writing jinv to file ", i/(nnodes_ie))
i += 1
f.close()
#
# Grad1
#
f = open("grad1.txt","w")
f.write("Grad1 \n")
array = np.zeros([nnodes_ie,nterms_s])
i = 1
for inode in range (0,nnodes_ie):
for iterm in range(0,nterms_s):
array[inode,iterm] = grad1[inode,iterm].item()
update_progress("Writing grad1 to file ", i/(nnodes_ie*nterms_s))
i += 1
np.savetxt(f,array)
f.close()
#
# Grad2
#
f = open("grad2.txt","w")
f.write("Grad2 \n")
array = np.zeros([nnodes_ie,nterms_s])
i = 1
for inode in range (0,nnodes_ie):
for iterm in range(0,nterms_s):
array[inode,iterm] = grad2[inode,iterm].item()
update_progress("Writing grad2 to file ", i/(nnodes_ie*nterms_s))
i += 1
np.savetxt(f,array)
f.close()
#
# Grad3
#
f = open("grad3.txt","w")
f.write("Grad3 \n")
array = np.zeros([nnodes_ie,nterms_s])
i = 1
for inode in range (0,nnodes_ie):
for iterm in range(0,nterms_s):
array[inode,iterm] = grad3[inode,iterm].item()
update_progress("Writing grad3 to file ", i/(nnodes_ie*nterms_s))
i += 1
np.savetxt(f,array)
f.close()
#
# dmetric_dx
#
f = open("dmetric_dx.txt","w")
i = 1
for inode in range (0,nnodes_ie):
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
array = np.zeros([3,3])
f.write("dmetric_dx interpolation node %s, diff_node %s, diff_dir %s \n" % (inode+1,inode_diff+1,idir+1))
for irow in range(0,3):
for icol in range(0,3):
data = metrics[irow,icol,inode]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dmetric_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*3*3))
            # This avoids accumulating derivatives across backward() calls
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# interp_coords_dx
#
f = open("dinterp_xcoords_dx.txt","w")
i = 1
f.write("xcoord interpolation, coord 1, row=node, col=nnodes_r*dir \n")
array = np.zeros([nnodes_ie,nnodes_r*ndirs])
for inode in range (0,nnodes_ie):
data = interp_coords[inode,0]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
if idir == 0:
index = inode_diff
elif idir == 1:
index = nnodes_r + inode_diff
elif idir == 2:
index = 2*nnodes_r + inode_diff
array[inode,index] = ddata_np[inode_diff,idir]
update_progress("Writing interp_xcoords_dx to file ", i/(nnodes_ie*nnodes_r*3))
i += 1
    # This avoids accumulating derivatives across backward() calls
dummy = coords.grad.data.zero_()
np.savetxt(f,array)
f.close()
f = open("dinterp_ycoords_dx.txt","w")
i = 1
f.write("ycoord interpolation, coord 2, row=node, col=nnodes_r*dir \n")
array = np.zeros([nnodes_ie,nnodes_r*ndirs])
for inode in range (0,nnodes_ie):
data = interp_coords[inode,1]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
if idir == 0:
index = inode_diff
elif idir == 1:
index = nnodes_r + inode_diff
elif idir == 2:
index = 2*nnodes_r + inode_diff
array[inode,index] = ddata_np[inode_diff,idir]
update_progress("Writing interp_ycoords_dx to file ", i/(nnodes_ie*nnodes_r*3))
i += 1
    # This avoids accumulating derivatives across backward() calls
dummy = coords.grad.data.zero_()
np.savetxt(f,array)
f.close()
f = open("dinterp_zcoords_dx.txt","w")
i = 1
f.write("zcoord interpolation, coord 3, row=node, col=nnodes_r*dir \n")
array = np.zeros([nnodes_ie,nnodes_r*ndirs])
for inode in range (0,nnodes_ie):
data = interp_coords[inode,2]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
if idir == 0:
index = inode_diff
elif idir == 1:
index = nnodes_r + inode_diff
elif idir == 2:
index = 2*nnodes_r + inode_diff
array[inode,index] = ddata_np[inode_diff,idir]
update_progress("Writing interp_zcoords_dx to file ", i/(nnodes_ie*nnodes_r*3))
i += 1
    # This avoids accumulating derivatives across backward() calls
dummy = coords.grad.data.zero_()
np.savetxt(f,array)
f.close()
#
# djinv_dx
#
f = open("djinv_dx.txt","w")
i = 1
for inode in range (0,nnodes_ie):
array = np.zeros([nnodes_r,ndirs])
f.write("djinv_dx interpolation node %s, row=inode_diff, col=dir \n" % (inode+1))
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
data = jinv[inode]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[inode_diff,idir] = ddata_np[inode_diff,idir]
update_progress("Writing djinv_dx to file ", i/(nnodes_ie*nnodes_r*ndirs))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dmass_dx
#
f = open("dmass_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dmass_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nterms_s,nterms_s])
for irow in range(0,nterms_s):
for icol in range(0,nterms_s):
data = mass[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dmass_dx to file ", i/(nterms_s*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dinvmass_dx
#
f = open("dinvmass_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dinvmass_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nterms_s,nterms_s])
for irow in range(0,nterms_s):
for icol in range(0,nterms_s):
data = invmass[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dinvmass_dx to file ", i/(nterms_s*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dbr2_vol_dx
#
#
f = open("dbr2_vol_face1_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face1_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face1[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face1_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face2_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face2_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face2[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face2_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face3_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face3_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face3[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face3_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face4_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face4_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face4[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face4_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face5_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face5_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face5[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face5_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_vol_face6_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_vol_face6_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nnodes_if])
for irow in range(0,nnodes_ie):
for icol in range(0,nnodes_if):
data = br2_vol_face6[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_vol_face6_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dbr2_face_dx
#
#
f = open("dbr2_face_face1_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face1_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face1[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face1_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face2_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face2_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face2[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face2_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face3_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face3_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face3[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face3_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face4_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face4_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face4[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face4_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face5_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face5_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face5[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face5_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
f = open("dbr2_face_face6_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dbr2_face_face6_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_if,nnodes_if])
for irow in range(0,nnodes_if):
for icol in range(0,nnodes_if):
data = br2_face_face6[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dbr2_face_face6_dx to file ", i/(nnodes_if*nnodes_r*ndirs*nnodes_if))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dgrad1_dx
#
f = open("dgrad1_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dgrad1_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nterms_s])
for irow in range(0,nnodes_ie):
for icol in range(0,nterms_s):
data = grad1[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dgrad1_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dgrad2_dx
#
f = open("dgrad2_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dgrad2_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nterms_s])
for irow in range(0,nnodes_ie):
for icol in range(0,nterms_s):
data = grad2[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dgrad2_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
#
# dgrad3_dx
#
f = open("dgrad3_dx.txt","w")
i = 1
for inode_diff in range(0,nnodes_r):
for idir in range(0,ndirs):
f.write("dgrad3_dx => diff_node %s, diff_dir %s \n" % (inode_diff+1,idir+1))
array = np.zeros([nnodes_ie,nterms_s])
for irow in range(0,nnodes_ie):
for icol in range(0,nterms_s):
data = grad3[irow,icol]
data.backward(retain_graph=True)
ddata = coords.grad
ddata_np = ddata.numpy()
array[irow,icol] = ddata_np[inode_diff,idir]
update_progress("Writing dgrad3_dx to file ", i/(nnodes_ie*nnodes_r*ndirs*nterms_s))
dummy = coords.grad.data.zero_()
i += 1
np.savetxt(f,array)
f.close()
|
[
"numpy.sqrt",
"torch.det",
"torch.t",
"torch.mm",
"torch.tensor",
"numpy.zeros",
"numpy.savetxt",
"torch.dot",
"os.system",
"sys.stdout.flush",
"torch.empty",
"torch.inverse",
"sys.stdout.write"
] |
[((1336, 1402), 'torch.tensor', 'torch.tensor', (['coords_data'], {'requires_grad': '(True)', 'dtype': 'torch.float64'}), '(coords_data, requires_grad=True, dtype=torch.float64)\n', (1348, 1402), False, 'import torch\n'), ((2061, 2127), 'torch.tensor', 'torch.tensor', (['val_r_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(val_r_data, requires_grad=False, dtype=torch.float64)\n', (2073, 2127), False, 'import torch\n'), ((3249, 3315), 'torch.tensor', 'torch.tensor', (['val_i_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(val_i_data, requires_grad=False, dtype=torch.float64)\n', (3261, 3315), False, 'import torch\n'), ((4023, 4090), 'torch.tensor', 'torch.tensor', (['ddxi_i_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(ddxi_i_data, requires_grad=False, dtype=torch.float64)\n', (4035, 4090), False, 'import torch\n'), ((4798, 4866), 'torch.tensor', 'torch.tensor', (['ddeta_i_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(ddeta_i_data, requires_grad=False, dtype=torch.float64)\n', (4810, 4866), False, 'import torch\n'), ((5575, 5644), 'torch.tensor', 'torch.tensor', (['ddzeta_i_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(ddzeta_i_data, requires_grad=False, dtype=torch.float64)\n', (5587, 5644), False, 'import torch\n'), ((5772, 5842), 'torch.tensor', 'torch.tensor', (['weights_e_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(weights_e_data, requires_grad=False, dtype=torch.float64)\n', (5784, 5842), False, 'import torch\n'), ((6387, 6453), 'torch.tensor', 'torch.tensor', (['val_1_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(val_1_data, requires_grad=False, dtype=torch.float64)\n', (6399, 6453), False, 'import torch\n'), ((6969, 7035), 'torch.tensor', 'torch.tensor', (['val_2_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(val_2_data, requires_grad=False, dtype=torch.float64)\n', (6981, 7035), False, 'import torch\n'), ((7556, 7622), 'torch.tensor', 'torch.tensor', (['val_3_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(val_3_data, requires_grad=False, dtype=torch.float64)\n', (7568, 7622), False, 'import torch\n'), ((8138, 8204), 'torch.tensor', 'torch.tensor', (['val_4_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(val_4_data, requires_grad=False, dtype=torch.float64)\n', (8150, 8204), False, 'import torch\n'), ((8725, 8791), 'torch.tensor', 'torch.tensor', (['val_5_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(val_5_data, requires_grad=False, dtype=torch.float64)\n', (8737, 8791), False, 'import torch\n'), ((9308, 9374), 'torch.tensor', 'torch.tensor', (['val_6_data'], {'requires_grad': '(False)', 'dtype': 'torch.float64'}), '(val_6_data, requires_grad=False, dtype=torch.float64)\n', (9320, 9374), False, 'import torch\n'), ((9485, 9505), 'torch.inverse', 'torch.inverse', (['val_r'], {}), '(val_r)\n', (9498, 9505), False, 'import torch\n'), ((9552, 9579), 'torch.mm', 'torch.mm', (['val_r_inv', 'coords'], {}), '(val_r_inv, coords)\n', (9560, 9579), False, 'import torch\n'), ((9623, 9652), 'torch.mm', 'torch.mm', (['val_i', 'coords_modes'], {}), '(val_i, coords_modes)\n', (9631, 9652), False, 'import torch\n'), ((9688, 9737), 'torch.empty', 'torch.empty', (['(3)', '(3)', 'nnodes_ie'], {'dtype': 'torch.float64'}), '(3, 3, nnodes_ie, dtype=torch.float64)\n', (9699, 9737), False, 'import torch\n'), ((10909, 10958), 'torch.empty', 'torch.empty', (['(3)', '(3)', 'nnodes_ie'], 
{'dtype': 'torch.float64'}), '(3, 3, nnodes_ie, dtype=torch.float64)\n', (10920, 10958), False, 'import torch\n'), ((10968, 11011), 'torch.empty', 'torch.empty', (['nnodes_ie'], {'dtype': 'torch.float64'}), '(nnodes_ie, dtype=torch.float64)\n', (10979, 11011), False, 'import torch\n'), ((11639, 11702), 'torch.empty', 'torch.empty', (['nterms_s', 'nterms_s', 'nnodes_ie'], {'dtype': 'torch.float64'}), '(nterms_s, nterms_s, nnodes_ie, dtype=torch.float64)\n', (11650, 11702), False, 'import torch\n'), ((11711, 11774), 'torch.empty', 'torch.empty', (['nterms_s', 'nterms_s', 'nnodes_ie'], {'dtype': 'torch.float64'}), '(nterms_s, nterms_s, nnodes_ie, dtype=torch.float64)\n', (11722, 11774), False, 'import torch\n'), ((11783, 11836), 'torch.empty', 'torch.empty', (['nterms_s', 'nnodes_ie'], {'dtype': 'torch.float64'}), '(nterms_s, nnodes_ie, dtype=torch.float64)\n', (11794, 11836), False, 'import torch\n'), ((12149, 12168), 'torch.inverse', 'torch.inverse', (['mass'], {}), '(mass)\n', (12162, 12168), False, 'import torch\n'), ((13176, 13229), 'torch.empty', 'torch.empty', (['nnodes_ie', 'nterms_s'], {'dtype': 'torch.float64'}), '(nnodes_ie, nterms_s, dtype=torch.float64)\n', (13187, 13229), False, 'import torch\n'), ((13237, 13290), 'torch.empty', 'torch.empty', (['nnodes_ie', 'nterms_s'], {'dtype': 'torch.float64'}), '(nnodes_ie, nterms_s, dtype=torch.float64)\n', (13248, 13290), False, 'import torch\n'), ((13298, 13351), 'torch.empty', 'torch.empty', (['nnodes_ie', 'nterms_s'], {'dtype': 'torch.float64'}), '(nnodes_ie, nterms_s, dtype=torch.float64)\n', (13309, 13351), False, 'import torch\n'), ((14520, 14533), 'numpy.zeros', 'np.zeros', (['[1]'], {}), '([1])\n', (14528, 14533), True, 'import numpy as np\n'), ((14851, 14882), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nterms_s]'], {}), '([nnodes_ie, nterms_s])\n', (14859, 14882), True, 'import numpy as np\n'), ((15115, 15135), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (15125, 15135), True, 'import numpy as np\n'), ((15212, 15243), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nterms_s]'], {}), '([nnodes_ie, nterms_s])\n', (15220, 15243), True, 'import numpy as np\n'), ((15476, 15496), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (15486, 15496), True, 'import numpy as np\n'), ((15573, 15604), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nterms_s]'], {}), '([nnodes_ie, nterms_s])\n', (15581, 15604), True, 'import numpy as np\n'), ((15837, 15857), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (15847, 15857), True, 'import numpy as np\n'), ((16958, 16997), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_r * ndirs]'], {}), '([nnodes_ie, nnodes_r * ndirs])\n', (16966, 16997), True, 'import numpy as np\n'), ((17692, 17712), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (17702, 17712), True, 'import numpy as np\n'), ((17848, 17887), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_r * ndirs]'], {}), '([nnodes_ie, nnodes_r * ndirs])\n', (17856, 17887), True, 'import numpy as np\n'), ((18582, 18602), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (18592, 18602), True, 'import numpy as np\n'), ((18738, 18777), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_r * ndirs]'], {}), '([nnodes_ie, nnodes_r * ndirs])\n', (18746, 18777), True, 'import numpy as np\n'), ((19472, 19492), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (19482, 19492), True, 'import numpy as np\n'), ((577, 598), 'sys.stdout.write', 'sys.stdout.write', 
(['msg'], {}), '(msg)\n', (593, 598), False, 'import sys\n'), ((603, 621), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (619, 621), False, 'import sys\n'), ((638, 686), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (647, 686), False, 'import os\n'), ((9796, 9843), 'torch.dot', 'torch.dot', (['ddxi_i[inode, :]', 'coords_modes[:, 0]'], {}), '(ddxi_i[inode, :], coords_modes[:, 0])\n', (9805, 9843), False, 'import torch\n'), ((9871, 9919), 'torch.dot', 'torch.dot', (['ddeta_i[inode, :]', 'coords_modes[:, 0]'], {}), '(ddeta_i[inode, :], coords_modes[:, 0])\n', (9880, 9919), False, 'import torch\n'), ((9946, 9995), 'torch.dot', 'torch.dot', (['ddzeta_i[inode, :]', 'coords_modes[:, 0]'], {}), '(ddzeta_i[inode, :], coords_modes[:, 0])\n', (9955, 9995), False, 'import torch\n'), ((10021, 10068), 'torch.dot', 'torch.dot', (['ddxi_i[inode, :]', 'coords_modes[:, 1]'], {}), '(ddxi_i[inode, :], coords_modes[:, 1])\n', (10030, 10068), False, 'import torch\n'), ((10096, 10144), 'torch.dot', 'torch.dot', (['ddeta_i[inode, :]', 'coords_modes[:, 1]'], {}), '(ddeta_i[inode, :], coords_modes[:, 1])\n', (10105, 10144), False, 'import torch\n'), ((10171, 10220), 'torch.dot', 'torch.dot', (['ddzeta_i[inode, :]', 'coords_modes[:, 1]'], {}), '(ddzeta_i[inode, :], coords_modes[:, 1])\n', (10180, 10220), False, 'import torch\n'), ((10246, 10293), 'torch.dot', 'torch.dot', (['ddxi_i[inode, :]', 'coords_modes[:, 2]'], {}), '(ddxi_i[inode, :], coords_modes[:, 2])\n', (10255, 10293), False, 'import torch\n'), ((10321, 10369), 'torch.dot', 'torch.dot', (['ddeta_i[inode, :]', 'coords_modes[:, 2]'], {}), '(ddeta_i[inode, :], coords_modes[:, 2])\n', (10330, 10369), False, 'import torch\n'), ((10396, 10445), 'torch.dot', 'torch.dot', (['ddzeta_i[inode, :]', 'coords_modes[:, 2]'], {}), '(ddzeta_i[inode, :], coords_modes[:, 2])\n', (10405, 10445), False, 'import torch\n'), ((10577, 10612), 'torch.mm', 'torch.mm', (['val_i', 'coords_modes[:, 0]'], {}), '(val_i, coords_modes[:, 0])\n', (10585, 10612), False, 'import torch\n'), ((11062, 11100), 'torch.empty', 'torch.empty', (['(3)', '(3)'], {'dtype': 'torch.float64'}), '(3, 3, dtype=torch.float64)\n', (11073, 11100), False, 'import torch\n'), ((11116, 11154), 'torch.empty', 'torch.empty', (['(3)', '(3)'], {'dtype': 'torch.float64'}), '(3, 3, dtype=torch.float64)\n', (11127, 11154), False, 'import torch\n'), ((11413, 11433), 'torch.det', 'torch.det', (['ijacobian'], {}), '(ijacobian)\n', (11422, 11433), False, 'import torch\n'), ((11453, 11477), 'torch.inverse', 'torch.inverse', (['ijacobian'], {}), '(ijacobian)\n', (11466, 11477), False, 'import torch\n'), ((12115, 12131), 'torch.t', 'torch.t', (['val_tmp'], {}), '(val_tmp)\n', (12122, 12131), False, 'import torch\n'), ((14197, 14213), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (14205, 14213), True, 'import numpy as np\n'), ((14445, 14465), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (14455, 14465), True, 'import numpy as np\n'), ((14669, 14689), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (14679, 14689), True, 'import numpy as np\n'), ((19599, 19626), 'numpy.zeros', 'np.zeros', (['[nnodes_r, ndirs]'], {}), '([nnodes_r, ndirs])\n', (19607, 19626), True, 'import numpy as np\n'), ((20194, 20214), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (20204, 20214), True, 'import numpy as np\n'), ((2379, 2397), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 
3.0)\n', (2386, 2397), True, 'import numpy as np\n'), ((2472, 2490), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2479, 2490), True, 'import numpy as np\n'), ((2601, 2619), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2608, 2619), True, 'import numpy as np\n'), ((2637, 2655), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2644, 2655), True, 'import numpy as np\n'), ((2748, 2766), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2755, 2766), True, 'import numpy as np\n'), ((2877, 2895), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2884, 2895), True, 'import numpy as np\n'), ((2895, 2913), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2902, 2913), True, 'import numpy as np\n'), ((2988, 3006), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2995, 3006), True, 'import numpy as np\n'), ((3006, 3024), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3013, 3024), True, 'import numpy as np\n'), ((3117, 3135), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3124, 3135), True, 'import numpy as np\n'), ((3135, 3153), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3142, 3153), True, 'import numpy as np\n'), ((3153, 3171), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3160, 3171), True, 'import numpy as np\n'), ((3521, 3539), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3528, 3539), True, 'import numpy as np\n'), ((3606, 3624), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3613, 3624), True, 'import numpy as np\n'), ((3709, 3727), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3716, 3727), True, 'import numpy as np\n'), ((3794, 3812), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3801, 3812), True, 'import numpy as np\n'), ((3861, 3879), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3868, 3879), True, 'import numpy as np\n'), ((3879, 3897), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3886, 3897), True, 'import numpy as np\n'), ((3946, 3964), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3953, 3964), True, 'import numpy as np\n'), ((3964, 3982), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3971, 3982), True, 'import numpy as np\n'), ((4210, 4228), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4217, 4228), True, 'import numpy as np\n'), ((4380, 4398), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4387, 4398), True, 'import numpy as np\n'), ((4487, 4505), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4494, 4505), True, 'import numpy as np\n'), ((4550, 4568), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4557, 4568), True, 'import numpy as np\n'), ((4572, 4590), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4579, 4590), True, 'import numpy as np\n'), ((4657, 4675), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4664, 4675), True, 'import numpy as np\n'), ((4720, 4738), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4727, 4738), True, 'import numpy as np\n'), ((4742, 4760), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4749, 4760), True, 'import numpy as np\n'), ((4990, 5008), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4997, 5008), True, 'import numpy as np\n'), 
((5093, 5111), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5100, 5111), True, 'import numpy as np\n'), ((5160, 5178), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5167, 5178), True, 'import numpy as np\n'), ((5178, 5196), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5185, 5196), True, 'import numpy as np\n'), ((5330, 5348), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5337, 5348), True, 'import numpy as np\n'), ((5433, 5451), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5440, 5451), True, 'import numpy as np\n'), ((5500, 5518), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5507, 5518), True, 'import numpy as np\n'), ((5518, 5536), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5525, 5536), True, 'import numpy as np\n'), ((5953, 5971), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5960, 5971), True, 'import numpy as np\n'), ((5971, 5989), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5978, 5989), True, 'import numpy as np\n'), ((6030, 6048), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6037, 6048), True, 'import numpy as np\n'), ((6089, 6107), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6096, 6107), True, 'import numpy as np\n'), ((6166, 6184), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6173, 6184), True, 'import numpy as np\n'), ((6189, 6207), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6196, 6207), True, 'import numpy as np\n'), ((6266, 6284), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6273, 6284), True, 'import numpy as np\n'), ((6284, 6302), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6291, 6302), True, 'import numpy as np\n'), ((6611, 6629), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6618, 6629), True, 'import numpy as np\n'), ((6651, 6669), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6658, 6669), True, 'import numpy as np\n'), ((6747, 6765), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6754, 6765), True, 'import numpy as np\n'), ((6787, 6805), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6794, 6805), True, 'import numpy as np\n'), ((6847, 6865), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6854, 6865), True, 'import numpy as np\n'), ((6865, 6883), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6872, 6883), True, 'import numpy as np\n'), ((6887, 6905), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6894, 6905), True, 'import numpy as np\n'), ((6905, 6923), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6912, 6923), True, 'import numpy as np\n'), ((7117, 7135), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7124, 7135), True, 'import numpy as np\n'), ((7144, 7162), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7151, 7162), True, 'import numpy as np\n'), ((7218, 7236), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7225, 7236), True, 'import numpy as np\n'), ((7263, 7281), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7270, 7281), True, 'import numpy as np\n'), ((7319, 7337), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7326, 7337), True, 'import numpy as np\n'), ((7355, 7373), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 
3.0)'], {}), '(1.0 / 3.0)\n', (7362, 7373), True, 'import numpy as np\n'), ((7438, 7456), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7445, 7456), True, 'import numpy as np\n'), ((7456, 7474), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7463, 7474), True, 'import numpy as np\n'), ((7803, 7821), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7810, 7821), True, 'import numpy as np\n'), ((7821, 7839), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7828, 7839), True, 'import numpy as np\n'), ((7903, 7921), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7910, 7921), True, 'import numpy as np\n'), ((7966, 7984), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7973, 7984), True, 'import numpy as np\n'), ((8021, 8039), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8028, 8039), True, 'import numpy as np\n'), ((8039, 8057), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8046, 8057), True, 'import numpy as np\n'), ((8057, 8075), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8064, 8075), True, 'import numpy as np\n'), ((8084, 8102), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8091, 8102), True, 'import numpy as np\n'), ((8296, 8314), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8303, 8314), True, 'import numpy as np\n'), ((8314, 8332), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8321, 8332), True, 'import numpy as np\n'), ((8388, 8406), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8395, 8406), True, 'import numpy as np\n'), ((8433, 8451), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8440, 8451), True, 'import numpy as np\n'), ((8484, 8502), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8491, 8502), True, 'import numpy as np\n'), ((8534, 8552), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8541, 8552), True, 'import numpy as np\n'), ((8603, 8621), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8610, 8621), True, 'import numpy as np\n'), ((8626, 8644), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8633, 8644), True, 'import numpy as np\n'), ((8973, 8991), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8980, 8991), True, 'import numpy as np\n'), ((9000, 9018), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9007, 9018), True, 'import numpy as np\n'), ((9069, 9087), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9076, 9087), True, 'import numpy as np\n'), ((9136, 9154), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9143, 9154), True, 'import numpy as np\n'), ((9187, 9205), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9194, 9205), True, 'import numpy as np\n'), ((9209, 9227), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9216, 9227), True, 'import numpy as np\n'), ((9236, 9254), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9243, 9254), True, 'import numpy as np\n'), ((9254, 9272), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9261, 9272), True, 'import numpy as np\n'), ((12251, 12265), 'torch.t', 'torch.t', (['val_1'], {}), '(val_1)\n', (12258, 12265), False, 'import torch\n'), ((12316, 12330), 'torch.t', 'torch.t', (['val_2'], {}), '(val_2)\n', (12323, 12330), False, 'import torch\n'), 
((12381, 12395), 'torch.t', 'torch.t', (['val_3'], {}), '(val_3)\n', (12388, 12395), False, 'import torch\n'), ((12446, 12460), 'torch.t', 'torch.t', (['val_4'], {}), '(val_4)\n', (12453, 12460), False, 'import torch\n'), ((12511, 12525), 'torch.t', 'torch.t', (['val_5'], {}), '(val_5)\n', (12518, 12525), False, 'import torch\n'), ((12576, 12590), 'torch.t', 'torch.t', (['val_6'], {}), '(val_6)\n', (12583, 12590), False, 'import torch\n'), ((12735, 12749), 'torch.t', 'torch.t', (['val_1'], {}), '(val_1)\n', (12742, 12749), False, 'import torch\n'), ((12801, 12815), 'torch.t', 'torch.t', (['val_2'], {}), '(val_2)\n', (12808, 12815), False, 'import torch\n'), ((12867, 12881), 'torch.t', 'torch.t', (['val_3'], {}), '(val_3)\n', (12874, 12881), False, 'import torch\n'), ((12933, 12947), 'torch.t', 'torch.t', (['val_4'], {}), '(val_4)\n', (12940, 12947), False, 'import torch\n'), ((12999, 13013), 'torch.t', 'torch.t', (['val_5'], {}), '(val_5)\n', (13006, 13013), False, 'import torch\n'), ((13065, 13079), 'torch.t', 'torch.t', (['val_6'], {}), '(val_6)\n', (13072, 13079), False, 'import torch\n'), ((20445, 20475), 'numpy.zeros', 'np.zeros', (['[nterms_s, nterms_s]'], {}), '([nterms_s, nterms_s])\n', (20453, 20475), True, 'import numpy as np\n'), ((20972, 20992), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (20982, 20992), True, 'import numpy as np\n'), ((21232, 21262), 'numpy.zeros', 'np.zeros', (['[nterms_s, nterms_s]'], {}), '([nterms_s, nterms_s])\n', (21240, 21262), True, 'import numpy as np\n'), ((21762, 21782), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (21772, 21782), True, 'import numpy as np\n'), ((22036, 22068), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_if]'], {}), '([nnodes_ie, nnodes_if])\n', (22044, 22068), True, 'import numpy as np\n'), ((22578, 22598), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (22588, 22598), True, 'import numpy as np\n'), ((22830, 22862), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_if]'], {}), '([nnodes_ie, nnodes_if])\n', (22838, 22862), True, 'import numpy as np\n'), ((23372, 23392), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (23382, 23392), True, 'import numpy as np\n'), ((23624, 23656), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_if]'], {}), '([nnodes_ie, nnodes_if])\n', (23632, 23656), True, 'import numpy as np\n'), ((24166, 24186), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (24176, 24186), True, 'import numpy as np\n'), ((24418, 24450), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_if]'], {}), '([nnodes_ie, nnodes_if])\n', (24426, 24450), True, 'import numpy as np\n'), ((24960, 24980), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (24970, 24980), True, 'import numpy as np\n'), ((25212, 25244), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_if]'], {}), '([nnodes_ie, nnodes_if])\n', (25220, 25244), True, 'import numpy as np\n'), ((25754, 25774), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (25764, 25774), True, 'import numpy as np\n'), ((26006, 26038), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nnodes_if]'], {}), '([nnodes_ie, nnodes_if])\n', (26014, 26038), True, 'import numpy as np\n'), ((26548, 26568), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (26558, 26568), True, 'import numpy as np\n'), ((26825, 26857), 'numpy.zeros', 'np.zeros', (['[nnodes_if, nnodes_if]'], {}), '([nnodes_if, nnodes_if])\n', (26833, 26857), True, 'import numpy as 
np\n'), ((27368, 27388), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (27378, 27388), True, 'import numpy as np\n'), ((27622, 27654), 'numpy.zeros', 'np.zeros', (['[nnodes_if, nnodes_if]'], {}), '([nnodes_if, nnodes_if])\n', (27630, 27654), True, 'import numpy as np\n'), ((28165, 28185), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (28175, 28185), True, 'import numpy as np\n'), ((28419, 28451), 'numpy.zeros', 'np.zeros', (['[nnodes_if, nnodes_if]'], {}), '([nnodes_if, nnodes_if])\n', (28427, 28451), True, 'import numpy as np\n'), ((28962, 28982), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (28972, 28982), True, 'import numpy as np\n'), ((29216, 29248), 'numpy.zeros', 'np.zeros', (['[nnodes_if, nnodes_if]'], {}), '([nnodes_if, nnodes_if])\n', (29224, 29248), True, 'import numpy as np\n'), ((29759, 29779), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (29769, 29779), True, 'import numpy as np\n'), ((30013, 30045), 'numpy.zeros', 'np.zeros', (['[nnodes_if, nnodes_if]'], {}), '([nnodes_if, nnodes_if])\n', (30021, 30045), True, 'import numpy as np\n'), ((30556, 30576), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (30566, 30576), True, 'import numpy as np\n'), ((30810, 30842), 'numpy.zeros', 'np.zeros', (['[nnodes_if, nnodes_if]'], {}), '([nnodes_if, nnodes_if])\n', (30818, 30842), True, 'import numpy as np\n'), ((31353, 31373), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (31363, 31373), True, 'import numpy as np\n'), ((31606, 31637), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nterms_s]'], {}), '([nnodes_ie, nterms_s])\n', (31614, 31637), True, 'import numpy as np\n'), ((32137, 32157), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (32147, 32157), True, 'import numpy as np\n'), ((32390, 32421), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nterms_s]'], {}), '([nnodes_ie, nterms_s])\n', (32398, 32421), True, 'import numpy as np\n'), ((32921, 32941), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (32931, 32941), True, 'import numpy as np\n'), ((33174, 33205), 'numpy.zeros', 'np.zeros', (['[nnodes_ie, nterms_s]'], {}), '([nnodes_ie, nterms_s])\n', (33182, 33205), True, 'import numpy as np\n'), ((33705, 33725), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (33715, 33725), True, 'import numpy as np\n'), ((2214, 2232), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2221, 2232), True, 'import numpy as np\n'), ((2232, 2250), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2239, 2250), True, 'import numpy as np\n'), ((2250, 2268), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2257, 2268), True, 'import numpy as np\n'), ((2303, 2321), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2310, 2321), True, 'import numpy as np\n'), ((2343, 2361), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2350, 2361), True, 'import numpy as np\n'), ((2361, 2379), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2368, 2379), True, 'import numpy as np\n'), ((2432, 2450), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2439, 2450), True, 'import numpy as np\n'), ((2490, 2508), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2497, 2508), True, 'import numpy as np\n'), ((2508, 2526), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2515, 2526), True, 'import numpy 
as np\n'), ((2561, 2579), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2568, 2579), True, 'import numpy as np\n'), ((2619, 2637), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2626, 2637), True, 'import numpy as np\n'), ((2690, 2708), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2697, 2708), True, 'import numpy as np\n'), ((2730, 2748), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2737, 2748), True, 'import numpy as np\n'), ((2766, 2784), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2773, 2784), True, 'import numpy as np\n'), ((2819, 2837), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2826, 2837), True, 'import numpy as np\n'), ((2859, 2877), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2866, 2877), True, 'import numpy as np\n'), ((2948, 2966), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (2955, 2966), True, 'import numpy as np\n'), ((3024, 3042), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3031, 3042), True, 'import numpy as np\n'), ((3077, 3095), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3084, 3095), True, 'import numpy as np\n'), ((3206, 3224), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3213, 3224), True, 'import numpy as np\n'), ((3351, 3369), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3358, 3369), True, 'import numpy as np\n'), ((3369, 3387), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3376, 3387), True, 'import numpy as np\n'), ((3436, 3454), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3443, 3454), True, 'import numpy as np\n'), ((3454, 3472), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3461, 3472), True, 'import numpy as np\n'), ((3539, 3557), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3546, 3557), True, 'import numpy as np\n'), ((3624, 3642), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3631, 3642), True, 'import numpy as np\n'), ((3691, 3709), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3698, 3709), True, 'import numpy as np\n'), ((3776, 3794), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (3783, 3794), True, 'import numpy as np\n'), ((4125, 4143), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4132, 4143), True, 'import numpy as np\n'), ((4147, 4165), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4154, 4165), True, 'import numpy as np\n'), ((4232, 4250), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4239, 4250), True, 'import numpy as np\n'), ((4295, 4313), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4302, 4313), True, 'import numpy as np\n'), ((4317, 4335), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4324, 4335), True, 'import numpy as np\n'), ((4402, 4420), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4409, 4420), True, 'import numpy as np\n'), ((4465, 4483), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4472, 4483), True, 'import numpy as np\n'), ((4635, 4653), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4642, 4653), True, 'import numpy as np\n'), ((4905, 4923), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4912, 4923), True, 'import numpy as np\n'), ((4923, 4941), 'numpy.sqrt', 'np.sqrt', 
(['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (4930, 4941), True, 'import numpy as np\n'), ((5008, 5026), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5015, 5026), True, 'import numpy as np\n'), ((5075, 5093), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5082, 5093), True, 'import numpy as np\n'), ((5245, 5263), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5252, 5263), True, 'import numpy as np\n'), ((5263, 5281), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5270, 5281), True, 'import numpy as np\n'), ((5348, 5366), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5355, 5366), True, 'import numpy as np\n'), ((5415, 5433), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5422, 5433), True, 'import numpy as np\n'), ((5912, 5930), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5919, 5930), True, 'import numpy as np\n'), ((5930, 5948), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (5937, 5948), True, 'import numpy as np\n'), ((6048, 6066), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6055, 6066), True, 'import numpy as np\n'), ((6071, 6089), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6078, 6089), True, 'import numpy as np\n'), ((6148, 6166), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6155, 6166), True, 'import numpy as np\n'), ((6207, 6225), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6214, 6225), True, 'import numpy as np\n'), ((6307, 6325), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6314, 6325), True, 'import numpy as np\n'), ((6325, 6343), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6332, 6343), True, 'import numpy as np\n'), ((6493, 6511), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6500, 6511), True, 'import numpy as np\n'), ((6511, 6529), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6518, 6529), True, 'import numpy as np\n'), ((6533, 6551), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6540, 6551), True, 'import numpy as np\n'), ((6551, 6569), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6558, 6569), True, 'import numpy as np\n'), ((6629, 6647), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6636, 6647), True, 'import numpy as np\n'), ((6669, 6687), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6676, 6687), True, 'import numpy as np\n'), ((6729, 6747), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6736, 6747), True, 'import numpy as np\n'), ((6769, 6787), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (6776, 6787), True, 'import numpy as np\n'), ((7081, 7099), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7088, 7099), True, 'import numpy as np\n'), ((7099, 7117), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7106, 7117), True, 'import numpy as np\n'), ((7200, 7218), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7207, 7218), True, 'import numpy as np\n'), ((7236, 7254), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7243, 7254), True, 'import numpy as np\n'), ((7337, 7355), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7344, 7355), True, 'import numpy as np\n'), ((7382, 7400), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7389, 7400), 
True, 'import numpy as np\n'), ((7474, 7492), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7481, 7492), True, 'import numpy as np\n'), ((7501, 7519), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7508, 7519), True, 'import numpy as np\n'), ((7667, 7685), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7674, 7685), True, 'import numpy as np\n'), ((7685, 7703), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7692, 7703), True, 'import numpy as np\n'), ((7703, 7721), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7710, 7721), True, 'import numpy as np\n'), ((7730, 7748), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7737, 7748), True, 'import numpy as np\n'), ((7785, 7803), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7792, 7803), True, 'import numpy as np\n'), ((7848, 7866), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7855, 7866), True, 'import numpy as np\n'), ((7921, 7939), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7928, 7939), True, 'import numpy as np\n'), ((7939, 7957), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (7946, 7957), True, 'import numpy as np\n'), ((8246, 8264), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8253, 8264), True, 'import numpy as np\n'), ((8269, 8287), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8276, 8287), True, 'import numpy as np\n'), ((8365, 8383), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8372, 8383), True, 'import numpy as np\n'), ((8415, 8433), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8422, 8433), True, 'import numpy as np\n'), ((8507, 8525), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8514, 8525), True, 'import numpy as np\n'), ((8552, 8570), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8559, 8570), True, 'import numpy as np\n'), ((8653, 8671), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8660, 8671), True, 'import numpy as np\n'), ((8671, 8689), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8678, 8689), True, 'import numpy as np\n'), ((8833, 8851), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8840, 8851), True, 'import numpy as np\n'), ((8855, 8873), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8862, 8873), True, 'import numpy as np\n'), ((8882, 8900), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8889, 8900), True, 'import numpy as np\n'), ((8900, 8918), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8907, 8918), True, 'import numpy as np\n'), ((8951, 8969), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (8958, 8969), True, 'import numpy as np\n'), ((9018, 9036), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9025, 9036), True, 'import numpy as np\n'), ((9091, 9109), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9098, 9109), True, 'import numpy as np\n'), ((9118, 9136), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3.0)'], {}), '(1.0 / 3.0)\n', (9125, 9136), True, 'import numpy as np\n'), ((16053, 16069), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (16061, 16069), True, 'import numpy as np\n'), ((16778, 16798), 'numpy.savetxt', 'np.savetxt', (['f', 'array'], {}), '(f, array)\n', (16788, 16798), True, 'import numpy as np\n')]
|
import logging
import os
import random
import string
import time
import unittest
import neurolib.utils.paths as paths
import neurolib.utils.pypetUtils as pu
import numpy as np
import pytest
import xarray as xr
from neurolib.models.aln import ALNModel
from neurolib.models.fhn import FHNModel
from neurolib.models.multimodel import MultiModel
from neurolib.models.multimodel.builder.fitzhugh_nagumo import FitzHughNagumoNetwork
from neurolib.optimize.exploration import BoxSearch
from neurolib.utils.loadData import Dataset
from neurolib.utils.parameterSpace import ParameterSpace
def randomString(stringLength=10):
"""Generate a random string of fixed length"""
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(stringLength))
class TestBoxSearch(unittest.TestCase):
"""
Basic tests.
"""
def test_assertions(self):
parameters = ParameterSpace(
{"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)}, kind="sequence"
)
with pytest.raises(AssertionError):
_ = BoxSearch(model=None, parameterSpace=parameters)
with pytest.raises(AssertionError):
_ = BoxSearch(model=None, parameterSpace=None)
with pytest.raises(AssertionError):
_ = BoxSearch(model=None, parameterSpace=parameters, evalFunction=None)
def test_fillin_default_parameters_for_sequential(self):
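        # None entries in a sequential parameter dict should be filled in with the model's default parameter values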
in_dict = {"a": [None, None, 1, 2], "b": [4, 5, None, None]}
SHOULD_BE = {"a": [0, 0, 1, 2], "b": [4, 5, 12, 12]}
model_params = {"a": 0, "b": 12}
parameters = ParameterSpace({"mue_ext_mean": [1.0, 2.0]})
search = BoxSearch(model=ALNModel(), parameterSpace=parameters)
out_dict = search._fillin_default_parameters_for_sequential(in_dict, model_params)
self.assertDictEqual(out_dict, SHOULD_BE)
class TestExplorationSingleNode(unittest.TestCase):
"""
ALN single node exploration.
"""
def test_single_node(self):
start = time.time()
model = ALNModel()
parameters = ParameterSpace({"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)})
search = BoxSearch(model, parameters, filename="test_single_nodes.hdf")
search.run()
search.loadResults()
dataarray = search.xr()
self.assertTrue(isinstance(dataarray, xr.DataArray))
self.assertFalse(dataarray.attrs)
for i in search.dfResults.index:
search.dfResults.loc[i, "max_r"] = np.max(
search.results[i]["rates_exc"][:, -int(1000 / model.params["dt"]) :]
)
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
class TestExplorationBrainNetwork(unittest.TestCase):
"""
FHN brain network simulation with BOLD simulation.
"""
def test_fhn_brain_network_exploration(self):
ds = Dataset("hcp")
model = FHNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
model.params.duration = 10 * 1000 # ms
model.params.dt = 0.2
model.params.bold = True
parameters = ParameterSpace(
{
"x_ext": [np.ones((model.params["N"],)) * a for a in np.linspace(0, 2, 2)],
"K_gl": np.linspace(0, 2, 2),
"coupling": ["additive", "diffusive"],
},
kind="grid",
)
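        # "grid" exploration runs every parameter combination: 2 x 2 x 2 = 8 runs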
search = BoxSearch(model=model, parameterSpace=parameters, filename="test_fhn_brain_network_exploration.hdf")
search.run(chunkwise=True, bold=True)
pu.getTrajectorynamesInFile(os.path.join(paths.HDF_DIR, "test_fhn_brain_network_exploration.hdf"))
search.loadDfResults()
search.getRun(0, pypetShortNames=True)
search.getRun(0, pypetShortNames=False)
search.loadResults()
# firing rate xr
dataarray = search.xr()
self.assertTrue(isinstance(dataarray, xr.DataArray))
self.assertFalse(dataarray.attrs)
# bold xr
dataarray = search.xr(bold=True)
self.assertTrue(isinstance(dataarray, xr.DataArray))
self.assertFalse(dataarray.attrs)
search.info()
class TestExplorationBrainNetworkPostprocessing(unittest.TestCase):
"""
ALN brain network simulation with custom evaluation function.
"""
@classmethod
def setUpClass(cls):
# def test_brain_network_postprocessing(self):
ds = Dataset("hcp")
model = ALNModel(Cmat=ds.Cmat, Dmat=ds.Dmat)
# Resting state fits
model.params["mue_ext_mean"] = 1.57
model.params["mui_ext_mean"] = 1.6
model.params["sigma_ou"] = 0.09
model.params["b"] = 5.0
model.params["signalV"] = 2
model.params["dt"] = 0.2
model.params["duration"] = 0.2 * 60 * 1000
# multi stage evaluation function
def evaluateSimulation(traj):
model = search.getModelFromTraj(traj)
model.randomICs()
model.params["dt"] = 0.2
model.params["duration"] = 4 * 1000.0
model.run(bold=True)
result_dict = {"outputs": model.outputs}
search.saveToPypet(result_dict, traj)
# define and run exploration
parameters = ParameterSpace({"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)})
search = BoxSearch(
evalFunction=evaluateSimulation,
model=model,
parameterSpace=parameters,
filename=f"test_brain_postprocessing_{randomString(20)}.hdf",
)
search.run()
cls.model = model
cls.search = search
cls.ds = ds
def test_getRun(self):
self.search.getRun(0)
def test_loadResults(self):
self.search.loadResults()
def test_loadResults_all_False(self):
self.search.loadResults(all=False)
class TestCustomParameterExploration(unittest.TestCase):
"""Exploration with custom function"""
def test_circle_exploration(self):
def explore_me(traj):
pars = search.getParametersFromTraj(traj)
# let's calculate the distance to a circle
computation_result = abs((pars["x"] ** 2 + pars["y"] ** 2) - 1)
result_dict = {"scalar_result": computation_result, "list_result": [1, 2, 3, 4], "array_result": np.ones(3)}
search.saveToPypet(result_dict, traj)
parameters = ParameterSpace({"x": np.linspace(-2, 2, 2), "y": np.linspace(-2, 2, 2)})
search = BoxSearch(evalFunction=explore_me, parameterSpace=parameters, filename="test_circle_exploration.hdf")
search.run()
search.loadResults(pypetShortNames=False)
# call the result dataframe
search.dfResults
# test integrity of dataframe
for i in search.dfResults.index:
self.assertEqual(search.dfResults.loc[i, "scalar_result"], search.results[i]["scalar_result"])
self.assertListEqual(search.dfResults.loc[i, "list_result"], search.results[i]["list_result"])
np.testing.assert_array_equal(search.dfResults.loc[i, "array_result"], search.results[i]["array_result"])
class TestExplorationMultiModel(unittest.TestCase):
"""
MultiModel exploration test - uses FHN network.
"""
def test_multimodel_explore(self):
start = time.time()
DELAY = 13.0
fhn_net = FitzHughNagumoNetwork(np.random.rand(2, 2), np.array([[0.0, DELAY], [DELAY, 0.0]]))
model = MultiModel(fhn_net)
parameters = ParameterSpace({"*input*sigma": [0.0, 0.05], "*epsilon*": [0.5, 0.6]}, allow_star_notation=True)
search = BoxSearch(model, parameters, filename="test_multimodel.hdf")
search.run()
search.loadResults()
dataarray = search.xr()
self.assertTrue(isinstance(dataarray, xr.DataArray))
self.assertTrue(isinstance(dataarray.attrs, dict))
self.assertListEqual(
list(dataarray.attrs.keys()),
[k.replace("*", "_").replace(".", "_").replace("|", "_") for k in parameters.dict().keys()],
)
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
class TestExplorationMultiModelSequential(unittest.TestCase):
"""
MultiModel exploration test with sequential exploration - uses FHN network.
"""
def test_multimodel_explore(self):
start = time.time()
DELAY = 13.0
fhn_net = FitzHughNagumoNetwork(np.random.rand(2, 2), np.array([[0.0, DELAY], [DELAY, 0.0]]))
model = MultiModel(fhn_net)
parameters = ParameterSpace(
{"*input*sigma": [0.0, 0.05], "*epsilon*": [0.5, 0.6, 0.7]}, allow_star_notation=True, kind="sequence"
)
search = BoxSearch(model, parameters, filename="test_multimodel.hdf")
search.run()
search.loadResults()
dataarray = search.xr()
self.assertTrue(isinstance(dataarray, xr.DataArray))
self.assertTrue("run_no" in dataarray.dims)
self.assertEqual(len(dataarray["run_no"]), 5)
self.assertTrue(isinstance(dataarray.attrs, dict))
self.assertListEqual(
list(dataarray.attrs.keys()),
[k.replace("*", "_").replace(".", "_").replace("|", "_") for k in parameters.dict().keys()],
)
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
class TestExplorationSingleNodeSequential(unittest.TestCase):
"""
ALN single node test with sequential exploration.
"""
def test_single_node(self):
start = time.time()
model = ALNModel()
parameters = ParameterSpace({"mue_ext_mean": [0.0, 1.5, 3.0], "mui_ext_mean": [1.5, 3.0]}, kind="sequence")
search = BoxSearch(model, parameters, filename="test_single_nodes.hdf")
search.run()
search.loadResults()
dataarray = search.xr()
self.assertTrue(isinstance(dataarray, xr.DataArray))
self.assertTrue("run_no" in dataarray.dims)
self.assertEqual(len(dataarray["run_no"]), 5)
self.assertFalse(dataarray.attrs)
for i in search.dfResults.index:
search.dfResults.loc[i, "max_r"] = np.max(
search.results[i]["rates_exc"][:, -int(1000 / model.params["dt"]) :]
)
end = time.time()
logging.info("\t > Done in {:.2f} s".format(end - start))
if __name__ == "__main__":
unittest.main()
|
[
"neurolib.optimize.exploration.BoxSearch",
"random.choice",
"numpy.random.rand",
"numpy.ones",
"neurolib.models.multimodel.MultiModel",
"os.path.join",
"neurolib.utils.parameterSpace.ParameterSpace",
"neurolib.utils.loadData.Dataset",
"neurolib.models.aln.ALNModel",
"numpy.array",
"numpy.linspace",
"neurolib.models.fhn.FHNModel",
"pytest.raises",
"unittest.main",
"time.time",
"numpy.testing.assert_array_equal"
] |
[((10456, 10471), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10469, 10471), False, 'import unittest\n'), ((1635, 1679), 'neurolib.utils.parameterSpace.ParameterSpace', 'ParameterSpace', (["{'mue_ext_mean': [1.0, 2.0]}"], {}), "({'mue_ext_mean': [1.0, 2.0]})\n", (1649, 1679), False, 'from neurolib.utils.parameterSpace import ParameterSpace\n'), ((2045, 2056), 'time.time', 'time.time', ([], {}), '()\n', (2054, 2056), False, 'import time\n'), ((2074, 2084), 'neurolib.models.aln.ALNModel', 'ALNModel', ([], {}), '()\n', (2082, 2084), False, 'from neurolib.models.aln import ALNModel\n'), ((2216, 2278), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', (['model', 'parameters'], {'filename': '"""test_single_nodes.hdf"""'}), "(model, parameters, filename='test_single_nodes.hdf')\n", (2225, 2278), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((2675, 2686), 'time.time', 'time.time', ([], {}), '()\n', (2684, 2686), False, 'import time\n'), ((2944, 2958), 'neurolib.utils.loadData.Dataset', 'Dataset', (['"""hcp"""'], {}), "('hcp')\n", (2951, 2958), False, 'from neurolib.utils.loadData import Dataset\n'), ((2975, 3011), 'neurolib.models.fhn.FHNModel', 'FHNModel', ([], {'Cmat': 'ds.Cmat', 'Dmat': 'ds.Dmat'}), '(Cmat=ds.Cmat, Dmat=ds.Dmat)\n', (2983, 3011), False, 'from neurolib.models.fhn import FHNModel\n'), ((3434, 3539), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', ([], {'model': 'model', 'parameterSpace': 'parameters', 'filename': '"""test_fhn_brain_network_exploration.hdf"""'}), "(model=model, parameterSpace=parameters, filename=\n 'test_fhn_brain_network_exploration.hdf')\n", (3443, 3539), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((4452, 4466), 'neurolib.utils.loadData.Dataset', 'Dataset', (['"""hcp"""'], {}), "('hcp')\n", (4459, 4466), False, 'from neurolib.utils.loadData import Dataset\n'), ((4483, 4519), 'neurolib.models.aln.ALNModel', 'ALNModel', ([], {'Cmat': 'ds.Cmat', 'Dmat': 'ds.Dmat'}), '(Cmat=ds.Cmat, Dmat=ds.Dmat)\n', (4491, 4519), False, 'from neurolib.models.aln import ALNModel\n'), ((6533, 6639), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', ([], {'evalFunction': 'explore_me', 'parameterSpace': 'parameters', 'filename': '"""test_circle_exploration.hdf"""'}), "(evalFunction=explore_me, parameterSpace=parameters, filename=\n 'test_circle_exploration.hdf')\n", (6542, 6639), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((7358, 7369), 'time.time', 'time.time', ([], {}), '()\n', (7367, 7369), False, 'import time\n'), ((7510, 7529), 'neurolib.models.multimodel.MultiModel', 'MultiModel', (['fhn_net'], {}), '(fhn_net)\n', (7520, 7529), False, 'from neurolib.models.multimodel import MultiModel\n'), ((7551, 7651), 'neurolib.utils.parameterSpace.ParameterSpace', 'ParameterSpace', (["{'*input*sigma': [0.0, 0.05], '*epsilon*': [0.5, 0.6]}"], {'allow_star_notation': '(True)'}), "({'*input*sigma': [0.0, 0.05], '*epsilon*': [0.5, 0.6]},\n allow_star_notation=True)\n", (7565, 7651), False, 'from neurolib.utils.parameterSpace import ParameterSpace\n'), ((7665, 7725), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', (['model', 'parameters'], {'filename': '"""test_multimodel.hdf"""'}), "(model, parameters, filename='test_multimodel.hdf')\n", (7674, 7725), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((8130, 8141), 'time.time', 'time.time', ([], {}), '()\n', (8139, 8141), False, 'import time\n'), ((8424, 8435), 'time.time', 'time.time', ([], {}), '()\n', (8433, 8435), False, 
'import time\n'), ((8576, 8595), 'neurolib.models.multimodel.MultiModel', 'MultiModel', (['fhn_net'], {}), '(fhn_net)\n', (8586, 8595), False, 'from neurolib.models.multimodel import MultiModel\n'), ((8617, 8739), 'neurolib.utils.parameterSpace.ParameterSpace', 'ParameterSpace', (["{'*input*sigma': [0.0, 0.05], '*epsilon*': [0.5, 0.6, 0.7]}"], {'allow_star_notation': '(True)', 'kind': '"""sequence"""'}), "({'*input*sigma': [0.0, 0.05], '*epsilon*': [0.5, 0.6, 0.7]},\n allow_star_notation=True, kind='sequence')\n", (8631, 8739), False, 'from neurolib.utils.parameterSpace import ParameterSpace\n'), ((8775, 8835), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', (['model', 'parameters'], {'filename': '"""test_multimodel.hdf"""'}), "(model, parameters, filename='test_multimodel.hdf')\n", (8784, 8835), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((9346, 9357), 'time.time', 'time.time', ([], {}), '()\n', (9355, 9357), False, 'import time\n'), ((9607, 9618), 'time.time', 'time.time', ([], {}), '()\n', (9616, 9618), False, 'import time\n'), ((9636, 9646), 'neurolib.models.aln.ALNModel', 'ALNModel', ([], {}), '()\n', (9644, 9646), False, 'from neurolib.models.aln import ALNModel\n'), ((9668, 9767), 'neurolib.utils.parameterSpace.ParameterSpace', 'ParameterSpace', (["{'mue_ext_mean': [0.0, 1.5, 3.0], 'mui_ext_mean': [1.5, 3.0]}"], {'kind': '"""sequence"""'}), "({'mue_ext_mean': [0.0, 1.5, 3.0], 'mui_ext_mean': [1.5, 3.0]\n }, kind='sequence')\n", (9682, 9767), False, 'from neurolib.utils.parameterSpace import ParameterSpace\n'), ((9780, 9842), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', (['model', 'parameters'], {'filename': '"""test_single_nodes.hdf"""'}), "(model, parameters, filename='test_single_nodes.hdf')\n", (9789, 9842), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((10345, 10356), 'time.time', 'time.time', ([], {}), '()\n', (10354, 10356), False, 'import time\n'), ((725, 747), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (738, 747), False, 'import random\n'), ((1051, 1080), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1064, 1080), False, 'import pytest\n'), ((1098, 1146), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', ([], {'model': 'None', 'parameterSpace': 'parameters'}), '(model=None, parameterSpace=parameters)\n', (1107, 1146), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((1161, 1190), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1174, 1190), False, 'import pytest\n'), ((1208, 1250), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', ([], {'model': 'None', 'parameterSpace': 'None'}), '(model=None, parameterSpace=None)\n', (1217, 1250), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((1265, 1294), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1278, 1294), False, 'import pytest\n'), ((1312, 1379), 'neurolib.optimize.exploration.BoxSearch', 'BoxSearch', ([], {'model': 'None', 'parameterSpace': 'parameters', 'evalFunction': 'None'}), '(model=None, parameterSpace=parameters, evalFunction=None)\n', (1321, 1379), False, 'from neurolib.optimize.exploration import BoxSearch\n'), ((3619, 3688), 'os.path.join', 'os.path.join', (['paths.HDF_DIR', '"""test_fhn_brain_network_exploration.hdf"""'], {}), "(paths.HDF_DIR, 'test_fhn_brain_network_exploration.hdf')\n", (3631, 3688), False, 'import os\n'), ((7074, 7183), 'numpy.testing.assert_array_equal', 
'np.testing.assert_array_equal', (["search.dfResults.loc[i, 'array_result']", "search.results[i]['array_result']"], {}), "(search.dfResults.loc[i, 'array_result'],\n search.results[i]['array_result'])\n", (7103, 7183), True, 'import numpy as np\n'), ((7432, 7452), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (7446, 7452), True, 'import numpy as np\n'), ((7454, 7492), 'numpy.array', 'np.array', (['[[0.0, DELAY], [DELAY, 0.0]]'], {}), '([[0.0, DELAY], [DELAY, 0.0]])\n', (7462, 7492), True, 'import numpy as np\n'), ((8498, 8518), 'numpy.random.rand', 'np.random.rand', (['(2)', '(2)'], {}), '(2, 2)\n', (8512, 8518), True, 'import numpy as np\n'), ((8520, 8558), 'numpy.array', 'np.array', (['[[0.0, DELAY], [DELAY, 0.0]]'], {}), '([[0.0, DELAY], [DELAY, 0.0]])\n', (8528, 8558), True, 'import numpy as np\n'), ((951, 971), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(2)'], {}), '(0, 3, 2)\n', (962, 971), True, 'import numpy as np\n'), ((989, 1009), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(2)'], {}), '(0, 3, 2)\n', (1000, 1009), True, 'import numpy as np\n'), ((1713, 1723), 'neurolib.models.aln.ALNModel', 'ALNModel', ([], {}), '()\n', (1721, 1723), False, 'from neurolib.models.aln import ALNModel\n'), ((2138, 2158), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(2)'], {}), '(0, 3, 2)\n', (2149, 2158), True, 'import numpy as np\n'), ((2176, 2196), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(2)'], {}), '(0, 3, 2)\n', (2187, 2196), True, 'import numpy as np\n'), ((3290, 3310), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(2)'], {}), '(0, 2, 2)\n', (3301, 3310), True, 'import numpy as np\n'), ((5305, 5325), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(2)'], {}), '(0, 3, 2)\n', (5316, 5325), True, 'import numpy as np\n'), ((5343, 5363), 'numpy.linspace', 'np.linspace', (['(0)', '(3)', '(2)'], {}), '(0, 3, 2)\n', (5354, 5363), True, 'import numpy as np\n'), ((6359, 6369), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (6366, 6369), True, 'import numpy as np\n'), ((6464, 6485), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(2)'], {}), '(-2, 2, 2)\n', (6475, 6485), True, 'import numpy as np\n'), ((6492, 6513), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(2)'], {}), '(-2, 2, 2)\n', (6503, 6513), True, 'import numpy as np\n'), ((3200, 3229), 'numpy.ones', 'np.ones', (["(model.params['N'],)"], {}), "((model.params['N'],))\n", (3207, 3229), True, 'import numpy as np\n'), ((3243, 3263), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(2)'], {}), '(0, 2, 2)\n', (3254, 3263), True, 'import numpy as np\n')]
|
# newly added libraries
import copy
import wandb
import time
import math
import csv
import shutil
from tqdm import tqdm
import torch
import numpy as np
import pandas as pd
from client import Client
from config import *
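# NOTE: the star import from "config" is assumed to provide the globals used below without further imports
# (e.g. logger, os, channel_data, client_num_in_total, client_num_per_round, the csv paths and the THRESHOLD_*/RES_* constants).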
import scheduler as sch
class FedAvgTrainer(object):
def __init__(self, dataset, model, device, args):
self.device = device
self.args = args
[client_num, _, _, train_data_global, _, train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num] = dataset
# record the client number of the dataset
self.client_num = client_num
self.class_num = class_num
# setup dataset
self.data_shape = list(train_data_global[0][0].size())
self.train_data_local_num_dict = train_data_local_num_dict
self.test_data_local_dict = test_data_local_dict
self.train_data_local_dict = train_data_local_dict
if args.partition_method == "noniid":
logger.info("-----------non-i.i.d transform----------")
# generate the non i.i.d dataset
self.gene_non_iid_dataset(train_data_global, "tmp")
# read the non i.i.d dataset
self.read_non_iid_dataset("tmp")
# rm the tmp directory
shutil.rmtree(os.path.join('.', 'tmp'))
self.client_list = []
self.setup_clients(train_data_local_num_dict, train_data_local_dict, test_data_local_dict)
# initialize the recorder of invalid dataset
self.invalid_datasets = dict()
        # the time counter starts from the first entry of the channel table
self.time_counter = channel_data['Time'][0]
# initialize the cycle_num here
self.cycle_num = 0
# initialize the scheduler function
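        # for the PN-based schedulers, the random initialization is retried (up to 100 times) until a test call schedules more than 5 clients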
if self.args.method == "sch_pn_method_1" or self.args.method == "sch_pn_method_1_empty":
for _ in range(100):
self.scheduler = sch.Scheduler_PN_method_1()
client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
if len(client_indexes) > 5:
break
elif self.args.method == "sch_pn_method_2" or self.args.method == "sch_pn_method_2_empty":
for _ in range(100):
self.scheduler = sch.Scheduler_PN_method_2()
client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
if len(client_indexes) > 5:
break
elif self.args.method == "sch_pn_method_3" or self.args.method == "sch_pn_method_3_empty":
for _ in range(100):
self.scheduler = sch.Scheduler_PN_method_3()
client_indexes, _ = self.scheduler.sch_pn_test(1, 2002)
if len(client_indexes) > 5:
break
elif self.args.method == "sch_random":
self.scheduler = sch.sch_random
elif self.args.method == "sch_channel":
self.scheduler = sch.sch_channel
elif self.args.method == "sch_rrobin":
self.scheduler = sch.sch_rrobin
elif self.args.method == "sch_loss":
self.scheduler = sch.sch_loss
else:
self.scheduler = sch.sch_random
self.model = model
self.model_global = model(self.args, model_name=self.args.model, output_dim=self.class_num)
self.model_global.train()
def setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict):
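        # one Client object is created per scheduled slot; its local dataset is swapped in every communication round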
logger.debug("############setup_clients (START)#############")
for client_idx in range(client_num_per_round):
c = Client(client_idx, train_data_local_dict[client_idx], test_data_local_dict[client_idx],
train_data_local_num_dict[client_idx], self.args, self.device)
self.client_list.append(c)
logger.debug("############setup_clients (END)#############")
def train(self):
"""
Global initialized values
"""
# maintain a lst for local losses
local_loss_lst = np.zeros((1, client_num_in_total))
# maintain a lst for local acc
_, dataset_acc_lst = self.local_test_on_all_clients(self.model_global, 0, True, False)
local_acc_lst = dataset_acc_lst[np.arange(client_num_in_total) % self.client_num]
# counting days
counting_days, reward = 0, 0
# initialize values for calculating iteration num
delta, rho, beta, rho_flag, beta_flag = np.random.rand(1)[0], np.random.rand(1)[0], np.random.rand(1)[0], True, True
# Initialize values for calculating FPF2 index
local_itr_lst = torch.zeros(self.args.comm_round, int(client_num_in_total)).to(self.device) # historical local iterations.
G_mat = torch.zeros(int(client_num_in_total)).to(self.device) # initial the value of G with zero
# if weight size is larger than THRESHOLD_WEIGHT_SIZE we will use a simpler method to calculate FPF
weight_size = sum([self.model_global.cpu().state_dict()[para].numpy().ravel().shape[0] for para in self.model_global.state_dict().keys()])
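        # weight_size is the total number of scalar parameters in the global model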
if weight_size < THRESHOLD_WEIGHT_SIZE:
A_mat = torch.ones(weight_size).to(self.device) # initial the value of A with ones.
local_w_diffs = torch.zeros((int(client_num_in_total), weight_size)).to(self.device)
else:
logger.warning("The weight size of the model {} is too large. Thus, we turn to use a more simple method to calculate FPF.".format(self.args.model))
LRU_itr_lst = torch.zeros(int(client_num_in_total)).to(self.device) # store the iteration gap for each client.
# show weight size for the model.
logger.debug("weight size: {}".format(weight_size))
"""
        Start training, entering the loop over communication rounds.
"""
Inform = {}
traffic = 0
for round_idx in range(self.args.comm_round):
logger.info("################Communication round : {}".format(round_idx))
# set the time_counter
self.time_counter = np.array(channel_data['Time'][channel_data['Time'] >= self.time_counter])[0]
logger.info("time_counter: {}".format(self.time_counter))
self.model_global.train()
# get client_indexes from scheduler
reward, loss_a, loss_c = 0, 0, 0
if (self.args.method)[:6] == "sch_pn":
if self.args.method[-5:] == "empty" or round_idx == 0:
client_indexes, local_itr = self.scheduler.sch_pn_empty(round_idx, self.time_counter)
else:
client_indexes, local_itr, (reward, loss_a, loss_c) = self.scheduler.sch_pn(round_idx, self.time_counter, loss_locals, FPF2_idx_lst, local_loss_lst, )
else:
if self.args.method == "sch_loss":
if round_idx == 0:
loss_locals = []
client_indexes, local_itr = self.scheduler(round_idx, self.time_counter, loss_locals)
else:
client_indexes, local_itr = self.scheduler(round_idx, self.time_counter)
# write to the scheduler csv
with open(scheduler_csv, mode = "a+", encoding='utf-8', newline='') as file:
csv_writer = csv.writer(file)
if round_idx == 0:
csv_writer.writerow(['time counter', 'client index', 'iteration'])
csv_writer.writerow([self.time_counter, str(client_indexes), local_itr])
file.flush()
logger.info("client_indexes = " + str(client_indexes))
traffic += len(client_indexes)
# write one line to trainer_csv
trainer_csv_line = [round_idx, self.time_counter, str(client_indexes), traffic]
# contribute to time counter
self.tx_time(list(client_indexes)) # transmit time
# store the last model's training parameters.
last_w = copy.deepcopy(self.model_global.cpu().state_dict())
# local Initialization
w_locals, loss_locals, beta_locals, rho_locals, cycle_locals = [], [], [], [], []
"""
            For scalability: following the original FedAvg algorithm, we uniformly sample a fraction of clients in each round.
            Instead of creating new 'Client' instances, our implementation reuses the existing ones and swaps in the scheduled clients' local datasets.
"""
for idx in range(len(client_indexes)):
# update dataset
client = self.client_list[idx]
client_idx = client_indexes[idx]
dataset_idx = client_idx % self.client_num
if dataset_idx in self.invalid_datasets.keys():
current_idx = self.invalid_datasets[dataset_idx]
else:
current_idx = dataset_idx
while True:
client.update_local_dataset(current_idx, self.train_data_local_dict[current_idx],
self.test_data_local_dict[current_idx],
self.train_data_local_num_dict[current_idx])
# train on new dataset
                    # add a new parameter "local_itr" to the function "client.train()"
                    # add a new return value "time_interval", the time consumed for training the model on the client.
w, loss, local_beta, local_rho, local_acc, local_cycle = client.train(net=copy.deepcopy(self.model_global).to(self.device), local_iteration = local_itr)
if loss != None and local_beta != None and local_rho != None and local_acc != None:
if dataset_idx != current_idx:
self.invalid_datasets[dataset_idx] = current_idx
break
current_idx = np.random.randint(self.class_num)
logger.warning("changing dataset for {} to {}".format(client_idx, current_idx))
# record current cycle
cycle_locals.append([client.get_sample_number(), local_cycle])
# record current w into w_locals
w_locals.append((client.get_sample_number(), copy.deepcopy(w)))
# record current loss into loss_locals
loss_locals.append(loss)
# record local beta into beta_locals
beta_locals.append(local_beta)
# record local beta into rho_locals
rho_locals.append(local_rho)
# update the local_loss_lst
local_loss_lst[0, client_idx] = loss
# update local_w_diffs
if weight_size < THRESHOLD_WEIGHT_SIZE:
local_w_diffs[client_idx, :] = torch.cat([w[para].reshape((-1, )) - last_w[para].reshape((-1, )) for para in self.model_global.state_dict().keys()]).to(self.device)
# update local_acc_lst
local_acc_lst[client_idx] = local_acc
# loss
logger.info('Client {:3d}, loss {:.3f}'.format(client_idx, loss))
# update global weights
w_glob = self.aggregate(w_locals)
# copy weight to net_glob
self.model_global.load_state_dict(w_glob)
# update the time counter
if list(client_indexes):
self.time_counter += math.ceil(LOCAL_TRAINING_TIME)
logger.debug("time_counter after training: {}".format(self.time_counter))
trainer_csv_line += [self.time_counter-trainer_csv_line[1], np.var(local_loss_lst), str(loss_locals), np.var(loss_locals), np.var(local_acc_lst)]
# print loss
if not loss_locals:
logger.info('Round {:3d}, Average loss None'.format(round_idx))
trainer_csv_line.append('None')
else:
loss_avg = sum(loss_locals) / len(loss_locals)
logger.info('Round {:3d}, Average loss {:.3f}'.format(round_idx, loss_avg))
trainer_csv_line.append(loss_avg)
if cycle_locals:
cycle_locals = np.asarray(cycle_locals)
logger.info('Elapsed cycles {:.3f}'.format(np.sum(cycle_locals[:, 0] * cycle_locals[:, 1]) / np.sum(cycle_locals[:, 0])))
# local test on all client.
if round_idx % self.args.frequency_of_the_test == 0 or round_idx == self.args.comm_round - 1:
test_acc, _ = self.local_test_on_all_clients(self.model_global, round_idx, EVAL_ON_TRAIN, True)
trainer_csv_line.append(test_acc)
# write headers for csv
with open(trainer_csv, mode = "a+", encoding='utf-8', newline='') as file:
csv_writer = csv.writer(file)
if round_idx == 0:
csv_writer.writerow(['round index', 'time counter', 'client index', 'traffic', 'train time', 'fairness',
'local loss', "local loss var", "local acc var", 'global loss', 'test accuracy'])
csv_writer.writerow(trainer_csv_line)
file.flush()
# log on wandb
Inform["reward"] = reward
wandb.log(Inform)
Inform = {
"reward": reward, "loss_a": loss_a,
"loss_c": loss_c, "round": round_idx,
"traffic": traffic,
"beta": beta, "rho": rho, "delta": delta,
"cum_time": trainer_csv_line[1]+self.cycle_num*59361,
"local_itr": local_itr,
"client_num": len(client_indexes),
"C3": (rho*delta)/beta,
"local_loss_var": np.var(loss_locals),
"local_acc_var": np.var(local_acc_lst)
}
# update FPF index list
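            # the FPF index of a client is the norm of its recent weight change (scaled by A_mat), or its accumulated iteration gap,
            # divided by the exponentially averaged local iteration count G_mat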
if weight_size < THRESHOLD_WEIGHT_SIZE:
FPF2_idx_lst = torch.norm(local_w_diffs * A_mat, dim = 1) / G_mat
else:
FPF2_idx_lst = LRU_itr_lst / G_mat
FPF2_idx_lst = FPF2_idx_lst.cpu().numpy()
FPF2_idx_lst[np.bitwise_or(np.isnan(FPF2_idx_lst), np.isinf(FPF2_idx_lst))] = 0
# FPF2_idx_lst = FPF2_idx_lst / max(FPF2_idx_lst)
FPF2_idx_lst[np.bitwise_or(np.isnan(FPF2_idx_lst), np.isinf(FPF2_idx_lst))] = 0
# write FPF index list to csv
with open(FPF_csv, mode = "a+", encoding='utf-8', newline='') as file:
csv_writer = csv.writer(file)
if round_idx == 0:
csv_writer.writerow(['time counter'] + ["car_"+str(i) for i in range(client_num_in_total)])
csv_writer.writerow([trainer_csv_line[1]]+FPF2_idx_lst.tolist())
file.flush()
# update beta & delta & rho
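            # delta is the sample-weighted average of the local-to-global weight-difference norms divided by the learning rate;
            # rho and beta are sample-weighted averages of the clients' local estimates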
if w_locals and loss_locals:
sample_nums = np.array([sample_num for sample_num, _ in w_locals])
local_w_diff_norms = np.array([torch.norm(torch.cat([w[para].reshape((-1, )) - w_glob[para].reshape((-1, )) for para in self.model_global.state_dict().keys()])).item() for _, w in w_locals])
# calculate delta
delta_tmp = np.sum(sample_nums * local_w_diff_norms) / np.sum(sample_nums) / self.args.lr
if (not np.isnan(delta_tmp) and not np.isinf(delta_tmp)):
delta = delta_tmp
# update rho
rho_tmp = np.sum(sample_nums * np.array(rho_locals)) / np.sum(sample_nums)
if rho_tmp > rho or rho_flag:
if (not np.isnan(rho_tmp) and not np.isinf(rho_tmp)) and rho_tmp < THRESHOLD_RHO:
rho, rho_flag = rho_tmp, False
# update beta
beta_tmp = np.sum(sample_nums * np.array(beta_locals)) / np.sum(sample_nums)
if beta_tmp > beta or beta_flag:
if (not np.isnan(beta_tmp) and not np.isinf(beta_tmp)) and beta_tmp < THRESHOLD_BETA:
beta, beta_flag = beta_tmp, False
if self.args.method == "sch_pn_method_1" or self.args.method == "sch_pn_method_1_empty":
self.scheduler.calculate_itr_method_1(delta)
elif self.args.method == "sch_pn_method_2" or self.args.method == "sch_pn_method_2_empty":
self.scheduler.calculate_itr_method_2(rho, beta, delta)
elif self.args.method == "sch_pn_method_3" or self.args.method == "sch_pn_method_3_empty":
self.scheduler.calculate_itr_method_3(rho, beta, delta)
if weight_size < THRESHOLD_WEIGHT_SIZE:
# update local_w_diffs
global_w_diff = torch.cat([w_glob[para].reshape((-1, )) - last_w[para].reshape((-1, )) for para in self.model_global.state_dict().keys()]).to(self.device)
local_w_diffs[list(set(list(range(client_num_in_total))) - set(list(client_indexes))), :] -= global_w_diff
# update A_mat
A_mat = A_mat * (1 - 1/G2) + (global_w_diff) / G2 / global_w_diff.mean()
# Update local_itr_lst
if list(client_indexes) and local_itr > 0: # only if client_idx is not empty and local_iter > 0, then I will update following values
local_itr_lst[round_idx, list(client_indexes)] = float(local_itr)
if weight_size >= THRESHOLD_WEIGHT_SIZE:
LRU_itr_lst += float(local_itr)
LRU_itr_lst[list(client_indexes)] = 0
# update G_mat
G_mat = G_mat * (1 - 1 / G1) + local_itr_lst[round_idx, :] / G1
# if current time_counter has exceed the channel table, I will simply stop early
if self.time_counter >= time_cnt_max[counting_days]:
counting_days += 1
if counting_days % RESTART_DAYS == 0:
if self.args.method == "find_constant" and loss_locals:
w_optimal, loss_optimal = self.central_train()
w = torch.cat([param.view(-1) for param in self.model_global.parameters()])
w_diff_optimal = torch.norm(w.cpu() - w_optimal.cpu())
logger.info("The norm of difference between w_optmal & w: {}".format(w_diff_optimal.item()))
logger.info("The norm of difference between loss & loss_optimal: {}".format(loss_avg - loss_optimal))
break
logger.info("################reinitialize model")
self.model_global = self.model(self.args, model_name=self.args.model, output_dim=self.class_num)
delta, rho, beta, rho_flag, beta_flag = np.random.rand(1)[0], np.random.rand(1)[0], np.random.rand(1)[0], True, True
traffic = 0
if counting_days >= DATE_LENGTH:
logger.info("################training restarts")
counting_days = 0
self.time_counter = 0
self.cycle_num = self.cycle_num+1
def central_train(self):
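        # train a single model centrally over all clients' local data to obtain reference ("optimal") weights and the corresponding loss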
logger.info("################global optimal weights calculation")
model = self.model(self.args, model_name=self.args.model, output_dim=self.class_num)
criterion = torch.nn.CrossEntropyLoss().to(self.device)
model.to(self.device)
if self.args.client_optimizer == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=self.args.lr)
else:
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=self.args.lr,
weight_decay=self.args.wd, amsgrad=True)
for _ in tqdm(range(self.args.central_round)):
for client_idx in range(self.client_num):
x, labels = next(iter(self.train_data_local_dict[client_idx]))
x, labels = x.to(self.device), labels.to(self.device)
model.train()
model.zero_grad()
log_probs = model(x)
loss = criterion(log_probs, labels)
loss.backward()
loss = loss.item()
optimizer.step()
wandb.log({"central_training/loss": loss})
w_optimal = torch.cat([param.view(-1) for param in model.parameters()])
loss_optimal = loss
return w_optimal, loss_optimal
def gene_non_iid_dataset(self, train_global, directory):
"""
changing self.train_data_local_dict to non-i.i.d. dataset.
And change self.train_data_local_num_dict correspondingly.
"""
data, labels = train_global[0][0], train_global[0][1] # read the tensor from train_global.
# transform shape
data = data.view(data.shape[0], -1)
labels = labels.view(labels.shape[0], -1)
# get full_df
full_df = pd.DataFrame(np.concatenate((data.numpy(), labels.numpy()), axis=1)).sample(frac=1, random_state=self.args.seed)
# temporary store the data in dir
save_dir = os.path.join(".", directory)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
for client_idx in tqdm(range(self.client_num)):
# get selected classes
try:
selected_classes = set(list(np.random.choice(list(set(full_df.iloc[:, -1])), CLASS_NUM)))
except:
selected_classes = set(full_df.iloc[:, -1])
# got valid data
valid_data = full_df[full_df.iloc[:, -1].isin(selected_classes)]
# get number of data on the local client
local_num = self.train_data_local_num_dict[client_idx]
            # take the selected data (full_df was already shuffled when it was built)
try:
selected_data = valid_data[0:local_num]
except:
selected_data = valid_data
            self.train_data_local_num_dict[client_idx] = len(selected_data)  # record how many samples this client actually received
# update the local client data
np.save(os.path.join(save_dir, "client_{}_data.npy".format(client_idx)), selected_data.iloc[:, 0:-1].values)
np.save(os.path.join(save_dir, "client_{}_labels.npy".format(client_idx)), selected_data.iloc[:, -1].values)
# remove the data from the full_df
full_df = full_df.drop(index=selected_data.index)
def read_non_iid_dataset(self, directory):
for client_idx in tqdm(range(self.client_num)):
data_shape = [self.train_data_local_num_dict[client_idx]] + self.data_shape[1:]
data_path = os.path.join(".", directory, "client_{}_data.npy".format(client_idx))
labels_path = os.path.join(".", directory, "client_{}_labels.npy".format(client_idx))
self.train_data_local_dict[client_idx] = [(torch.from_numpy(np.load(data_path)).view(tuple(data_shape)).float(), torch.from_numpy(np.load(labels_path)).long())]
def tx_time(self, client_indexes):
if not client_indexes:
self.time_counter += 1
return
        # read the channel condition (distance to the base station) for the selected cars at the current time step
        mask = (channel_data['Time'] == self.time_counter) & channel_data['Car'].isin(client_indexes)
        channel_res = np.reshape(np.array(channel_data[mask]["Distance to BS(4982,905)"]), (1, -1))
logger.debug("channel_res: {}".format(channel_res))
        # linearly search for the smallest integer tmp_t that satisfies the radio resource-allocation constraint (optimization problem)
tmp_t = 1
if self.args.radio_alloc == "optimal":
while np.sum(RES_WEIGHT * channel_res * RES_RATIO / tmp_t) > 1:
tmp_t += 1
elif self.args.radio_alloc == "uniform":
while np.max(channel_res) * RES_WEIGHT * RES_RATIO * len(channel_res) / tmp_t > 1:
tmp_t += 1
self.time_counter += math.ceil(TIME_COMPRESSION_RATIO*tmp_t)
logger.debug("time_counter after tx_time: {}".format(self.time_counter))
def aggregate(self, w_locals):
if not w_locals:
return copy.deepcopy(self.model_global.cpu().state_dict())
training_num = 0
for idx in range(len(w_locals)):
(sample_num, averaged_params) = w_locals[idx]
training_num += sample_num
(sample_num, averaged_params) = w_locals[0]
for k in averaged_params.keys():
for i in range(0, len(w_locals)):
local_sample_number, local_model_params = w_locals[i]
w = local_sample_number / training_num
if i == 0:
averaged_params[k] = local_model_params[k] * w
else:
averaged_params[k] += local_model_params[k] * w
return averaged_params
def local_test_on_all_clients(self, model_global, round_idx, eval_on_train=False, if_log=True):
logger.info("################local_test_on_all_clients : {}".format(round_idx))
train_metrics = {
'num_samples': [],
'num_correct': [],
'losses': []
}
test_metrics = {
'num_samples': [],
'num_correct': [],
'losses': []
}
client = self.client_list[0]
for client_idx in tqdm(range(min(int(client_num_in_total), self.client_num))):
"""
Note: for datasets like "fed_CIFAR100" and "fed_shakespheare",
the training client number is larger than the testing client number
"""
if self.test_data_local_dict[client_idx] is None or client_idx in self.invalid_datasets.keys():
continue
client.update_local_dataset(client_idx, self.train_data_local_dict[client_idx],
self.test_data_local_dict[client_idx],
self.train_data_local_num_dict[client_idx])
# test data
test_local_metrics = client.local_test(model_global, True)
test_metrics['num_samples'].append(copy.deepcopy(test_local_metrics['test_total']))
test_metrics['num_correct'].append(copy.deepcopy(test_local_metrics['test_correct']))
test_metrics['losses'].append(copy.deepcopy(test_local_metrics['test_loss']))
# train data
if eval_on_train:
train_local_metrics = client.local_test(model_global, False)
train_metrics['num_samples'].append(copy.deepcopy(train_local_metrics['test_total']))
train_metrics['num_correct'].append(copy.deepcopy(train_local_metrics['test_correct']))
train_metrics['losses'].append(copy.deepcopy(train_local_metrics['test_loss']))
# test on test dataset
test_acc = sum(test_metrics['num_correct']) / sum(test_metrics['num_samples'])
test_loss = sum(test_metrics['losses']) / sum(test_metrics['num_samples'])
stats = {
"Test/Acc": test_acc,
"Test/Loss": test_loss,
"round": round_idx,
"cum_time": self.time_counter+self.cycle_num*59361,
}
# test on training dataset
if eval_on_train:
train_acc = sum(train_metrics['num_correct']) / sum(train_metrics['num_samples'])
train_loss = sum(train_metrics['losses']) / sum(train_metrics['num_samples'])
stats.update({
'Train/Acc': train_acc,
'Train/Loss': train_loss,
"round": round_idx,
"cum_time": self.time_counter+self.cycle_num*59361,
})
if if_log:
logger.info(stats)
wandb.log(stats)
return test_acc, np.array(train_metrics['num_correct']) / np.array(train_metrics['num_samples'])
if if_log:
logger.info(stats)
wandb.log(stats)
return test_acc, None
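
# --------------------------------------------------------------------------------------------
# Hedged illustration (not part of the original trainer): aggregate() above implements
# sample-weighted FedAvg -- every entry of the returned state dict is the mean of the client
# parameters weighted by each client's local sample count. A minimal standalone sketch of the
# same weighting rule on plain dictionaries of floats (the names here are hypothetical):
def _fedavg_weighted_average_sketch(w_locals):
    """w_locals: list of (sample_num, params_dict) pairs, mirroring the input of aggregate()."""
    training_num = sum(sample_num for sample_num, _ in w_locals)
    averaged = {}
    for i, (sample_num, params) in enumerate(w_locals):
        weight = sample_num / training_num
        for k, v in params.items():
            averaged[k] = v * weight if i == 0 else averaged[k] + v * weight
    return averaged
# Example: two clients with 10 and 30 samples -> the larger client gets weight 0.75, so
# _fedavg_weighted_average_sketch([(10, {"w": 0.0}), (30, {"w": 1.0})]) == {"w": 0.75}.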
|
[
"wandb.log",
"numpy.random.rand",
"torch.nn.CrossEntropyLoss",
"numpy.array",
"client.Client",
"copy.deepcopy",
"numpy.arange",
"numpy.asarray",
"numpy.max",
"numpy.isinf",
"scheduler.Scheduler_PN_method_1",
"csv.writer",
"torch.norm",
"numpy.isnan",
"scheduler.Scheduler_PN_method_3",
"math.ceil",
"scheduler.Scheduler_PN_method_2",
"numpy.sum",
"numpy.zeros",
"numpy.random.randint",
"numpy.load",
"numpy.var",
"torch.ones"
] |
[((4042, 4076), 'numpy.zeros', 'np.zeros', (['(1, client_num_in_total)'], {}), '((1, client_num_in_total))\n', (4050, 4076), True, 'import numpy as np\n'), ((23990, 24031), 'math.ceil', 'math.ceil', (['(TIME_COMPRESSION_RATIO * tmp_t)'], {}), '(TIME_COMPRESSION_RATIO * tmp_t)\n', (23999, 24031), False, 'import math\n'), ((3612, 3767), 'client.Client', 'Client', (['client_idx', 'train_data_local_dict[client_idx]', 'test_data_local_dict[client_idx]', 'train_data_local_num_dict[client_idx]', 'self.args', 'self.device'], {}), '(client_idx, train_data_local_dict[client_idx], test_data_local_dict[\n client_idx], train_data_local_num_dict[client_idx], self.args, self.device)\n', (3618, 3767), False, 'from client import Client\n'), ((13423, 13440), 'wandb.log', 'wandb.log', (['Inform'], {}), '(Inform)\n', (13432, 13440), False, 'import wandb\n'), ((20430, 20472), 'wandb.log', 'wandb.log', (["{'central_training/loss': loss}"], {}), "({'central_training/loss': loss})\n", (20439, 20472), False, 'import wandb\n'), ((27972, 27988), 'wandb.log', 'wandb.log', (['stats'], {}), '(stats)\n', (27981, 27988), False, 'import wandb\n'), ((1933, 1960), 'scheduler.Scheduler_PN_method_1', 'sch.Scheduler_PN_method_1', ([], {}), '()\n', (1958, 1960), True, 'import scheduler as sch\n'), ((4252, 4282), 'numpy.arange', 'np.arange', (['client_num_in_total'], {}), '(client_num_in_total)\n', (4261, 4282), True, 'import numpy as np\n'), ((4469, 4486), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (4483, 4486), True, 'import numpy as np\n'), ((4491, 4508), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (4505, 4508), True, 'import numpy as np\n'), ((4513, 4530), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (4527, 4530), True, 'import numpy as np\n'), ((6074, 6147), 'numpy.array', 'np.array', (["channel_data['Time'][channel_data['Time'] >= self.time_counter]"], {}), "(channel_data['Time'][channel_data['Time'] >= self.time_counter])\n", (6082, 6147), True, 'import numpy as np\n'), ((11553, 11583), 'math.ceil', 'math.ceil', (['LOCAL_TRAINING_TIME'], {}), '(LOCAL_TRAINING_TIME)\n', (11562, 11583), False, 'import math\n'), ((11755, 11777), 'numpy.var', 'np.var', (['local_loss_lst'], {}), '(local_loss_lst)\n', (11761, 11777), True, 'import numpy as np\n'), ((11797, 11816), 'numpy.var', 'np.var', (['loss_locals'], {}), '(loss_locals)\n', (11803, 11816), True, 'import numpy as np\n'), ((11818, 11839), 'numpy.var', 'np.var', (['local_acc_lst'], {}), '(local_acc_lst)\n', (11824, 11839), True, 'import numpy as np\n'), ((12323, 12347), 'numpy.asarray', 'np.asarray', (['cycle_locals'], {}), '(cycle_locals)\n', (12333, 12347), True, 'import numpy as np\n'), ((12960, 12976), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (12970, 12976), False, 'import csv\n'), ((13899, 13918), 'numpy.var', 'np.var', (['loss_locals'], {}), '(loss_locals)\n', (13905, 13918), True, 'import numpy as np\n'), ((13953, 13974), 'numpy.var', 'np.var', (['local_acc_lst'], {}), '(local_acc_lst)\n', (13959, 13974), True, 'import numpy as np\n'), ((14685, 14701), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (14695, 14701), False, 'import csv\n'), ((15071, 15123), 'numpy.array', 'np.array', (['[sample_num for sample_num, _ in w_locals]'], {}), '([sample_num for sample_num, _ in w_locals])\n', (15079, 15123), True, 'import numpy as np\n'), ((19490, 19517), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (19515, 19517), False, 'import torch\n'), ((23705, 23757), 'numpy.sum', 
'np.sum', (['(RES_WEIGHT * channel_res * RES_RATIO / tmp_t)'], {}), '(RES_WEIGHT * channel_res * RES_RATIO / tmp_t)\n', (23711, 23757), True, 'import numpy as np\n'), ((26168, 26215), 'copy.deepcopy', 'copy.deepcopy', (["test_local_metrics['test_total']"], {}), "(test_local_metrics['test_total'])\n", (26181, 26215), False, 'import copy\n'), ((26264, 26313), 'copy.deepcopy', 'copy.deepcopy', (["test_local_metrics['test_correct']"], {}), "(test_local_metrics['test_correct'])\n", (26277, 26313), False, 'import copy\n'), ((26357, 26403), 'copy.deepcopy', 'copy.deepcopy', (["test_local_metrics['test_loss']"], {}), "(test_local_metrics['test_loss'])\n", (26370, 26403), False, 'import copy\n'), ((27783, 27799), 'wandb.log', 'wandb.log', (['stats'], {}), '(stats)\n', (27792, 27799), False, 'import wandb\n'), ((2268, 2295), 'scheduler.Scheduler_PN_method_2', 'sch.Scheduler_PN_method_2', ([], {}), '()\n', (2293, 2295), True, 'import scheduler as sch\n'), ((5169, 5192), 'torch.ones', 'torch.ones', (['weight_size'], {}), '(weight_size)\n', (5179, 5192), False, 'import torch\n'), ((7331, 7347), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (7341, 7347), False, 'import csv\n'), ((10015, 10048), 'numpy.random.randint', 'np.random.randint', (['self.class_num'], {}), '(self.class_num)\n', (10032, 10048), True, 'import numpy as np\n'), ((14108, 14148), 'torch.norm', 'torch.norm', (['(local_w_diffs * A_mat)'], {'dim': '(1)'}), '(local_w_diffs * A_mat, dim=1)\n', (14118, 14148), False, 'import torch\n'), ((14321, 14343), 'numpy.isnan', 'np.isnan', (['FPF2_idx_lst'], {}), '(FPF2_idx_lst)\n', (14329, 14343), True, 'import numpy as np\n'), ((14345, 14367), 'numpy.isinf', 'np.isinf', (['FPF2_idx_lst'], {}), '(FPF2_idx_lst)\n', (14353, 14367), True, 'import numpy as np\n'), ((14476, 14498), 'numpy.isnan', 'np.isnan', (['FPF2_idx_lst'], {}), '(FPF2_idx_lst)\n', (14484, 14498), True, 'import numpy as np\n'), ((14500, 14522), 'numpy.isinf', 'np.isinf', (['FPF2_idx_lst'], {}), '(FPF2_idx_lst)\n', (14508, 14522), True, 'import numpy as np\n'), ((15683, 15702), 'numpy.sum', 'np.sum', (['sample_nums'], {}), '(sample_nums)\n', (15689, 15702), True, 'import numpy as np\n'), ((16009, 16028), 'numpy.sum', 'np.sum', (['sample_nums'], {}), '(sample_nums)\n', (16015, 16028), True, 'import numpy as np\n'), ((26589, 26637), 'copy.deepcopy', 'copy.deepcopy', (["train_local_metrics['test_total']"], {}), "(train_local_metrics['test_total'])\n", (26602, 26637), False, 'import copy\n'), ((26691, 26741), 'copy.deepcopy', 'copy.deepcopy', (["train_local_metrics['test_correct']"], {}), "(train_local_metrics['test_correct'])\n", (26704, 26741), False, 'import copy\n'), ((26790, 26837), 'copy.deepcopy', 'copy.deepcopy', (["train_local_metrics['test_loss']"], {}), "(train_local_metrics['test_loss'])\n", (26803, 26837), False, 'import copy\n'), ((27829, 27867), 'numpy.array', 'np.array', (["train_metrics['num_correct']"], {}), "(train_metrics['num_correct'])\n", (27837, 27867), True, 'import numpy as np\n'), ((27870, 27908), 'numpy.array', 'np.array', (["train_metrics['num_samples']"], {}), "(train_metrics['num_samples'])\n", (27878, 27908), True, 'import numpy as np\n'), ((2603, 2630), 'scheduler.Scheduler_PN_method_3', 'sch.Scheduler_PN_method_3', ([], {}), '()\n', (2628, 2630), True, 'import scheduler as sch\n'), ((10377, 10393), 'copy.deepcopy', 'copy.deepcopy', (['w'], {}), '(w)\n', (10390, 10393), False, 'import copy\n'), ((15393, 15433), 'numpy.sum', 'np.sum', (['(sample_nums * local_w_diff_norms)'], {}), '(sample_nums * 
local_w_diff_norms)\n', (15399, 15433), True, 'import numpy as np\n'), ((15436, 15455), 'numpy.sum', 'np.sum', (['sample_nums'], {}), '(sample_nums)\n', (15442, 15455), True, 'import numpy as np\n'), ((15495, 15514), 'numpy.isnan', 'np.isnan', (['delta_tmp'], {}), '(delta_tmp)\n', (15503, 15514), True, 'import numpy as np\n'), ((15523, 15542), 'numpy.isinf', 'np.isinf', (['delta_tmp'], {}), '(delta_tmp)\n', (15531, 15542), True, 'import numpy as np\n'), ((12407, 12454), 'numpy.sum', 'np.sum', (['(cycle_locals[:, 0] * cycle_locals[:, 1])'], {}), '(cycle_locals[:, 0] * cycle_locals[:, 1])\n', (12413, 12454), True, 'import numpy as np\n'), ((12457, 12483), 'numpy.sum', 'np.sum', (['cycle_locals[:, 0]'], {}), '(cycle_locals[:, 0])\n', (12463, 12483), True, 'import numpy as np\n'), ((15659, 15679), 'numpy.array', 'np.array', (['rho_locals'], {}), '(rho_locals)\n', (15667, 15679), True, 'import numpy as np\n'), ((15984, 16005), 'numpy.array', 'np.array', (['beta_locals'], {}), '(beta_locals)\n', (15992, 16005), True, 'import numpy as np\n'), ((18901, 18918), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (18915, 18918), True, 'import numpy as np\n'), ((18923, 18940), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (18937, 18940), True, 'import numpy as np\n'), ((18945, 18962), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (18959, 18962), True, 'import numpy as np\n'), ((15777, 15794), 'numpy.isnan', 'np.isnan', (['rho_tmp'], {}), '(rho_tmp)\n', (15785, 15794), True, 'import numpy as np\n'), ((15803, 15820), 'numpy.isinf', 'np.isinf', (['rho_tmp'], {}), '(rho_tmp)\n', (15811, 15820), True, 'import numpy as np\n'), ((16106, 16124), 'numpy.isnan', 'np.isnan', (['beta_tmp'], {}), '(beta_tmp)\n', (16114, 16124), True, 'import numpy as np\n'), ((16133, 16151), 'numpy.isinf', 'np.isinf', (['beta_tmp'], {}), '(beta_tmp)\n', (16141, 16151), True, 'import numpy as np\n'), ((23099, 23119), 'numpy.load', 'np.load', (['labels_path'], {}), '(labels_path)\n', (23106, 23119), True, 'import numpy as np\n'), ((9636, 9668), 'copy.deepcopy', 'copy.deepcopy', (['self.model_global'], {}), '(self.model_global)\n', (9649, 9668), False, 'import copy\n'), ((23857, 23876), 'numpy.max', 'np.max', (['channel_res'], {}), '(channel_res)\n', (23863, 23876), True, 'import numpy as np\n'), ((23029, 23047), 'numpy.load', 'np.load', (['data_path'], {}), '(data_path)\n', (23036, 23047), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from sklearn import preprocessing
from torch.autograd import Variable
from models_gat import GAT
import os
import torch
import numpy as np
import argparse
import pickle
import sklearn.metrics as metrics
import cross_val
import time
import random
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def evaluate(dataset, model_GAT, args, threshold_value, model_name):
"""
Parameters
----------
dataset : dataloader (dataloader for the validation/test dataset).
    model_GAT : nn model (GAT model).
args : arguments
threshold_value : float (threshold for adjacency matrices).
Description
----------
    This method performs the evaluation of the model on the test/validation dataset.
Returns
-------
test accuracy.
"""
model_GAT.eval()
labels = []
preds = []
for batch_idx, data in enumerate(dataset):
adj = Variable(data['adj'].float(), requires_grad=False).to(device)
labels.append(data['label'].long().numpy())
adj = torch.squeeze(adj)
features = np.identity(adj.shape[0])
features = Variable(torch.from_numpy(features).float(), requires_grad=False).cpu()
if args.threshold in ["median", "mean"]:
adj = torch.where(adj > threshold_value, torch.tensor([1.0]), torch.tensor([0.0]))
ypred = model_GAT(features, adj)
_, indices = torch.max(ypred, 1)
preds.append(indices.cpu().data.numpy())
labels = np.hstack(labels)
preds = np.hstack(preds)
simple_r = {'labels':labels,'preds':preds}
with open("./gat/Labels_and_preds/"+model_name+".pickle", 'wb') as f:
pickle.dump(simple_r, f)
result = {'prec': metrics.precision_score(labels, preds, average='macro'),
'recall': metrics.recall_score(labels, preds, average='macro'),
'acc': metrics.accuracy_score(labels, preds),
'F1': metrics.f1_score(labels, preds, average="micro")}
if args.evaluation_method == 'model assessment':
name = 'Test'
if args.evaluation_method == 'model selection':
name = 'Validation'
print(name, " accuracy:", result['acc'])
return result['acc']
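
# Hedged illustration (added for clarity, not part of the original pipeline): when args.threshold
# is "median" or "mean", evaluate() above (and train() below) binarize the adjacency matrix with
# torch.where -- entries strictly greater than threshold_value become 1.0, all others 0.0.
def _binarize_adj_sketch(adj, threshold_value):
    # adj: dense [N, N] float tensor; threshold_value: scalar such as adj.mean() or adj.median()
    return torch.where(adj > threshold_value, torch.tensor([1.0]), torch.tensor([0.0]))
# Example: _binarize_adj_sketch(torch.tensor([[0.1, 0.7], [0.4, 0.9]]), 0.5) -> [[0., 1.], [0., 1.]]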
def minmax_sc(x):
min_max_scaler = preprocessing.MinMaxScaler()
x = min_max_scaler.fit_transform(x)
return x
def train(args, train_dataset, val_dataset, model_GAT, threshold_value, model_name):
"""
Parameters
----------
args : arguments
train_dataset : dataloader (dataloader for the validation/test dataset).
val_dataset : dataloader (dataloader for the validation/test dataset).
model_GAT : nn model (GAT model).
threshold_value : float (threshold for adjacency matrices).
Description
----------
    This method performs the training of the model on the train dataset and calls the evaluate() method for evaluation.
Returns
-------
test accuracy.
"""
params = list(model_GAT.parameters())
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
test_accs = []
train_loss=[]
val_acc=[]
for epoch in range(args.num_epochs):
print("Epoch ",epoch)
print("Size of Training Set:" + str(len(train_dataset)))
print("Size of Validation Set:" + str(len(val_dataset)))
model_GAT.train()
total_time = 0
avg_loss = 0.0
preds = []
labels = []
for batch_idx, data in enumerate(train_dataset):
begin_time = time.time()
adj = Variable(data['adj'].float(), requires_grad=False).to(device)
label = Variable(data['label'].long()).to(device)
#adj_id = Variable(data['id'].int()).to(device)
adj = torch.squeeze(adj)
features = np.identity(adj.shape[0])
features = Variable(torch.from_numpy(features).float(), requires_grad=False).cpu()
if args.threshold in ["median", "mean"]:
adj = torch.where(adj > threshold_value, torch.tensor([1.0]), torch.tensor([0.0]))
ypred = model_GAT(features, adj)
_, indices = torch.max(ypred, 1)
preds.append(indices.cpu().data.numpy())
labels.append(data['label'].long().numpy())
loss = model_GAT.loss(ypred, label)
model_GAT.zero_grad()
loss.backward()
#nn.utils.clip_grad_norm_(model_DIFFPOOL.parameters(), args.clip)
optimizer.step()
avg_loss += loss
elapsed = time.time() - begin_time
total_time += elapsed
if epoch == args.num_epochs-1:
model_GAT.is_trained = True
preds = np.hstack(preds)
labels = np.hstack(labels)
print("Train accuracy : ", np.mean( preds == labels ))
test_acc = evaluate(val_dataset, model_GAT, args, threshold_value, model_name)
print('Avg loss: ', avg_loss, '; epoch time: ', total_time)
test_accs.append(test_acc)
train_loss.append(avg_loss)
val_acc.append(test_acc)
path = './gat/weights/W_'+model_name+'.pickle'
if os.path.exists(path):
os.remove(path)
os.rename('GAT_W.pickle',path)
los_p = {'loss':train_loss}
with open("./gat/training_loss/Training_loss_"+model_name+".pickle", 'wb') as f:
pickle.dump(los_p, f)
torch.save(model_GAT,"./gat/models/GAT_"+model_name+".pt")
return test_acc
def load_data(args):
"""
Parameters
----------
args : arguments
Description
----------
    This method loads the adjacency matrices representing the args.view-th view of the dataset.
Returns
-------
List of dictionaries{adj, label, id}
"""
#Load graphs and labels
with open('data/'+args.dataset+'/'+args.dataset+'_edges','rb') as f:
multigraphs = pickle.load(f)
with open('data/'+args.dataset+'/'+args.dataset+'_labels','rb') as f:
labels = pickle.load(f)
adjacencies = [multigraphs[i][:,:,args.view] for i in range(len(multigraphs))]
#Normalize inputs
if args.NormalizeInputGraphs==True:
for subject in range(len(adjacencies)):
adjacencies[subject] = minmax_sc(adjacencies[subject])
#Create List of Dictionaries
G_list=[]
for i in range(len(labels)):
G_element = {"adj": adjacencies[i],"label": labels[i],"id": i,}
G_list.append(G_element)
return G_list
def arg_parse(dataset, view, num_shots=2, cv_number=5):
"""
arguments definition method
"""
parser = argparse.ArgumentParser(description='Graph Classification')
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
parser.add_argument('--v', type=str, default=1)
parser.add_argument('--data', type=str, default='Sample_dataset', choices = [ f.path[5:] for f in os.scandir("data") if f.is_dir() ])
parser.add_argument('--dataset', type=str, default=dataset,
help='Dataset')
parser.add_argument('--view', type=int, default=view,
help = 'view index in the dataset')
parser.add_argument('--num_epochs', type=int, default=1, #50
help='Training Epochs')
parser.add_argument('--num_shots', type=int, default=num_shots, #100
help='number of shots')
parser.add_argument('--cv_number', type=int, default=cv_number,
help='number of validation folds.')
parser.add_argument('--NormalizeInputGraphs', default=False, action='store_true',
help='Normalize Input adjacency matrices of graphs')
parser.add_argument('--evaluation_method', type=str, default='model assessment',
help='evaluation method, possible values : model selection, model assessment')
parser.add_argument('--threshold', dest='threshold', default='mean',
help='threshold the graph adjacency matrix. Possible values: no_threshold, median, mean')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--num-classes', dest='num_classes', type=int, default=2,
help='Number of label classes')
parser.add_argument('--lr', type=float, default=0.001,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=8,
help='Number of hidden units.')
parser.add_argument('--nb_heads', type=int, default=8,
help='Number of head attentions.')
parser.add_argument('--dropout', type=float, default=0.8,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--alpha', type=float, default=0.2,
help='Alpha for the leaky_relu.')
return parser.parse_args()
def benchmark_task(args, model_name):
"""
Parameters
----------
args : Arguments
Description
----------
    Initializes the model, performs train/test or train/validation splits, and calls train() to execute training and evaluation.
Returns
-------
test_accs : test accuracies (list)
"""
G_list = load_data(args)
num_nodes = G_list[0]['adj'].shape[0]
test_accs = []
folds = cross_val.stratify_splits(G_list,args)
[random.shuffle(folds[i]) for i in range(len(folds))]
for i in range(args.cv_number):
train_set, validation_set, test_set = cross_val.datasets_splits(folds, args, i)
if args.evaluation_method =='model selection':
train_dataset, val_dataset, threshold_value = cross_val.model_selection_split(train_set, validation_set, args)
if args.evaluation_method =='model assessment':
train_dataset, val_dataset, threshold_value = cross_val.model_assessment_split(train_set, validation_set, test_set, args)
print("CV : ",i)
model_GAT = GAT(nfeat=num_nodes,
nhid=args.hidden,
nclass=args.num_classes,
dropout=args.dropout,
nheads=args.nb_heads,
alpha=args.alpha)
test_acc = train(args, train_dataset, val_dataset, model_GAT, threshold_value, model_name+"_CV_"+str(i)+"_view_"+str(args.view))
test_accs.append(test_acc)
return test_accs
def test_scores(dataset, view, model_name, cv_number):
args = arg_parse(dataset, view, cv_number=cv_number)
print("Main : ",args)
test_accs = benchmark_task(args, model_name)
print("test accuracies ",test_accs)
return test_accs
def two_shot_trainer(dataset, view, num_shots):
args = arg_parse(dataset, view, num_shots=num_shots)
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
start = time.time()
for i in range(args.num_shots):
model = "gat"
model_name = "Few_Shot_"+dataset+"_"+model + str(i)
print("Shot : ",i)
with open('./Two_shot_samples_views/'+dataset+'_view_'+str(view)+'_shot_'+str(i)+'_train','rb') as f:
train_set = pickle.load(f)
with open('./Two_shot_samples_views/'+dataset+'_view_'+str(view)+'_shot_'+str(i)+'_test','rb') as f:
test_set = pickle.load(f)
num_nodes = train_set[0]['adj'].shape[0]
model_GAT = GAT(nfeat=num_nodes,
nhid=args.hidden,
nclass=args.num_classes,
dropout=args.dropout,
nheads=args.nb_heads,
alpha=args.alpha)
train_dataset, val_dataset, threshold_value = cross_val.two_shot_loader(train_set, test_set, args)
test_acc = train(args, train_dataset, val_dataset, model_GAT, threshold_value, model_name+"_view_"+str(view))
print("Test accuracy:"+str(test_acc))
print('load data using ------>', time.time()-start)
|
[
"numpy.hstack",
"torch.max",
"cross_val.stratify_splits",
"torch.from_numpy",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"torch.cuda.is_available",
"torch.squeeze",
"cross_val.model_selection_split",
"os.remove",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"cross_val.datasets_splits",
"numpy.random.seed",
"sklearn.preprocessing.MinMaxScaler",
"numpy.identity",
"cross_val.two_shot_loader",
"random.shuffle",
"os.rename",
"os.scandir",
"pickle.load",
"torch.save",
"time.time",
"sklearn.metrics.accuracy_score",
"torch.manual_seed",
"torch.optim.Adam",
"sklearn.metrics.f1_score",
"pickle.dump",
"cross_val.model_assessment_split",
"random.seed",
"torch.tensor",
"models_gat.GAT"
] |
[((273, 293), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (290, 293), False, 'import torch\n'), ((294, 311), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (308, 311), True, 'import numpy as np\n'), ((312, 326), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (323, 326), False, 'import random\n'), ((1597, 1614), 'numpy.hstack', 'np.hstack', (['labels'], {}), '(labels)\n', (1606, 1614), True, 'import numpy as np\n'), ((1627, 1643), 'numpy.hstack', 'np.hstack', (['preds'], {}), '(preds)\n', (1636, 1643), True, 'import numpy as np\n'), ((2350, 2378), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (2376, 2378), False, 'from sklearn import preprocessing\n'), ((3095, 3163), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay'}), '(params, lr=args.lr, weight_decay=args.weight_decay)\n', (3111, 3163), False, 'import torch\n'), ((5362, 5382), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5376, 5382), False, 'import os\n'), ((5421, 5452), 'os.rename', 'os.rename', (['"""GAT_W.pickle"""', 'path'], {}), "('GAT_W.pickle', path)\n", (5430, 5452), False, 'import os\n'), ((5604, 5667), 'torch.save', 'torch.save', (['model_GAT', "('./gat/models/GAT_' + model_name + '.pt')"], {}), "(model_GAT, './gat/models/GAT_' + model_name + '.pt')\n", (5614, 5667), False, 'import torch\n'), ((6808, 6867), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Graph Classification"""'}), "(description='Graph Classification')\n", (6831, 6867), False, 'import argparse\n'), ((9774, 9813), 'cross_val.stratify_splits', 'cross_val.stratify_splits', (['G_list', 'args'], {}), '(G_list, args)\n', (9799, 9813), False, 'import cross_val\n'), ((11209, 11229), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (11226, 11229), False, 'import torch\n'), ((11234, 11251), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (11248, 11251), True, 'import numpy as np\n'), ((11256, 11270), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (11267, 11270), False, 'import random\n'), ((11285, 11296), 'time.time', 'time.time', ([], {}), '()\n', (11294, 11296), False, 'import time\n'), ((362, 387), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (385, 387), False, 'import torch\n'), ((1125, 1143), 'torch.squeeze', 'torch.squeeze', (['adj'], {}), '(adj)\n', (1138, 1143), False, 'import torch\n'), ((1172, 1197), 'numpy.identity', 'np.identity', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (1183, 1197), True, 'import numpy as np\n'), ((1514, 1533), 'torch.max', 'torch.max', (['ypred', '(1)'], {}), '(ypred, 1)\n', (1523, 1533), False, 'import torch\n'), ((1772, 1796), 'pickle.dump', 'pickle.dump', (['simple_r', 'f'], {}), '(simple_r, f)\n', (1783, 1796), False, 'import pickle\n'), ((1820, 1875), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['labels', 'preds'], {'average': '"""macro"""'}), "(labels, preds, average='macro')\n", (1843, 1875), True, 'import sklearn.metrics as metrics\n'), ((1901, 1953), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['labels', 'preds'], {'average': '"""macro"""'}), "(labels, preds, average='macro')\n", (1921, 1953), True, 'import sklearn.metrics as metrics\n'), ((1976, 2013), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['labels', 'preds'], {}), '(labels, preds)\n', (1998, 2013), True, 'import sklearn.metrics as metrics\n'), ((2035, 2083), 
'sklearn.metrics.f1_score', 'metrics.f1_score', (['labels', 'preds'], {'average': '"""micro"""'}), "(labels, preds, average='micro')\n", (2051, 2083), True, 'import sklearn.metrics as metrics\n'), ((4915, 4931), 'numpy.hstack', 'np.hstack', (['preds'], {}), '(preds)\n', (4924, 4931), True, 'import numpy as np\n'), ((4949, 4966), 'numpy.hstack', 'np.hstack', (['labels'], {}), '(labels)\n', (4958, 4966), True, 'import numpy as np\n'), ((5392, 5407), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (5401, 5407), False, 'import os\n'), ((5578, 5599), 'pickle.dump', 'pickle.dump', (['los_p', 'f'], {}), '(los_p, f)\n', (5589, 5599), False, 'import pickle\n'), ((6090, 6104), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6101, 6104), False, 'import pickle\n'), ((6204, 6218), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6215, 6218), False, 'import pickle\n'), ((9818, 9842), 'random.shuffle', 'random.shuffle', (['folds[i]'], {}), '(folds[i])\n', (9832, 9842), False, 'import random\n'), ((9953, 9994), 'cross_val.datasets_splits', 'cross_val.datasets_splits', (['folds', 'args', 'i'], {}), '(folds, args, i)\n', (9978, 9994), False, 'import cross_val\n'), ((10427, 10557), 'models_gat.GAT', 'GAT', ([], {'nfeat': 'num_nodes', 'nhid': 'args.hidden', 'nclass': 'args.num_classes', 'dropout': 'args.dropout', 'nheads': 'args.nb_heads', 'alpha': 'args.alpha'}), '(nfeat=num_nodes, nhid=args.hidden, nclass=args.num_classes, dropout=\n args.dropout, nheads=args.nb_heads, alpha=args.alpha)\n', (10430, 10557), False, 'from models_gat import GAT\n'), ((11830, 11960), 'models_gat.GAT', 'GAT', ([], {'nfeat': 'num_nodes', 'nhid': 'args.hidden', 'nclass': 'args.num_classes', 'dropout': 'args.dropout', 'nheads': 'args.nb_heads', 'alpha': 'args.alpha'}), '(nfeat=num_nodes, nhid=args.hidden, nclass=args.num_classes, dropout=\n args.dropout, nheads=args.nb_heads, alpha=args.alpha)\n', (11833, 11960), False, 'from models_gat import GAT\n'), ((12104, 12156), 'cross_val.two_shot_loader', 'cross_val.two_shot_loader', (['train_set', 'test_set', 'args'], {}), '(train_set, test_set, args)\n', (12129, 12156), False, 'import cross_val\n'), ((3637, 3648), 'time.time', 'time.time', ([], {}), '()\n', (3646, 3648), False, 'import time\n'), ((3895, 3913), 'torch.squeeze', 'torch.squeeze', (['adj'], {}), '(adj)\n', (3908, 3913), False, 'import torch\n'), ((3938, 3963), 'numpy.identity', 'np.identity', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (3949, 3963), True, 'import numpy as np\n'), ((4333, 4352), 'torch.max', 'torch.max', (['ypred', '(1)'], {}), '(ypred, 1)\n', (4342, 4352), False, 'import torch\n'), ((5002, 5026), 'numpy.mean', 'np.mean', (['(preds == labels)'], {}), '(preds == labels)\n', (5009, 5026), True, 'import numpy as np\n'), ((10117, 10181), 'cross_val.model_selection_split', 'cross_val.model_selection_split', (['train_set', 'validation_set', 'args'], {}), '(train_set, validation_set, args)\n', (10148, 10181), False, 'import cross_val\n'), ((10305, 10380), 'cross_val.model_assessment_split', 'cross_val.model_assessment_split', (['train_set', 'validation_set', 'test_set', 'args'], {}), '(train_set, validation_set, test_set, args)\n', (10337, 10380), False, 'import cross_val\n'), ((11581, 11595), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11592, 11595), False, 'import pickle\n'), ((11728, 11742), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11739, 11742), False, 'import pickle\n'), ((1400, 1419), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (1412, 1419), False, 'import 
torch\n'), ((1421, 1440), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (1433, 1440), False, 'import torch\n'), ((4743, 4754), 'time.time', 'time.time', ([], {}), '()\n', (4752, 4754), False, 'import time\n'), ((12380, 12391), 'time.time', 'time.time', ([], {}), '()\n', (12389, 12391), False, 'import time\n'), ((4182, 4201), 'torch.tensor', 'torch.tensor', (['[1.0]'], {}), '([1.0])\n', (4194, 4201), False, 'import torch\n'), ((4203, 4222), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (4215, 4222), False, 'import torch\n'), ((7125, 7143), 'os.scandir', 'os.scandir', (['"""data"""'], {}), "('data')\n", (7135, 7143), False, 'import os\n'), ((1235, 1261), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (1251, 1261), False, 'import torch\n'), ((3996, 4022), 'torch.from_numpy', 'torch.from_numpy', (['features'], {}), '(features)\n', (4012, 4022), False, 'import torch\n')]
|
"""
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import sys
import numpy as np
import logging
import time
import types
from datetime import datetime
from netCDF4 import Dataset
from nexustiles.nexustiles import NexusTileService
from webservice.webmodel import NexusProcessingException
AVAILABLE_HANDLERS = []
AVAILABLE_INITIALIZERS = []
def nexus_initializer(clazz):
log = logging.getLogger(__name__)
try:
wrapper = NexusInitializerWrapper(clazz)
log.info("Adding initializer '%s'" % wrapper.clazz())
AVAILABLE_INITIALIZERS.append(wrapper)
except Exception as ex:
log.warn("Initializer '%s' failed to load (reason: %s)" % (clazz, ex.message), exc_info=True)
return clazz
def nexus_handler(clazz):
log = logging.getLogger(__name__)
try:
wrapper = AlgorithmModuleWrapper(clazz)
log.info("Adding algorithm module '%s' with path '%s' (%s)" % (wrapper.name(), wrapper.path(), wrapper.clazz()))
AVAILABLE_HANDLERS.append(wrapper)
except Exception as ex:
log.warn("Handler '%s' is invalid and will be skipped (reason: %s)" % (clazz, ex.message), exc_info=True)
return clazz
DEFAULT_PARAMETERS_SPEC = {
"ds": {
"name": "Dataset",
"type": "string",
"description": "One or more comma-separated dataset shortnames"
},
"minLat": {
"name": "Minimum Latitude",
"type": "float",
"description": "Minimum (Southern) bounding box Latitude"
},
"maxLat": {
"name": "Maximum Latitude",
"type": "float",
"description": "Maximum (Northern) bounding box Latitude"
},
"minLon": {
"name": "Minimum Longitude",
"type": "float",
"description": "Minimum (Western) bounding box Longitude"
},
"maxLon": {
"name": "Maximum Longitude",
"type": "float",
"description": "Maximum (Eastern) bounding box Longitude"
},
"startTime": {
"name": "Start Time",
"type": "long integer",
"description": "Starting time in milliseconds since midnight Jan. 1st, 1970 UTC"
},
"endTime": {
"name": "End Time",
"type": "long integer",
"description": "Ending time in milliseconds since midnight Jan. 1st, 1970 UTC"
},
"lowPassFilter": {
"name": "Apply Low Pass Filter",
"type": "boolean",
"description": "Specifies whether to apply a low pass filter on the analytics results"
},
"seasonalFilter": {
"name": "Apply Seasonal Filter",
"type": "boolean",
"description": "Specified whether to apply a seasonal cycle filter on the analytics results"
}
}
class NexusInitializerWrapper:
def __init__(self, clazz):
self.__log = logging.getLogger(__name__)
self.__hasBeenRun = False
self.__clazz = clazz
self.validate()
def validate(self):
if "init" not in self.__clazz.__dict__ or not type(self.__clazz.__dict__["init"]) == types.FunctionType:
raise Exception("Method 'init' has not been declared")
def clazz(self):
return self.__clazz
def hasBeenRun(self):
return self.__hasBeenRun
def init(self, config):
if not self.__hasBeenRun:
self.__hasBeenRun = True
instance = self.__clazz()
instance.init(config)
else:
self.log("Initializer '%s' has already been run" % self.__clazz)
class AlgorithmModuleWrapper:
def __init__(self, clazz):
self.__instance = None
self.__clazz = clazz
self.validate()
def validate(self):
if "calc" not in self.__clazz.__dict__ or not type(self.__clazz.__dict__["calc"]) == types.FunctionType:
raise Exception("Method 'calc' has not been declared")
if "path" not in self.__clazz.__dict__:
raise Exception("Property 'path' has not been defined")
if "name" not in self.__clazz.__dict__:
raise Exception("Property 'name' has not been defined")
if "description" not in self.__clazz.__dict__:
raise Exception("Property 'description' has not been defined")
if "params" not in self.__clazz.__dict__:
raise Exception("Property 'params' has not been defined")
def clazz(self):
return self.__clazz
def name(self):
return self.__clazz.name
def path(self):
return self.__clazz.path
def description(self):
return self.__clazz.description
def params(self):
return self.__clazz.params
def instance(self, algorithm_config=None, sc=None):
if "singleton" in self.__clazz.__dict__ and self.__clazz.__dict__["singleton"] is True:
if self.__instance is None:
self.__instance = self.__clazz()
try:
self.__instance.set_config(algorithm_config)
except AttributeError:
pass
try:
self.__instance.set_spark_context(sc)
except AttributeError:
pass
return self.__instance
else:
instance = self.__clazz()
try:
instance.set_config(algorithm_config)
except AttributeError:
pass
try:
                instance.set_spark_context(sc)
except AttributeError:
pass
return instance
def isValid(self):
try:
self.validate()
return True
except Exception as ex:
return False
class CalcHandler(object):
def calc(self, computeOptions, **args):
raise Exception("calc() not yet implemented")
class NexusHandler(CalcHandler):
def __init__(self, skipCassandra=False, skipSolr=False):
CalcHandler.__init__(self)
self.algorithm_config = None
self._tile_service = NexusTileService(skipCassandra, skipSolr)
def set_config(self, algorithm_config):
self.algorithm_config = algorithm_config
def _mergeDicts(self, x, y):
z = x.copy()
z.update(y)
return z
def _now(self):
millis = int(round(time.time() * 1000))
return millis
def _mergeDataSeries(self, resultsData, dataNum, resultsMap):
for entry in resultsData:
#frmtdTime = datetime.fromtimestamp(entry["time"] ).strftime("%Y-%m")
frmtdTime = entry["time"]
if not frmtdTime in resultsMap:
resultsMap[frmtdTime] = []
entry["ds"] = dataNum
resultsMap[frmtdTime].append(entry)
def _resultsMapToList(self, resultsMap):
resultsList = []
for key, value in resultsMap.iteritems():
resultsList.append(value)
resultsList = sorted(resultsList, key=lambda entry: entry[0]["time"])
return resultsList
def _mergeResults(self, resultsRaw):
resultsMap = {}
for i in range(0, len(resultsRaw)):
resultsSeries = resultsRaw[i]
resultsData = resultsSeries[0]
self._mergeDataSeries(resultsData, i, resultsMap)
resultsList = self._resultsMapToList(resultsMap)
return resultsList
class SparkHandler(NexusHandler):
class SparkJobContext(object):
class MaxConcurrentJobsReached(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
def __init__(self, job_stack):
self.spark_job_stack = job_stack
self.job_name = None
self.log = logging.getLogger(__name__)
def __enter__(self):
try:
self.job_name = self.spark_job_stack.pop()
self.log.debug("Using %s" % self.job_name)
except IndexError:
raise SparkHandler.SparkJobContext.MaxConcurrentJobsReached()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.job_name is not None:
self.log.debug("Returning %s" % self.job_name)
self.spark_job_stack.append(self.job_name)
def __init__(self, **kwargs):
import inspect
NexusHandler.__init__(self, **kwargs)
self._sc = None
self.spark_job_stack = []
def with_spark_job_context(calc_func):
from functools import wraps
@wraps(calc_func)
def wrapped(*args, **kwargs1):
try:
with SparkHandler.SparkJobContext(self.spark_job_stack) as job_context:
# TODO Pool and Job are forced to a 1-to-1 relationship
calc_func.im_self._sc.setLocalProperty("spark.scheduler.pool", job_context.job_name)
calc_func.im_self._sc.setJobGroup(job_context.job_name, "a spark job")
return calc_func(*args, **kwargs1)
except SparkHandler.SparkJobContext.MaxConcurrentJobsReached:
raise NexusProcessingException(code=503,
reason="Max concurrent requests reached. Please try again later.")
return wrapped
for member in inspect.getmembers(self, predicate=inspect.ismethod):
if member[0] == "calc":
setattr(self, member[0], with_spark_job_context(member[1]))
def set_spark_context(self, sc):
self._sc = sc
def set_config(self, algorithm_config):
max_concurrent_jobs = algorithm_config.getint("spark", "maxconcurrentjobs") if algorithm_config.has_section(
"spark") and algorithm_config.has_option("spark", "maxconcurrentjobs") else 10
self.spark_job_stack = list(["Job %s" % x for x in xrange(1, max_concurrent_jobs + 1)])
self.algorithm_config = algorithm_config
def _setQueryParams(self, ds, bounds, start_time=None, end_time=None,
start_year=None, end_year=None, clim_month=None,
fill=-9999., spark_master=None, spark_nexecs=None,
spark_nparts=None):
self._ds = ds
self._minLat, self._maxLat, self._minLon, self._maxLon = bounds
self._startTime = start_time
self._endTime = end_time
self._startYear = start_year
self._endYear = end_year
self._climMonth = clim_month
self._fill = fill
self._spark_master = spark_master
self._spark_nexecs = spark_nexecs
self._spark_nparts = spark_nparts
def _find_global_tile_set(self):
if type(self._ds) in (list,tuple):
ds = self._ds[0]
else:
ds = self._ds
ntiles = 0
##################################################################
# Temporary workaround until we have dataset metadata to indicate
# temporal resolution.
if "monthly" in ds.lower():
t_incr = 2592000 # 30 days
else:
t_incr = 86400 # 1 day
##################################################################
t = self._endTime
self._latRes = None
self._lonRes = None
while ntiles == 0:
nexus_tiles = self._tile_service.get_tiles_bounded_by_box(self._minLat, self._maxLat, self._minLon, self._maxLon, ds=ds, start_time=t-t_incr, end_time=t)
ntiles = len(nexus_tiles)
self.log.debug('find_global_tile_set got {0} tiles'.format(ntiles))
if ntiles > 0:
for tile in nexus_tiles:
self.log.debug('tile coords:')
self.log.debug('tile lats: {0}'.format(tile.latitudes))
self.log.debug('tile lons: {0}'.format(tile.longitudes))
if self._latRes is None:
lats = tile.latitudes.data
if (len(lats) > 1):
self._latRes = abs(lats[1]-lats[0])
if self._lonRes is None:
lons = tile.longitudes.data
if (len(lons) > 1):
self._lonRes = abs(lons[1]-lons[0])
if ((self._latRes is not None) and
(self._lonRes is not None)):
break
if (self._latRes is None) or (self._lonRes is None):
ntiles = 0
else:
lats_agg = np.concatenate([tile.latitudes.compressed()
for tile in nexus_tiles])
lons_agg = np.concatenate([tile.longitudes.compressed()
for tile in nexus_tiles])
self._minLatCent = np.min(lats_agg)
self._maxLatCent = np.max(lats_agg)
self._minLonCent = np.min(lons_agg)
self._maxLonCent = np.max(lons_agg)
t -= t_incr
return nexus_tiles
def _find_tile_bounds(self, t):
lats = t.latitudes
lons = t.longitudes
if (len(lats.compressed()) > 0) and (len(lons.compressed()) > 0):
min_lat = np.ma.min(lats)
max_lat = np.ma.max(lats)
min_lon = np.ma.min(lons)
max_lon = np.ma.max(lons)
good_inds_lat = np.where(lats.mask == False)[0]
good_inds_lon = np.where(lons.mask == False)[0]
min_y = np.min(good_inds_lat)
max_y = np.max(good_inds_lat)
min_x = np.min(good_inds_lon)
max_x = np.max(good_inds_lon)
bounds = (min_lat, max_lat, min_lon, max_lon,
min_y, max_y, min_x, max_x)
else:
self.log.warn('Nothing in this tile!')
bounds = None
return bounds
@staticmethod
def query_by_parts(tile_service, min_lat, max_lat, min_lon, max_lon,
dataset, start_time, end_time, part_dim=0):
nexus_max_tiles_per_query = 100
#print 'trying query: ',min_lat, max_lat, min_lon, max_lon, \
# dataset, start_time, end_time
try:
tiles = \
tile_service.find_tiles_in_box(min_lat, max_lat,
min_lon, max_lon,
dataset,
start_time=start_time,
end_time=end_time,
fetch_data=False)
assert(len(tiles) <= nexus_max_tiles_per_query)
except:
#print 'failed query: ',min_lat, max_lat, min_lon, max_lon, \
# dataset, start_time, end_time
if part_dim == 0:
# Partition by latitude.
mid_lat = (min_lat + max_lat) / 2
nexus_tiles = SparkHandler.query_by_parts(tile_service,
min_lat, mid_lat,
min_lon, max_lon,
dataset,
start_time, end_time,
part_dim=part_dim)
nexus_tiles.extend(SparkHandler.query_by_parts(tile_service,
mid_lat,
max_lat,
min_lon,
max_lon,
dataset,
start_time,
end_time,
part_dim=part_dim))
elif part_dim == 1:
# Partition by longitude.
mid_lon = (min_lon + max_lon) / 2
nexus_tiles = SparkHandler.query_by_parts(tile_service,
min_lat, max_lat,
min_lon, mid_lon,
dataset,
start_time, end_time,
part_dim=part_dim)
nexus_tiles.extend(SparkHandler.query_by_parts(tile_service,
min_lat,
max_lat,
mid_lon,
max_lon,
dataset,
start_time,
end_time,
part_dim=part_dim))
elif part_dim == 2:
# Partition by time.
mid_time = (start_time + end_time) / 2
nexus_tiles = SparkHandler.query_by_parts(tile_service,
min_lat, max_lat,
min_lon, max_lon,
dataset,
start_time, mid_time,
part_dim=part_dim)
nexus_tiles.extend(SparkHandler.query_by_parts(tile_service,
min_lat,
max_lat,
min_lon,
max_lon,
dataset,
mid_time,
end_time,
part_dim=part_dim))
else:
# No exception, so query Cassandra for the tile data.
#print 'Making NEXUS query to Cassandra for %d tiles...' % \
# len(tiles)
#t1 = time.time()
#print 'NEXUS call start at time %f' % t1
#sys.stdout.flush()
nexus_tiles = list(tile_service.fetch_data_for_tiles(*tiles))
nexus_tiles = list(tile_service.mask_tiles_to_bbox(min_lat, max_lat,
min_lon, max_lon,
nexus_tiles))
#t2 = time.time()
#print 'NEXUS call end at time %f' % t2
#print 'Seconds in NEXUS call: ', t2-t1
#sys.stdout.flush()
#print 'Returning %d tiles' % len(nexus_tiles)
return nexus_tiles
@staticmethod
def _prune_tiles(nexus_tiles):
del_ind = np.where([np.all(tile.data.mask) for tile in nexus_tiles])[0]
for i in np.flipud(del_ind):
del nexus_tiles[i]
def _lat2ind(self,lat):
return int((lat-self._minLatCent)/self._latRes)
def _lon2ind(self,lon):
return int((lon-self._minLonCent)/self._lonRes)
def _ind2lat(self,y):
return self._minLatCent+y*self._latRes
def _ind2lon(self,x):
return self._minLonCent+x*self._lonRes
def _create_nc_file_time1d(self, a, fname, varname, varunits=None,
fill=None):
self.log.debug('a={0}'.format(a))
self.log.debug('shape a = {0}'.format(a.shape))
assert len(a.shape) == 1
time_dim = len(a)
rootgrp = Dataset(fname, "w", format="NETCDF4")
rootgrp.createDimension("time", time_dim)
vals = rootgrp.createVariable(varname, "f4", dimensions=("time",),
fill_value=fill)
times = rootgrp.createVariable("time", "f4", dimensions=("time",))
vals[:] = [d['mean'] for d in a]
times[:] = [d['time'] for d in a]
if varunits is not None:
vals.units = varunits
times.units = 'seconds since 1970-01-01 00:00:00'
rootgrp.close()
def _create_nc_file_latlon2d(self, a, fname, varname, varunits=None,
fill=None):
self.log.debug('a={0}'.format(a))
self.log.debug('shape a = {0}'.format(a.shape))
assert len(a.shape) == 2
lat_dim, lon_dim = a.shape
rootgrp = Dataset(fname, "w", format="NETCDF4")
rootgrp.createDimension("lat", lat_dim)
rootgrp.createDimension("lon", lon_dim)
vals = rootgrp.createVariable(varname, "f4",
dimensions=("lat","lon",),
fill_value=fill)
lats = rootgrp.createVariable("lat", "f4", dimensions=("lat",))
lons = rootgrp.createVariable("lon", "f4", dimensions=("lon",))
vals[:,:] = a
lats[:] = np.linspace(self._minLatCent,
self._maxLatCent, lat_dim)
lons[:] = np.linspace(self._minLonCent,
self._maxLonCent, lon_dim)
if varunits is not None:
vals.units = varunits
lats.units = "degrees north"
lons.units = "degrees east"
rootgrp.close()
def _create_nc_file(self, a, fname, varname, **kwargs):
self._create_nc_file_latlon2d(a, fname, varname, **kwargs)
def executeInitializers(config):
[wrapper.init(config) for wrapper in AVAILABLE_INITIALIZERS]
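
# ------------------------------------------------------------------------------------------------
# Hedged illustration (not part of the original module): the minimal shape of an algorithm module
# that AlgorithmModuleWrapper.validate() above accepts -- it must declare calc() plus the class
# attributes path, name, description and params. Decorating such a class with @nexus_handler would
# register it in AVAILABLE_HANDLERS; the decorator is deliberately omitted here so that importing
# this module does not register the example. All attribute values below are hypothetical.
class _ExampleHandlerSketch(NexusHandler):
    name = "Example Handler"
    path = "/example"
    description = "Minimal handler skeleton used for illustration"
    params = DEFAULT_PARAMETERS_SPEC

    def __init__(self):
        NexusHandler.__init__(self, skipCassandra=True, skipSolr=True)

    def calc(self, computeOptions, **args):
        # A real handler would query tiles through self._tile_service and return a result object.
        return None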
|
[
"logging.getLogger",
"numpy.ma.max",
"inspect.getmembers",
"numpy.flipud",
"numpy.where",
"webservice.webmodel.NexusProcessingException",
"netCDF4.Dataset",
"time.time",
"functools.wraps",
"numpy.max",
"numpy.linspace",
"numpy.min",
"numpy.all",
"numpy.ma.min",
"nexustiles.nexustiles.NexusTileService"
] |
[((442, 469), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (459, 469), False, 'import logging\n'), ((822, 849), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (839, 849), False, 'import logging\n'), ((2833, 2860), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2850, 2860), False, 'import logging\n'), ((6028, 6069), 'nexustiles.nexustiles.NexusTileService', 'NexusTileService', (['skipCassandra', 'skipSolr'], {}), '(skipCassandra, skipSolr)\n', (6044, 6069), False, 'from nexustiles.nexustiles import NexusTileService\n'), ((9354, 9406), 'inspect.getmembers', 'inspect.getmembers', (['self'], {'predicate': 'inspect.ismethod'}), '(self, predicate=inspect.ismethod)\n', (9372, 9406), False, 'import inspect\n'), ((19654, 19672), 'numpy.flipud', 'np.flipud', (['del_ind'], {}), '(del_ind)\n', (19663, 19672), True, 'import numpy as np\n'), ((20313, 20350), 'netCDF4.Dataset', 'Dataset', (['fname', '"""w"""'], {'format': '"""NETCDF4"""'}), "(fname, 'w', format='NETCDF4')\n", (20320, 20350), False, 'from netCDF4 import Dataset\n'), ((21141, 21178), 'netCDF4.Dataset', 'Dataset', (['fname', '"""w"""'], {'format': '"""NETCDF4"""'}), "(fname, 'w', format='NETCDF4')\n", (21148, 21178), False, 'from netCDF4 import Dataset\n'), ((21632, 21688), 'numpy.linspace', 'np.linspace', (['self._minLatCent', 'self._maxLatCent', 'lat_dim'], {}), '(self._minLatCent, self._maxLatCent, lat_dim)\n', (21643, 21688), True, 'import numpy as np\n'), ((21738, 21794), 'numpy.linspace', 'np.linspace', (['self._minLonCent', 'self._maxLonCent', 'lon_dim'], {}), '(self._minLonCent, self._maxLonCent, lon_dim)\n', (21749, 21794), True, 'import numpy as np\n'), ((7719, 7746), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7736, 7746), False, 'import logging\n'), ((8530, 8546), 'functools.wraps', 'wraps', (['calc_func'], {}), '(calc_func)\n', (8535, 8546), False, 'from functools import wraps\n'), ((13311, 13326), 'numpy.ma.min', 'np.ma.min', (['lats'], {}), '(lats)\n', (13320, 13326), True, 'import numpy as np\n'), ((13349, 13364), 'numpy.ma.max', 'np.ma.max', (['lats'], {}), '(lats)\n', (13358, 13364), True, 'import numpy as np\n'), ((13387, 13402), 'numpy.ma.min', 'np.ma.min', (['lons'], {}), '(lons)\n', (13396, 13402), True, 'import numpy as np\n'), ((13425, 13440), 'numpy.ma.max', 'np.ma.max', (['lons'], {}), '(lons)\n', (13434, 13440), True, 'import numpy as np\n'), ((13581, 13602), 'numpy.min', 'np.min', (['good_inds_lat'], {}), '(good_inds_lat)\n', (13587, 13602), True, 'import numpy as np\n'), ((13623, 13644), 'numpy.max', 'np.max', (['good_inds_lat'], {}), '(good_inds_lat)\n', (13629, 13644), True, 'import numpy as np\n'), ((13665, 13686), 'numpy.min', 'np.min', (['good_inds_lon'], {}), '(good_inds_lon)\n', (13671, 13686), True, 'import numpy as np\n'), ((13707, 13728), 'numpy.max', 'np.max', (['good_inds_lon'], {}), '(good_inds_lon)\n', (13713, 13728), True, 'import numpy as np\n'), ((13469, 13497), 'numpy.where', 'np.where', (['(lats.mask == False)'], {}), '(lats.mask == False)\n', (13477, 13497), True, 'import numpy as np\n'), ((13529, 13557), 'numpy.where', 'np.where', (['(lons.mask == False)'], {}), '(lons.mask == False)\n', (13537, 13557), True, 'import numpy as np\n'), ((6304, 6315), 'time.time', 'time.time', ([], {}), '()\n', (6313, 6315), False, 'import time\n'), ((12887, 12903), 'numpy.min', 'np.min', (['lats_agg'], {}), '(lats_agg)\n', (12893, 12903), True, 'import numpy as np\n'), ((12943, 
12959), 'numpy.max', 'np.max', (['lats_agg'], {}), '(lats_agg)\n', (12949, 12959), True, 'import numpy as np\n'), ((12999, 13015), 'numpy.min', 'np.min', (['lons_agg'], {}), '(lons_agg)\n', (13005, 13015), True, 'import numpy as np\n'), ((13055, 13071), 'numpy.max', 'np.max', (['lons_agg'], {}), '(lons_agg)\n', (13061, 13071), True, 'import numpy as np\n'), ((19585, 19607), 'numpy.all', 'np.all', (['tile.data.mask'], {}), '(tile.data.mask)\n', (19591, 19607), True, 'import numpy as np\n'), ((9150, 9256), 'webservice.webmodel.NexusProcessingException', 'NexusProcessingException', ([], {'code': '(503)', 'reason': '"""Max concurrent requests reached. Please try again later."""'}), "(code=503, reason=\n 'Max concurrent requests reached. Please try again later.')\n", (9174, 9256), False, 'from webservice.webmodel import NexusProcessingException\n')]
|
import numpy as np
from collections import defaultdict, Counter
from .rbbox_np import rbbox_iou
def get_ap(recall, precision):
recall = [0] + list(recall) + [1]
precision = [0] + list(precision) + [0]
for i in range(len(precision) - 1, 0, -1):
precision[i - 1] = max(precision[i - 1], precision[i])
ap = sum((recall[i] - recall[i - 1]) * precision[i] for i in range(1, len(recall)) if recall[i] != recall[i - 1])
return ap * 100
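
# Worked example (illustrative, not part of the original module): three detections sorted by score,
# of which the 1st and 3rd match one of 2 ground-truth boxes, give the running (recall, precision)
# pairs (0.5, 1.0), (0.5, 0.5), (1.0, 2/3). With the monotone interpolation above,
# AP = 0.5 * 1.0 + 0.5 * (2/3), so get_ap() returns roughly 83.33.
def _get_ap_example():
    recall = [0.5, 0.5, 1.0]
    precision = [1.0, 0.5, 2.0 / 3.0]
    return get_ap(recall, precision)  # ~83.33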
def get_ap_07(recall, precision):
ap = 0.
for t in np.linspace(0, 1, 11, endpoint=True):
mask = recall >= t
if np.any(mask):
ap += np.max(precision[mask]) / 11
return ap * 100
def get_det_aps(detect, target, num_classes, iou_thresh=0.5, use_07_metric=False):
# [[index, bbox, score, label], ...]
aps = []
for c in range(num_classes):
target_c = list(filter(lambda x: x[3] == c, target))
detect_c = filter(lambda x: x[3] == c, detect)
detect_c = sorted(detect_c, key=lambda x: x[2], reverse=True)
tp = np.zeros(len(detect_c))
fp = np.zeros(len(detect_c))
target_count = Counter([x[0] for x in target_c])
target_count = {index: np.zeros(count) for index, count in target_count.items()}
target_lut = defaultdict(list)
for index, bbox, conf, label in target_c:
target_lut[index].append(bbox)
detect_lut = defaultdict(list)
for index, bbox, conf, label in detect_c:
detect_lut[index].append(bbox)
iou_lut = dict()
for index, bboxes in detect_lut.items():
if index in target_lut:
iou_lut[index] = rbbox_iou(np.stack(bboxes), np.stack(target_lut[index]))
counter = defaultdict(int)
for i, (index, bbox, conf, label) in enumerate(detect_c):
count = counter[index]
counter[index] += 1
iou_max = -np.inf
hit_j = 0
if index in iou_lut:
for j, iou in enumerate(iou_lut[index][count]):
if iou > iou_max:
iou_max = iou
hit_j = j
if iou_max > iou_thresh and target_count[index][hit_j] == 0:
tp[i] = 1
target_count[index][hit_j] = 1
else:
fp[i] = 1
tp_sum = np.cumsum(tp)
fp_sum = np.cumsum(fp)
npos = len(target_c)
recall = tp_sum / npos
precision = tp_sum / (tp_sum + fp_sum)
aps.append((get_ap_07 if use_07_metric else get_ap)(recall, precision))
return aps
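
# Hedged illustration (not part of the original module): the expected input layout of
# get_det_aps(), following the "[[index, bbox, score, label], ...]" comment above. Both lists are
# flat over all images; `index` identifies the image, `bbox` is a rotated box in whatever
# representation rbbox_iou expects (assumed here to be a (cx, cy, w, h, angle) array), `score` is
# the detection confidence (unused for ground truth), and `label` is the class id.
def _build_toy_inputs():
    gt_box = np.array([50.0, 50.0, 20.0, 10.0, 0.0])
    target = [[0, gt_box, 1.0, 0]]          # one ground-truth object of class 0 in image 0
    detect = [[0, gt_box.copy(), 0.9, 0]]   # one detection of the same object with confidence 0.9
    return detect, target  # e.g. get_det_aps(detect, target, num_classes=1) should give [100.0]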
|
[
"numpy.any",
"numpy.max",
"collections.Counter",
"numpy.stack",
"numpy.linspace",
"numpy.zeros",
"collections.defaultdict",
"numpy.cumsum"
] |
[((522, 558), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {'endpoint': '(True)'}), '(0, 1, 11, endpoint=True)\n', (533, 558), True, 'import numpy as np\n'), ((598, 610), 'numpy.any', 'np.any', (['mask'], {}), '(mask)\n', (604, 610), True, 'import numpy as np\n'), ((1134, 1167), 'collections.Counter', 'Counter', (['[x[0] for x in target_c]'], {}), '([x[0] for x in target_c])\n', (1141, 1167), False, 'from collections import defaultdict, Counter\n'), ((1278, 1295), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1289, 1295), False, 'from collections import defaultdict, Counter\n'), ((1410, 1427), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1421, 1427), False, 'from collections import defaultdict, Counter\n'), ((1739, 1755), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1750, 1755), False, 'from collections import defaultdict, Counter\n'), ((2355, 2368), 'numpy.cumsum', 'np.cumsum', (['tp'], {}), '(tp)\n', (2364, 2368), True, 'import numpy as np\n'), ((2386, 2399), 'numpy.cumsum', 'np.cumsum', (['fp'], {}), '(fp)\n', (2395, 2399), True, 'import numpy as np\n'), ((1199, 1214), 'numpy.zeros', 'np.zeros', (['count'], {}), '(count)\n', (1207, 1214), True, 'import numpy as np\n'), ((630, 653), 'numpy.max', 'np.max', (['precision[mask]'], {}), '(precision[mask])\n', (636, 653), True, 'import numpy as np\n'), ((1674, 1690), 'numpy.stack', 'np.stack', (['bboxes'], {}), '(bboxes)\n', (1682, 1690), True, 'import numpy as np\n'), ((1692, 1719), 'numpy.stack', 'np.stack', (['target_lut[index]'], {}), '(target_lut[index])\n', (1700, 1719), True, 'import numpy as np\n')]
|
import json
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import simulation
from eval_functions import oks_score_multi
import utils
def alter_location(points, x_offset, y_offset):
x, y = points.T
return np.array([x + x_offset, y + y_offset]).T
def alter_rotation(points, radians):
centroid = np.mean(points, axis=0)
return utils.rotate_via_numpy((points - centroid).T, radians) + centroid
def alter_magnitude(points, percent_diff):
centroid = np.mean(points, axis=0)
return (points - centroid) * np.exp(percent_diff) + centroid
def alter_normal_jump(points, scale):
return points + np.random.normal(0, scale, points.shape)
def alter_cauchy_jump(points, scale, abs_bound):
return points + utils.bounded_cauchy(scale, points.shape, abs_bound)
def disappear(points, p_disappear):
return None if np.random.uniform() < p_disappear else points
def shift_by_uerr(annotation, uerr):
shifts = [
alter_rotation(annotation, np.random.normal(0, 0.5 * uerr) * np.pi / 8),
alter_magnitude(annotation, np.random.normal(0, 0.3 * uerr)),
alter_normal_jump(annotation, 30 * uerr),
alter_cauchy_jump(annotation, 30 * uerr, 100),
]
return np.mean(shifts, axis=0) * np.abs(np.sign(annotation))
def create_user_data(uid, df, pct_items, u_err, difficulty_dict=None, extraarg=None):
items = df["item"].unique()
n_items_labeled = int(np.round(pct_items * len(items)))
items_labeled = sorted(np.random.choice(items, n_items_labeled, replace=False))
labels = []
for item in items_labeled:
gold = df[df["item"] == item]["gold"].values[0]
shifted_kpobjs = [shift_by_uerr(kpobj, u_err) for kpobj in gold]
kpobjs = [shifted_kpobjs[0]] + [disappear(kp, u_err / 2) for kp in shifted_kpobjs[1:]]
kpobjs = [kp for kp in kpobjs if kp is not None]
labels.append(kpobjs)
dfdict = {
"uid": [uid] * len(items_labeled),
"item": items_labeled,
"annotation": labels,
}
return pd.DataFrame(dfdict)
class KeypointSimulator(simulation.Simulator):
def __init__(self, rawdata_dir='data/coco/person_keypoints_train2017.json', max_items=500, minlabelsperitem=4):
with open(rawdata_dir) as f:
dataset = json.load(f)
self.category_id_skeletons = {c["id"]: np.array(c["skeleton"])-1 for c in iter(dataset["categories"])}
img_label = {}
for dataset_annotation in iter(dataset["annotations"]):
v = img_label.setdefault(dataset_annotation["image_id"], [])
v.append(dataset_annotation)
img_label_minlen = {k: v for k, v in img_label.items() if len(v) >= minlabelsperitem}
i = 0
rows = []
item = []
annotation = []
category = []
for dataset_annotations in iter(img_label_minlen.values()):
for dataset_annotation in dataset_annotations:
kp = np.reshape(dataset_annotation["keypoints"], (-1,3))
kp = kp[kp[:,2]>-90][:,:2]
if len(kp) == 0:
continue
item.append(dataset_annotation["image_id"])
annotation.append(kp)
category.append(dataset_annotation["category_id"])
i += 1
if i > max_items:
break
kp_df = pd.DataFrame({"item":item, "gold":annotation, "category":category})
self.df = kp_df.groupby("item")["gold"].apply(list).reset_index()
self.itemdict = utils.make_categorical(self.df, "item")
def create_stan_data(self, n_users, pct_items, err_rates, difficulty_dict):
self.err_rates = err_rates
self.difficulty_dict = difficulty_dict
self.sim_df = simulation.create_sim_df(create_user_data, self.df, n_users, pct_items, err_rates, difficulty_dict)
stan_data = utils.calc_distances(self.sim_df, (lambda x,y: 1 - oks_score_multi(x, y)), label_colname="annotation", item_colname="item")
return stan_data
def sim_uerr_fn(self, uerr_a, uerr_b, n_users):
z = np.abs(np.random.normal(uerr_a, uerr_b, 10000))
return np.quantile(z, np.linspace(0,1,n_users+2)[1:-1])
def sim_diff_fn(self, difficulty_a, difficulty_b):
z = 1 * np.random.beta(difficulty_a, difficulty_b, 10000)
n_items = len(self.df["item"].unique())
return dict(zip(np.arange(n_items), np.quantile(z, np.linspace(0,1,n_items+2)[1:-1])))
|
[
"numpy.random.normal",
"numpy.mean",
"eval_functions.oks_score_multi",
"numpy.random.beta",
"utils.bounded_cauchy",
"numpy.reshape",
"numpy.random.choice",
"utils.make_categorical",
"numpy.exp",
"numpy.array",
"json.load",
"simulation.create_sim_df",
"numpy.linspace",
"numpy.sign",
"numpy.random.uniform",
"pandas.DataFrame",
"utils.rotate_via_numpy",
"numpy.arange"
] |
[((336, 359), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (343, 359), True, 'import numpy as np\n'), ((496, 519), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (503, 519), True, 'import numpy as np\n'), ((2047, 2067), 'pandas.DataFrame', 'pd.DataFrame', (['dfdict'], {}), '(dfdict)\n', (2059, 2067), True, 'import pandas as pd\n'), ((242, 280), 'numpy.array', 'np.array', (['[x + x_offset, y + y_offset]'], {}), '([x + x_offset, y + y_offset])\n', (250, 280), True, 'import numpy as np\n'), ((371, 425), 'utils.rotate_via_numpy', 'utils.rotate_via_numpy', (['(points - centroid).T', 'radians'], {}), '((points - centroid).T, radians)\n', (393, 425), False, 'import utils\n'), ((644, 684), 'numpy.random.normal', 'np.random.normal', (['(0)', 'scale', 'points.shape'], {}), '(0, scale, points.shape)\n', (660, 684), True, 'import numpy as np\n'), ((755, 807), 'utils.bounded_cauchy', 'utils.bounded_cauchy', (['scale', 'points.shape', 'abs_bound'], {}), '(scale, points.shape, abs_bound)\n', (775, 807), False, 'import utils\n'), ((1236, 1259), 'numpy.mean', 'np.mean', (['shifts'], {'axis': '(0)'}), '(shifts, axis=0)\n', (1243, 1259), True, 'import numpy as np\n'), ((1496, 1551), 'numpy.random.choice', 'np.random.choice', (['items', 'n_items_labeled'], {'replace': '(False)'}), '(items, n_items_labeled, replace=False)\n', (1512, 1551), True, 'import numpy as np\n'), ((3383, 3453), 'pandas.DataFrame', 'pd.DataFrame', (["{'item': item, 'gold': annotation, 'category': category}"], {}), "({'item': item, 'gold': annotation, 'category': category})\n", (3395, 3453), True, 'import pandas as pd\n'), ((3549, 3588), 'utils.make_categorical', 'utils.make_categorical', (['self.df', '"""item"""'], {}), "(self.df, 'item')\n", (3571, 3588), False, 'import utils\n'), ((3774, 3877), 'simulation.create_sim_df', 'simulation.create_sim_df', (['create_user_data', 'self.df', 'n_users', 'pct_items', 'err_rates', 'difficulty_dict'], {}), '(create_user_data, self.df, n_users, pct_items,\n err_rates, difficulty_dict)\n', (3798, 3877), False, 'import simulation\n'), ((553, 573), 'numpy.exp', 'np.exp', (['percent_diff'], {}), '(percent_diff)\n', (559, 573), True, 'import numpy as np\n'), ((864, 883), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (881, 883), True, 'import numpy as np\n'), ((1080, 1111), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.3 * uerr)'], {}), '(0, 0.3 * uerr)\n', (1096, 1111), True, 'import numpy as np\n'), ((1269, 1288), 'numpy.sign', 'np.sign', (['annotation'], {}), '(annotation)\n', (1276, 1288), True, 'import numpy as np\n'), ((2291, 2303), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2300, 2303), False, 'import json\n'), ((4115, 4154), 'numpy.random.normal', 'np.random.normal', (['uerr_a', 'uerr_b', '(10000)'], {}), '(uerr_a, uerr_b, 10000)\n', (4131, 4154), True, 'import numpy as np\n'), ((4296, 4345), 'numpy.random.beta', 'np.random.beta', (['difficulty_a', 'difficulty_b', '(10000)'], {}), '(difficulty_a, difficulty_b, 10000)\n', (4310, 4345), True, 'import numpy as np\n'), ((2351, 2374), 'numpy.array', 'np.array', (["c['skeleton']"], {}), "(c['skeleton'])\n", (2359, 2374), True, 'import numpy as np\n'), ((2974, 3026), 'numpy.reshape', 'np.reshape', (["dataset_annotation['keypoints']", '(-1, 3)'], {}), "(dataset_annotation['keypoints'], (-1, 3))\n", (2984, 3026), True, 'import numpy as np\n'), ((4186, 4216), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(n_users + 2)'], {}), '(0, 1, n_users + 2)\n', (4197, 
4216), True, 'import numpy as np\n'), ((4418, 4436), 'numpy.arange', 'np.arange', (['n_items'], {}), '(n_items)\n', (4427, 4436), True, 'import numpy as np\n'), ((998, 1029), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.5 * uerr)'], {}), '(0, 0.5 * uerr)\n', (1014, 1029), True, 'import numpy as np\n'), ((3945, 3966), 'eval_functions.oks_score_multi', 'oks_score_multi', (['x', 'y'], {}), '(x, y)\n', (3960, 3966), False, 'from eval_functions import oks_score_multi\n'), ((4453, 4483), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(n_items + 2)'], {}), '(0, 1, n_items + 2)\n', (4464, 4483), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from .shared import Conv_Block
from ..utils.utils import zeros, mean_cube, last_frame, ENS
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(torch.stack([self.norm(x[..., i]) for i in range(x.size()[-1])], dim=-1), **kwargs)
class FeedForward(nn.Module):
def __init__(self, kernel_size, num_hidden, dilation_rate, num_conv_layers):
super().__init__()
self.kernel_size = kernel_size
self.num_hidden = num_hidden
self.num_conv_layers = num_conv_layers
self.dilation_rate = dilation_rate
self.conv = Conv_Block(self.num_hidden, self.num_hidden, kernel_size=self.kernel_size,
dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)
def forward(self, x):
return torch.stack([self.conv(x[..., i]) for i in range(x.size()[-1])], dim=-1)
class ConvAttention(nn.Module):
def __init__(self, num_hidden, kernel_size, enc=True, mask=False):
super(ConvAttention, self).__init__()
self.enc = enc
self.mask = mask
self.kernel_size = kernel_size
self.num_hidden = num_hidden
# important note: shared convolution is intentional here
if self.enc:
# 3 times num_hidden for out_channels due to queries, keys & values
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=self.num_hidden, out_channels=3*self.num_hidden, kernel_size=1, padding="same", padding_mode="reflect")
)
else:
# only 2 times num_hidden for keys & values
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=self.num_hidden, out_channels=2*self.num_hidden, kernel_size=1, padding="same", padding_mode="reflect")
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=self.num_hidden*2, out_channels=1, kernel_size=self.kernel_size, padding="same", padding_mode="reflect")
)
def forward(self, x, enc_out=None):
# s is num queries, t is num keys/values
b, _, _, _, s = x.shape
if self.enc:
t = s
qkv_set = torch.stack([self.conv1(x[..., i]) for i in range(t)], dim=-1)
Q, K, V = torch.split(qkv_set, self.num_hidden, dim=1)
else:
# x correspond to queries
t = enc_out.size()[-1]
kv_set = torch.stack([self.conv1(enc_out[..., i]) for i in range(t)], dim=-1)
K, V = torch.split(kv_set, self.num_hidden, dim=1)
Q = x
K_rep = torch.stack([K] * s, dim=-2)
V_rep = torch.stack([V] * s, dim=-1)
Q_rep = torch.stack([Q] * t, dim=-1)
# concatenate queries and keys for cross-channel convolution
Q_K = torch.concat((Q_rep, K_rep), dim=1)
if self.mask:
# only feed in 'previous' keys & values for computing softmax
V_out = []
# for each query
for i in range(t):
Q_K_temp = rearrange(Q_K[..., :i+1, i], 'b c h w t -> (b t) c h w')
extr_feat = rearrange(torch.squeeze(self.conv2(Q_K_temp), dim=1), '(b t) h w -> b h w t', b=b, t=i+1)
attn_mask = F.softmax(extr_feat, dim=-1)
# convex combination over values using weights from attention mask, per channel c
V_out.append(torch.stack([torch.sum(torch.mul(attn_mask, V_rep[:, c, :, :, i, :i+1]), dim=-1) for c in range(V_rep.size()[1])], dim=1))
V_out = torch.stack(V_out, dim=-1)
else:
Q_K = rearrange(Q_K, 'b c h w s t -> (b s t) c h w') # no convolution across time dim!
extr_feat = rearrange(torch.squeeze(self.conv2(Q_K), dim=1), '(b s t) h w -> b h w t s', b=b, t=t)
attn_mask = F.softmax(extr_feat, dim=-2)
V_out = torch.stack([torch.sum(torch.mul(attn_mask, V_rep[:, c, ...]), dim=-2) for c in range(V_rep.size()[1])], dim=1)
return V_out
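# --- Illustrative sketch, not part of the original module: a shape walkthrough for the
# encoder-style ConvAttention above. The input is (batch, channels, height, width, time);
# a shared 1x1 convolution produces queries, keys and values per frame, conv2 scores the
# concatenated (query, key) pairs, and the softmax runs over the key/value time axis.
# attn = ConvAttention(num_hidden=8, kernel_size=3, enc=True)
# out = attn(torch.randn(2, 8, 16, 16, 4))   # -> shape (2, 8, 16, 16, 4)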
class PositionalEncoding(nn.Module):
def __init__(self, num_hidden, img_width):
# no differentiation should happen with respect to the params in here!
super(PositionalEncoding, self).__init__()
self.num_hidden = num_hidden
self.img_width = img_width
def _get_sinusoid_encoding_table(self, t, device):
''' Sinusoid position encoding table '''
sinusoid_table = torch.stack([self._get_position_angle_vec(pos_i) for pos_i in range(t)], dim=0)
sinusoid_table[:, :, 0::2] = torch.sin(sinusoid_table[:, :, 0::2]) # even dim
sinusoid_table[:, :, 1::2] = torch.cos(sinusoid_table[:, :, 1::2]) # odd dim
return torch.moveaxis(sinusoid_table, 0, -1)
def _get_position_angle_vec(self, position):
return_list = [torch.ones((1,
self.img_width,
self.img_width),
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")) *
(position / np.power(10000, 2 * (hid_j // 2) / self.num_hidden[-1])) for hid_j in range(self.num_hidden[-1])]
return torch.stack(return_list, dim=1)
def forward(self, x, t, single=False):
"""Returns entire positional encoding until step T if not single, otherwise only encoding of time step T."""
if not single:
self.register_buffer('pos_table', self._get_sinusoid_encoding_table(t, x.get_device()))
return torch.squeeze(x + self.pos_table.clone().detach(), dim=0)
else:
if t % 2 == 0:
return x + torch.unsqueeze(torch.sin(self._get_position_angle_vec(t)), dim=-1).clone().detach()
else:
return x + torch.unsqueeze(torch.cos(self._get_position_angle_vec(t)), dim=-1).clone().detach()
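# --- Worked formula, not part of the original module: _get_position_angle_vec above fills an
# (img_width x img_width) plane per channel with the classic sinusoid angles
#     angle(pos, j) = pos / 10000 ** (2 * (j // 2) / num_hidden[-1]),
# and _get_sinusoid_encoding_table then applies sin to even channel indices and cos to odd
# ones, moving the time axis last so the table adds onto (C, H, W, T) feature maps.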
class Encoder(nn.Module):
def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width):
super().__init__()
self.num_hidden = num_hidden
self.depth = depth
self.dilation_rate = dilation_rate
self.num_conv_layers = num_conv_layers
self.kernel_size = kernel_size
self.img_width = img_width
self.layers = nn.ModuleList([])
self.num_hidden = self.num_hidden
for _ in range(self.depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
ConvAttention(kernel_size=self.kernel_size, num_hidden=self.num_hidden[-1], enc=True))),
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
FeedForward(kernel_size=self.kernel_size, num_hidden=self.num_hidden[-1],
dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x)
x = ff(x)
return x
class Decoder(nn.Module):
def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels):
super().__init__()
self.layers = nn.ModuleList([])
self.dilation_rate = dilation_rate
self.num_conv_layers = num_conv_layers
self.depth = depth
self.kernel_size = kernel_size
self.img_width = img_width
self.num_hidden = num_hidden
self.num_non_pred_feat = non_pred_channels
for _ in range(self.depth):
self.layers.append(nn.ModuleList([
# (masked) query self-attention
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
ConvAttention(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, mask=True))),
# encoder-decoder attention
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
ConvAttention(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, enc=False))),
# feed forward
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
FeedForward(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)))
]))
def forward(self, queries, enc_out):
for query_attn, attn, ff in self.layers:
queries = query_attn(queries)
x = attn(queries, enc_out=enc_out)
x = ff(x)
return x
class Conv_Transformer(nn.Module):
"""Standard, single-headed ConvTransformer like in https://arxiv.org/pdf/2011.10185.pdf"""
def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels):
super(Conv_Transformer, self).__init__()
self.num_hidden = num_hidden
self.depth = depth
self.num_layers_query_feat = num_layers_query_feat
self.dilation_rate = dilation_rate
self.num_conv_layers = num_conv_layers
self.kernel_size = kernel_size
self.img_width = img_width
self.in_channels = in_channels
self.non_pred_channels = non_pred_channels
self.pos_embedding = PositionalEncoding(self.num_hidden, self.img_width)
self.Encoder = Encoder(num_hidden=self.num_hidden, depth=self.depth, dilation_rate=self.dilation_rate,
num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size, img_width=self.img_width)
self.Decoder = Decoder(num_hidden=self.num_hidden, depth=self.depth, dilation_rate=self.dilation_rate,
num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size, img_width=self.img_width, non_pred_channels=self.non_pred_channels)
self.input_feat_gen = Conv_Block(self.in_channels, self.num_hidden[-1], num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size)
# TODO (optionally): replace this by SFFN
self.back_to_pixel = nn.Sequential(
nn.Conv2d(self.num_hidden[-1], 4, kernel_size=1)
)
def forward(self, frames, n_predictions):
_, _, _, _, T = frames.size()
feature_map = self.feature_embedding(img=frames, network=self.input_feat_gen)
enc_in = self.pos_embedding(feature_map, T)
# encode all input values
enc_out = torch.concat(self.Encoder(enc_in), dim=-1)
out_list = []
queries = self.feature_embedding(img=feature_map[..., -1], network=self.query_feat_gen)
for _ in range(n_predictions):
dec_out = self.Decoder(queries, enc_out)
pred = self.feature_embedding(dec_out)
out_list.append(pred)
queries = torch.concat((queries, pred), dim=-1)
x = torch.stack(out_list, dim=-1)
return x
def feature_embedding(self, img, network):
generator = network
gen_img = []
for i in range(img.shape[-1]):
gen_img.append(generator(img[..., i]))
gen_img = torch.stack(gen_img, dim=-1)
return gen_img
class ENS_Conv_Transformer(Conv_Transformer):
"""ConvTransformer that employs delta model and can read in non-pred future features, hence taylored to the ENS challenge."""
def __init__(self, num_hidden, output_dim, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels, baseline):
super(ENS_Conv_Transformer, self).__init__(num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels - 1)
# remove cloud mask
self.in_channels = self.in_channels - 1
self.baseline = baseline
self.output_dim = output_dim
def forward(self, input_tensor, non_pred_feat=None, prediction_count=1):
baseline = eval(self.baseline + "(input_tensor[:, 0:5, :, :, :], 4)")
b, _, width, height, T = input_tensor.size()
pred_deltas = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())
preds = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())
baselines = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())
# remove cloud mask channel for feature embedding
feature_map = torch.concat((input_tensor[:, :4, ...], input_tensor[:, 5:, ...]), dim=1)
features = self.feature_embedding(img=feature_map, network=self.input_feat_gen)
enc_in = torch.stack([self.pos_embedding(features[i, ...], T) for i in range(b)], dim=0)
enc_out = self.Encoder(enc_in)
# first query stems from last input frame
queries = features[..., -1:]
baselines[..., 0] = baseline
pred_deltas[..., 0] = self.back_to_pixel(self.Decoder(queries, enc_out)[..., 0])
preds[..., 0] = pred_deltas[..., 0] + baselines[..., 0]
for t in range(1, prediction_count):
if self.baseline == "mean_cube":
baselines[..., t] = (preds[..., t - 1] + (baselines[..., t - 1] * (T + t)))/(T + t + 1)
if self.baseline == "zeros":
pass
else:
baselines[..., t] = preds[..., t - 1]
# concatenate with non-pred features & feature embedding & do positional encoding
query = self.pos_embedding(self.feature_embedding(torch.concat((preds[..., t-1:t], non_pred_feat[..., t-1:t]), dim=1), network=self.input_feat_gen), t, single=True)
queries = torch.concat((queries, query), dim=-1)
pred_deltas[..., :t] = torch.stack([self.back_to_pixel(self.Decoder(queries, enc_out)[..., i]) for i in range(t)], dim=-1)
preds[..., t] = pred_deltas[..., t] + baselines[..., t]
return preds, pred_deltas, baselines
def _get_device(self):
return next(self.parameters()).device
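# --- Clarifying note, not part of the original module: the "delta model" mentioned in the
# ENS_Conv_Transformer docstring means the decoder predicts a residual on top of a baseline
# (e.g. zeros, mean_cube or last_frame, selected via self.baseline), i.e. for every step
#     preds[..., t] = pred_deltas[..., t] + baselines[..., t],
# and for t > 0 the baseline itself is rolled forward from the previous prediction.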
|
[
"torch.mul",
"torch.split",
"numpy.power",
"torch.nn.ModuleList",
"torch.nn.LayerNorm",
"torch.sin",
"torch.stack",
"einops.rearrange",
"torch.nn.Conv2d",
"torch.cos",
"torch.cuda.is_available",
"torch.concat",
"torch.moveaxis",
"torch.nn.functional.softmax"
] |
[((494, 511), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {}), '(dim)\n', (506, 511), True, 'import torch.nn as nn\n'), ((2972, 3000), 'torch.stack', 'torch.stack', (['([K] * s)'], {'dim': '(-2)'}), '([K] * s, dim=-2)\n', (2983, 3000), False, 'import torch\n'), ((3017, 3045), 'torch.stack', 'torch.stack', (['([V] * s)'], {'dim': '(-1)'}), '([V] * s, dim=-1)\n', (3028, 3045), False, 'import torch\n'), ((3062, 3090), 'torch.stack', 'torch.stack', (['([Q] * t)'], {'dim': '(-1)'}), '([Q] * t, dim=-1)\n', (3073, 3090), False, 'import torch\n'), ((3174, 3209), 'torch.concat', 'torch.concat', (['(Q_rep, K_rep)'], {'dim': '(1)'}), '((Q_rep, K_rep), dim=1)\n', (3186, 3209), False, 'import torch\n'), ((4911, 4948), 'torch.sin', 'torch.sin', (['sinusoid_table[:, :, 0::2]'], {}), '(sinusoid_table[:, :, 0::2])\n', (4920, 4948), False, 'import torch\n'), ((4998, 5035), 'torch.cos', 'torch.cos', (['sinusoid_table[:, :, 1::2]'], {}), '(sinusoid_table[:, :, 1::2])\n', (5007, 5035), False, 'import torch\n'), ((5063, 5100), 'torch.moveaxis', 'torch.moveaxis', (['sinusoid_table', '(0)', '(-1)'], {}), '(sinusoid_table, 0, -1)\n', (5077, 5100), False, 'import torch\n'), ((5565, 5596), 'torch.stack', 'torch.stack', (['return_list'], {'dim': '(1)'}), '(return_list, dim=1)\n', (5576, 5596), False, 'import torch\n'), ((6645, 6662), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (6658, 6662), True, 'import torch.nn as nn\n'), ((7639, 7656), 'torch.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (7652, 7656), True, 'import torch.nn as nn\n'), ((11391, 11420), 'torch.stack', 'torch.stack', (['out_list'], {'dim': '(-1)'}), '(out_list, dim=-1)\n', (11402, 11420), False, 'import torch\n'), ((11644, 11672), 'torch.stack', 'torch.stack', (['gen_img'], {'dim': '(-1)'}), '(gen_img, dim=-1)\n', (11655, 11672), False, 'import torch\n'), ((13025, 13098), 'torch.concat', 'torch.concat', (['(input_tensor[:, :4, ...], input_tensor[:, 5:, ...])'], {'dim': '(1)'}), '((input_tensor[:, :4, ...], input_tensor[:, 5:, ...]), dim=1)\n', (13037, 13098), False, 'import torch\n'), ((2246, 2379), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(self.num_hidden * 2)', 'out_channels': '(1)', 'kernel_size': 'self.kernel_size', 'padding': '"""same"""', 'padding_mode': '"""reflect"""'}), "(in_channels=self.num_hidden * 2, out_channels=1, kernel_size=self\n .kernel_size, padding='same', padding_mode='reflect')\n", (2255, 2379), True, 'import torch.nn as nn\n'), ((2651, 2695), 'torch.split', 'torch.split', (['qkv_set', 'self.num_hidden'], {'dim': '(1)'}), '(qkv_set, self.num_hidden, dim=1)\n', (2662, 2695), False, 'import torch\n'), ((2893, 2936), 'torch.split', 'torch.split', (['kv_set', 'self.num_hidden'], {'dim': '(1)'}), '(kv_set, self.num_hidden, dim=1)\n', (2904, 2936), False, 'import torch\n'), ((3919, 3945), 'torch.stack', 'torch.stack', (['V_out'], {'dim': '(-1)'}), '(V_out, dim=-1)\n', (3930, 3945), False, 'import torch\n'), ((3978, 4024), 'einops.rearrange', 'rearrange', (['Q_K', '"""b c h w s t -> (b s t) c h w"""'], {}), "(Q_K, 'b c h w s t -> (b s t) c h w')\n", (3987, 4024), False, 'from einops import rearrange\n'), ((4194, 4222), 'torch.nn.functional.softmax', 'F.softmax', (['extr_feat'], {'dim': '(-2)'}), '(extr_feat, dim=-2)\n', (4203, 4222), True, 'import torch.nn.functional as F\n'), ((10637, 10685), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.num_hidden[-1]', '(4)'], {'kernel_size': '(1)'}), '(self.num_hidden[-1], 4, kernel_size=1)\n', (10646, 10685), True, 'import torch.nn as nn\n'), ((11332, 
11369), 'torch.concat', 'torch.concat', (['(queries, pred)'], {'dim': '(-1)'}), '((queries, pred), dim=-1)\n', (11344, 11369), False, 'import torch\n'), ((14235, 14273), 'torch.concat', 'torch.concat', (['(queries, query)'], {'dim': '(-1)'}), '((queries, query), dim=-1)\n', (14247, 14273), False, 'import torch\n'), ((1792, 1923), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.num_hidden', 'out_channels': '(3 * self.num_hidden)', 'kernel_size': '(1)', 'padding': '"""same"""', 'padding_mode': '"""reflect"""'}), "(in_channels=self.num_hidden, out_channels=3 * self.num_hidden,\n kernel_size=1, padding='same', padding_mode='reflect')\n", (1801, 1923), True, 'import torch.nn as nn\n'), ((2058, 2189), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.num_hidden', 'out_channels': '(2 * self.num_hidden)', 'kernel_size': '(1)', 'padding': '"""same"""', 'padding_mode': '"""reflect"""'}), "(in_channels=self.num_hidden, out_channels=2 * self.num_hidden,\n kernel_size=1, padding='same', padding_mode='reflect')\n", (2067, 2189), True, 'import torch.nn as nn\n'), ((3417, 3475), 'einops.rearrange', 'rearrange', (['Q_K[..., :i + 1, i]', '"""b c h w t -> (b t) c h w"""'], {}), "(Q_K[..., :i + 1, i], 'b c h w t -> (b t) c h w')\n", (3426, 3475), False, 'from einops import rearrange\n'), ((3620, 3648), 'torch.nn.functional.softmax', 'F.softmax', (['extr_feat'], {'dim': '(-1)'}), '(extr_feat, dim=-1)\n', (3629, 3648), True, 'import torch.nn.functional as F\n'), ((5452, 5507), 'numpy.power', 'np.power', (['(10000)', '(2 * (hid_j // 2) / self.num_hidden[-1])'], {}), '(10000, 2 * (hid_j // 2) / self.num_hidden[-1])\n', (5460, 5507), True, 'import numpy as np\n'), ((14098, 14169), 'torch.concat', 'torch.concat', (['(preds[..., t - 1:t], non_pred_feat[..., t - 1:t])'], {'dim': '(1)'}), '((preds[..., t - 1:t], non_pred_feat[..., t - 1:t]), dim=1)\n', (14110, 14169), False, 'import torch\n'), ((4266, 4304), 'torch.mul', 'torch.mul', (['attn_mask', 'V_rep[:, c, ...]'], {}), '(attn_mask, V_rep[:, c, ...])\n', (4275, 4304), False, 'import torch\n'), ((3799, 3849), 'torch.mul', 'torch.mul', (['attn_mask', 'V_rep[:, c, :, :, i, :i + 1]'], {}), '(attn_mask, V_rep[:, c, :, :, i, :i + 1])\n', (3808, 3849), False, 'import torch\n'), ((5363, 5388), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5386, 5388), False, 'import torch\n')]
|
import numpy as np
import photon_stream as ps
import photon_stream_production as psp
import pkg_resources
import os
runinfo_path = pkg_resources.resource_filename(
'photon_stream_production',
os.path.join('tests', 'resources', 'runinfo_20161115_to_20170103.csv')
)
drs_fRunID_for_obs_run = psp.drs_run._drs_fRunID_for_obs_run
def test_drs_run_assignment():
ri = psp.runinfo.read(runinfo_path)
ro = psp.drs_run.assign_drs_runs(ri)
ri = ri[(ri.fNight > 20161229) & (ri.fNight <= 20170102)]
ro = ro[(ro.fNight > 20161229) & (ro.fNight <= 20170102)]
for i, row in ri.iterrows():
assert row.fNight == ro.loc[i, 'fNight']
assert row.fRunID == ro.loc[i, 'fRunID']
if row.fRunTypeKey == psp.runinfo.OBSERVATION_RUN_TYPE_KEY:
first_method_drs_run_id = drs_fRunID_for_obs_run(
runinfo=ri,
fNight=row.fNight,
fRunID=row.fRunID
)
second_method_drs_run_id = ro.loc[i, 'DrsRunID']
if np.isnan(first_method_drs_run_id):
assert np.isnan(second_method_drs_run_id)
else:
assert first_method_drs_run_id == second_method_drs_run_id
|
[
"photon_stream_production.drs_run.assign_drs_runs",
"photon_stream_production.runinfo.read",
"os.path.join",
"numpy.isnan"
] |
[((201, 271), 'os.path.join', 'os.path.join', (['"""tests"""', '"""resources"""', '"""runinfo_20161115_to_20170103.csv"""'], {}), "('tests', 'resources', 'runinfo_20161115_to_20170103.csv')\n", (213, 271), False, 'import os\n'), ((379, 409), 'photon_stream_production.runinfo.read', 'psp.runinfo.read', (['runinfo_path'], {}), '(runinfo_path)\n', (395, 409), True, 'import photon_stream_production as psp\n'), ((419, 450), 'photon_stream_production.drs_run.assign_drs_runs', 'psp.drs_run.assign_drs_runs', (['ri'], {}), '(ri)\n', (446, 450), True, 'import photon_stream_production as psp\n'), ((1028, 1061), 'numpy.isnan', 'np.isnan', (['first_method_drs_run_id'], {}), '(first_method_drs_run_id)\n', (1036, 1061), True, 'import numpy as np\n'), ((1086, 1120), 'numpy.isnan', 'np.isnan', (['second_method_drs_run_id'], {}), '(second_method_drs_run_id)\n', (1094, 1120), True, 'import numpy as np\n')]
|
import datetime as dt
from os.path import dirname, join
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d
from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation
from bokeh.models import ColorBar
from bokeh.palettes import brewer, Spectral6
from bokeh.plotting import figure
from bokeh.embed import server_document
from bokeh.transform import factor_cmap
#################################################################################
# This just loads in the data...
# A lot of this was built off of this "cross-fire demo"
# https://github.com/bokeh/bokeh/blob/branch-2.3/examples/app/crossfilter/main.py
start_date = dt.datetime(2017,7,1)
end_date = dt.datetime(2022,1,1)
background = "#ffffff"
file = "./data"+ "/data.parquet"
df = pq.read_table(file).to_pandas()
df.sort_index(inplace=True)
options = df.index.unique(0).to_list()
#print(options)
product = "HS CODE 72, IRON AND STEEL"
level = "US Dollars"
#################################################################################
#These are functions used in the plot...
def growth_trade(foo):
    # this function takes a dataframe and computes the year-over-year percent change,
    # i.e. each month relative to the same month twelve months earlier
return 100*((foo["china_exports"]/foo["china_exports"].shift(12)) - 1)
def cum_trade(foo):
outdf = pd.DataFrame([])
outdf["cuml_trade_2017"] = foo["china_exports"].loc["2017"].cumsum()
outdf.index = pd.date_range(start="2020-01-01", end="2020-12-01", freq = "MS")
outdf["cuml_trade_2020"] = foo["china_exports"].loc["2020"].cumsum()
return outdf
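# Illustrative example, not part of the original app: growth_trade compares each month with
# the value twelve months earlier, e.g. on a toy monthly series
# _toy = pd.DataFrame({"china_exports": range(1, 25)},
#                     index=pd.date_range("2017-01-01", periods=24, freq="MS"))
# growth_trade(_toy)   # first 12 rows are NaN, then 100 * (13/1 - 1) = 1200.0, ...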
#################################################################################
# Then this makes the simple plots:
def make_plot():
height = int(1.15*533)
width = int(1.15*750)
foo = df.loc[product_select.value]
#foo = df.query("@a < a")
    # below, the selection widget's value is one of the entries in the list of options,
    # so .value grabs the particular option currently selected.
x = foo.index
if level_select.value == 'US Dollars':
y = foo['china_exports']
if level_select.value == 'Year over Year % Change':
y = growth_trade(foo)
if level_select.value == "Cumulative Purchases 2020 vs 2017":
cuml = cum_trade(foo)
x = cuml.index
y2017 = cuml["cuml_trade_2017"]
y2020 = cuml["cuml_trade_2020"]
title = "US Exports to China of " + product_select.value.title().upper()
if level_select.value != "Cumulative Purchases 2020 vs 2017":
# This is standard bokeh stuff so far
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan, xwheel_zoom", title = title,
x_range = (start_date,end_date) )
plot.line(x = x,
y = y, line_width=3.5, line_alpha=0.75, line_color = "slategray")
if level_select.value == "Cumulative Purchases 2020 vs 2017":
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan", title = title,
x_range = (dt.datetime(2020,1,1),dt.datetime(2021,2,1)) )
plot.line(x = x,
y = y2017, line_width=3.5, line_alpha=0.5, line_color = "red", line_dash = "dashed"
, legend_label= "2017")
plot.line(x = x,
y = y2020, line_width=3.5, line_alpha=0.75, line_color = "darkblue"
, legend_label= "2020")
plot.legend.title = 'Cumulative Purchases'
plot.legend.location = "top_left"
plot.legend.title_text_font_style = "bold"
# fixed attributes
plot.xaxis.axis_label = None
plot.yaxis.axis_label = ""
plot.axis.axis_label_text_font_style = "bold"
plot.grid.grid_line_alpha = 0.3
TIMETOOLTIPS = """
<div style="background-color:#F5F5F5; opacity: 0.95; border: 15px 15px 15px 15px;">
<div style = "text-align:left;">"""
if level_select.value == 'Year over Year % Change':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{0}%</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == 'US Dollars':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == "Cumulative Purchases 2020 vs 2017":
#################################################################################
singlesource2020 = ColumnDataSource({
'xs': x.values,
'ys': y2020.values,
"dates": np.array(x),
})
c2020 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2020, color = "crimson",alpha=0.0)
singlesource2017 = ColumnDataSource({
'xs': x.values,
'ys': y2017.values,
"dates": np.array(pd.date_range(start="2017-01-01", end="2017-12-01", freq = "MS")),
})
c2017 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2017, color = "darkblue",alpha=0.0)
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> @dates{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'@dates': 'datetime'}, renderers = [c2017,c2020]))
if level_select.value == 'Year over Year % Change':
if y.max() > 1500:
plot.y_range.end = 1500
plot.title.text_font_size = '13pt'
plot.background_fill_color = background
plot.background_fill_alpha = 0.75
plot.border_fill_color = background
tradewar_box = BoxAnnotation(left=dt.datetime(2018,7,1), right=dt.datetime(2019,10,11), fill_color='red', fill_alpha=0.1)
plot.add_layout(tradewar_box)
tradewar_box = BoxAnnotation(left=dt.datetime(2020,1,1), right=dt.datetime(2021,12,31), fill_color='blue', fill_alpha=0.1)
plot.add_layout(tradewar_box)
#p.yaxis.axis_label =
plot.yaxis.axis_label_text_font_style = 'bold'
plot.yaxis.axis_label_text_font_size = "13px"
plot.sizing_mode= "scale_both"
if level_select.value != 'Year over Year % Change':
plot.yaxis.formatter = NumeralTickFormatter(format="($0. a)")
plot.yaxis.axis_label = "US Dollars"
if level_select.value == 'Year over Year % Change':
plot.yaxis.axis_label = level_select.value
plot.max_height = height
plot.max_width = width
plot.min_height = int(0.25*height)
plot.min_width = int(0.25*width)
return plot
def update_plot(attrname, old, new):
layout.children[0] = make_plot()
# This part is still not entirely clear to me, but it tells Bokeh what to update and where
# to put it: it updates the layout, and [0] is the first element (see below, the layout is
# a row whose first entry is the plot, followed by the controls)
level_select = Select(value=level, title='Tranformations', options=['US Dollars', 'Year over Year % Change', "Cumulative Purchases 2020 vs 2017"])
level_select.on_change('value', update_plot)
#print(sorted(options))
product_select = Select(value=product, title='Product', options=sorted(options), width=400)
# This is the key thing that creates teh selection object
product_select.on_change('value', update_plot)
# Change the value upone selection via the update plot
div0 = Div(text = """Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n
\n
\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
div1 = Div(text = """Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China's progress towards meeting their purchase commitments.\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
controls = column(product_select, div0, level_select, div1)
height = int(1.95*533)
width = int(1.95*675)
layout = row(make_plot(), controls, sizing_mode = "scale_height", max_height = height, max_width = width,
min_height = int(0.25*height), min_width = int(0.25*width))
curdoc().add_root(layout)
curdoc().title = "us-china-products"
|
[
"datetime.datetime",
"bokeh.layouts.column",
"bokeh.models.Div",
"pyarrow.parquet.read_table",
"bokeh.plotting.figure",
"bokeh.io.curdoc",
"bokeh.models.Select",
"numpy.array",
"bokeh.models.NumeralTickFormatter",
"pandas.DataFrame",
"pandas.date_range",
"bokeh.models.HoverTool"
] |
[((902, 925), 'datetime.datetime', 'dt.datetime', (['(2017)', '(7)', '(1)'], {}), '(2017, 7, 1)\n', (913, 925), True, 'import datetime as dt\n'), ((935, 958), 'datetime.datetime', 'dt.datetime', (['(2022)', '(1)', '(1)'], {}), '(2022, 1, 1)\n', (946, 958), True, 'import datetime as dt\n'), ((7986, 8121), 'bokeh.models.Select', 'Select', ([], {'value': 'level', 'title': '"""Tranformations"""', 'options': "['US Dollars', 'Year over Year % Change', 'Cumulative Purchases 2020 vs 2017']"}), "(value=level, title='Tranformations', options=['US Dollars',\n 'Year over Year % Change', 'Cumulative Purchases 2020 vs 2017'])\n", (7992, 8121), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d\n'), ((8451, 8835), 'bokeh.models.Div', 'Div', ([], {'text': '"""Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n\n \n\n \n\n """', 'width': '(400)', 'background': 'background', 'style': "{'justify-content': 'space-between', 'display': 'flex'}"}), '(text=\n """Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n\n \n\n \n\n """\n , width=400, background=background, style={\'justify-content\':\n \'space-between\', \'display\': \'flex\'})\n', (8454, 8835), False, 'from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation\n'), ((8838, 9347), 'bokeh.models.Div', 'Div', ([], {'text': '"""Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China\'s progress towards meeting their purchase commitments.\n\n """', 'width': '(400)', 'background': 'background', 'style': "{'justify-content': 'space-between', 'display': 'flex'}"}), '(text=\n """Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. 
Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China\'s progress towards meeting their purchase commitments.\n\n """\n , width=400, background=background, style={\'justify-content\':\n \'space-between\', \'display\': \'flex\'})\n', (8841, 9347), False, 'from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation\n'), ((9353, 9401), 'bokeh.layouts.column', 'column', (['product_select', 'div0', 'level_select', 'div1'], {}), '(product_select, div0, level_select, div1)\n', (9359, 9401), False, 'from bokeh.layouts import column, gridplot, row\n'), ((1544, 1560), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (1556, 1560), True, 'import pandas as pd\n'), ((1666, 1728), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2020-01-01"""', 'end': '"""2020-12-01"""', 'freq': '"""MS"""'}), "(start='2020-01-01', end='2020-12-01', freq='MS')\n", (1679, 1728), True, 'import pandas as pd\n'), ((9656, 9664), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (9662, 9664), False, 'from bokeh.io import curdoc\n'), ((1021, 1040), 'pyarrow.parquet.read_table', 'pq.read_table', (['file'], {}), '(file)\n', (1034, 1040), True, 'import pyarrow.parquet as pq\n'), ((2894, 3084), 'bokeh.plotting.figure', 'figure', ([], {'x_axis_type': '"""datetime"""', 'plot_height': 'height', 'plot_width': 'width', 'toolbar_location': '"""below"""', 'tools': '"""box_zoom, reset, pan, xwheel_zoom"""', 'title': 'title', 'x_range': '(start_date, end_date)'}), "(x_axis_type='datetime', plot_height=height, plot_width=width,\n toolbar_location='below', tools='box_zoom, reset, pan, xwheel_zoom',\n title=title, x_range=(start_date, end_date))\n", (2900, 3084), False, 'from bokeh.plotting import figure\n'), ((7287, 7325), 'bokeh.models.NumeralTickFormatter', 'NumeralTickFormatter', ([], {'format': '"""($0. a)"""'}), "(format='($0. 
a)')\n", (7307, 7325), False, 'from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation\n'), ((9630, 9638), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (9636, 9638), False, 'from bokeh.io import curdoc\n'), ((4695, 4791), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 'TIMETOOLTIPS', 'line_policy': '"""nearest"""', 'formatters': "{'$data_x': 'datetime'}"}), "(tooltips=TIMETOOLTIPS, line_policy='nearest', formatters={\n '$data_x': 'datetime'})\n", (4704, 4791), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d\n'), ((5071, 5167), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 'TIMETOOLTIPS', 'line_policy': '"""nearest"""', 'formatters': "{'$data_x': 'datetime'}"}), "(tooltips=TIMETOOLTIPS, line_policy='nearest', formatters={\n '$data_x': 'datetime'})\n", (5080, 5167), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d\n'), ((6249, 6370), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 'TIMETOOLTIPS', 'line_policy': '"""nearest"""', 'formatters': "{'@dates': 'datetime'}", 'renderers': '[c2017, c2020]'}), "(tooltips=TIMETOOLTIPS, line_policy='nearest', formatters={\n '@dates': 'datetime'}, renderers=[c2017, c2020])\n", (6258, 6370), False, 'from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d\n'), ((6720, 6743), 'datetime.datetime', 'dt.datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (6731, 6743), True, 'import datetime as dt\n'), ((6749, 6774), 'datetime.datetime', 'dt.datetime', (['(2019)', '(10)', '(11)'], {}), '(2019, 10, 11)\n', (6760, 6774), True, 'import datetime as dt\n'), ((6885, 6908), 'datetime.datetime', 'dt.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (6896, 6908), True, 'import datetime as dt\n'), ((6914, 6939), 'datetime.datetime', 'dt.datetime', (['(2021)', '(12)', '(31)'], {}), '(2021, 12, 31)\n', (6925, 6939), True, 'import datetime as dt\n'), ((5471, 5482), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5479, 5482), True, 'import numpy as np\n'), ((3507, 3530), 'datetime.datetime', 'dt.datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (3518, 3530), True, 'import datetime as dt\n'), ((3529, 3552), 'datetime.datetime', 'dt.datetime', (['(2021)', '(2)', '(1)'], {}), '(2021, 2, 1)\n', (3540, 3552), True, 'import datetime as dt\n'), ((5791, 5853), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""2017-01-01"""', 'end': '"""2017-12-01"""', 'freq': '"""MS"""'}), "(start='2017-01-01', end='2017-12-01', freq='MS')\n", (5804, 5853), True, 'import pandas as pd\n')]
|
import pandas_datareader.data as pdr
import yfinance as fix
import numpy as np
fix.pdr_override()
def back_test(strategy, seq_len, ticker, start_date, end_date, dim):
"""
A simple back test for a given date period
:param strategy: the chosen strategy. Note to have already formed the model, and fitted with training data.
:param seq_len: length of the days used for prediction
:param ticker: company ticker
:param start_date: starting date
:type start_date: "YYYY-mm-dd"
:param end_date: ending date
:type end_date: "YYYY-mm-dd"
:param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP
:type dim: tuple
:return: Percentage errors array that gives the errors for every test in the given date range
"""
data = pdr.get_data_yahoo(ticker, start_date, end_date)
stock_data = data["Adj Close"]
errors = []
for i in range((len(stock_data) // 10) * 10 - seq_len - 1):
x = np.array(stock_data.iloc[i: i + seq_len, 1]).reshape(dim) / 200
y = np.array(stock_data.iloc[i + seq_len + 1, 1]) / 200
predict = strategy.predict(x)
while predict == 0:
predict = strategy.predict(x)
error = (predict - y) / 100
errors.append(error)
total_error = np.array(errors)
print(f"Average error = {total_error.mean()}")
# If you want to see the full error list then print the following statement
# print(errors)
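# --- Illustrative usage, not part of the original file: back_test expects an already
# fitted model exposing .predict(); the stand-in below is a hypothetical naive baseline
# that simply echoes the last scaled input value (ticker and dates are placeholders).
if __name__ == "__main__":
    class _NaiveLastValue:
        def predict(self, x):
            return np.asarray(x).reshape(-1)[-1]
    # back_test(_NaiveLastValue(), seq_len=10, ticker="AAPL",
    #           start_date="2019-01-01", end_date="2019-06-30", dim=(1, 10))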
|
[
"yfinance.pdr_override",
"numpy.array",
"pandas_datareader.data.get_data_yahoo"
] |
[((79, 97), 'yfinance.pdr_override', 'fix.pdr_override', ([], {}), '()\n', (95, 97), True, 'import yfinance as fix\n'), ((785, 833), 'pandas_datareader.data.get_data_yahoo', 'pdr.get_data_yahoo', (['ticker', 'start_date', 'end_date'], {}), '(ticker, start_date, end_date)\n', (803, 833), True, 'import pandas_datareader.data as pdr\n'), ((1284, 1300), 'numpy.array', 'np.array', (['errors'], {}), '(errors)\n', (1292, 1300), True, 'import numpy as np\n'), ((1037, 1082), 'numpy.array', 'np.array', (['stock_data.iloc[i + seq_len + 1, 1]'], {}), '(stock_data.iloc[i + seq_len + 1, 1])\n', (1045, 1082), True, 'import numpy as np\n'), ((961, 1004), 'numpy.array', 'np.array', (['stock_data.iloc[i:i + seq_len, 1]'], {}), '(stock_data.iloc[i:i + seq_len, 1])\n', (969, 1004), True, 'import numpy as np\n')]
|
import os
import cv2
import random
import numpy as np
from tensorflow.keras.utils import to_categorical
from scripts.consts import class_dict
def get_data(path, split=0.2):
X, y = [], []
for directory in os.listdir(path):
dirpath = os.path.join(path, directory)
print(directory, len(os.listdir(dirpath)))
for file in os.listdir(dirpath):
filepath = os.path.join(dirpath, file)
img = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)
if img.shape != (360, 363, 3):
img = cv2.resize(img, (360, 363), cv2.INTER_CUBIC)
X.append(img)
y.append(class_dict[directory])
data = list(zip(X, y))
random.shuffle(data)
X, y = zip(*data)
num_train = int((1.0 - split) * len(y))
X_train, X_valid = np.array(X[:num_train]).astype(
'float32'), np.array(X[num_train:]).astype('float32')
y_train, y_valid = np.array(
y[:num_train]).reshape(-1, 1), np.array(y[num_train:]).reshape((-1, 1))
X_train = X_train / 255.0
X_valid = X_valid / 255.0
y_train, y_valid = to_categorical(y_train), to_categorical(y_valid)
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
return X_train, y_train, X_valid, y_valid
|
[
"tensorflow.keras.utils.to_categorical",
"os.listdir",
"random.shuffle",
"os.path.join",
"numpy.array",
"cv2.resize",
"cv2.imread"
] |
[((216, 232), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (226, 232), False, 'import os\n'), ((716, 736), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (730, 736), False, 'import random\n'), ((253, 282), 'os.path.join', 'os.path.join', (['path', 'directory'], {}), '(path, directory)\n', (265, 282), False, 'import os\n'), ((355, 374), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (365, 374), False, 'import os\n'), ((1121, 1144), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train'], {}), '(y_train)\n', (1135, 1144), False, 'from tensorflow.keras.utils import to_categorical\n'), ((1146, 1169), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_valid'], {}), '(y_valid)\n', (1160, 1169), False, 'from tensorflow.keras.utils import to_categorical\n'), ((400, 427), 'os.path.join', 'os.path.join', (['dirpath', 'file'], {}), '(dirpath, file)\n', (412, 427), False, 'import os\n'), ((446, 488), 'cv2.imread', 'cv2.imread', (['filepath', 'cv2.IMREAD_UNCHANGED'], {}), '(filepath, cv2.IMREAD_UNCHANGED)\n', (456, 488), False, 'import cv2\n'), ((312, 331), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (322, 331), False, 'import os\n'), ((567, 611), 'cv2.resize', 'cv2.resize', (['img', '(360, 363)', 'cv2.INTER_CUBIC'], {}), '(img, (360, 363), cv2.INTER_CUBIC)\n', (577, 611), False, 'import cv2\n'), ((829, 852), 'numpy.array', 'np.array', (['X[:num_train]'], {}), '(X[:num_train])\n', (837, 852), True, 'import numpy as np\n'), ((881, 904), 'numpy.array', 'np.array', (['X[num_train:]'], {}), '(X[num_train:])\n', (889, 904), True, 'import numpy as np\n'), ((946, 969), 'numpy.array', 'np.array', (['y[:num_train]'], {}), '(y[:num_train])\n', (954, 969), True, 'import numpy as np\n'), ((995, 1018), 'numpy.array', 'np.array', (['y[num_train:]'], {}), '(y[num_train:])\n', (1003, 1018), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# GFOLD_static_p3p4
min_=min
from cvxpy import *
import cvxpy_codegen as cpg
from time import time
import numpy as np
import sys
import GFOLD_params
''' As defined in the paper...
PROBLEM 3: Minimum Landing Error (tf roughly solved)
MINIMIZE : norm of landing error vector
SUBJ TO :
0) initial conditions satisfied (position, velocity)
1) final conditions satisfied (altitude, velocity)
2) dynamics always satisfied
3) x stays in cone at all times
4) relaxed convexified mass and thrust constraints
5) thrust pointing constraint
6) sub-surface flight constraint
PROBLEM 4: Minimum Fuel Use
MAXIMIZE : landing mass, opt variables are dynamical and
SUBJ TO :
0) same constraints as p1, plus:
1) landing point must be equal or better than that found by p1
'''
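# Note added for clarity (not in the original source): the solver below uses the standard
# GFOLD change of variables, z = ln(mass), u = Tc/mass and the thrust slack s (Gamma/mass in
# the paper's notation), so the dynamics and mass depletion become linear in the decision
# variables. The non-convex thrust bounds are replaced around z0 = ln(m_wet - alpha*r2*n*dt) by
#     mu_1 * (1 - dz + dz**2 / 2) <= s <= mu_2 * (1 - dz),   with dz = z - z0,
# which is what the per-step constraints in solve() implement.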
def solve(params, params_super = None, codegen = False, verbose=False):
#super params
if (params_super == None):
params_super = GFOLD_params.SuperParams() # default
N = params_super.N
    # optimization variables
x =Variable(6,N,name='var_x') # state vector (3position,3velocity)
u =Variable(3,N,name='var_u') # u = Tc/mass because Tc[:,n]/m[n] is not allowed by DCP
z= Variable(1,N,name='var_z') # z = ln(mass)
s= Variable(1,N,name='var_s') # thrust slack parameter
# Parameters
x0 = Parameter(6, 1, name="x0")
xf = Parameter(6, 1, name="xf")
z0_term_inv = Parameter(1, N, name="z0_term_inv", sign='positive')
z0_term_log = Parameter(1, N, name="z0_term_log")
g = Parameter(3, 1, name="g_vec")
p_cs_cos = Parameter(1, N, name='p_cs_cos')
sparse_params = Parameter(7, 1, name="sparse_params", sign='positive')
m_wet_log = Parameter(2, 1, name='m_wet_log')
if (not codegen):
x0.value = params.x0.reshape(6, 1)
xf.value = params.xf.reshape(6, 1)
z0_term_inv.value = params.z0_term_inv.reshape(1, N)
z0_term_log.value = params.z0_term_log.reshape(1, N)
g.value = params.g.reshape(3, 1)
p_cs_cos.value = params.p_cs_cos.reshape(1, N)
m_wet_log.value = [params.m_wet_log, 0]
sparse_params.value = np.array([
params.alpha_dt,
params.G_max,
params.V_max,
params.y_gs_cot,
params.r1,
params.r2,
params.tf
]).reshape(7, 1)
alpha_dt, G_max, V_max, y_gs_cot, r1, r2, tf_ = sparse_params
dt = tf_ * (1/N) # Integration dt
# constraints
con = []
con += [x[0:3,0] == x0[0:3]] # initial pos
con += [x[3:6,0] == x0[3:6]] # initial vel
con += [x[0:3,N-1] == xf[0:3]] # final pos
con += [x[3:6,N-1]== xf[3:6]] # final vel
con += [s[0,N-1] == 0] # thrust at the end must be zero
con += [u[:,0] == s[0,0]*np.array([1,0,0])] # thrust direction starts straight
con += [u[:,N-1] == s[0,N-1]*np.array([1,0,0])] # and ends straight
con += [z[0,0] == m_wet_log[0,0]] # convexified (7)
for n in range(0,N-1):
#dynamics
con += [x[3:6,n+1] == x[3:6,n] + (dt*0.5)*((u[:,n]+g[:,0]) + (u[:,n+1]+g[:,0]))]
con += [x[0:3,n+1] == x[0:3,n] + (dt*0.5)*(x[3:6,n+1]+x[3:6,n])]
# glideslope cone
con += [ norm( (x[0:3,n])[1:3] ) - y_gs_cot*(x[0,n]) <= 0 ]
con += [ norm(x[3:6,n]) <= V_max ] # velocity
#con += [norm(u[:,n+1]-u[:,n]) <= dt*T_max/m_dry * 3]
con += [z[0,n+1] == z[0,n] - (alpha_dt*0.5)*(s[0,n] + s[0,n+1])] # mass decreases
con += [norm(u[:,n]) <= s[0,n]] # limit thrust magnitude & also therefore, mass
# Thrust pointing constraint
con += [ u[0,n] >= p_cs_cos[0,n]*s[0,n] ]
if n > 0:
#z0_term = m_wet - alpha * r2 * (n) * dt # see ref [2], eq 34,35,36
#z0 = log(z0_term)
z0 = z0_term_log[0,n]
mu_1 = r1*(z0_term_inv[0,n])
mu_2 = r2*(z0_term_inv[0,n])
            # corrected a spot where the original project did not match the paper
            # illustration: https://www.desmos.com/calculator/wtcfgnepe1
con += [s[0,n] >= mu_1 * (1 - (z[0,n] - z0) + (z[0,n] - z0)**2 *0.5)] # lower thrust bound
con += [s[0,n] <= mu_2 * (1 - (z[0,n] - z0))] # upper thrust bound
#Objective
objective = Minimize(-z[0,N-1])
problem=Problem(objective, con)
if codegen:
cpg.codegen(problem, codegen_path)
else:
obj_opt = problem.solve(solver=ECOS, verbose=verbose)
return (
obj_opt,
np.array(x.value), # r,v
np.array(u.value), # u (acceleration)
np.exp(np.array(z.value)) # mass
) if type(x.value) != type(None) else (None, None, None, None)
if __name__ == '__main__':
if (len(sys.argv) > 2 and sys.argv[1] == 'codegen'):
codegen_path = sys.argv[2]
solve(None, None, True)
else:
print("invalid input")
print(sys.argv)
|
[
"numpy.array",
"GFOLD_params.SuperParams",
"cvxpy_codegen.codegen"
] |
[((1050, 1076), 'GFOLD_params.SuperParams', 'GFOLD_params.SuperParams', ([], {}), '()\n', (1074, 1076), False, 'import GFOLD_params\n'), ((4489, 4523), 'cvxpy_codegen.codegen', 'cpg.codegen', (['problem', 'codegen_path'], {}), '(problem, codegen_path)\n', (4500, 4523), True, 'import cvxpy_codegen as cpg\n'), ((2230, 2339), 'numpy.array', 'np.array', (['[params.alpha_dt, params.G_max, params.V_max, params.y_gs_cot, params.r1,\n params.r2, params.tf]'], {}), '([params.alpha_dt, params.G_max, params.V_max, params.y_gs_cot,\n params.r1, params.r2, params.tf])\n', (2238, 2339), True, 'import numpy as np\n'), ((2890, 2909), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2898, 2909), True, 'import numpy as np\n'), ((2977, 2996), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2985, 2996), True, 'import numpy as np\n'), ((4646, 4663), 'numpy.array', 'np.array', (['x.value'], {}), '(x.value)\n', (4654, 4663), True, 'import numpy as np\n'), ((4683, 4700), 'numpy.array', 'np.array', (['u.value'], {}), '(u.value)\n', (4691, 4700), True, 'import numpy as np\n'), ((4740, 4757), 'numpy.array', 'np.array', (['z.value'], {}), '(z.value)\n', (4748, 4757), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# vim:fileencoding=UTF-8
# -*- coding: UTF-8 -*-
"""
Created on 15 June 2019.
@author: <NAME> <EMAIL>
"""
import sys
import struct
import numpy as np
from progress.bar import Bar
import logging
logging.basicConfig(format = u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s', level = logging.DEBUG, stream=sys.stdout)
# class ser(np.array):
class ser(object):
"""
A set of methods for working with a set of images in the SER format.
"""
def __init__(self, fname):
"""
Download information from file.
"""
# super.__init__()
# luids
self.MONO = 0
self.BAYER_RGGB = 8
self.BAYER_GRBG = 9
self.BAYER_GBRG = 10
self.BAYER_BGGR = 11
self.BAYER_CYYM = 16
self.BAYER_YCMY = 17
self.BAYER_YMCY = 18
self.BAYER_MYYC = 19
self.RGB = 100
self.BGR = 101
self.fname = fname
with open(self.fname, 'rb') as fd:
# Download information from the header.
self.header = fd.read(178)
self.parse_header()
# Download images.
self.frames = np.zeros((self.framecount, self.imageheight, self.imagewidth))
bar = Bar('Downloading', max=self.framecount)
for frame in range(self.framecount):
# for frame in range(1):
bar.next()
t_frame = fd.read(self.imageheight * self.imagewidth * self.pixeldepthperplane//8)
for line in range(self.imageheight):
for pixel in range(self.imagewidth):
index = (line * self.imagewidth + pixel) * 2
self.frames[frame][line][pixel] = struct.unpack('<H', t_frame[index:index+2])[0]
bar.finish()
# Download the trailer
self.trailer = fd.read(self.framecount * 8)
self.parse_trailer()
def parse_header(self):
"""
        Parse the header.
"""
self.fileid = self.header[0:14]
self.luid = struct.unpack('<i', self.header[14:18])[0]
self.colorid = struct.unpack('<i', self.header[18:22])[0]
self.littleendian_FALSE = 0
self.littleendian_TRUE = 1
self.littleendian = struct.unpack('<i', self.header[22:26])[0]
self.imagewidth = struct.unpack('<i', self.header[26:30])[0]
self.imageheight = struct.unpack('<i', self.header[30:34])[0]
self.pixeldepthperplane = struct.unpack('<i', self.header[34:38])[0]
self.framecount = struct.unpack('<i', self.header[38:42])[0]
self.observer = self.header[42:82]
self.telescope = self.header[82:122]
self.datetime = struct.unpack('<q', self.header[122:130])[0]
self.datetime_utc = struct.unpack('<q', self.header[130:138])[0]
# logging.info('{0}x{1}'.format(self.imagewidth, self.imageheight))
def parse_trailer(self):
"""
Parse the trailer
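
        Per the SER specification, the trailer holds one little-endian 64-bit timestamp
        per frame; the values are unpacked here but not currently stored on the object.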
"""
for i in range(0, self.framecount*8, 8):
tuli = (struct.unpack('<Q', self.trailer[i:i+8])[0])
def main(argv):
logging.info('%s started.\n' % argv[0])
fn = './images/ASICAP_2019-05-10_01_43_36_523.SER'
frames = ser(fn)
# logging.debug(type(frames))
# logging.debug(type(object))
# # darks_fn = './images/ASICAP_2019-05-10_02_12_00_621.SER'
# # offsets_fn = './images/ASICAP_2019-05-10_02_30_47_294.SER'
#
# # frames = ser.ser()
# # frames.read(darks_fn)
# # frames.read(lights_fn)
# # ser_fr = serialise_frames(frames)
# # logging.debug('std1={}'.format(ser_fr.std()))
# # hist_fr = get_hist(ser_fr)
# # plt.plot(hist_fr)
# # plt.grid()
# # plt.show()
#
# fnames = [
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_34_52_584.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_36_05_343.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_34_373.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_47_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_58_784.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_06_703.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_17_476.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_27_330.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_36_623.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_48_239.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_20_816.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_32_118.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_47_796.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_59_999.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_10_321.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_41_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_07_956.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_19_287.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_31_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_43_981.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_07_152.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_36_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_01_167.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_33_214.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_58_952.SER',
# ]
#
# print('{};{};{};{};{}'.format('File', 'Temperature', 'Exposure', 'Gain', 'std'))
# for fn in fnames:
# print('{}'.format(fn), flush=True, file=sys.stderr)
# frames = ser.ser()
# frames.read(fn)
# ser_fr = serialise_frames(frames)
#
# config = configparser.ConfigParser()
# config.read(fn + '.txt')
#
# print('{};{};{};{};{}'.format(fn, config['ZWO ASI120MC']['temperature'], config['ZWO ASI120MC']['exposure'], config['ZWO ASI120MC']['gain'], ser_fr.std()))
logging.info('%s finished.\n' % argv[0])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[
"logging.basicConfig",
"numpy.zeros",
"struct.unpack",
"progress.bar.Bar",
"logging.info"
] |
[((223, 369), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'u"""%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s"""', 'level': 'logging.DEBUG', 'stream': 'sys.stdout'}), "(format=\n u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s',\n level=logging.DEBUG, stream=sys.stdout)\n", (242, 369), False, 'import logging\n'), ((3276, 3315), 'logging.info', 'logging.info', (["('%s started.\\n' % argv[0])"], {}), "('%s started.\\n' % argv[0])\n", (3288, 3315), False, 'import logging\n'), ((6271, 6311), 'logging.info', 'logging.info', (["('%s finished.\\n' % argv[0])"], {}), "('%s finished.\\n' % argv[0])\n", (6283, 6311), False, 'import logging\n'), ((1209, 1271), 'numpy.zeros', 'np.zeros', (['(self.framecount, self.imageheight, self.imagewidth)'], {}), '((self.framecount, self.imageheight, self.imagewidth))\n', (1217, 1271), True, 'import numpy as np\n'), ((1290, 1329), 'progress.bar.Bar', 'Bar', (['"""Downloading"""'], {'max': 'self.framecount'}), "('Downloading', max=self.framecount)\n", (1293, 1329), False, 'from progress.bar import Bar\n'), ((2141, 2180), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[14:18]'], {}), "('<i', self.header[14:18])\n", (2154, 2180), False, 'import struct\n'), ((2218, 2257), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[18:22]'], {}), "('<i', self.header[18:22])\n", (2231, 2257), False, 'import struct\n'), ((2367, 2406), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[22:26]'], {}), "('<i', self.header[22:26])\n", (2380, 2406), False, 'import struct\n'), ((2444, 2483), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[26:30]'], {}), "('<i', self.header[26:30])\n", (2457, 2483), False, 'import struct\n'), ((2521, 2560), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[30:34]'], {}), "('<i', self.header[30:34])\n", (2534, 2560), False, 'import struct\n'), ((2598, 2637), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[34:38]'], {}), "('<i', self.header[34:38])\n", (2611, 2637), False, 'import struct\n'), ((2675, 2714), 'struct.unpack', 'struct.unpack', (['"""<i"""', 'self.header[38:42]'], {}), "('<i', self.header[38:42])\n", (2688, 2714), False, 'import struct\n'), ((2859, 2900), 'struct.unpack', 'struct.unpack', (['"""<q"""', 'self.header[122:130]'], {}), "('<q', self.header[122:130])\n", (2872, 2900), False, 'import struct\n'), ((2938, 2979), 'struct.unpack', 'struct.unpack', (['"""<q"""', 'self.header[130:138]'], {}), "('<q', self.header[130:138])\n", (2951, 2979), False, 'import struct\n'), ((3209, 3251), 'struct.unpack', 'struct.unpack', (['"""<Q"""', 'self.trailer[i:i + 8]'], {}), "('<Q', self.trailer[i:i + 8])\n", (3222, 3251), False, 'import struct\n'), ((1779, 1824), 'struct.unpack', 'struct.unpack', (['"""<H"""', 't_frame[index:index + 2]'], {}), "('<H', t_frame[index:index + 2])\n", (1792, 1824), False, 'import struct\n')]
|
import os
import pickle
import time
import timeit
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torch
import tempfile
import horovod.torch as hvd
from horovod.ray import RayExecutor
from ray_shuffling_data_loader.torch_dataset import (TorchShufflingDataset)
from ray_shuffling_data_loader.data_generation import (generate_data,
DATA_SPEC)
import argparse
DEFAULT_DATA_DIR = "s3://shuffling-data-loader-benchmarks/data/"
numpy_to_torch_dtype = {
np.bool: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128
}
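# Note: np.bool is a deprecated alias that was removed in NumPy 1.24; on newer NumPy versions
# the first key above needs to be the builtin bool instead.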
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=250000,
metavar="N",
    help="input batch size for training (default: 250000)")
parser.add_argument(
"--test-batch-size",
type=int,
default=250000,
metavar="N",
    help="input batch size for testing (default: 250000)")
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)")
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)")
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)")
parser.add_argument(
"--no-cuda",
action="store_true",
default=False,
help="disables CUDA training")
parser.add_argument(
"--seed",
type=int,
default=42,
metavar="S",
help="random seed (default: 42)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help=("how many batches to wait before logging training "
"status"))
parser.add_argument(
"--fp16-allreduce",
action="store_true",
default=False,
help="use fp16 compression during allreduce")
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction")
parser.add_argument(
"--gradient-predivide-factor",
type=float,
default=1.0,
help=("apply gradient predivide factor in optimizer "
"(default: 1.0)"))
parser.add_argument("--num-workers", type=int, default=None)
parser.add_argument("--num-hosts", type=int, default=None)
parser.add_argument("--num-workers-per-host", type=int, default=None)
parser.add_argument("--cpus-per-worker", type=int, default=1)
parser.add_argument("--mock-train-step-time", type=float, default=1.0)
# Synthetic training data generation settings.
parser.add_argument("--cache-files", action="store_true", default=False)
parser.add_argument("--num-rows", type=int, default=2 * (10**7))
parser.add_argument("--num-files", type=int, default=25)
parser.add_argument("--max-row-group-skew", type=float, default=0.0)
parser.add_argument("--num-row-groups-per-file", type=int, default=5)
parser.add_argument("--data-dir", type=str, default=DEFAULT_DATA_DIR)
# Shuffling data loader settings.
parser.add_argument("--num-reducers", type=int, default=32)
parser.add_argument("--max-concurrent-epochs", type=int, default=2)
parser.add_argument("--address", default="auto")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
def train_main(args, filenames):
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if torch.cuda.is_available() and not args.no_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
rank = hvd.rank()
train_dataset = create_dataset(
filenames,
batch_size=args.batch_size,
rank=rank,
num_epochs=args.epochs,
world_size=hvd.size(),
num_reducers=args.num_reducers,
max_concurrent_epochs=args.max_concurrent_epochs)
model = Net()
    # By default, Adasum doesn't need scaling up the learning rate.
lr_scaler = hvd.size() if not args.use_adasum else 1
if torch.cuda.is_available() and not args.no_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(
model.parameters(), lr=args.lr * lr_scaler, momentum=args.momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = (hvd.Compression.fp16
if args.fp16_allreduce else hvd.Compression.none)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average,
gradient_predivide_factor=args.gradient_predivide_factor)
def _train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_dataset.set_epoch(epoch)
start_epoch = timeit.default_timer()
last_batch_time = start_epoch
batch_wait_times = []
for batch_idx, (data, target) in enumerate(train_dataset):
batch_wait_times.append(timeit.default_timer() - last_batch_time)
if torch.cuda.is_available() and not args.no_cuda:
if isinstance(data, list):
data = [t.cuda() for t in data]
target = target.cuda()
optimizer.zero_grad()
# output = model(data)
if batch_idx % args.log_interval == 0:
print(
f"Processing batch {batch_idx} in epoch {epoch} on worker "
f"{rank}.")
time.sleep(args.mock_train_step_time)
# TODO(Clark): Add worker synchronization barrier here.
# loss = F.nll_loss(output, target)
# loss.backward()
# optimizer.step()
last_batch_time = timeit.default_timer()
epoch_duration = timeit.default_timer() - start_epoch
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nEpoch {epoch}, worker {rank} stats over "
f"{len(batch_wait_times)} steps: {epoch_duration:.3f}")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
return batch_wait_times
print(f"Starting training on worker {rank}.")
batch_wait_times = []
for epoch in range(args.epochs):
batch_wait_times.extend(_train(epoch))
batch_wait_times.pop(0)
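    # Drop the first measurement, which presumably also includes shuffle/start-up latency.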
print(f"Done training on worker {rank}.")
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nWorker {rank} training stats over {args.epochs} epochs:")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
# TODO(Clark): Add logic to the dataset abstraction so we don't have to do
# this.
if rank == 0:
print("Waiting in rank 0 worker to let other workers consume queue...")
time.sleep(10)
print("Done waiting in rank 0 worker.")
def create_dataset(filenames, *, batch_size, rank, num_epochs, world_size,
num_reducers, max_concurrent_epochs):
print(f"Creating Torch shuffling dataset for worker {rank} with "
f"{batch_size} batch size, {num_epochs} epochs, {num_reducers} "
f"reducers, and {world_size} trainers.")
feature_columns = list(DATA_SPEC.keys())
feature_types = [
numpy_to_torch_dtype[dtype] for _, _, dtype in DATA_SPEC.values()
]
label_column = feature_columns.pop()
label_type = feature_types.pop()
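    # The last column declared in DATA_SPEC is treated as the label; the rest are features.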
return TorchShufflingDataset(
filenames,
num_epochs,
world_size,
batch_size,
rank,
num_reducers=num_reducers,
max_concurrent_epochs=max_concurrent_epochs,
feature_columns=feature_columns,
feature_types=feature_types,
label_column=label_column,
label_type=label_type)
if __name__ == "__main__":
args = parser.parse_args()
from ray_shuffling_data_loader.stats import human_readable_size
import ray
print("Connecting to Ray cluster...")
ray.init(address=args.address)
num_rows = args.num_rows
num_files = args.num_files
num_row_groups_per_file = args.num_row_groups_per_file
max_row_group_skew = args.max_row_group_skew
data_dir = args.data_dir
cache_path = os.path.join(tempfile.gettempdir(), "data_cache")
filenames = None
if args.cache_files and os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
filenames, num_bytes = pickle.load(f)
except Exception as exc:
print(f"Cache load failed - {exc}")
if not filenames:
print(f"Generating {num_rows} rows over {num_files} files, with "
f"{num_row_groups_per_file} row groups per file and at most "
f"{100 * max_row_group_skew:.1f}% row group skew.")
filenames, num_bytes = generate_data(num_rows, num_files,
num_row_groups_per_file,
max_row_group_skew, data_dir)
if args.cache_files:
with open(os.path.join(tempfile.gettempdir(), "data_cache"),
"wb") as f:
pickle.dump((filenames, num_bytes), f)
print(f"Generated {len(filenames)} files containing {num_rows} rows "
f"with {num_row_groups_per_file} row groups per file, totalling "
f"{human_readable_size(num_bytes)}.")
print("Create Ray executor")
worker_kwargs = {}
num_workers = args.num_workers
num_hosts = args.num_hosts
num_workers_per_host = args.num_workers_per_host
if num_workers is not None:
if num_hosts is not None:
raise ValueError(
"Only one of --num-workers and --num-hosts should be used.")
worker_kwargs["num_workers"] = num_workers
elif num_hosts is not None:
worker_kwargs["num_hosts"] = num_hosts
if num_workers_per_host is None:
raise ValueError("When giving --num-hosts, --num-workers-per-host "
"must also be given.")
worker_kwargs["num_workers_per_host"] = num_workers_per_host
cpus_per_worker = args.cpus_per_worker
settings = RayExecutor.create_settings(timeout_s=30)
executor = RayExecutor(
settings,
use_gpu=True,
gpus_per_worker=1,
cpus_per_worker=cpus_per_worker,
**worker_kwargs)
executor.start()
executor.run(train_main, args=[args, filenames])
executor.shutdown()
print("Done consuming batches.")
|
[
"horovod.torch.broadcast_optimizer_state",
"horovod.torch.local_rank",
"ray_shuffling_data_loader.data_generation.DATA_SPEC.values",
"time.sleep",
"horovod.torch.local_size",
"torch.cuda.is_available",
"horovod.torch.size",
"ray.init",
"numpy.mean",
"os.path.exists",
"horovod.torch.rank",
"argparse.ArgumentParser",
"horovod.ray.RayExecutor",
"torch.set_num_threads",
"numpy.max",
"horovod.torch.nccl_built",
"ray_shuffling_data_loader.data_generation.DATA_SPEC.keys",
"numpy.min",
"torch.nn.Dropout2d",
"pickle.load",
"torch.nn.functional.dropout",
"torch.nn.functional.log_softmax",
"numpy.std",
"horovod.ray.RayExecutor.create_settings",
"torch.manual_seed",
"pickle.dump",
"timeit.default_timer",
"horovod.torch.init",
"torch.nn.Conv2d",
"ray_shuffling_data_loader.stats.human_readable_size",
"ray_shuffling_data_loader.data_generation.generate_data",
"ray_shuffling_data_loader.torch_dataset.TorchShufflingDataset",
"tempfile.gettempdir",
"torch.nn.Linear",
"torch.cuda.manual_seed"
] |
[((922, 982), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (945, 982), False, 'import argparse\n'), ((4215, 4225), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (4223, 4225), True, 'import horovod.torch as hvd\n'), ((4230, 4258), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4247, 4258), False, 'import torch\n'), ((4513, 4537), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (4534, 4537), False, 'import torch\n'), ((4549, 4559), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (4557, 4559), True, 'import horovod.torch as hvd\n'), ((5517, 5570), 'horovod.torch.broadcast_optimizer_state', 'hvd.broadcast_optimizer_state', (['optimizer'], {'root_rank': '(0)'}), '(optimizer, root_rank=0)\n', (5546, 5570), True, 'import horovod.torch as hvd\n'), ((8136, 8161), 'numpy.mean', 'np.mean', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8143, 8161), True, 'import numpy as np\n'), ((8188, 8212), 'numpy.std', 'np.std', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8194, 8212), True, 'import numpy as np\n'), ((8239, 8263), 'numpy.max', 'np.max', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8245, 8263), True, 'import numpy as np\n'), ((8290, 8314), 'numpy.min', 'np.min', (['batch_wait_times'], {}), '(batch_wait_times)\n', (8296, 8314), True, 'import numpy as np\n'), ((9439, 9709), 'ray_shuffling_data_loader.torch_dataset.TorchShufflingDataset', 'TorchShufflingDataset', (['filenames', 'num_epochs', 'world_size', 'batch_size', 'rank'], {'num_reducers': 'num_reducers', 'max_concurrent_epochs': 'max_concurrent_epochs', 'feature_columns': 'feature_columns', 'feature_types': 'feature_types', 'label_column': 'label_column', 'label_type': 'label_type'}), '(filenames, num_epochs, world_size, batch_size, rank,\n num_reducers=num_reducers, max_concurrent_epochs=max_concurrent_epochs,\n feature_columns=feature_columns, feature_types=feature_types,\n label_column=label_column, label_type=label_type)\n', (9460, 9709), False, 'from ray_shuffling_data_loader.torch_dataset import TorchShufflingDataset\n'), ((9976, 10006), 'ray.init', 'ray.init', ([], {'address': 'args.address'}), '(address=args.address)\n', (9984, 10006), False, 'import ray\n'), ((12163, 12204), 'horovod.ray.RayExecutor.create_settings', 'RayExecutor.create_settings', ([], {'timeout_s': '(30)'}), '(timeout_s=30)\n', (12190, 12204), False, 'from horovod.ray import RayExecutor\n'), ((12220, 12329), 'horovod.ray.RayExecutor', 'RayExecutor', (['settings'], {'use_gpu': '(True)', 'gpus_per_worker': '(1)', 'cpus_per_worker': 'cpus_per_worker'}), '(settings, use_gpu=True, gpus_per_worker=1, cpus_per_worker=\n cpus_per_worker, **worker_kwargs)\n', (12231, 12329), False, 'from horovod.ray import RayExecutor\n'), ((3628, 3659), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (3637, 3659), True, 'import torch.nn as nn\n'), ((3681, 3713), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (3690, 3713), True, 'import torch.nn as nn\n'), ((3740, 3754), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {}), '()\n', (3752, 3754), True, 'import torch.nn as nn\n'), ((3774, 3792), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(50)'], {}), '(320, 50)\n', (3783, 3792), True, 'import torch.nn as nn\n'), ((3812, 3829), 'torch.nn.Linear', 'nn.Linear', (['(50)', '(10)'], {}), '(50, 10)\n', 
(3821, 3829), True, 'import torch.nn as nn\n'), ((4048, 4084), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (4057, 4084), True, 'import torch.nn.functional as F\n'), ((4124, 4140), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {}), '(x)\n', (4137, 4140), True, 'import torch.nn.functional as F\n'), ((4267, 4292), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4290, 4292), False, 'import torch\n'), ((4413, 4446), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4435, 4446), False, 'import torch\n'), ((4929, 4939), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (4937, 4939), True, 'import horovod.torch as hvd\n'), ((4978, 5003), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5001, 5003), False, 'import torch\n'), ((6220, 6242), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6240, 6242), False, 'import timeit\n'), ((7280, 7305), 'numpy.mean', 'np.mean', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7287, 7305), True, 'import numpy as np\n'), ((7336, 7360), 'numpy.std', 'np.std', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7342, 7360), True, 'import numpy as np\n'), ((7391, 7415), 'numpy.max', 'np.max', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7397, 7415), True, 'import numpy as np\n'), ((7446, 7470), 'numpy.min', 'np.min', (['batch_wait_times'], {}), '(batch_wait_times)\n', (7452, 7470), True, 'import numpy as np\n'), ((8810, 8824), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (8820, 8824), False, 'import time\n'), ((9230, 9246), 'ray_shuffling_data_loader.data_generation.DATA_SPEC.keys', 'DATA_SPEC.keys', ([], {}), '()\n', (9244, 9246), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((10236, 10257), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (10255, 10257), False, 'import tempfile\n'), ((10322, 10348), 'os.path.exists', 'os.path.exists', (['cache_path'], {}), '(cache_path)\n', (10336, 10348), False, 'import os\n'), ((10815, 10908), 'ray_shuffling_data_loader.data_generation.generate_data', 'generate_data', (['num_rows', 'num_files', 'num_row_groups_per_file', 'max_row_group_skew', 'data_dir'], {}), '(num_rows, num_files, num_row_groups_per_file,\n max_row_group_skew, data_dir)\n', (10828, 10908), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((4387, 4403), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (4401, 4403), True, 'import horovod.torch as hvd\n'), ((4721, 4731), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (4729, 4731), True, 'import horovod.torch as hvd\n'), ((5183, 5199), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (5197, 5199), True, 'import horovod.torch as hvd\n'), ((5225, 5241), 'horovod.torch.local_size', 'hvd.local_size', ([], {}), '()\n', (5239, 5241), True, 'import horovod.torch as hvd\n'), ((6920, 6957), 'time.sleep', 'time.sleep', (['args.mock_train_step_time'], {}), '(args.mock_train_step_time)\n', (6930, 6957), False, 'import time\n'), ((7165, 7187), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7185, 7187), False, 'import timeit\n'), ((7213, 7235), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7233, 7235), False, 'import timeit\n'), ((9325, 9343), 'ray_shuffling_data_loader.data_generation.DATA_SPEC.values', 'DATA_SPEC.values', ([], {}), 
'()\n', (9341, 9343), False, 'from ray_shuffling_data_loader.data_generation import generate_data, DATA_SPEC\n'), ((6471, 6496), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6494, 6496), False, 'import torch\n'), ((10448, 10462), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10459, 10462), False, 'import pickle\n'), ((11147, 11185), 'pickle.dump', 'pickle.dump', (['(filenames, num_bytes)', 'f'], {}), '((filenames, num_bytes), f)\n', (11158, 11185), False, 'import pickle\n'), ((11349, 11379), 'ray_shuffling_data_loader.stats.human_readable_size', 'human_readable_size', (['num_bytes'], {}), '(num_bytes)\n', (11368, 11379), False, 'from ray_shuffling_data_loader.stats import human_readable_size\n'), ((6414, 6436), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6434, 6436), False, 'import timeit\n'), ((11059, 11080), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (11078, 11080), False, 'import tempfile\n')]
|
import pytest
import numpy as np
import eqtk
def test_promiscuous_binding_failure():
A = np.array(
[
[
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
],
[
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
],
[
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
],
[
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
],
[
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
],
[
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
],
]
)
G = np.array(
[
-0.51720535,
-0.69471304,
-1.78260496,
-1.32337777,
-0.63267947,
-0.57923893,
-0.78718634,
-0.27521037,
-0.13733511,
-0.69433251,
1.6858364,
-0.43683479,
0.39312096,
-0.0625205,
0.23139303,
0.07680628,
-0.52774543,
1.74592678,
]
)
x0 = np.array(
[
[
2.48257788e01,
1.72132293e-01,
1.14833731e-02,
5.00547317e-02,
1.38949549e-01,
1.93069773e01,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
]
]
)
def test_spontaneous_production_failure():
N = np.array(
[[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]], dtype=float
)
A = np.array(
[[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]], dtype=float
)
G = np.array([0, 1, 2, 3, 4, 5])
K = np.exp(-np.dot(N, G))
for x0_val in [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
]:
x0 = np.array(x0_val, dtype=float)
x_NK = eqtk.solve(c0=x0, N=N, K=K)
with pytest.raises(ValueError) as excinfo:
x_AG = eqtk.solve(c0=x0, A=A, G=G)
excinfo.match("`A` must have all nonnegative entries.")
assert eqtk.eqcheck(x_NK, x0, N=N, K=K)
def test_scale_factor_failure():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, 0.77428976, -5.64873697, -0.95863043])
x0 = np.array(
[
[
5.50293892e-05,
6.49273515e-08,
2.75796219e-05,
1.29854703e-07,
3.24636758e-08,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_trivial_elemental_failure():
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[3.48219906e-06, 1.32719868e-10]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[2.24222410e-08, 1.63359284e-04]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
G = np.array([0.0, 0.0, 0.0])
x0 = np.array([[2.63761955e-04, 4.93360042e-07, 4.88340687e-07]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
def test_past_failure_1():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, -16.76857677, -2.38430181, 1.22028775])
x0 = np.array(
[
[
1.65989040e-10,
1.07630096e-04,
1.65989040e-10,
1.65989040e-10,
5.38150479e-05,
]
]
)
x = eqtk.solve(x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_past_failure_2():
N = np.array([[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]])
minus_log_K = np.array([-43.66660344, -68.14676841, -92.28023823])
x0 = np.array([[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]])
K = np.exp(-minus_log_K)
x = eqtk.solve(x0, N, K)
assert eqtk.eqcheck(x, x0, N, K)
def test_small_conc_failure():
A = np.array(
[
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 2.0],
[1.0, 0.0, 0.0, 1.0, 2.0],
]
)
G = np.array(
[
-1.1323012373599138e02,
-2.7028447814426110e-01,
-2.3382656193096754e01,
-1.0088531260804201e02,
-5.7676558386243052e01,
]
)
x0 = np.array(
[
[
1.8134373707286439e-08,
3.5913242229740680e-14,
3.5913242229740680e-14,
3.5913242229740680e-14,
1.7956621114870340e-14,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
|
[
"eqtk.solve",
"eqtk.eqcheck",
"numpy.exp",
"numpy.array",
"numpy.dot",
"pytest.raises"
] |
[((96, 687), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0,\n 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, \n 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0,\n 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0,\n 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0,\n 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,\n 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0], [0.0,\n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, \n 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,\n 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0,\n 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0]])\n', (104, 687), True, 'import numpy as np\n'), ((2582, 2833), 'numpy.array', 'np.array', (['[-0.51720535, -0.69471304, -1.78260496, -1.32337777, -0.63267947, -\n 0.57923893, -0.78718634, -0.27521037, -0.13733511, -0.69433251, \n 1.6858364, -0.43683479, 0.39312096, -0.0625205, 0.23139303, 0.07680628,\n -0.52774543, 1.74592678]'], {}), '([-0.51720535, -0.69471304, -1.78260496, -1.32337777, -0.63267947, \n -0.57923893, -0.78718634, -0.27521037, -0.13733511, -0.69433251, \n 1.6858364, -0.43683479, 0.39312096, -0.0625205, 0.23139303, 0.07680628,\n -0.52774543, 1.74592678])\n', (2590, 2833), True, 'import numpy as np\n'), ((3070, 3224), 'numpy.array', 'np.array', (['[[24.8257788, 0.172132293, 0.0114833731, 0.0500547317, 0.138949549, \n 19.3069773, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[24.8257788, 0.172132293, 0.0114833731, 0.0500547317, 0.138949549,\n 19.3069773, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])\n', (3078, 3224), True, 'import numpy as np\n'), ((3749, 3838), 'numpy.array', 'np.array', (['[[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]]'], {'dtype': 'float'}), '([[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]],\n dtype=float)\n', (3757, 3838), True, 'import numpy as np\n'), ((3858, 3947), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]]'], {'dtype': 'float'}), '([[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]],\n dtype=float)\n', (3866, 3947), True, 'import numpy as np\n'), ((3967, 3995), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (3975, 3995), True, 'import numpy as np\n'), ((4507, 4571), 'numpy.array', 'np.array', (['[[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]]'], {}), '([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])\n', (4515, 4571), True, 'import numpy as np\n'), ((4580, 4638), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.77428976, -5.64873697, -0.95863043]'], {}), '([0.0, 0.0, 0.77428976, -5.64873697, -0.95863043])\n', (4588, 4638), True, 'import numpy as np\n'), ((4648, 4745), 'numpy.array', 'np.array', (['[[5.50293892e-05, 6.49273515e-08, 2.75796219e-05, 1.29854703e-07, \n 3.24636758e-08]]'], {}), '([[5.50293892e-05, 6.49273515e-08, 2.75796219e-05, 1.29854703e-07, \n 3.24636758e-08]])\n', (4656, 4745), True, 'import 
numpy as np\n'), ((4880, 4907), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (4890, 4907), False, 'import eqtk\n'), ((4919, 4948), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (4931, 4948), False, 'import eqtk\n'), ((4997, 5031), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (5005, 5031), True, 'import numpy as np\n'), ((5040, 5060), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5048, 5060), True, 'import numpy as np\n'), ((5070, 5114), 'numpy.array', 'np.array', (['[[3.48219906e-06, 1.32719868e-10]]'], {}), '([[3.48219906e-06, 1.32719868e-10]])\n', (5078, 5114), True, 'import numpy as np\n'), ((5180, 5214), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (5188, 5214), True, 'import numpy as np\n'), ((5223, 5243), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5231, 5243), True, 'import numpy as np\n'), ((5253, 5296), 'numpy.array', 'np.array', (['[[2.2422241e-08, 0.000163359284]]'], {}), '([[2.2422241e-08, 0.000163359284]])\n', (5261, 5296), True, 'import numpy as np\n'), ((5363, 5424), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (5371, 5424), True, 'import numpy as np\n'), ((5433, 5458), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5441, 5458), True, 'import numpy as np\n'), ((5468, 5528), 'numpy.array', 'np.array', (['[[0.000263761955, 4.93360042e-07, 4.88340687e-07]]'], {}), '([[0.000263761955, 4.93360042e-07, 4.88340687e-07]])\n', (5476, 5528), True, 'import numpy as np\n'), ((5622, 5686), 'numpy.array', 'np.array', (['[[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]]'], {}), '([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])\n', (5630, 5686), True, 'import numpy as np\n'), ((5695, 5754), 'numpy.array', 'np.array', (['[0.0, 0.0, -16.76857677, -2.38430181, 1.22028775]'], {}), '([0.0, 0.0, -16.76857677, -2.38430181, 1.22028775])\n', (5703, 5754), True, 'import numpy as np\n'), ((5764, 5858), 'numpy.array', 'np.array', (['[[1.6598904e-10, 0.000107630096, 1.6598904e-10, 1.6598904e-10, 5.38150479e-05]]'], {}), '([[1.6598904e-10, 0.000107630096, 1.6598904e-10, 1.6598904e-10, \n 5.38150479e-05]])\n', (5772, 5858), True, 'import numpy as np\n'), ((5996, 6020), 'eqtk.solve', 'eqtk.solve', (['x0'], {'A': 'A', 'G': 'G'}), '(x0, A=A, G=G)\n', (6006, 6020), False, 'import eqtk\n'), ((6032, 6061), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (6044, 6061), False, 'import eqtk\n'), ((6099, 6178), 'numpy.array', 'np.array', (['[[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]]'], {}), '([[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]])\n', (6107, 6178), True, 'import numpy as np\n'), ((6197, 6249), 'numpy.array', 'np.array', (['[-43.66660344, -68.14676841, -92.28023823]'], {}), '([-43.66660344, -68.14676841, -92.28023823])\n', (6205, 6249), True, 'import numpy as np\n'), ((6259, 6335), 'numpy.array', 'np.array', (['[[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]]'], {}), '([[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]])\n', (6267, 6335), True, 'import numpy as np\n'), ((6344, 6364), 'numpy.exp', 'np.exp', (['(-minus_log_K)'], {}), '(-minus_log_K)\n', (6350, 6364), True, 'import numpy as np\n'), ((6373, 
6393), 'eqtk.solve', 'eqtk.solve', (['x0', 'N', 'K'], {}), '(x0, N, K)\n', (6383, 6393), False, 'import eqtk\n'), ((6405, 6430), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0', 'N', 'K'], {}), '(x, x0, N, K)\n', (6417, 6430), False, 'import eqtk\n'), ((6472, 6568), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 2.0], [1.0, 0.0, 0.0, 1.0,\n 2.0]]'], {}), '([[1.0, 0.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 2.0], [1.0, 0.0, \n 0.0, 1.0, 2.0]])\n', (6480, 6568), True, 'import numpy as np\n'), ((6633, 6752), 'numpy.array', 'np.array', (['[-113.23012373599138, -0.2702844781442611, -23.382656193096754, -\n 100.88531260804201, -57.67655838624305]'], {}), '([-113.23012373599138, -0.2702844781442611, -23.382656193096754, -\n 100.88531260804201, -57.67655838624305])\n', (6641, 6752), True, 'import numpy as np\n'), ((6859, 6991), 'numpy.array', 'np.array', (['[[1.813437370728644e-08, 3.591324222974068e-14, 3.591324222974068e-14, \n 3.591324222974068e-14, 1.795662111487034e-14]]'], {}), '([[1.813437370728644e-08, 3.591324222974068e-14, \n 3.591324222974068e-14, 3.591324222974068e-14, 1.795662111487034e-14]])\n', (6867, 6991), True, 'import numpy as np\n'), ((7131, 7158), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (7141, 7158), False, 'import eqtk\n'), ((7170, 7199), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x', 'x0'], {'A': 'A', 'G': 'G'}), '(x, x0, A=A, G=G)\n', (7182, 7199), False, 'import eqtk\n'), ((4179, 4208), 'numpy.array', 'np.array', (['x0_val'], {'dtype': 'float'}), '(x0_val, dtype=float)\n', (4187, 4208), True, 'import numpy as np\n'), ((4224, 4251), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'N': 'N', 'K': 'K'}), '(c0=x0, N=N, K=K)\n', (4234, 4251), False, 'import eqtk\n'), ((4431, 4463), 'eqtk.eqcheck', 'eqtk.eqcheck', (['x_NK', 'x0'], {'N': 'N', 'K': 'K'}), '(x_NK, x0, N=N, K=K)\n', (4443, 4463), False, 'import eqtk\n'), ((5138, 5165), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5148, 5165), False, 'import eqtk\n'), ((5321, 5348), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5331, 5348), False, 'import eqtk\n'), ((5552, 5579), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (5562, 5579), False, 'import eqtk\n'), ((4012, 4024), 'numpy.dot', 'np.dot', (['N', 'G'], {}), '(N, G)\n', (4018, 4024), True, 'import numpy as np\n'), ((4266, 4291), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4279, 4291), False, 'import pytest\n'), ((4323, 4350), 'eqtk.solve', 'eqtk.solve', ([], {'c0': 'x0', 'A': 'A', 'G': 'G'}), '(c0=x0, A=A, G=G)\n', (4333, 4350), False, 'import eqtk\n')]
|
import database as d
import numpy as np
import random
from transitions import Machine
#Conversations are Markov chains, with one transition matrix per topic below.
#Each entry [i][j] = the probability of moving to TARGET state i from CURRENT state j, so
#np.dot(matrix, stateVector) with a one-hot current-state vector gives the distribution over
#target states; each column j sums to 1 (the internal lists are the rows of the matrix).
#Target state D = end of conversation. We start in state D when initializing conversation.
#Conversation is a singleton. DO NOT CREATE NEW CONVERSATION OBJECTS.
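#Worked example (for illustration): starting in the exit state, stateVector = [0,0,0,0,1], so
#np.dot(topicMatrix, stateVector) selects column 4 = [0.25, 0.25, 0.25, 0.25, 0.00]: each of the
#four topics is equally likely and the conversation cannot immediately end again.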
class Conversation(object):
#a. stores, b.manufacturers, c.friends, d. myself, e.end conversation
topicMatrix = [
[0.00,0.20,0.15,0.15,0.25],
[0.20,0.00,0.15,0.15,0.25],
[0.15,0.15,0.00,0.20,0.25],
[0.15,0.15,0.20,0.00,0.25],
[0.50,0.50,0.50,0.50,0.00]
]
#a. different store, b. new topic, c. end convo, d. prices
storeMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different manufacturer, b. new topic, c. end convo, d. prices
manuMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different friend, b. new topic, c. end convo, d. family, e. job, /f. skills
friendMatrix = [
[0.0,0.0,0.2,0.1,0.1],
[0.0,0.0,0.2,0.2,0.2],
[0.0,0.0,0.2,0.5,0.5],
[0.5,0.5,0.2,0.0,0.2],
[0.5,0.5,0.2,0.2,0.0]
]
# friendMatrix = [
# [0.00,0.00,0.15,0.1,0.1,0.1],
# [0.00,0.00,0.15,0.2,0.2,0.2],
# [0.00,0.00,0.15,0.5,0.5,0.5],
# [0.34,0.34,0.15,0.0,0.1,0.1],
# [0.33,0.33,0.15,0.1,0.0,0.1],
# [0.33,0.33,0.25,0.1,0.1,0.0]
# ]
#a. introduction, b. new topic, c. end convo, d. myfamily, e. myjob, /f. myskills
myselfMatrix = [
[0.00,1,0.2,0.0,0.0],
[0.25,0,0.2,0.2,0.2],
[0.25,0,0.2,0.5,0.5],
[0.25,0,0.2,0.0,0.3],
[0.25,0,0.2,0.3,0.0]
]
# myselfMatrix = [
# [0.0,1,0.15,0.00,0.00,0.00],
# [0.2,0,0.15,0.20,0.20,0.20],
# [0.2,0,0.15,0.50,0.50,0.50],
# [0.2,0,0.15,0.00,0.15,0.15],
# [0.2,0,0.15,0.15,0.00,0.15],
# [0.2,0,0.15,0.15,0.15,0.00]
# ]
states = ['topic','store','manu','friend', 'myself', 'exit']
transitions = [
{'trigger' : 'toTopic', 'source' : '*', 'dest' : 'topic'},
{'trigger' : 'toStore', 'source' : 'topic', 'dest' : 'store'},
{'trigger' : 'toManu' , 'source' : 'topic', 'dest' : 'manu' },
{'trigger' : 'toFriend', 'source' : 'topic', 'dest' : 'friend' },
{'trigger' : 'toMyself', 'source' : 'topic', 'dest' : 'myself'},
{'trigger' : 'toExit', 'source' : '*', 'dest' : 'exit'}
]
def __init__(self):
self.isPlayer = False
self.firstPerson = None
self.secondPerson = None
self.target = None
self.machine = Machine(model=self, states=Conversation.states, transitions=Conversation.transitions, initial='exit')
self.menuDict = {
'topic' : [self.toStore, self.toManu, self.toFriend, self.toMyself, self.toExit],
'store' : [self.different, self.toTopic, self.toExit, self.prices],
'manu' : [self.different, self.toTopic, self.toExit, self.prices],
'friend' : [self.different, self.toTopic, self.toExit, self.family, self.job],
'myself' : [self.introduction, self.toTopic, self.toExit, self.myfamily, self.myjob]
}
self.machine.on_enter_topic('topicHandler')
self.machine.on_enter_store('storeHandler')
self.machine.on_enter_manu('manuHandler')
self.machine.on_enter_friend('friendHandler')
self.machine.on_enter_myself('myselfHandler')
self.machine.on_enter_exit('exitHandler')
def beginConversation(self, firstPerson, secondPerson, isPlayer=False):
self.isPlayer = isPlayer
self.firstPerson = firstPerson
self.secondPerson = secondPerson
self.introduction()
self.toTopic()
def introduction(self):
p2 = self.firstPerson.peopleManager(self.secondPerson)
p1 = self.secondPerson.peopleManager(self.firstPerson)
p2.name = self.secondPerson.name
p1.name = self.firstPerson.name
p2.updateOpinion(1)
p1.updateOpinion(1)
def different(self):
if self.state == 'friend':
testTarget = self.firstPerson.randomPerson(self.target)
if testTarget is not None:
self.target = testTarget.person
else:
self.target = None
elif self.state == 'manu':
testTarget = self.firstPerson.randomManu(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
elif self.state == 'store':
testTarget = self.firstPerson.randomStore(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
def prices(self):
if self.target is not None:
firstProfile = self.firstPerson.unitManager(self.target, self.secondPerson)
secondProfile = self.secondPerson.unitManager(self.target, self.firstPerson)
firstPrices = firstProfile.getPricesWithDayNum()
secondPrices = secondProfile.getPricesWithDayNum()
firstDayNum = firstPrices[1]
secondDayNum = secondPrices[1]
if firstDayNum > secondDayNum:
prices = firstPrices[0]
secondProfile.updatePrices(prices, firstDayNum)
#thoughts
self.firstPerson.think("I told " + self.secondPerson.name + " about the prices at " + self.target.name + ".")
self.secondPerson.think(self.firstPerson.name + " told me about the prices at " + self.target.name + ".")
elif secondDayNum > firstDayNum:
prices = secondPrices[0]
firstProfile.updatePrices(prices, secondDayNum)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about the prices at " + self.target.name + ".")
self.secondPerson.think("I told " + self.firstPerson.name + " about the prices at " + self.target.name + ".")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s prices.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s prices.")
else:
if self.state == 'store':
self.firstPerson.think(self.secondPerson.name + " listened to me gripe about how I can't find anywhere to shop.")
self.secondPerson.think(self.firstPerson.name + " told me that they can't find anywhere to shop.")
elif self.state == 'manu':
self.firstPerson.think("I mentioned to " + self.secondPerson.name + " that I don't know anything about the local industry.")
self.secondPerson.think(self.firstPerson.name + " told me that they don't know much about the local industry.")
else:
self.firstPerson.think("There is a bug in conversation.prices. (not manu or store)")
self.secondPerson.think("There is a bug in conversation.prices. (not manu or store)")
def family(self):
if self.target is not None:
#info: family, people
#profiles
p1 = self.firstPerson.peopleManager(self.target)
p2 = self.secondPerson.peopleManager(self.target)
#variables
f1 = p1.getFamily()
f2 = p2.getFamily()
ff = []
#update profiles
for a, b in zip(f1, f2):
if a[-1] >= b[-1]:
ff.append(a)
else:
ff.append(b)
p1.updateFamily(*ff)
p2.updateFamily(*ff)
#thoughts
self.firstPerson.think(self.secondPerson.name + " and I gossipped about " + self.target.name + "'s family.")
self.secondPerson.think(self.firstPerson.name + " and I gossipped about " + self.target.name + "'s family.")
else:
self.firstPerson.think("I don't really know anything about my friends' families.")
self.secondPerson.think("I don't really know anything about my friends' families.")
def job(self):
if self.target is not None:
#profiles
firstProfile = self.firstPerson.peopleManager(self.target)
secondProfile = self.secondPerson.peopleManager(self.target)
#variables
firstJob = firstProfile.getJob()
secondJob = secondProfile.getJob()
#update profiles
if firstJob[1] > secondJob[1]:
secondProfile.updateJob(*firstJob)
self.firstPerson.think("I told " + self.secondPerson.name + " what " + self.target.name + " does for a living.")
self.secondPerson.think(self.firstPerson.name + " told me what " + self.target.name + " does for a living.")
elif secondJob[1] > firstJob[1]:
firstProfile.updateJob(*secondJob)
self.firstPerson.think(self.secondPerson.name + " told me what " + self.target.name + " does for a living.")
                self.secondPerson.think("I told " + self.firstPerson.name + " what " + self.target.name + " does for a living.")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s job.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s job.")
else:
self.firstPerson.think("I don't know what any of my friends do for a living!")
self.secondPerson.think("I don't know what any of my friends do for a living!")
# def skills(self):
# #info: skills
# if self.target is not None:
# #profiles
# firstProfile = self.firstPerson.peopleManager(self.target)
# secondProfile = self.secondPerson.peopleManager(self.target)
# #variables
# firstSkills = firstProfile.getSkills()
# secondSkills = secondProfile.getSkills()
# #update profiles
# if firstSkills[1] > secondSkills[1]:
# secondProfile.updateSkills(*firstSkills)
# self.firstPerson.think("I told " + self.secondPerson.name + " about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# elif secondSkills[1] > firstSkills[1]:
# firstProfile.updateSkills(*secondSkills)
# self.firstPerson.think(self.secondPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think("I told " + self.firstPerson.name + " about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think(self.secondPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think("I should spend more time doing things with my friends.")
# self.secondPerson.think("I should spend more time doing things with my friends.")
def myfamily(self):
#info: family, people
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
firstOwn = self.firstPerson.peopleManager(self.firstPerson)
secondOwn = self.secondPerson.peopleManager(self.secondPerson)
#update profiles
firstProfile.updateFamily(firstOwn.getFather(), firstOwn.getMother(), firstOwn.getSpouse(), firstOwn.getSiblings(), firstOwn.getChildren())
secondProfile.updateFamily(secondOwn.getFather(), secondOwn.getMother(), secondOwn.getSpouse(), secondOwn.getSiblings(), secondOwn.getChildren())
#thoughts
self.firstPerson.think(self.secondPerson.name + " caught me up on their family life.")
self.secondPerson.think(self.firstPerson.name + " caught me up on their family life.")
def myjob(self):
#info: jobs, jobUnits, *salaries
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
#variables
firstJob = self.firstPerson.getJob()
secondJob = self.secondPerson.getJob()
dayNum = self.firstPerson.model.getDayNum()
try:
firstJobType = firstJob.getJobType()
firstJobUnit = firstJob.getUnit()
firstJobLoc = firstJobUnit.getName()
firstSalary = firstJob.getSalary()
        except AttributeError:  # no current job
firstJobType = "Jobhunter"
firstJobUnit = None
firstJobLoc = "home"
firstSalary = 0
try:
secondJobType = secondJob.getJobType()
secondJobUnit = secondJob.getUnit()
secondJobLoc = secondJobUnit.getName()
secondSalary = secondJob.getSalary()
        except AttributeError:  # no current job
secondJobType = "Jobhunter"
secondJobUnit = None
secondJobLoc = "home"
secondSalary = 0
#update profiles
if dayNum > firstProfile.getJob()[1]:
firstProfile.updateJob(firstJob, dayNum)
if dayNum > firstProfile.getSalary()[1]:
firstProfile.updateSalary(firstSalary, dayNum)
if dayNum > secondProfile.getJob()[1]:
secondProfile.updateJob(secondJob, dayNum)
if dayNum > secondProfile.getSalary()[1]:
            secondProfile.updateSalary(secondSalary, dayNum)
if firstJobUnit is not None:
self.secondPerson.unitManager(firstJobUnit, self.firstPerson)
if secondJobUnit is not None:
self.firstPerson.unitManager(secondJobUnit, self.secondPerson)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about their job as a " + secondJobType + " at " + secondJobLoc + ".")
self.secondPerson.think(self.firstPerson.name + " told me about their job as a " + firstJobType + " at " + firstJobLoc + ".")
# def myskills(self):
# #info skills
# #profiles
# firstProfile = self.secondPerson.peopleManager(self.firstPerson)
# secondProfile = self.firstPerson.peopleManager(self.secondPerson)
# #variables
# firstSkills = self.firstPerson.getSkills()
# secondSkills = self.secondPerson.getSkills()
# dayNum = self.firstPerson.model.getDayNum()
# #update profiles
# if dayNum > firstProfile.getSkills()[1]:
# firstProfile.updateSkills(firstSkills, dayNum)
# if dayNum > secondProfile.getSkills()[1]:
# secondProfile.updateSkills(secondSkills, dayNum)
# #thoughts
# self.firstPerson.think(self.secondPerson.name + " and I talked shop for a while.")
# self.secondPerson.think(self.firstPerson.name + " and I talked shop for a while.")
#dialogues are chosen here, but the actual method call is in the handler (eg prices)
def talk(self, matrix, stateVector):
if self.isPlayer:
# stateVector = playerChoice
pass
else:
#get dialogue probabilities given last dialogue
probArray = np.dot(matrix, stateVector)
prob = probArray.tolist()
#choose dialogue
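            #(walk the cumulative distribution: subtract successive probabilities from the random
            # draw until one covers the remainder, selecting state i with probability prob[i])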
choice = random.random()
stateVector = [0 for i in range(len(prob))]
for i in range(len(prob)):
outcome = prob[i]
if outcome >= choice:
stateVector[i] = 1
return stateVector
                else:
                    choice = choice - outcome
            #fallback for floating-point rounding: if no state was selected, pick the last option
            stateVector[-1] = 1
            return stateVector
def topicHandler(self):
matrix = Conversation.topicMatrix
stateVector = [0,0,0,0,1]
# self.firstPerson.think("topicHandler")
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def storeHandler(self):
matrix = Conversation.storeMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("storeHandler")
self.different()
while self.state == 'store':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def manuHandler(self):
matrix = Conversation.manuMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("manuHandler")
self.different()
while self.state == 'manu':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def friendHandler(self):
matrix = Conversation.friendMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("friendHandler")
self.different()
while self.state == 'friend':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def myselfHandler(self):
matrix = Conversation.myselfMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("myselfHandler")
while self.state == 'myself':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def exitHandler(self):
self.isPlayer = False
Convo = Conversation()
|
[
"random.random",
"numpy.dot",
"transitions.Machine"
] |
[((2827, 2933), 'transitions.Machine', 'Machine', ([], {'model': 'self', 'states': 'Conversation.states', 'transitions': 'Conversation.transitions', 'initial': '"""exit"""'}), "(model=self, states=Conversation.states, transitions=Conversation.\n transitions, initial='exit')\n", (2834, 2933), False, 'from transitions import Machine\n'), ((15917, 15944), 'numpy.dot', 'np.dot', (['matrix', 'stateVector'], {}), '(matrix, stateVector)\n', (15923, 15944), True, 'import numpy as np\n'), ((16034, 16049), 'random.random', 'random.random', ([], {}), '()\n', (16047, 16049), False, 'import random\n')]
|
import tensorflow as tf
import os
import pickle
import numpy as np
from constant_params import input_feature_dim, window_size
def build_dataset(input_tfrecord_files, batch_size):
drop_remainder = False
feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64),
'ref_aa': tf.io.FixedLenFeature([], tf.int64),
'alt_aa': tf.io.FixedLenFeature([], tf.int64),
'feature': tf.io.FixedLenFeature([], tf.string),
'mask': tf.io.FixedLenFeature([], tf.string),
'var_id': tf.io.FixedLenFeature([], tf.string),
}
def _parser(example_proto):
parsed = tf.io.parse_single_example(example_proto, feature_description)
label, ref_aa, alt_aa = parsed['label'], parsed['ref_aa'], parsed[
'alt_aa']
var_id = parsed['var_id']
ref_aa, alt_aa, label = tf.cast(ref_aa, tf.int32), tf.cast(
alt_aa, tf.int32), tf.cast(label, tf.float32)
feature = tf.io.decode_raw(parsed['feature'], tf.float32)
feature = tf.reshape(feature, (window_size, input_feature_dim))
mask = tf.io.decode_raw(parsed['mask'], tf.float32)
mask = tf.reshape(mask, (window_size, ))
h = window_size // 2
        #mask the position of interest
mask = tf.concat(
[mask[:h],
tf.cast([
1,
], dtype=tf.float32), mask[h + 1:]],
axis=-1)
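        #(the stored mask presumably flags padded positions; forcing the centre entry to 1 also
        # hides the variant position itself from the model)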
'''
pos_encoding = 1.0 + tf.cast(
tf.math.abs(window_size // 2 - tf.range(window_size)),
dtype=tf.float32)
#pos_encoding = tf.math.log() / tf.math.log(2.0)
feature = tf.concat([feature, pos_encoding[:, tf.newaxis]], axis=-1)
'''
return var_id, ref_aa, alt_aa, feature, label, mask
dataset = tf.data.TFRecordDataset(input_tfrecord_files)
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
dataset = dataset.shuffle(2048)
dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
#dataset = dataset.prefetch(4)
return dataset
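# Usage sketch for build_dataset; the file name below is a placeholder, not part of
# this project:
#   dataset = build_dataset(['variants.tfrecord'], batch_size=64)
#   for var_id, ref_aa, alt_aa, feature, label, mask in dataset:
#       ...  # feature has shape (batch, window_size, input_feature_dim)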
def build_all_possible_missenses_dataset(tr_list, feature_dir, batch_size):
amino_acid_order = 'ACDEFGHIKLMNPQRSTVWY*'
def _gen_data():
for transcript_id in tr_list:
feature_path = f'{feature_dir}/{transcript_id}.pickle'
if not os.path.exists(feature_path):
continue
print(feature_path, flush=True)
with open(feature_path, 'rb') as fr:
feature = pickle.load(fr)
L = feature.shape[0]
w = window_size // 2
for aa_pos in range(L):
ref_aa = int(feature[aa_pos, 0])
start = max(aa_pos - w, 0)
end = min(L, aa_pos + 1 + w)
var_start = start - (aa_pos - w)
var_end = var_start + (end - start)
var_feature = np.zeros([w * 2 + 1, feature.shape[1]])
var_feature[var_start:var_end] = feature[start:end]
mask = np.ones((w * 2 + 1, ), dtype=np.float32)
mask[var_start:var_end] = 0.0
mask[w] = 1.0
for alt_aa in range(20):
var_id = f'{transcript_id}_{str(aa_pos+1)}_{amino_acid_order[ref_aa]}_{amino_acid_order[alt_aa]}'.encode(
'utf-8')
yield var_id, np.int32(ref_aa), np.int32(
alt_aa), np.float32(var_feature), np.float32(mask)
dataset = tf.data.Dataset.from_generator(
_gen_data, (tf.string, tf.int32, tf.int32, tf.float32, tf.float32),
(tf.TensorShape(()), tf.TensorShape(()), tf.TensorShape(
()), tf.TensorShape((window_size, input_feature_dim)),
tf.TensorShape((window_size, ))))
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
#dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(4)
return dataset
def build_test_dataset(input_tfrecord_files, batch_size):
drop_remainder = False
feature_description = {
'ref_aa': tf.io.FixedLenFeature([], tf.int64),
'alt_aa': tf.io.FixedLenFeature([], tf.int64),
'feature': tf.io.FixedLenFeature([], tf.string),
'mask': tf.io.FixedLenFeature([], tf.string),
'var_id': tf.io.FixedLenFeature([], tf.string),
}
def _parser(example_proto):
parsed = tf.io.parse_single_example(example_proto, feature_description)
ref_aa, alt_aa = parsed['ref_aa'], parsed['alt_aa']
var_id = parsed['var_id']
ref_aa, alt_aa = tf.cast(ref_aa, tf.int32), tf.cast(alt_aa, tf.int32)
feature = tf.io.decode_raw(parsed['feature'], tf.float32)
feature = tf.reshape(feature, (window_size, input_feature_dim))
mask = tf.io.decode_raw(parsed['mask'], tf.float32)
mask = tf.reshape(mask, (window_size, ))
h = window_size // 2
        # mask the position of interest
mask = tf.concat(
[mask[:h],
tf.cast([
1,
], dtype=tf.float32), mask[h + 1:]],
axis=-1)
return var_id, ref_aa, alt_aa, feature, mask
dataset = tf.data.TFRecordDataset(input_tfrecord_files)
options = tf.data.Options()
options.experimental_threading.max_intra_op_parallelism = 1
dataset = dataset.with_options(options)
dataset = dataset.map(_parser, num_parallel_calls=8)
dataset = dataset.batch(batch_size)
#dataset = dataset.prefetch(4)
return dataset
|
[
"tensorflow.data.TFRecordDataset",
"os.path.exists",
"numpy.ones",
"tensorflow.io.parse_single_example",
"tensorflow.data.Options",
"pickle.load",
"numpy.int32",
"numpy.zeros",
"tensorflow.io.FixedLenFeature",
"tensorflow.io.decode_raw",
"tensorflow.reshape",
"tensorflow.cast",
"numpy.float32",
"tensorflow.TensorShape"
] |
[((1795, 1840), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_tfrecord_files'], {}), '(input_tfrecord_files)\n', (1818, 1840), True, 'import tensorflow as tf\n'), ((1856, 1873), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (1871, 1873), True, 'import tensorflow as tf\n'), ((3909, 3926), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (3924, 3926), True, 'import tensorflow as tf\n'), ((5424, 5469), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['input_tfrecord_files'], {}), '(input_tfrecord_files)\n', (5447, 5469), True, 'import tensorflow as tf\n'), ((5485, 5502), 'tensorflow.data.Options', 'tf.data.Options', ([], {}), '()\n', (5500, 5502), True, 'import tensorflow as tf\n'), ((255, 290), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (276, 290), True, 'import tensorflow as tf\n'), ((310, 345), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (331, 345), True, 'import tensorflow as tf\n'), ((365, 400), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (386, 400), True, 'import tensorflow as tf\n'), ((421, 457), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (442, 457), True, 'import tensorflow as tf\n'), ((475, 511), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (496, 511), True, 'import tensorflow as tf\n'), ((531, 567), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (552, 567), True, 'import tensorflow as tf\n'), ((625, 687), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (651, 687), True, 'import tensorflow as tf\n'), ((965, 1012), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['feature']", 'tf.float32'], {}), "(parsed['feature'], tf.float32)\n", (981, 1012), True, 'import tensorflow as tf\n'), ((1031, 1084), 'tensorflow.reshape', 'tf.reshape', (['feature', '(window_size, input_feature_dim)'], {}), '(feature, (window_size, input_feature_dim))\n', (1041, 1084), True, 'import tensorflow as tf\n'), ((1101, 1145), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['mask']", 'tf.float32'], {}), "(parsed['mask'], tf.float32)\n", (1117, 1145), True, 'import tensorflow as tf\n'), ((1161, 1193), 'tensorflow.reshape', 'tf.reshape', (['mask', '(window_size,)'], {}), '(mask, (window_size,))\n', (1171, 1193), True, 'import tensorflow as tf\n'), ((4324, 4359), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (4345, 4359), True, 'import tensorflow as tf\n'), ((4379, 4414), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (4400, 4414), True, 'import tensorflow as tf\n'), ((4435, 4471), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4456, 4471), True, 'import tensorflow as tf\n'), ((4489, 4525), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4510, 4525), True, 'import tensorflow as tf\n'), ((4545, 4581), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (4566, 4581), True, 'import 
tensorflow as tf\n'), ((4639, 4701), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'feature_description'], {}), '(example_proto, feature_description)\n', (4665, 4701), True, 'import tensorflow as tf\n'), ((4894, 4941), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['feature']", 'tf.float32'], {}), "(parsed['feature'], tf.float32)\n", (4910, 4941), True, 'import tensorflow as tf\n'), ((4960, 5013), 'tensorflow.reshape', 'tf.reshape', (['feature', '(window_size, input_feature_dim)'], {}), '(feature, (window_size, input_feature_dim))\n', (4970, 5013), True, 'import tensorflow as tf\n'), ((5030, 5074), 'tensorflow.io.decode_raw', 'tf.io.decode_raw', (["parsed['mask']", 'tf.float32'], {}), "(parsed['mask'], tf.float32)\n", (5046, 5074), True, 'import tensorflow as tf\n'), ((5090, 5122), 'tensorflow.reshape', 'tf.reshape', (['mask', '(window_size,)'], {}), '(mask, (window_size,))\n', (5100, 5122), True, 'import tensorflow as tf\n'), ((852, 877), 'tensorflow.cast', 'tf.cast', (['ref_aa', 'tf.int32'], {}), '(ref_aa, tf.int32)\n', (859, 877), True, 'import tensorflow as tf\n'), ((879, 904), 'tensorflow.cast', 'tf.cast', (['alt_aa', 'tf.int32'], {}), '(alt_aa, tf.int32)\n', (886, 904), True, 'import tensorflow as tf\n'), ((919, 945), 'tensorflow.cast', 'tf.cast', (['label', 'tf.float32'], {}), '(label, tf.float32)\n', (926, 945), True, 'import tensorflow as tf\n'), ((3728, 3746), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3742, 3746), True, 'import tensorflow as tf\n'), ((3748, 3766), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3762, 3766), True, 'import tensorflow as tf\n'), ((3768, 3786), 'tensorflow.TensorShape', 'tf.TensorShape', (['()'], {}), '(())\n', (3782, 3786), True, 'import tensorflow as tf\n'), ((3801, 3849), 'tensorflow.TensorShape', 'tf.TensorShape', (['(window_size, input_feature_dim)'], {}), '((window_size, input_feature_dim))\n', (3815, 3849), True, 'import tensorflow as tf\n'), ((3860, 3890), 'tensorflow.TensorShape', 'tf.TensorShape', (['(window_size,)'], {}), '((window_size,))\n', (3874, 3890), True, 'import tensorflow as tf\n'), ((4822, 4847), 'tensorflow.cast', 'tf.cast', (['ref_aa', 'tf.int32'], {}), '(ref_aa, tf.int32)\n', (4829, 4847), True, 'import tensorflow as tf\n'), ((4849, 4874), 'tensorflow.cast', 'tf.cast', (['alt_aa', 'tf.int32'], {}), '(alt_aa, tf.int32)\n', (4856, 4874), True, 'import tensorflow as tf\n'), ((1325, 1355), 'tensorflow.cast', 'tf.cast', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (1332, 1355), True, 'import tensorflow as tf\n'), ((2444, 2472), 'os.path.exists', 'os.path.exists', (['feature_path'], {}), '(feature_path)\n', (2458, 2472), False, 'import os\n'), ((2619, 2634), 'pickle.load', 'pickle.load', (['fr'], {}), '(fr)\n', (2630, 2634), False, 'import pickle\n'), ((3009, 3048), 'numpy.zeros', 'np.zeros', (['[w * 2 + 1, feature.shape[1]]'], {}), '([w * 2 + 1, feature.shape[1]])\n', (3017, 3048), True, 'import numpy as np\n'), ((3141, 3180), 'numpy.ones', 'np.ones', (['(w * 2 + 1,)'], {'dtype': 'np.float32'}), '((w * 2 + 1,), dtype=np.float32)\n', (3148, 3180), True, 'import numpy as np\n'), ((5254, 5284), 'tensorflow.cast', 'tf.cast', (['[1]'], {'dtype': 'tf.float32'}), '([1], dtype=tf.float32)\n', (5261, 5284), True, 'import tensorflow as tf\n'), ((3493, 3509), 'numpy.int32', 'np.int32', (['ref_aa'], {}), '(ref_aa)\n', (3501, 3509), True, 'import numpy as np\n'), ((3511, 3527), 'numpy.int32', 'np.int32', (['alt_aa'], {}), 
'(alt_aa)\n', (3519, 3527), True, 'import numpy as np\n'), ((3554, 3577), 'numpy.float32', 'np.float32', (['var_feature'], {}), '(var_feature)\n', (3564, 3577), True, 'import numpy as np\n'), ((3579, 3595), 'numpy.float32', 'np.float32', (['mask'], {}), '(mask)\n', (3589, 3595), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.svm import SVR
from sklearn.svm import LinearSVR
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
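# prune() clamps a predicted relevance score into the [1, 3] range used when the
# submission file is written below.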
def prune(x):
if x < 1:
return 1
elif x > 3:
return 3
else:
return x
def regression(reg_type, standardize_df, debug=False):
# load model
filename = '../../dataset/model_' + reg_type + '.pickle'
lin_model = None
with open(filename, 'rb') as f:
lin_model = pickle.load(f)
score_df_tst = pd.read_pickle('../../dataset/score_df_final_tst.pickle')
# Fill NaN value
# score_df = score_df.fillna(0.0)
    # Convert the test features to a matrix for prediction
X = np.array(score_df_tst)
if standardize_df:
print("Standardizing...")
with open("../../dataset/scaler.pickle", 'rb') as handle:
scaler = pickle.load(handle)
X = scaler.transform(X)
    # Debug
    if debug:
        print("Score DataFrame (test)")
        print(score_df_tst)
        print("")
        print("Feature matrix X")
        print(X)
        print("")
        print("Shape of X")
        print(X.shape)
        print("")
yts_pred = lin_model.predict(X)
#yts_error = sqrt(mean_squared_error(yts_pred, yts))
print("Prediction by (" + reg_type + ") on Test data have finished")
# create submission file
id_series = pd.read_csv('../../dataset/test.csv')['id']
submission_df = pd.DataFrame(id_series, columns=['id'])
submission_df['relevance'] = yts_pred
submission_df['relevance'] = submission_df['relevance'].map(lambda x: prune(x))
submission_df.to_csv('../../dataset/submission.csv', columns=['id', 'relevance'], index=False)
if __name__ == "__main__":
# Change between:
# svr
# linear
# rfr
regression_type = 'svr'
standardize_df = True
regression(regression_type, standardize_df, debug=False)
|
[
"pandas.read_pickle",
"pandas.read_csv",
"pickle.load",
"numpy.array",
"pandas.DataFrame"
] |
[((697, 754), 'pandas.read_pickle', 'pd.read_pickle', (['"""../../dataset/score_df_final_tst.pickle"""'], {}), "('../../dataset/score_df_final_tst.pickle')\n", (711, 754), True, 'import pandas as pd\n'), ((848, 870), 'numpy.array', 'np.array', (['score_df_tst'], {}), '(score_df_tst)\n', (856, 870), True, 'import numpy as np\n'), ((1706, 1745), 'pandas.DataFrame', 'pd.DataFrame', (['id_series'], {'columns': "['id']"}), "(id_series, columns=['id'])\n", (1718, 1745), True, 'import pandas as pd\n'), ((665, 679), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (676, 679), False, 'import pickle\n'), ((1645, 1682), 'pandas.read_csv', 'pd.read_csv', (['"""../../dataset/test.csv"""'], {}), "('../../dataset/test.csv')\n", (1656, 1682), True, 'import pandas as pd\n'), ((992, 1011), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1003, 1011), False, 'import pickle\n')]
|
import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family': 'sans-serif',
'weight': 'bold',
'size': 14}
class MappingEnv(gym.Env):
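    # Summary inferred from the code below (not part of the original source):
    # n_agents double-integrator agents move in a 2D plane and are rewarded for
    # newly observing targets laid out on a grid; observations stack the relative
    # states of the nearest agents and nearest unobserved targets, and the
    # nearest-neighbour adjacency matrix is returned alongside as the graph.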
def __init__(self):
# config_file = path.join(path.dirname(__file__), "params_flock.cfg")
# config = configparser.ConfigParser()
# config.read(config_file)
# config = config['flock']
self.nearest_agents = 7
self.nearest_targets = 7
self.mean_pooling = True # normalize the adjacency matrix by the number of neighbors or not
self.centralized = True
        # number of states per agent
self.nx_system = 4
# number of actions per agent
self.nu = 2
# default problem parameters
self.n_agents = 100 # int(config['network_size'])
# self.comm_radius = 0.9 # float(config['comm_radius'])
self.dt = 0.1 # #float(config['system_dt'])
self.v_max = 5.0 # float(config['max_vel_init'])
self.v_bias = self.v_max
        # initialize state matrices
self.x = None
self.u = None
self.mean_vel = None
self.init_vel = None
self.greedy_action = None
self.diff = None
self.r2 = None
self.adj_mat = None
self.adj_mat_mean = None
self.diff_targets = None
self.r2_targets = None
self.target_observed = None
self.state_network = None
self.state_values = None
self.reward = None
self.max_accel = 1
# self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
# dtype=np.float32)
#
# self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, ),
# dtype=np.float32)
# target initialization
self.px_max = 100
self.py_max = 100
x = np.linspace(-1.0 * self.px_max, self.px_max, self.n_agents)
y = np.linspace(-1.0 * self.py_max, self.py_max, self.n_agents)
tx, ty = np.meshgrid(x, y)
tx = tx.reshape((-1, 1))
ty = ty.reshape((-1, 1))
self.obs_rad = 2.0
self.obs_rad2 = self.obs_rad * self.obs_rad
self.target_x = np.stack((tx, ty), axis=1).reshape((-1, 2))
        self.target_unobserved = np.ones((self.n_agents * self.n_agents, 2), dtype=bool)
# rendering initialization
self.fig = None
self.ax = None
self.line1 = None
self.line2 = None
self.action_scalar = 10.0
self.seed()
def reset(self):
x = np.zeros((self.n_agents, self.nx_system))
        self.target_unobserved = np.ones((self.n_agents * self.n_agents, 2), dtype=bool)
x[:, 0] = np.random.uniform(low=-self.px_max, high=self.px_max, size=(self.n_agents,))
x[:, 1] = np.random.uniform(low=-self.py_max, high=self.py_max, size=(self.n_agents,))
#bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) #+ bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) #+ bias[1]
# keep good initialization
self.mean_vel = np.mean(x[:, 2:4], axis=0)
self.init_vel = x[:, 2:4]
self.x = x
# self.a_net = self.get_connectivity(self.x)
self.compute_helpers()
return self.state_values, self.state_network
def params_from_cfg(self, args):
# TODO
pass
# # self.comm_radius = args.getfloat('comm_radius')
# # self.comm_radius2 = self.comm_radius * self.comm_radius
# # self.vr = 1 / self.comm_radius2 + np.log(self.comm_radius2)
# #
# # self.n_agents = args.getint('n_agents')
# # self.r_max = self.r_max * np.sqrt(self.n_agents)
#
# # self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
# # dtype=np.float32)
# #
# # self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),
# # dtype=np.float32)
#
# self.v_max = args.getfloat('v_max')
# self.v_bias = self.v_max
# self.dt = args.getfloat('dt')
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
# u = np.reshape(u, (-1, 2))
assert u.shape == (self.n_agents, self.nu)
u = np.clip(u, a_min=-self.max_accel, a_max=self.max_accel)
self.u = u * self.action_scalar
old_x = np.copy(self.x)
# x position
self.x[:, 0] = self.x[:, 0] + self.x[:, 2] * self.dt + self.u[:, 0] * self.dt * self.dt * 0.5
# y position
self.x[:, 1] = self.x[:, 1] + self.x[:, 3] * self.dt + self.u[:, 1] * self.dt * self.dt * 0.5
# x velocity
self.x[:, 2] = self.x[:, 2] + self.u[:, 0] * self.dt
# y velocity
self.x[:, 3] = self.x[:, 3] + self.u[:, 1] * self.dt
# clip velocities
self.x[:, 2:4] = np.clip(self.x[:, 2:4], -1.0*self.v_max, self.v_max)
dist_traveled = np.sum(np.linalg.norm(self.x[:, 0:2] - old_x[:, 0:2], axis=1))
self.compute_helpers()
done = (0 == np.sum(self.target_unobserved))
return (self.state_values, self.state_network), 10.0 * self.reward - dist_traveled, done, {}
def compute_helpers(self):
# TODO - check this, and initialize stuff in the init(), and try to make more efficient
# Neighbors computations
self.diff = self.x.reshape((self.n_agents, 1, self.nx_system)) - self.x.reshape(
(1, self.n_agents, self.nx_system))
self.r2 = np.multiply(self.diff[:, :, 0], self.diff[:, :, 0]) + np.multiply(self.diff[:, :, 1],
self.diff[:, :, 1])
np.fill_diagonal(self.r2, np.Inf)
nearest = np.argsort(self.r2, axis=1)
obs_neigh = np.zeros((self.n_agents, self.nearest_agents * 4))
self.adj_mat = np.zeros((self.n_agents, self.n_agents))
for i in range(self.nearest_agents):
ind2, ind3 = np.meshgrid(nearest[:, i], range(4), indexing='ij')
ind1, _ = np.meshgrid(range(self.n_agents), range(4), indexing='ij')
obs_neigh[:, i * self.nx_system:(i + 1) * self.nx_system] = np.reshape(
self.diff[ind1.flatten(), ind2.flatten(), ind3.flatten()], (-1, 4))
self.adj_mat[:, nearest[:, i]] = 1.0
# Normalize the adjacency matrix by the number of neighbors - results in mean pooling, instead of sum pooling
n_neighbors = np.reshape(np.sum(self.adj_mat, axis=1), (self.n_agents, 1)) # correct - checked this
n_neighbors[n_neighbors == 0] = 1
self.adj_mat_mean = self.adj_mat / n_neighbors
# Targets computations
self.diff_targets = self.x[:, 0:2].reshape((self.n_agents, 1, 2)) - self.target_x[
self.target_unobserved].reshape(
(1, -1, 2))
self.r2_targets = np.multiply(self.diff_targets[:, :, 0], self.diff_targets[:, :, 0]) + np.multiply(
self.diff_targets[:, :, 1],
self.diff_targets[:, :, 1])
nearest_targets = np.argsort(self.r2_targets, axis=1)
obs_target = np.zeros((self.n_agents, self.nearest_targets * 2))
for i in range(min(self.nearest_targets, np.shape(nearest_targets)[1])):
ind2, ind3 = np.meshgrid(nearest_targets[:, i], range(2), indexing='ij')
ind1, _ = np.meshgrid(range(self.n_agents), range(2), indexing='ij')
obs_target[:, i * 2:(i + 1) * 2] = np.reshape(
self.diff_targets[ind1.flatten(), ind2.flatten(), ind3.flatten()], (-1, 2))
self.target_observed = np.any(self.r2_targets < self.obs_rad2, axis=0).reshape((-1, 1))
self.target_unobserved[self.target_unobserved] = np.tile(np.logical_not(self.target_observed), (1, 2)).flatten()
        self.reward = np.sum(self.target_observed.astype(int))
self.state_values = np.hstack((obs_neigh, obs_target))
self.greedy_action = -1.0 * obs_target[:, 0:2]
if self.mean_pooling:
self.state_network = self.adj_mat_mean
else:
self.state_network = self.adj_mat
def controller(self):
"""
The controller for flocking from Turner 2003.
Returns: the optimal action
"""
# TODO
# return np.zeros((self.n_agents, 2))
return self.greedy_action / 10.0
def render(self, mode='human'):
"""
Render the environment with agents as points in 2D space
"""
if self.fig is None:
plt.ion()
fig = plt.figure()
self.ax = fig.add_subplot(111)
line1, = self.ax.plot(self.x[:, 0], self.x[:, 1], 'bo')
locs = self.target_x[self.target_unobserved].reshape((-1, 2))
line2, = self.ax.plot(locs[:, 0], locs[:, 1], 'rx')
plt.ylim(-1.0 * self.py_max, 1.0 * self.py_max)
plt.xlim(-1.0 * self.px_max, 1.0 * self.px_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line2 = line2
# TODO render unobserved targets
else:
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
locs = self.target_x[self.target_unobserved].reshape((-1,2))
self.line2.set_xdata(locs[:, 0])
self.line2.set_ydata(locs[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def close(self):
pass
|
[
"numpy.clip",
"numpy.hstack",
"numpy.logical_not",
"numpy.argsort",
"numpy.linalg.norm",
"gym.utils.seeding.np_random",
"numpy.mean",
"numpy.multiply",
"numpy.stack",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.ylim",
"numpy.ones",
"matplotlib.pyplot.gca",
"numpy.fill_diagonal",
"numpy.any",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.shape",
"numpy.copy",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.random.uniform"
] |
[((2106, 2165), 'numpy.linspace', 'np.linspace', (['(-1.0 * self.px_max)', 'self.px_max', 'self.n_agents'], {}), '(-1.0 * self.px_max, self.px_max, self.n_agents)\n', (2117, 2165), True, 'import numpy as np\n'), ((2178, 2237), 'numpy.linspace', 'np.linspace', (['(-1.0 * self.py_max)', 'self.py_max', 'self.n_agents'], {}), '(-1.0 * self.py_max, self.py_max, self.n_agents)\n', (2189, 2237), True, 'import numpy as np\n'), ((2256, 2273), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2267, 2273), True, 'import numpy as np\n'), ((2522, 2580), 'numpy.ones', 'np.ones', (['(self.n_agents * self.n_agents, 2)'], {'dtype': 'np.bool'}), '((self.n_agents * self.n_agents, 2), dtype=np.bool)\n', (2529, 2580), True, 'import numpy as np\n'), ((2805, 2846), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nx_system)'], {}), '((self.n_agents, self.nx_system))\n', (2813, 2846), True, 'import numpy as np\n'), ((2880, 2938), 'numpy.ones', 'np.ones', (['(self.n_agents * self.n_agents, 2)'], {'dtype': 'np.bool'}), '((self.n_agents * self.n_agents, 2), dtype=np.bool)\n', (2887, 2938), True, 'import numpy as np\n'), ((2958, 3034), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.px_max)', 'high': 'self.px_max', 'size': '(self.n_agents,)'}), '(low=-self.px_max, high=self.px_max, size=(self.n_agents,))\n', (2975, 3034), True, 'import numpy as np\n'), ((3053, 3129), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.py_max)', 'high': 'self.py_max', 'size': '(self.n_agents,)'}), '(low=-self.py_max, high=self.py_max, size=(self.n_agents,))\n', (3070, 3129), True, 'import numpy as np\n'), ((3230, 3304), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_agents,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_agents,))\n', (3247, 3304), True, 'import numpy as np\n'), ((3334, 3408), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-self.v_max)', 'high': 'self.v_max', 'size': '(self.n_agents,)'}), '(low=-self.v_max, high=self.v_max, size=(self.n_agents,))\n', (3351, 3408), True, 'import numpy as np\n'), ((3480, 3506), 'numpy.mean', 'np.mean', (['x[:, 2:4]'], {'axis': '(0)'}), '(x[:, 2:4], axis=0)\n', (3487, 3506), True, 'import numpy as np\n'), ((4658, 4681), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (4675, 4681), False, 'from gym.utils import seeding\n'), ((4829, 4884), 'numpy.clip', 'np.clip', (['u'], {'a_min': '(-self.max_accel)', 'a_max': 'self.max_accel'}), '(u, a_min=-self.max_accel, a_max=self.max_accel)\n', (4836, 4884), True, 'import numpy as np\n'), ((4942, 4957), 'numpy.copy', 'np.copy', (['self.x'], {}), '(self.x)\n', (4949, 4957), True, 'import numpy as np\n'), ((5421, 5475), 'numpy.clip', 'np.clip', (['self.x[:, 2:4]', '(-1.0 * self.v_max)', 'self.v_max'], {}), '(self.x[:, 2:4], -1.0 * self.v_max, self.v_max)\n', (5428, 5475), True, 'import numpy as np\n'), ((6265, 6298), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.r2', 'np.Inf'], {}), '(self.r2, np.Inf)\n', (6281, 6298), True, 'import numpy as np\n'), ((6318, 6345), 'numpy.argsort', 'np.argsort', (['self.r2'], {'axis': '(1)'}), '(self.r2, axis=1)\n', (6328, 6345), True, 'import numpy as np\n'), ((6366, 6416), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nearest_agents * 4)'], {}), '((self.n_agents, self.nearest_agents * 4))\n', (6374, 6416), True, 'import numpy as np\n'), ((6440, 6480), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.n_agents)'], {}), '((self.n_agents, 
self.n_agents))\n', (6448, 6480), True, 'import numpy as np\n'), ((7634, 7669), 'numpy.argsort', 'np.argsort', (['self.r2_targets'], {'axis': '(1)'}), '(self.r2_targets, axis=1)\n', (7644, 7669), True, 'import numpy as np\n'), ((7691, 7742), 'numpy.zeros', 'np.zeros', (['(self.n_agents, self.nearest_targets * 2)'], {}), '((self.n_agents, self.nearest_targets * 2))\n', (7699, 7742), True, 'import numpy as np\n'), ((8456, 8490), 'numpy.hstack', 'np.hstack', (['(obs_neigh, obs_target)'], {}), '((obs_neigh, obs_target))\n', (8465, 8490), True, 'import numpy as np\n'), ((5506, 5560), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.x[:, 0:2] - old_x[:, 0:2])'], {'axis': '(1)'}), '(self.x[:, 0:2] - old_x[:, 0:2], axis=1)\n', (5520, 5560), True, 'import numpy as np\n'), ((5615, 5645), 'numpy.sum', 'np.sum', (['self.target_unobserved'], {}), '(self.target_unobserved)\n', (5621, 5645), True, 'import numpy as np\n'), ((6067, 6118), 'numpy.multiply', 'np.multiply', (['self.diff[:, :, 0]', 'self.diff[:, :, 0]'], {}), '(self.diff[:, :, 0], self.diff[:, :, 0])\n', (6078, 6118), True, 'import numpy as np\n'), ((6121, 6172), 'numpy.multiply', 'np.multiply', (['self.diff[:, :, 1]', 'self.diff[:, :, 1]'], {}), '(self.diff[:, :, 1], self.diff[:, :, 1])\n', (6132, 6172), True, 'import numpy as np\n'), ((7053, 7081), 'numpy.sum', 'np.sum', (['self.adj_mat'], {'axis': '(1)'}), '(self.adj_mat, axis=1)\n', (7059, 7081), True, 'import numpy as np\n'), ((7444, 7511), 'numpy.multiply', 'np.multiply', (['self.diff_targets[:, :, 0]', 'self.diff_targets[:, :, 0]'], {}), '(self.diff_targets[:, :, 0], self.diff_targets[:, :, 0])\n', (7455, 7511), True, 'import numpy as np\n'), ((7514, 7581), 'numpy.multiply', 'np.multiply', (['self.diff_targets[:, :, 1]', 'self.diff_targets[:, :, 1]'], {}), '(self.diff_targets[:, :, 1], self.diff_targets[:, :, 1])\n', (7525, 7581), True, 'import numpy as np\n'), ((9100, 9109), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (9107, 9109), True, 'import matplotlib.pyplot as plt\n'), ((9128, 9140), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9138, 9140), True, 'import matplotlib.pyplot as plt\n'), ((9402, 9449), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.0 * self.py_max)', '(1.0 * self.py_max)'], {}), '(-1.0 * self.py_max, 1.0 * self.py_max)\n', (9410, 9449), True, 'import matplotlib.pyplot as plt\n'), ((9462, 9509), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.0 * self.px_max)', '(1.0 * self.px_max)'], {}), '(-1.0 * self.px_max, 1.0 * self.px_max)\n', (9470, 9509), True, 'import matplotlib.pyplot as plt\n'), ((9526, 9531), 'matplotlib.pyplot.gca', 'gca', ([], {}), '()\n', (9529, 9531), False, 'from matplotlib.pyplot import gca\n'), ((9648, 9675), 'matplotlib.pyplot.title', 'plt.title', (['"""GNN Controller"""'], {}), "('GNN Controller')\n", (9657, 9675), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2470), 'numpy.stack', 'np.stack', (['(tx, ty)'], {'axis': '(1)'}), '((tx, ty), axis=1)\n', (2452, 2470), True, 'import numpy as np\n'), ((8175, 8222), 'numpy.any', 'np.any', (['(self.r2_targets < self.obs_rad2)'], {'axis': '(0)'}), '(self.r2_targets < self.obs_rad2, axis=0)\n', (8181, 8222), True, 'import numpy as np\n'), ((7793, 7818), 'numpy.shape', 'np.shape', (['nearest_targets'], {}), '(nearest_targets)\n', (7801, 7818), True, 'import numpy as np\n'), ((8305, 8341), 'numpy.logical_not', 'np.logical_not', (['self.target_observed'], {}), '(self.target_observed)\n', (8319, 8341), True, 'import numpy as np\n')]
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.Lu."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class LuOpTest(test.TestCase):
@property
def float_types(self):
return set((np.float64, np.float32, np.complex64, np.complex128))
def _verifyLuBase(self, x, lower, upper, perm, verification,
output_idx_type):
lower_np, upper_np, perm_np, verification_np = self.evaluate(
[lower, upper, perm, verification])
self.assertAllClose(x, verification_np)
self.assertShapeEqual(x, lower)
self.assertShapeEqual(x, upper)
self.assertAllEqual(x.shape[:-1], perm.shape.as_list())
# Check dtypes are as expected.
self.assertEqual(x.dtype, lower_np.dtype)
self.assertEqual(x.dtype, upper_np.dtype)
self.assertEqual(output_idx_type.as_numpy_dtype, perm_np.dtype)
# Check that the permutation is valid.
if perm_np.shape[-1] > 0:
perm_reshaped = np.reshape(perm_np, (-1, perm_np.shape[-1]))
for perm_vector in perm_reshaped:
self.assertAllClose(np.arange(len(perm_vector)), np.sort(perm_vector))
def _verifyLu(self, x, output_idx_type=dtypes.int64):
# Verify that Px = LU.
lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)
# Prepare the lower factor of shape num_rows x num_rows
lu_shape = np.array(lu.shape.as_list())
batch_shape = lu_shape[:-2]
num_rows = lu_shape[-2]
num_cols = lu_shape[-1]
lower = array_ops.matrix_band_part(lu, -1, 0)
if num_rows > num_cols:
eye = linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=lower.dtype)
lower = array_ops.concat([lower, eye[..., num_cols:]], axis=-1)
elif num_rows < num_cols:
lower = lower[..., :num_rows]
# Fill the diagonal with ones.
ones_diag = array_ops.ones(
np.append(batch_shape, num_rows), dtype=lower.dtype)
lower = array_ops.matrix_set_diag(lower, ones_diag)
# Prepare the upper factor.
upper = array_ops.matrix_band_part(lu, 0, -1)
verification = math_ops.matmul(lower, upper)
    # Permute the rows of the product of the triangular factors.
if num_rows > 0:
# Reshape the product of the triangular factors and permutation indices
# to a single batch dimension. This makes it easy to apply
# invert_permutation and gather_nd ops.
perm_reshaped = array_ops.reshape(perm, [-1, num_rows])
verification_reshaped = array_ops.reshape(verification,
[-1, num_rows, num_cols])
# Invert the permutation in each batch.
inv_perm_reshaped = map_fn.map_fn(array_ops.invert_permutation,
perm_reshaped)
batch_size = perm_reshaped.shape.as_list()[0]
# Prepare the batch indices with the same shape as the permutation.
# The corresponding batch index is paired with each of the `num_rows`
# permutation indices.
batch_indices = math_ops.cast(
array_ops.broadcast_to(
math_ops.range(batch_size)[:, None], perm_reshaped.shape),
dtype=output_idx_type)
permuted_verification_reshaped = array_ops.gather_nd(
verification_reshaped,
array_ops.stack([batch_indices, inv_perm_reshaped], axis=-1))
# Reshape the verification matrix back to the original shape.
verification = array_ops.reshape(permuted_verification_reshaped,
lu_shape)
self._verifyLuBase(x, lower, upper, perm, verification,
output_idx_type)
def testBasic(self):
data = np.array([[4., -1., 2.], [-1., 6., 0], [10., 0., 5.]])
for dtype in (np.float32, np.float64):
for output_idx_type in (dtypes.int32, dtypes.int64):
self._verifyLu(data.astype(dtype), output_idx_type=output_idx_type)
for dtype in (np.complex64, np.complex128):
for output_idx_type in (dtypes.int32, dtypes.int64):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data, output_idx_type=output_idx_type)
def testPivoting(self):
# This matrix triggers partial pivoting because the first diagonal entry
# is small.
data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
self._verifyLu(data.astype(np.float32))
for dtype in (np.float32, np.float64):
self._verifyLu(data.astype(dtype))
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
for dtype in (np.complex64, np.complex128):
complex_data = np.tril(1j * data, -1).astype(dtype)
complex_data += np.triu(-1j * data, 1).astype(dtype)
complex_data += data
self._verifyLu(complex_data)
_, p = linalg_ops.lu(data)
p_val = self.evaluate([p])
# Make sure p_val is not the identity permutation.
self.assertNotAllClose(np.arange(3), p_val)
def testInvalidMatrix(self):
# LU factorization gives an error when the input is singular.
# Note: A singular matrix may return without error but it won't be a valid
# factorization.
for dtype in self.float_types:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[1., 2., 3.], [2., 4., 6.], [2., 3., 4.]],
dtype=dtype)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(
linalg_ops.lu(
np.array([[[1., 2., 3.], [2., 4., 6.], [1., 2., 3.]],
[[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]]],
dtype=dtype)))
def testBatch(self):
simple_array = np.array([[[1., -1.], [2., 5.]]]) # shape (1, 2, 2)
self._verifyLu(simple_array)
self._verifyLu(np.vstack((simple_array, simple_array)))
odd_sized_array = np.array([[[4., -1., 2.], [-1., 6., 0], [2., 0., 5.]]])
self._verifyLu(np.vstack((odd_sized_array, odd_sized_array)))
batch_size = 200
# Generate random matrices.
np.random.seed(42)
matrices = np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
# Generate random complex valued matrices.
np.random.seed(52)
matrices = np.random.rand(batch_size, 5,
5) + 1j * np.random.rand(batch_size, 5, 5)
self._verifyLu(matrices)
def testLargeMatrix(self):
# Generate random matrices.
n = 500
np.random.seed(64)
data = np.random.rand(n, n)
self._verifyLu(data)
# Generate random complex valued matrices.
np.random.seed(129)
data = np.random.rand(n, n) + 1j * np.random.rand(n, n)
self._verifyLu(data)
@test_util.run_v1_only("b/120545219")
def testEmpty(self):
self._verifyLu(np.empty([0, 2, 2]))
self._verifyLu(np.empty([2, 0, 0]))
@test_util.run_deprecated_v1
def testConcurrentExecutesWithoutError(self):
matrix1 = random_ops.random_normal([5, 5], seed=42)
matrix2 = random_ops.random_normal([5, 5], seed=42)
lu1, p1 = linalg_ops.lu(matrix1)
lu2, p2 = linalg_ops.lu(matrix2)
lu1_val, p1_val, lu2_val, p2_val = self.evaluate([lu1, p1, lu2, p2])
self.assertAllEqual(lu1_val, lu2_val)
self.assertAllEqual(p1_val, p2_val)
class LuBenchmark(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(4096, 4096),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkLuOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
variables.global_variables_initializer().run()
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
min_iters=25,
name="lu_gpu_{shape}".format(shape=shape))
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.ops.map_fn.map_fn",
"numpy.random.rand",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.array",
"tensorflow.python.ops.array_ops.matrix_set_diag",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.array_ops.matrix_band_part",
"numpy.arange",
"numpy.reshape",
"numpy.sort",
"tensorflow.python.platform.benchmark.benchmark_config",
"numpy.empty",
"numpy.random.seed",
"numpy.vstack",
"tensorflow.python.framework.ops.device",
"numpy.triu",
"tensorflow.python.ops.linalg_ops.eye",
"tensorflow.python.ops.random_ops.random_normal",
"numpy.tile",
"numpy.ones",
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.linalg_ops.lu",
"numpy.append",
"numpy.tril",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.concat"
] |
[((8201, 8237), 'tensorflow.python.framework.test_util.run_v1_only', 'test_util.run_v1_only', (['"""b/120545219"""'], {}), "('b/120545219')\n", (8222, 8237), False, 'from tensorflow.python.framework import test_util\n'), ((10466, 10477), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (10475, 10477), False, 'from tensorflow.python.platform import test\n'), ((2584, 2633), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['x'], {'output_idx_type': 'output_idx_type'}), '(x, output_idx_type=output_idx_type)\n', (2597, 2633), False, 'from tensorflow.python.ops import linalg_ops\n'), ((2840, 2877), 'tensorflow.python.ops.array_ops.matrix_band_part', 'array_ops.matrix_band_part', (['lu', '(-1)', '(0)'], {}), '(lu, -1, 0)\n', (2866, 2877), False, 'from tensorflow.python.ops import array_ops\n'), ((3276, 3319), 'tensorflow.python.ops.array_ops.matrix_set_diag', 'array_ops.matrix_set_diag', (['lower', 'ones_diag'], {}), '(lower, ones_diag)\n', (3301, 3319), False, 'from tensorflow.python.ops import array_ops\n'), ((3365, 3402), 'tensorflow.python.ops.array_ops.matrix_band_part', 'array_ops.matrix_band_part', (['lu', '(0)', '(-1)'], {}), '(lu, 0, -1)\n', (3391, 3402), False, 'from tensorflow.python.ops import array_ops\n'), ((3423, 3452), 'tensorflow.python.ops.math_ops.matmul', 'math_ops.matmul', (['lower', 'upper'], {}), '(lower, upper)\n', (3438, 3452), False, 'from tensorflow.python.ops import math_ops\n'), ((4990, 5052), 'numpy.array', 'np.array', (['[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [10.0, 0.0, 5.0]]'], {}), '([[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [10.0, 0.0, 5.0]])\n', (4998, 5052), True, 'import numpy as np\n'), ((5683, 5742), 'numpy.array', 'np.array', (['[[1e-09, 1.0, 0.0], [1.0, 0.0, 0], [0.0, 1.0, 5]]'], {}), '([[1e-09, 1.0, 0.0], [1.0, 0.0, 0], [0.0, 1.0, 5]])\n', (5691, 5742), True, 'import numpy as np\n'), ((7223, 7260), 'numpy.array', 'np.array', (['[[[1.0, -1.0], [2.0, 5.0]]]'], {}), '([[[1.0, -1.0], [2.0, 5.0]]])\n', (7231, 7260), True, 'import numpy as np\n'), ((7391, 7454), 'numpy.array', 'np.array', (['[[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [2.0, 0.0, 5.0]]]'], {}), '([[[4.0, -1.0, 2.0], [-1.0, 6.0, 0], [2.0, 0.0, 5.0]]])\n', (7399, 7454), True, 'import numpy as np\n'), ((7572, 7590), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (7586, 7590), True, 'import numpy as np\n'), ((7606, 7638), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(5)', '(5)'], {}), '(batch_size, 5, 5)\n', (7620, 7638), True, 'import numpy as np\n'), ((7720, 7738), 'numpy.random.seed', 'np.random.seed', (['(52)'], {}), '(52)\n', (7734, 7738), True, 'import numpy as np\n'), ((7964, 7982), 'numpy.random.seed', 'np.random.seed', (['(64)'], {}), '(64)\n', (7978, 7982), True, 'import numpy as np\n'), ((7994, 8014), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (8008, 8014), True, 'import numpy as np\n'), ((8092, 8111), 'numpy.random.seed', 'np.random.seed', (['(129)'], {}), '(129)\n', (8106, 8111), True, 'import numpy as np\n'), ((8435, 8476), 'tensorflow.python.ops.random_ops.random_normal', 'random_ops.random_normal', (['[5, 5]'], {'seed': '(42)'}), '([5, 5], seed=42)\n', (8459, 8476), False, 'from tensorflow.python.ops import random_ops\n'), ((8491, 8532), 'tensorflow.python.ops.random_ops.random_normal', 'random_ops.random_normal', (['[5, 5]'], {'seed': '(42)'}), '([5, 5], seed=42)\n', (8515, 8532), False, 'from tensorflow.python.ops import random_ops\n'), ((8547, 8569), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', 
(['matrix1'], {}), '(matrix1)\n', (8560, 8569), False, 'from tensorflow.python.ops import linalg_ops\n'), ((8584, 8606), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['matrix2'], {}), '(matrix2)\n', (8597, 8606), False, 'from tensorflow.python.ops import linalg_ops\n'), ((9319, 9356), 'numpy.tile', 'np.tile', (['matrix', '(batch_shape + (1, 1))'], {}), '(matrix, batch_shape + (1, 1))\n', (9326, 9356), True, 'import numpy as np\n'), ((2321, 2365), 'numpy.reshape', 'np.reshape', (['perm_np', '(-1, perm_np.shape[-1])'], {}), '(perm_np, (-1, perm_np.shape[-1]))\n', (2331, 2365), True, 'import numpy as np\n'), ((2919, 2987), 'tensorflow.python.ops.linalg_ops.eye', 'linalg_ops.eye', (['num_rows'], {'batch_shape': 'batch_shape', 'dtype': 'lower.dtype'}), '(num_rows, batch_shape=batch_shape, dtype=lower.dtype)\n', (2933, 2987), False, 'from tensorflow.python.ops import linalg_ops\n'), ((3013, 3068), 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['[lower, eye[..., num_cols:]]'], {'axis': '(-1)'}), '([lower, eye[..., num_cols:]], axis=-1)\n', (3029, 3068), False, 'from tensorflow.python.ops import array_ops\n'), ((3211, 3243), 'numpy.append', 'np.append', (['batch_shape', 'num_rows'], {}), '(batch_shape, num_rows)\n', (3220, 3243), True, 'import numpy as np\n'), ((3745, 3784), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['perm', '[-1, num_rows]'], {}), '(perm, [-1, num_rows])\n', (3762, 3784), False, 'from tensorflow.python.ops import array_ops\n'), ((3815, 3872), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['verification', '[-1, num_rows, num_cols]'], {}), '(verification, [-1, num_rows, num_cols])\n', (3832, 3872), False, 'from tensorflow.python.ops import array_ops\n'), ((3993, 4051), 'tensorflow.python.ops.map_fn.map_fn', 'map_fn.map_fn', (['array_ops.invert_permutation', 'perm_reshaped'], {}), '(array_ops.invert_permutation, perm_reshaped)\n', (4006, 4051), False, 'from tensorflow.python.ops import map_fn\n'), ((4755, 4814), 'tensorflow.python.ops.array_ops.reshape', 'array_ops.reshape', (['permuted_verification_reshaped', 'lu_shape'], {}), '(permuted_verification_reshaped, lu_shape)\n', (4772, 4814), False, 'from tensorflow.python.ops import array_ops\n'), ((5878, 5897), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['data'], {}), '(data)\n', (5891, 5897), False, 'from tensorflow.python.ops import linalg_ops\n'), ((6279, 6298), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['data'], {}), '(data)\n', (6292, 6298), False, 'from tensorflow.python.ops import linalg_ops\n'), ((7328, 7367), 'numpy.vstack', 'np.vstack', (['(simple_array, simple_array)'], {}), '((simple_array, simple_array))\n', (7337, 7367), True, 'import numpy as np\n'), ((7466, 7511), 'numpy.vstack', 'np.vstack', (['(odd_sized_array, odd_sized_array)'], {}), '((odd_sized_array, odd_sized_array))\n', (7475, 7511), True, 'import numpy as np\n'), ((7754, 7786), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(5)', '(5)'], {}), '(batch_size, 5, 5)\n', (7768, 7786), True, 'import numpy as np\n'), ((8123, 8143), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (8137, 8143), True, 'import numpy as np\n'), ((8280, 8299), 'numpy.empty', 'np.empty', (['[0, 2, 2]'], {}), '([0, 2, 2])\n', (8288, 8299), True, 'import numpy as np\n'), ((8320, 8339), 'numpy.empty', 'np.empty', (['[2, 0, 0]'], {}), '([2, 0, 0])\n', (8328, 8339), True, 'import numpy as np\n'), ((9901, 9928), 'tensorflow.python.platform.test.is_gpu_available', 
'test.is_gpu_available', (['(True)'], {}), '(True)\n', (9922, 9928), False, 'from tensorflow.python.platform import test\n'), ((4603, 4663), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['[batch_indices, inv_perm_reshaped]'], {'axis': '(-1)'}), '([batch_indices, inv_perm_reshaped], axis=-1)\n', (4618, 4663), False, 'from tensorflow.python.ops import array_ops\n'), ((6017, 6029), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (6026, 6029), True, 'import numpy as np\n'), ((6418, 6430), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (6427, 6430), True, 'import numpy as np\n'), ((7824, 7856), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(5)', '(5)'], {}), '(batch_size, 5, 5)\n', (7838, 7856), True, 'import numpy as np\n'), ((8151, 8171), 'numpy.random.rand', 'np.random.rand', (['n', 'n'], {}), '(n, n)\n', (8165, 8171), True, 'import numpy as np\n'), ((9538, 9558), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (9548, 9558), False, 'from tensorflow.python.framework import ops\n'), ((9641, 9662), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['matrix'], {}), '(matrix)\n', (9654, 9662), False, 'from tensorflow.python.ops import linalg_ops\n'), ((2463, 2483), 'numpy.sort', 'np.sort', (['perm_vector'], {}), '(perm_vector)\n', (2470, 2483), True, 'import numpy as np\n'), ((6108, 6132), 'numpy.tril', 'np.tril', (['(1.0j * data)', '(-1)'], {}), '(1.0j * data, -1)\n', (6115, 6132), True, 'import numpy as np\n'), ((6167, 6191), 'numpy.triu', 'np.triu', (['(-1.0j * data)', '(1)'], {}), '(-1.0j * data, 1)\n', (6174, 6191), True, 'import numpy as np\n'), ((9779, 9808), 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['lu', 'p'], {}), '(lu, p)\n', (9801, 9808), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((10059, 10086), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (10069, 10086), False, 'from tensorflow.python.framework import ops\n'), ((10173, 10194), 'tensorflow.python.ops.linalg_ops.lu', 'linalg_ops.lu', (['matrix'], {}), '(matrix)\n', (10186, 10194), False, 'from tensorflow.python.ops import linalg_ops\n'), ((4408, 4434), 'tensorflow.python.ops.math_ops.range', 'math_ops.range', (['batch_size'], {}), '(batch_size)\n', (4422, 4434), False, 'from tensorflow.python.ops import math_ops\n'), ((5355, 5379), 'numpy.tril', 'np.tril', (['(1.0j * data)', '(-1)'], {}), '(1.0j * data, -1)\n', (5362, 5379), True, 'import numpy as np\n'), ((5416, 5440), 'numpy.triu', 'np.triu', (['(-1.0j * data)', '(1)'], {}), '(-1.0j * data, 1)\n', (5423, 5440), True, 'import numpy as np\n'), ((6797, 6871), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [2.0, 3.0, 4.0]]'], {'dtype': 'dtype'}), '([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [2.0, 3.0, 4.0]], dtype=dtype)\n', (6805, 6871), True, 'import numpy as np\n'), ((7015, 7149), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 2.0, 3.0]], [[1.0, 2.0, 3.0], [\n 3.0, 4.0, 5.0], [5.0, 6.0, 7.0]]]'], {'dtype': 'dtype'}), '([[[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 2.0, 3.0]], [[1.0, 2.0, \n 3.0], [3.0, 4.0, 5.0], [5.0, 6.0, 7.0]]], dtype=dtype)\n', (7023, 7149), True, 'import numpy as np\n'), ((9212, 9226), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (9219, 9226), True, 'import numpy as np\n'), ((9277, 9287), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (9284, 9287), True, 'import numpy as np\n'), ((9426, 9437), 
'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (9435, 9437), False, 'from tensorflow.python.framework import ops\n'), ((9487, 9515), 'tensorflow.python.platform.benchmark.benchmark_config', 'benchmark.benchmark_config', ([], {}), '()\n', (9513, 9515), False, 'from tensorflow.python.platform import benchmark\n'), ((9671, 9711), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (9709, 9711), False, 'from tensorflow.python.ops import variables\n'), ((10319, 10348), 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['lu', 'p'], {}), '(lu, p)\n', (10341, 10348), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((9943, 9954), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (9952, 9954), False, 'from tensorflow.python.framework import ops\n'), ((10006, 10034), 'tensorflow.python.platform.benchmark.benchmark_config', 'benchmark.benchmark_config', ([], {}), '()\n', (10032, 10034), False, 'from tensorflow.python.platform import benchmark\n'), ((10205, 10245), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (10243, 10245), False, 'from tensorflow.python.ops import variables\n')]
|
# Library for the dynamics of a lumen network
# The lumens are 2-dimensional and symmetric and connected by 1-dimensional tubes
#
# Created by <NAME>, 2018
# Modified by <NAME>--Serandour on 8/04/2019
"""
network.py conf.init
Defines the class network and associated functions
Imports
-------
Libraries : numpy, os, math
Created by <NAME>
Modified by <NAME> on 8/06/2018
Modified by <NAME>--Serandour on 8/04/2019
"""
import numpy as np
import math
import os
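# Instantiation sketch (folder names and time step are placeholder values, not taken
# from this file):
#   net = network(network_folder='config/example', out_path='out/', t_step=0.01,
#                 swelling=False, save_area_dat=True)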
class network:
def __init__(self, network_folder, out_path, t_step, tube_radius = 0.01, friction = 1, swelling = False, swelling_rate=0., save_area_dat=False):
"""
Initialization of the object network
All properties needed for the simulation are read and initialized
Input
-----
network_folder : str
out_path : str, path-like
t_step : float
            Time step of the simulation. Note that if the simulation is adaptive, this time step will change.
tube_radius : float, optional, default = 0.01
Radius of the tube connecting lumens. Define the condition for empty lumens.
friction : float, optional, default = 1
Friction constant for the fluid circulating through pipes.
swelling : bool, optional, default = False
Swelling option for the simulation. True if swelling is included, False otherwise.
swelling_rate : float, optional, default = 0.
            Swelling rate value in case the swelling is considered. Make sure the rate is not too big, to avoid non-converging simulations.
save_area_dat : bool, optional, default = False
Save area option. True if areas are saved in area.dat, False otherwise.
"""
self.network_folder = network_folder
# Reading properties of the lumen
self.gamma_lumen, self.gamma_contact, self.area = np.loadtxt(os.path.join(network_folder, 'lumen.dat'), dtype = float, usecols = [0,2,3], unpack = True)
# Reading links between two lumen
self.lumen_lumen = self.read_lumen_lumen(os.path.join(network_folder, 'lumen_lumen.dat'))
# Reading links between bridge and lumen
self.bridge_lumen, self.num_bridges = self.read_bridge_lumen(os.path.join(network_folder, 'bridge_lumen.dat'))
# Reading links between two bridges
self.bridge_bridge, self.num_bridges = self.read_bridge_bridge(os.path.join(network_folder, 'bridge_bridge.dat'), self.num_bridges)
# Surface tension ratio
self.alpha = self.gamma_contact/(2*self.gamma_lumen)
self.delta = np.full(len(self.alpha), 1) # Possibility of asymmetric lumen is not included
# Resistances
self.tube_radius = tube_radius # Radius of the tube connecting the lumen and the bridges
self.friction = friction # Friction coefficient; friction * length = resistance
# Opening angle of the lumen (angle between curvature and tube)
self.theta = self.set_theta()
# Area factor for expressing the pressure in terms of the area instead of the radius
self.area_factor = self.set_area_factor()
# Ending time: time at which only one lumen is remaining
self.end_time = 0
# Time step for the output of the area evolution
self.time_step = t_step
# Creating output file for the area evolution, events, error messages
self.save_area(start = True, out_path = out_path)
self.save_event('', start = True, out_path = out_path)
self.save_error('', start = True, out_path = out_path)
# Area distribution after only one lumen is remaining
self.final_area = []
# Current time step of the simulation
self.current_time = 0
# List of empty lumen (area < tube_radius **2)
self.empty_list = np.zeros(len(self.alpha))
# Swelling
self.swelling_bool = swelling
self.swelling_rate = swelling_rate
# Save area
self.save_area_dat = save_area_dat
############################################################################################################################
########################################################## Dynamics ########################################################
############################################################################################################################
def flux(self, t, state):
"""
Determines the flux/ area change for each lumen of the network, main function of network.py
Input
-----
self : network object
Needs to be called by a class object
t : float
            Actual time step (not needed for the calculation of the flux, but required by the integration method used in network_simulation.py)
state : float array
The current area of the lumens
Returns
-------
flux : float array
Contains the area change for each lumen in dt
"""
# Initialization of the array containing the area change (index == lumen ID)
flux = []
self.current_time = t
for i in range(len(self.alpha)):
flux.append(0)
        # If only one lumen remains -> end of simulation, flux is zero (needed because the integration method used does not allow a dynamic stop)
if(np.sum(self.empty_list) >= len(self.alpha) - 1):
if(self.end_time == 0):
# Setting the end time for the output file area.log
self.end_time = t
# more than one lumen remaining: calculation of the flux
else:
# Adapting network to new state: Empty lumen are removed and graph is reconnected
self.area = state
self.remove_empty_lumen()
# Area change between directly connected lumen
flux = self.flux_lumen(flux)
            # Calculating artificial pressure at each bridge; linear system of equations with flux(bridge) = 0, i.e. the bridge does not gain or lose area
pressure_bridges = self.pressure_bridges()
# Area change between lumen-bridges
flux = self.flux_bridges(flux, pressure_bridges)
# Area change due to swelling
if self.swelling_bool :
flux = self.flux_swelling(flux)
# Saving area for the time step given in the configuration file
if self.save_area_dat :
self.save_area()
self.t_old = t
if(np.abs(np.sum(flux)) > self.tube_radius ** 2):
error = 'total flux is non-zero: total flux = %f' % (np.sum(flux))
self.save_error(error)
return flux
def flux_lumen(self,flux):
"""
Determines the flux/ area change for each lumen due to the connection between lumen and lumen
Input
-----
self network object
needs to be called by a class object
flux float array
vector containing the area change for each lumen; index = lumen ID
Returns
-------
flux float array
area changes due to lumen-lumen connection added to the vector passed
"""
# for each connection between two lumen
for line in range(len(self.lumen_lumen)):
lumen_1 = int (self.lumen_lumen[line][0]) # first lumen
lumen_2 = int (self.lumen_lumen[line][1]) # second lumen
# flux from lumen 2 to lumen 1
fl = (self.pressure(lumen_2) - self.pressure(lumen_1))*self.friction/self.lumen_lumen[line][2]
flux[lumen_1] += fl
flux[lumen_2] -= fl
return flux
def pressure_bridges(self):
"""
Determines the pressure at each bridge
        For each bridge the total flux is 0, meaning that the bridge does not gain or lose area.
        This gives a linear system of equations, which can be solved.
The connections are taken from the files bridge_lumen.dat and bridge_bridge.dat
For Information about the equations see the documentation to the code
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
pressure_bridges : float array
Pressure at each bridge
"""
        R_sum = np.zeros(self.num_bridges, dtype = float)  # sum of the resistances around one bridge
P_over_R_sum = np.zeros(self.num_bridges, dtype = float) # sum of pressure over resistance between one bridge and all directly connected lumen
matrix_bridges = np.zeros([self.num_bridges, self.num_bridges], dtype= float) # matrix to calculate the pressure at each bridge
# For each connection between bridge and lumen
for line in self.bridge_lumen:
bridge = int(line[0])
lumen = int(line[1])
R_sum[bridge] += 1./line[2]*self.friction
P_over_R_sum[bridge] += self.pressure(lumen)/line[2]*self.friction
# For each connection between bridge and bridge
for line in self.bridge_bridge:
bridge1 = int(line[0])
bridge2 = int(line[1])
matrix_bridges[bridge1][bridge2] = 1./line[2]*self.friction
matrix_bridges[bridge2][bridge1] = 1./line[2]*self.friction
R_sum[bridge1] += 1./line[2]*self.friction
R_sum[bridge2] += 1./line[2]*self.friction
for line in range(self.num_bridges):
matrix_bridges[line][line] = -R_sum[line]
# Solving linear problem with the pressure at each bridge as solution
pressure_bridges = np.linalg.solve(matrix_bridges, -P_over_R_sum)
return pressure_bridges;
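    # Note added for clarity (not part of the original solver): for every bridge b the
    # assembled system states that the net flux into b vanishes,
    #     sum_lumen (P_lumen - P_b)*c_bl + sum_bridge_k (P_k - P_b)*c_bk = 0,
    # where c = friction/length is the coefficient used throughout this class. Rearranged,
    #     -R_sum[b]*P_b + sum_k matrix_bridges[b][k]*P_k = -P_over_R_sum[b],
    # which is exactly the matrix equation handed to np.linalg.solve above.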
def flux_bridges(self, flux, pressure_bridges):
"""
Determines the flux/ area change for each lumen due to the connection between lumen and bridge
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
flux : float array
Area changes due to bridge-lumen connection added to the vector passed
"""
# Area change in one bridge; should be 0; calculated as control value
flux_bridge = np.zeros(self.num_bridges, dtype = float)
# For each connection between bridge and bridge
for line in self.bridge_bridge:
bridge1 = int(line[0])
bridge2 = int(line[1])
fb = (pressure_bridges[bridge2] - pressure_bridges[bridge1])*self.friction/line[2]
flux_bridge[bridge1] += fb
flux_bridge[bridge2] -= fb
# For each connection between bridge and lumen
for line in self.bridge_lumen:
bridge = int(line[0])
lumen = int(line[1])
fl = (pressure_bridges[bridge] - self.pressure(lumen))*self.friction/line[2]
flux[lumen] += fl
flux_bridge[bridge] -= fl
for i in range(len(flux_bridge)):
if (np.abs(flux_bridge[i]) > self.tube_radius ** 2):
error = 'total flux of bridge %d is non-zero: total flux = %f' % (i,flux_bridge[i])
self.save_error(error)
return flux
def flux_swelling(self, flux) :
"""
        Determines the flux/ area change for each lumen due to swelling
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
flux : float array
            Area changes due to swelling added to the vector passed
"""
# for each lumen (lumen is the index of the lumen's area)
for lumen in range(len(self.area)) :
# if not empty
if not self.area[lumen] < 2*self.tube_radius ** 2 :
# then add the swelling contribution
flux[lumen] += self.swelling(lumen)
return flux
############################################################################################################################
###################################################### Removing Functions #####################################################
############################################################################################################################
def remove_empty_lumen(self):
"""
Determines and removes empty lumen
Calls a function to obtain a list of empty lumen and passes the list to a function to remove them and reconnect the network
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
no return
"""
empty_lumen_list = []
# Creating a list of empty lumen
empty_lumen_list = self.get_empty_lumen()
# Removing empty lumen and reconnecting the network
if (len(empty_lumen_list) > 0 ):
event = 'empty lumen: ' + ' '.join(map(str, empty_lumen_list))
#print event
self.save_event(event)
self.remove_lumen(empty_lumen_list)
return;
def remove_lumen(self, lumen_to_remove):
"""
Removes the lumen that are passed and connects the neighbors of these lumen
Input
-----
self : network object
Needs to be called by a class object
lumen_to_remove : int list
List of lumen to be removed
Returns
-------
no return
"""
# For each lumen that has to be removed
for lumen in lumen_to_remove:
neighbours = self.get_neighbours(lumen) # List of connected lumen
bridges = self.get_bridges(lumen) # List of connected bridges
self.save_event('lumen ' + str(lumen) + ' neighbours ' + str(neighbours))
self.save_event('lumen ' + str(lumen) + ' bridges ' + str(bridges))
            # Lumen had exactly two connections: it disappears and the two connected parts are linked directly; the resistance of the new link is the sum of the resistances of the two previous connections
if(len(neighbours) + len(bridges) == 2):
# Lumen was connected to two lumen -> new connection between lumen and lumen
if(len(neighbours) == 2):
self.create_link([neighbours[0][0], neighbours[1][0], neighbours[0][1] + neighbours[1][1]])
#print 'lumen_lumen connexion (' + str(neighbours[0][0]) + ', ' + str(neighbours[1][0]) + ')'
# Lumen was connected to a lumen and a bridge -> new connection between lumen and bridge
if(len(neighbours) == 1 and len(bridges)==1):
self.create_bridge_lumen([bridges[0][0], neighbours[0][0], bridges[0][1] + neighbours[0][1]])
#print 'lumen_bridge connexion (' + str(bridges[0][0]) + ', ' + str(neighbours[0][0]) + ')'
# Lumen was connected to two bridges -> new connection between bridge and bridge
if(len(bridges)==2):
self.create_bridge_bridge([bridges[0][0], bridges[1][0], bridges[0][1] + bridges[1][1]])
#print 'bridge_bridge connexion (' + str(bridges[0][0]) + ', ' + str(bridges[1][0]) + ')'
self.create_bridge(neighbours, bridges, lumid=lumen)
# Lumen had more than two connections -> becomes a bridge, the resistances remain the same but the connections are changed to connections to a bridge
if(len(neighbours) + len(bridges) > 2):
self.create_bridge(neighbours, bridges, lumid=lumen)
return;
def remove_link(self, lumen_1, lumen_2):
"""
Removes a connection between two lumen
Input
-----
self : network object
Needs to be called by a class object
lumen_1 : int
First lumen of the connection
lumen_2 :
Second lumen of the connection
Returns
-------
no return
"""
# Due to data structure first lumen must be smaller than second lumen
if(lumen_1 > lumen_2):
n = lumen_1
lumen_1 = lumen_2
lumen_2 = n
# Find connection in lumen_lumen file and remove it
line = 0
# For each line in lumen_lumen until connection is found
while (line < len(self.lumen_lumen)):
            # If the connection is found, remove it
if(self.lumen_lumen[line][0] == lumen_1 and self.lumen_lumen[line][1] == lumen_2):
event = 'link lumen %d to lumen %d removed' % (lumen_1, lumen_2)
#print event
self.save_event(event)
link = [lumen_1, lumen_2, self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
break;
# Look at next line
else: line += 1
############################################################################################################################
###################################################### Get Functions #####################################################
############################################################################################################################
def get_empty_lumen(self):
"""
Gets the IDs of the empty lumen
Empty means that the area is smaller than the tube_radius^2
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
empty_lumen_list : int list
Contains the IDs of the empty lumens
"""
empty_lumen_list = []
# For each lumen ID
for i in range(len(self.area)):
            # If the area is smaller than the threshold
if(self.area[i] < self.tube_radius ** 2 and self.empty_list[i] == 0):
self.empty_list[i] = 1
self.area[i] = 0
empty_lumen_list.append(i)
return empty_lumen_list
def get_neighbours(self, lumen):
"""
Gets the lumen that are directly connected to the lumen passed on and deletes the connections
Input
-----
self : network object
Needs to be called by a class object
lumen : int
ID of a lumen
Returns
-------
neighbour_list : int list
ID of all lumen that are directly connected to the lumen passed on
"""
neighbour_list = []
line = 0
# Going through links in lumen_lumen.dat
while line < len(self.lumen_lumen) and self.lumen_lumen[line][0] < lumen :
if self.lumen_lumen[line][1] == lumen :
neighbour_list.append([self.lumen_lumen[line][0], self.lumen_lumen[line][2]])
event = 'link lumen %d to lumen %d removed' % (self.lumen_lumen[line][0], lumen)
self.save_event(event)
link = [self.lumen_lumen[line][0], self.lumen_lumen[line][1], self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
else : line += 1
while line < len(self.lumen_lumen) and self.lumen_lumen[line][0] < lumen :
line += 1
while(line < len(self.lumen_lumen) and self.lumen_lumen[line][0] == lumen):
neighbour_list.append([self.lumen_lumen[line][1], self.lumen_lumen[line][2]])
event = 'link lumen %d to lumen %d removed' % (lumen, self.lumen_lumen[line][1])
self.save_event(event)
link = [self.lumen_lumen[line][0], self.lumen_lumen[line][1], self.lumen_lumen[line][2]]
self.lumen_lumen.remove(link)
return neighbour_list
def get_bridges(self, lumen):
"""
Gets the bridges that are directly connected to the lumen passed on
Input
-----
self : network object
Needs to be called by a class object
lumen : int
ID of a lumen
Returns
-------
        bridge_list : int list
            IDs of all bridges that are directly connected to the lumen passed on
"""
bridge_list = []
line = 0
# Going through the links in bridge_lumen.dat
while(line < len(self.bridge_lumen)):
if (self.bridge_lumen[line][1] == lumen):
bridge_list.append([self.bridge_lumen[line][0], self.bridge_lumen[line][2]])
event = 'link bridge %d to lumen %d removed' % (self.bridge_lumen[line][0], lumen)
self.save_event(event)
self.bridge_lumen.remove(self.bridge_lumen[line])
else: line += 1
return bridge_list
############################################################################################################################
#################################################### Creating Functions ###################################################
############################################################################################################################
def create_link(self, link):
"""
Creates a link between two lumen in lumen_lumen.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID lumen1, ID lumen2, length]
Returns
-------
no return
"""
# no self-loops allowed
if(len(link) == 4 and link[0] != link[1]):
# Ensuring: lumen_1 < lumen_2
            if(link[0] < link[1]):
lumen_1 = link[0]
lumen_2 = link[1]
else:
lumen_1 = link[1]
lumen_2 = link[0]
length = link[2]
line = 0
# Finding line in lumen_lumen.dat, to keep the sorting
while(line < len(self.lumen_lumen) and lumen_1 > self.lumen_lumen[line][0]): line += 1
if(line < len(self.lumen_lumen) - 1):
while(line < len(self.lumen_lumen) and lumen_2 > self.lumen_lumen[line][1] and lumen_1 == self.lumen_lumen[line][0]): line += 1
# Creating the link in lumen_lumen.dat
self.lumen_lumen.append([lumen_1,lumen_2, length])
self.lumen_lumen.sort()
event = 'link lumen %d to lumen %d created' % (lumen_1,lumen_2)
self.save_event(event)
return;
def create_bridge_lumen(self, link):
"""
Creates a link between a lumen and a bridge in bridge_lumen.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID bridge, ID lumen, length]
Returns
-------
no return
"""
bridge = link[0]
lumen = link[1]
length = link[2]
line = 0
# Creating the link in bridge_lumen.dat
self.bridge_lumen.append(link)
self.bridge_lumen.sort()
event = 'link bridge %d to lumen %d created' % (bridge,lumen)
self.save_event(event)
return;
def create_bridge_bridge(self, link):
"""
Creates a link between two bridges in bridge_bridge.dat
Input
-----
self : network object
Needs to be called by a class object
link : float array
[ID bridge1, ID bridge2, length]
Returns
-------
no return
"""
if(link[0] == link[1]): return;
if(link[0] < link[1]):
bridge_1 = link[0]
bridge_2 = link[1]
else:
bridge_1 = link[1]
bridge_2 = link[0]
length = link[2]
line = 0
# Creating the link in bridge_bridge.dat
self.bridge_bridge.append([bridge_1,bridge_2, length])
self.bridge_bridge.sort()
event = 'link bridge %d to bridge %d created' % (bridge_1,bridge_2)
self.save_event(event)
return;
def create_bridge(self, lumen, bridge, lumid):
"""
Creates a new bridge connected with the lumen and bridges passed on
Input
-----
self : network object
Needs to be called by a class object
lumen : int list
[[lumen ID, length], [lumen ID, length],.....]
lumen IDs to which the new bridge should be connected to
bridge : int list
[[bridge ID, length], [bridge ID, length],.....]
            bridge IDs to which the new bridge should be connected to
        lumid : int
            ID of the removed lumen that the new bridge replaces
Returns
-------
no return
"""
#####
bridge_conversionfile = os.path.join(self.network_folder,'bridgesconversion.txt')
# ID of the new bridge
bridge_number = self.num_bridges
# Bridge ID counter, contains the ID of the next new bridge
self.num_bridges += 1
event = 'new bridge %d' % (bridge_number) + ' (' + str(lumid) + ')'
self.save_event(event)
line = 0
lumen.sort()
bridge.sort()
# For each lumen that should be connected to the new bridge
for i in range(len(lumen)):
new_link = [bridge_number, lumen[i][0], lumen[i][1]]
# Create link in bridge_lumen.dat
self.create_bridge_lumen(new_link)
        # For each bridge that should be connected to the new bridge
for i in range(len(bridge)):
new_link = [bridge[i][0], bridge_number, bridge[i][1]]
# Create link in bridge_bridge.dat
self.create_bridge_bridge(new_link)
open(bridge_conversionfile, 'a').write(str(bridge_number) + ' ' + str(lumid)+ '\n')
return;
############################################################################################################################
################################ Geometric Functions for area and Pressure ###############################################
############################################################################################################################
def set_theta(self):
"""
Sets the angle theta
Calculates the angle theta, angle between the lumen and the tube
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
theta : float list
Theta value for each lumen
"""
theta = []
for i in range(len(self.alpha)):
            #cos = (2*self.alpha[i]-(4*self.alpha[i]**2-self.delta[i]**2+1)/(4*self.alpha[i]))/self.delta[i] ## Old version, for asymmetric lumen
#theta.append(math.acos(cos))
theta.append(np.arccos(self.alpha[i]))
return theta;
def set_area_factor(self):
"""
Sets the area factor, needed to express the pressure in terms of the area instead of the curvature radius
Input
-----
self : network object
Needs to be called by a class object
Returns
-------
area_factor : float list
Area factor for each lumen
"""
area_factor = []
for i in range(len(self.alpha)):
area_factor.append(np.sqrt((2*self.theta[i]-np.sin(2*self.theta[i]))))
return area_factor;
def opening_radius(self, lumen):
"""
Calculates the length/2 parallel to the 'tube' where the membrane is not attached for a given lumen
Input
-----
lumen : int
ID of the lumen
Returns
-------
radius : float
Length/2 of the opening radius
"""
return np.sqrt(2*self.area[lumen]/(2*self.theta[lumen]-np.sin(2*self.theta[lumen])))*np.sin(self.theta[lumen])
def get_area(self, lumen):
"""
Calculates the area in one half of the lumen (for symmetric lumen)
Input
-----
lumen : int
ID of the lumen
Returns
-------
area : float
Area/2 of the lumen
"""
area = self.area[lumen]
return area
def pressure(self,lumen):
"""
Calculates the pressure inside the lumen (for symmetric lumen)
Input
-----
lumen : int
ID of the lumen
Returns
-------
pressure : float
Pressure of the lumen
"""
area = self.get_area(lumen)
# Avoid dividing by zero
if(area < 0.1 * self.tube_radius**2 ):
error = 'division by zero in pressure: lumen ID: %d' % (lumen)
self.save_error(error)
pressure = self.gamma_lumen[lumen]*self.area_factor[lumen]/np.sqrt(area)
return pressure
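    # Note added for clarity: combining set_theta() and set_area_factor(), the value returned
    # above is the closed form
    #     P(lumen) = gamma_lumen * sqrt(2*theta - sin(2*theta)) / sqrt(area),
    # e.g. for alpha = 0 (theta = pi/2) this reduces to P = gamma_lumen * sqrt(pi / area).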
############################################################################################################################
################################################# Reading Functions ########################################################
############################################################################################################################
def read_lumen_lumen(self, lumen_lumen_file):
"""
Reading the file with links between two lumens
Input
-----
lumen_lumen_file : str
File path to file with the links between two lumens
Returns
-------
lumen_lumen : float list [lumen1, lumen2, length]
Information about the links between two lumens
"""
if (os.path.getsize(lumen_lumen_file)>0): # If the file is not empty
lumen_1, lumen_2 = np.loadtxt(lumen_lumen_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(lumen_lumen_file, dtype = float, usecols = [2])
lumen_lumen = np.column_stack([lumen_1, lumen_2, length]).tolist()
else:
lumen_lumen = []
return lumen_lumen
def read_bridge_lumen(self, bridge_lumen_file):
"""
Reading the file with links between bridge and lumen
Input
-----
bridge_lumen_file : str
File path to file with the links between bridge and lumen
Returns
-------
bridge_lumen : float list [bridge, lumen, length]
Information about the links between bridge and lumen
num_bridges : int
Number of bridge_lumen links
"""
with open(bridge_lumen_file, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
if ('#' in last_line): # If the file is empty
bridge_lumen = []
num_bridges = 0 # number of existing bridges
else:
bridge, lumen = np.loadtxt(bridge_lumen_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(bridge_lumen_file, dtype = float, usecols = [2])
bridge_lumen = np.column_stack([bridge, lumen, length]).tolist()
num_bridges = max(bridge)+1 # number of existing bridges
return bridge_lumen, num_bridges
def read_bridge_bridge(self, bridge_bridge_file, num_bridges):
"""
Reading the file with links between two bridge
Input
-----
bridge_bridge_file : str
File path to file with the links between two bridge
Returns
-------
bridge_bridge : float list [bridge1, bridge2, length]
Information about the links between two bridge
num : int
Number of bridge_bridge links
"""
with open(bridge_bridge_file, 'r') as f:
lines = f.read().splitlines()
last_line = lines[-1]
        if ('#' in last_line): # If the file is empty
bridge_bridge = []
num = num_bridges
else:
bridge1, bridge2 = np.loadtxt(bridge_bridge_file, dtype = int, usecols = [0,1], unpack = True)
length = np.loadtxt(bridge_bridge_file, dtype = float, usecols = [2])
bridge_bridge = np.column_stack([bridge1, bridge2, length]).tolist()
            if (max(bridge2)+1 > num_bridges): num = max(bridge2)+1
            else: num = num_bridges
return bridge_bridge, num
############################################################################################################################
################################################# Output functions #########################################################
############################################################################################################################
def save_event(self, event, start = False, out_path = ''):
"""
Saves each event in the output folder in the file event.dat
Events like a lumen disappearing, reconnections in the graph
Input
-----
event : str
Message of the event
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_event = '# Saves each event during the simulation; event is a disappearing lumen, graph reconnection \n'
self.file_event = os.path.join(out_path, 'event.dat')
fevent = open(self.file_event, 'w')
fevent.write(header_event)
fevent.close()
else:
fevent = open(self.file_event, 'a')
fevent.write('%.5f' % self.current_time)
fevent.write(' ')
fevent.write(event)
fevent.write('\n')
fevent.close()
return;
def save_error(self, error, start = False, out_path = ''):
"""
Saves errors in the output folder in the file error.dat
Errors like volume loss
Input
-----
error : string
Message of the event
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_error = '# Saves each warning like volume loss \n'
self.file_error = os.path.join(out_path, 'error.dat')
ferror = open(self.file_error, 'w')
ferror.write(header_error)
ferror.close()
else:
ferror = open(self.file_error, 'a')
ferror.write('%.5f' % self.current_time)
ferror.write(' ')
ferror.write(error)
ferror.write('\n')
ferror.close()
return;
def save_area(self, start = False, out_path = ''):
"""
Saves the volume evolution in the output folder in the file area.dat
Input
-----
start : boolean
True: File is created
False: the message is stored in the file
Returns
------
no return
"""
if(start):
header_volume = '# Saves the volume evolution of each lumen for the time step %f \n' %(self.time_step)
self.file_area = os.path.join(out_path, 'area.dat')
farea = open(self.file_area, 'w')
farea.write(header_volume)
farea.close()
self.t_old = 0
else:
farea = open(self.file_area, 'a')
farea.write('%.5f' % self.current_time)
farea.write(' ')
farea.write(' '.join(map(str, self.area)))
farea.write('\n')
farea.close()
return;
############################################################################################################################
################################################# Swelling functions #######################################################
############################################################################################################################
def swelling(self, lumen) :
"""
self.swelling(lumen)
        Calculates the input flux for the area of a given lumen, due to swelling.
Input
-----
lumen : int
Index of the lumen
"""
area = self.get_area(lumen)
theta = self.theta[lumen]
flux_swelling = self.swelling_rate * 4 * theta * np.sqrt(area)/ self.area_factor[lumen]
#print flux_swelling
return flux_swelling
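# Illustrative usage sketch (added; not part of the original file, and the names `net` and
# `t_max` are hypothetical): because flux(t, state) has the (t, y) signature that SciPy's
# ODE drivers expect, an instance of this class could be integrated e.g. with
#     from scipy.integrate import solve_ivp
#     sol = solve_ivp(net.flux, (0.0, t_max), net.area)
# where net.area supplies the initial lumen areas.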
|
[
"numpy.abs",
"os.path.getsize",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.arccos",
"os.path.join",
"numpy.column_stack",
"numpy.sum",
"numpy.zeros",
"numpy.sin",
"numpy.loadtxt"
] |
[((8944, 8983), 'numpy.zeros', 'np.zeros', (['self.num_bridges'], {'dtype': 'float'}), '(self.num_bridges, dtype=float)\n', (8952, 8983), True, 'import numpy as np\n'), ((9052, 9091), 'numpy.zeros', 'np.zeros', (['self.num_bridges'], {'dtype': 'float'}), '(self.num_bridges, dtype=float)\n', (9060, 9091), True, 'import numpy as np\n'), ((9205, 9264), 'numpy.zeros', 'np.zeros', (['[self.num_bridges, self.num_bridges]'], {'dtype': 'float'}), '([self.num_bridges, self.num_bridges], dtype=float)\n', (9213, 9264), True, 'import numpy as np\n'), ((10261, 10307), 'numpy.linalg.solve', 'np.linalg.solve', (['matrix_bridges', '(-P_over_R_sum)'], {}), '(matrix_bridges, -P_over_R_sum)\n', (10276, 10307), True, 'import numpy as np\n'), ((10950, 10989), 'numpy.zeros', 'np.zeros', (['self.num_bridges'], {'dtype': 'float'}), '(self.num_bridges, dtype=float)\n', (10958, 10989), True, 'import numpy as np\n'), ((26410, 26468), 'os.path.join', 'os.path.join', (['self.network_folder', '"""bridgesconversion.txt"""'], {}), "(self.network_folder, 'bridgesconversion.txt')\n", (26422, 26468), False, 'import os\n'), ((2057, 2098), 'os.path.join', 'os.path.join', (['network_folder', '"""lumen.dat"""'], {}), "(network_folder, 'lumen.dat')\n", (2069, 2098), False, 'import os\n'), ((2250, 2297), 'os.path.join', 'os.path.join', (['network_folder', '"""lumen_lumen.dat"""'], {}), "(network_folder, 'lumen_lumen.dat')\n", (2262, 2297), False, 'import os\n'), ((2426, 2474), 'os.path.join', 'os.path.join', (['network_folder', '"""bridge_lumen.dat"""'], {}), "(network_folder, 'bridge_lumen.dat')\n", (2438, 2474), False, 'import os\n'), ((2592, 2641), 'os.path.join', 'os.path.join', (['network_folder', '"""bridge_bridge.dat"""'], {}), "(network_folder, 'bridge_bridge.dat')\n", (2604, 2641), False, 'import os\n'), ((5691, 5714), 'numpy.sum', 'np.sum', (['self.empty_list'], {}), '(self.empty_list)\n', (5697, 5714), True, 'import numpy as np\n'), ((29767, 29792), 'numpy.sin', 'np.sin', (['self.theta[lumen]'], {}), '(self.theta[lumen])\n', (29773, 29792), True, 'import numpy as np\n'), ((30856, 30869), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (30863, 30869), True, 'import numpy as np\n'), ((31757, 31790), 'os.path.getsize', 'os.path.getsize', (['lumen_lumen_file'], {}), '(lumen_lumen_file)\n', (31772, 31790), False, 'import os\n'), ((31853, 31921), 'numpy.loadtxt', 'np.loadtxt', (['lumen_lumen_file'], {'dtype': 'int', 'usecols': '[0, 1]', 'unpack': '(True)'}), '(lumen_lumen_file, dtype=int, usecols=[0, 1], unpack=True)\n', (31863, 31921), True, 'import numpy as np\n'), ((31948, 32002), 'numpy.loadtxt', 'np.loadtxt', (['lumen_lumen_file'], {'dtype': 'float', 'usecols': '[2]'}), '(lumen_lumen_file, dtype=float, usecols=[2])\n', (31958, 32002), True, 'import numpy as np\n'), ((33042, 33111), 'numpy.loadtxt', 'np.loadtxt', (['bridge_lumen_file'], {'dtype': 'int', 'usecols': '[0, 1]', 'unpack': '(True)'}), '(bridge_lumen_file, dtype=int, usecols=[0, 1], unpack=True)\n', (33052, 33111), True, 'import numpy as np\n'), ((33138, 33193), 'numpy.loadtxt', 'np.loadtxt', (['bridge_lumen_file'], {'dtype': 'float', 'usecols': '[2]'}), '(bridge_lumen_file, dtype=float, usecols=[2])\n', (33148, 33193), True, 'import numpy as np\n'), ((34228, 34298), 'numpy.loadtxt', 'np.loadtxt', (['bridge_bridge_file'], {'dtype': 'int', 'usecols': '[0, 1]', 'unpack': '(True)'}), '(bridge_bridge_file, dtype=int, usecols=[0, 1], unpack=True)\n', (34238, 34298), True, 'import numpy as np\n'), ((34325, 34381), 'numpy.loadtxt', 'np.loadtxt', 
(['bridge_bridge_file'], {'dtype': 'float', 'usecols': '[2]'}), '(bridge_bridge_file, dtype=float, usecols=[2])\n', (34335, 34381), True, 'import numpy as np\n'), ((35641, 35676), 'os.path.join', 'os.path.join', (['out_path', '"""event.dat"""'], {}), "(out_path, 'event.dat')\n", (35653, 35676), False, 'import os\n'), ((36646, 36681), 'os.path.join', 'os.path.join', (['out_path', '"""error.dat"""'], {}), "(out_path, 'error.dat')\n", (36658, 36681), False, 'import os\n'), ((37605, 37639), 'os.path.join', 'os.path.join', (['out_path', '"""area.dat"""'], {}), "(out_path, 'area.dat')\n", (37617, 37639), False, 'import os\n'), ((6949, 6961), 'numpy.sum', 'np.sum', (['flux'], {}), '(flux)\n', (6955, 6961), True, 'import numpy as np\n'), ((7054, 7066), 'numpy.sum', 'np.sum', (['flux'], {}), '(flux)\n', (7060, 7066), True, 'import numpy as np\n'), ((11730, 11752), 'numpy.abs', 'np.abs', (['flux_bridge[i]'], {}), '(flux_bridge[i])\n', (11736, 11752), True, 'import numpy as np\n'), ((28579, 28603), 'numpy.arccos', 'np.arccos', (['self.alpha[i]'], {}), '(self.alpha[i])\n', (28588, 28603), True, 'import numpy as np\n'), ((38854, 38867), 'numpy.sqrt', 'np.sqrt', (['area'], {}), '(area)\n', (38861, 38867), True, 'import numpy as np\n'), ((32033, 32076), 'numpy.column_stack', 'np.column_stack', (['[lumen_1, lumen_2, length]'], {}), '([lumen_1, lumen_2, length])\n', (32048, 32076), True, 'import numpy as np\n'), ((33225, 33265), 'numpy.column_stack', 'np.column_stack', (['[bridge, lumen, length]'], {}), '([bridge, lumen, length])\n', (33240, 33265), True, 'import numpy as np\n'), ((34414, 34457), 'numpy.column_stack', 'np.column_stack', (['[bridge1, bridge2, length]'], {}), '([bridge1, bridge2, length])\n', (34429, 34457), True, 'import numpy as np\n'), ((29209, 29234), 'numpy.sin', 'np.sin', (['(2 * self.theta[i])'], {}), '(2 * self.theta[i])\n', (29215, 29234), True, 'import numpy as np\n'), ((29737, 29766), 'numpy.sin', 'np.sin', (['(2 * self.theta[lumen])'], {}), '(2 * self.theta[lumen])\n', (29743, 29766), True, 'import numpy as np\n')]
|
# Illustrate upsampling in 2d
# Code from <NAME>
# https://machinelearningmastery.com/generative_adversarial_networks/
import tensorflow as tf
from tensorflow import keras
from numpy import asarray
#from keras.models import Sequential
from tensorflow.keras.models import Sequential
#from keras.layers import UpSampling2D
from tensorflow.keras.layers import UpSampling2D
X = asarray([[1, 2],
[3, 4]])
X = asarray([[1, 2, 3],
[4, 5, 6],
[7,8,9]])
print(X)
nr = X.shape[0]
nc = X.shape[1]
# reshape input data into one sample with a single channel
X = X.reshape((1, nr, nc, 1))
model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1))) # nearest neighbor
yhat = model.predict(X)
yhat = yhat.reshape((2*nr, 2*nc))
print(yhat)
model = Sequential()
model.add(UpSampling2D(input_shape=(nr, nc, 1), interpolation='bilinear'))
yhat = model.predict(X)
yhat = yhat.reshape((2*nr, 2*nc))
print(yhat)
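# Expected output (added, worked by hand): nearest-neighbour upsampling repeats every input
# pixel in a 2x2 block, so the 3x3 input defined above becomes the 6x6 array
# [[1 1 2 2 3 3]
#  [1 1 2 2 3 3]
#  [4 4 5 5 6 6]
#  [4 4 5 5 6 6]
#  [7 7 8 8 9 9]
#  [7 7 8 8 9 9]]
# whereas the bilinear model interpolates between neighbouring pixels instead of repeating them.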
|
[
"tensorflow.keras.layers.UpSampling2D",
"numpy.asarray",
"tensorflow.keras.models.Sequential"
] |
[((380, 405), 'numpy.asarray', 'asarray', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (387, 405), False, 'from numpy import asarray\n'), ((415, 457), 'numpy.asarray', 'asarray', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (422, 457), False, 'from numpy import asarray\n'), ((616, 628), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (626, 628), False, 'from tensorflow.keras.models import Sequential\n'), ((777, 789), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (787, 789), False, 'from tensorflow.keras.models import Sequential\n'), ((639, 676), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'input_shape': '(nr, nc, 1)'}), '(input_shape=(nr, nc, 1))\n', (651, 676), False, 'from tensorflow.keras.layers import UpSampling2D\n'), ((800, 863), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'input_shape': '(nc, nc, 1)', 'interpolation': '"""bilinear"""'}), "(input_shape=(nc, nc, 1), interpolation='bilinear')\n", (812, 863), False, 'from tensorflow.keras.layers import UpSampling2D\n')]
|
# ======================================================================
# copyright 2020. Triad National Security, LLC. All rights
# reserved. This program was produced under U.S. Government contract
# 89233218CNA000001 for Los Alamos National Laboratory (LANL), which
# is operated by Triad National Security, LLC for the U.S. Department
# of Energy/National Nuclear Security Administration. All rights in
# the program are reserved by Triad National Security, LLC, and the
# U.S. Department of Energy/National Nuclear Security
# Administration. The Government is granted for itself and others
# acting on its behalf a nonexclusive, paid-up, irrevocable worldwide
# license in this material to reproduce, prepare derivative works,
# distribute copies to the public, perform publicly and display
# publicly, and to permit others to do so.
# ======================================================================
# Authors: <NAME> (<EMAIL>)
# Purpose:
# Provides a check of whether a coordinate transformation of the metric
# from code coordinates to Kerr-Schild coordinates produces correct
# metric, consistent with the closed form (as in e.g. Eq.(3)
# McKinney & Gammie 2004, https://arxiv.org/abs/astro-ph/0404512)
#
# Functions:
# - print_matrix
# - check_transformation_matrices
#
from math import *
import numpy as np
def print_matrix(matrix,fmt="%19.11e",tostdout=True) -> str:
"""Pretty-prints a matrix to a string (optinally, to stdout)
Parameters
----------
matrix : numpy.array([N,M])
matrix to print
fmt : str
C-style format of each element (default: "%19.11e")
tostdout : bool
output to stdout (default: true)
Returns
-------
str
formatted output string
"""
N = matrix.shape[0]
M = matrix.shape[1]
s = "["
for i in range(N):
s+= "["
for j in range(M):
s+= (fmt % matrix[i,j])
if j < M - 1: s += ", "
s+= "]"
if i < N - 1: s += ",\n "
s+="]"
if tostdout: print(s)
return s
def check_transformation_matrices(geom, a, ir, jth,
verbose=True, tol=1e-12) -> bool:
"""Transforms the metric to spherical KS and compares with analytic formula
Test 1: covariant metric, gcov, at A = {ir, jth}
1.1 sample gcov and Lambda_h2bl_cov at A
    1.2 transform gcov to gks using transformation matrices
1.3 compare to expected values at {r,th} at A
Parameters
----------
geom : dictionary
nubhlight geom object
a : Float
dimensionless Kerr spin parameter
ir : Integer
index of sample point in radial direction
jth : Integer
index of sample point in angular theta-direction
verbose : bool
output steps to stdout
tol : Float
tolerance to relative error (wrt det g)
Returns
-------
bool
True if all checks passed
Examples
--------
import hdf5_to_dict as io
hdr = io.load_hdr("dump_00000010.h5")
geom = io.load_geom(hdr,recalc=True)
check_transformation_matrices(geom, -1, 64)
"""
# sample gcov and h2bl at point A
gcov_A = geom['gcov'][ir,jth]
h2bl_A = geom['Lambda_h2bl_cov'][ir,jth]
# sample r and theta, compute BL metric-related quantities
r = geom['r'][ir,jth,0]; r2 = r*r
a2 = a*a
th= geom['th'][ir,jth,0]
sth2= sin(th)**2
Delta= r2 - 2*r + a2
Sigma= r2 + a2*cos(th)**2
A = (r2 + a2)**2 - a2*Delta*sin(th)**2
if verbose:
print ("r = %19.11e" % r)
print ("theta = %19.11e" % th)
print ("a = %19.11e" % a)
print ("Delta = %19.11e" % Delta)
print ("Sigma = %19.11e" % Sigma)
print ("A = %19.11e" % A)
# output metric
print ("gcov_A = ")
print_matrix (gcov_A)
print ("")
# output transformation matrix
print ("h2bl_A = ")
print_matrix (h2bl_A)
print ("")
# compute BL metric at A
gks_A = np.zeros([4,4])
for i in range(4):
for j in range(4):
for k in range(4):
for l in range(4):
gks_A[i,j] = gks_A[i,j] + h2bl_A[k,i]*h2bl_A[l,j]*gcov_A[k,l]
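    # Note added for clarity: the nested loops above implement the covariant tensor
    # transformation g_KS[i,j] = sum_{k,l} Lambda[k,i] * Lambda[l,j] * g_code[k,l],
    # i.e. gks_A = h2bl_A^T @ gcov_A @ h2bl_A, sampled at the point A = {ir, jth}.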
if verbose:
print ("gks_A = ")
print_matrix (gks_A)
print("")
# expected values at {r, th}
g_tt = -1. + 2.*r/Sigma
g_rr = 1. + 2.*r/Sigma
g_ff = sth2*(Sigma + a2*g_rr*sth2)
g_thth = Sigma
g_tr = 2*r/Sigma
g_tf = -2*a*r*sth2/Sigma
g_rf = -a*g_rr*sth2
det_g = -Sigma**2*sth2
if verbose:
print ("Expected:")
print (" g_tt = %19.11e" % g_tt )
print (" g_rr = %19.11e" % g_rr )
print (" g_thth = %19.11e" % g_thth)
print (" g_ff = %19.11e" % g_ff )
print (" g_tr = %19.11e" % g_tr )
print (" g_rf = %19.11e" % g_rf )
print (" g_tf = %19.11e" % g_tf )
print ("")
# check gks_A
gks_expected = np.array(
[[ g_tt, g_tr, 0.0, g_tf],
[ g_tr, g_rr, 0.0, g_rf],
[ 0.0, 0.0, g_thth, 0.0],
[ g_tf, g_rf, 0.0, g_ff]]
)
passed = True
for i in range(4):
for j in range(4):
if abs(gks_A[i,j] - gks_expected[i,j])/abs(det_g) > tol:
passed = False
if verbose:
print (f"WARNING: Significant mismatch in gks_A[{i},{j}]:")
print (" -- expected: %19.11e" % gks_expected[i,j])
print (" -- actual: %19.11e" % gks_A[i,j])
return passed
|
[
"numpy.array",
"numpy.zeros"
] |
[((3787, 3803), 'numpy.zeros', 'np.zeros', (['[4, 4]'], {}), '([4, 4])\n', (3795, 3803), True, 'import numpy as np\n'), ((4682, 4796), 'numpy.array', 'np.array', (['[[g_tt, g_tr, 0.0, g_tf], [g_tr, g_rr, 0.0, g_rf], [0.0, 0.0, g_thth, 0.0],\n [g_tf, g_rf, 0.0, g_ff]]'], {}), '([[g_tt, g_tr, 0.0, g_tf], [g_tr, g_rr, 0.0, g_rf], [0.0, 0.0,\n g_thth, 0.0], [g_tf, g_rf, 0.0, g_ff]])\n', (4690, 4796), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""A module for plotting penguins data for modelling with scikit-learn."""
# Imports ---------------------------------------------------------------------
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Constants -------------------------------------------------------------------
SPECIES_COLORS = {
'Adelie': '#4daf4a',
'Gentoo': '#ffb000',
'Chinstrap': '#0084f7'
}
X_AXIS = [30, 60]
Y_AXIS = [12, 22]
# Set style -------------------------------------------------------------------
# Load the style from a file
plt.style.use('./style/eda.mplstyle')
# Alternatively, load the style from the library in ~/.matplotlib/stylelib
# plt.style.use(['eda'])
# Functions -------------------------------------------------------------------
def get_contour_data(model, pipeline, n_points=1000):
"""Create the data used to show the boundary of the decision function."""
x0s = np.linspace(X_AXIS[0], X_AXIS[1], n_points)
x1s = np.linspace(Y_AXIS[0], Y_AXIS[1], n_points)
x0, x1 = np.meshgrid(x0s, x1s)
X = np.c_[x0.ravel(), x1.ravel()]
df_X = pd.DataFrame(X, columns=['bill_length_mm', 'bill_depth_mm'])
X = pipeline.transform(df_X)
y_pred = model.predict(X).reshape(x0.shape)
y_decision = model.decision_function(X).reshape(x0.shape)
return x0, x1, y_pred, y_decision
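# Note added for clarity: x0 and x1 are (n_points, n_points) grids produced by np.meshgrid,
# and y_pred / y_decision are reshaped back onto that grid, so the return values can be passed
# directly to ax.contourf(x0, x1, y_pred) as done in plot_model() below.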
def get_target_colors(target):
"""Create a dictionary of colors to use in binary classification plots."""
return {
target : '#984ea3',
'Other': '#ff7f00'
}
# Plots -----------------------------------------------------------------------
def plot_example():
plt.style.reload_library()
plt.style.use(['eda'])
fig, ax = plt.subplots()
ax.set_title('Some random words of the title')
ax.scatter(np.random.normal(0,1,10), np.random.normal(0,1,10))
fig.savefig('plots/test.svg', format='svg')
fig.savefig('plots/test.png', format='png')
plt.close()
def plot_target_by_features(df):
"""Plot the different target species."""
fig, ax = plt.subplots()
ax.set_title(
label='Palmer penguins by species and bill characteristics',
loc='center')
ax.get_xaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_xlim(X_AXIS[0], X_AXIS[1])
ax.set_xlabel('Bill length (mm)')
ax.get_yaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
ax.set_ylabel('Bill depth (mm)')
grouped = df.groupby('species')
for key, group in grouped:
ax.scatter(
group['bill_length_mm'],
group['bill_depth_mm'],
c=SPECIES_COLORS[key],
s=40,
label=key,
alpha=0.55)
ax.legend(loc='lower left', handletextpad=0.2)
fig.savefig('plots/target-by-features.png', format='png')
plt.close()
def plot_model(df, model, pipeline, f_score, target, title, filename):
"""Plot the results of a binary classification model."""
fig, ax = plt.subplots()
ax.set_title(title, loc='center')
ax.get_xaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_xlim(X_AXIS[0], X_AXIS[1])
ax.set_xlabel('Bill length (mm)')
ax.get_yaxis().set_major_formatter(
mpl.ticker.FormatStrFormatter('%.0f'))
ax.set_ylim(Y_AXIS[0], Y_AXIS[1])
ax.set_ylabel('Bill depth (mm)')
# Plot the boundary of the decision function
x0, x1, y_pred, y_decision = get_contour_data(model, pipeline)
ax.contourf(x0, x1, y_pred, cmap=plt.cm.PuOr, alpha=0.2)
# This plots the decision score, if needed
# ax.contourf(x0, x1, y_decision, cmap=plt.cm.PuOr, alpha=0.1)
df = df.copy()
df['species'] = df['target'].apply(lambda t: target if t == 1 else 'Other')
colors = get_target_colors(target)
grouped = df.groupby('species')
for key, group in grouped:
ax.scatter(
group['bill_length_mm'],
group['bill_depth_mm'],
c=colors[key],
s=40,
label=key,
alpha=0.55)
ax.legend(loc='lower left', handletextpad=0.2)
bbox_style = {
'boxstyle': 'round',
'facecolor': '#ffffff',
'edgecolor': '#d4d4d4',
'alpha': 0.8
}
ax.text(53, 12.415, '$F_1$ score: {0}'.format(f_score), bbox=bbox_style)
fig.savefig('plots/{0}.png'.format(filename), format='png')
plt.close()
|
[
"numpy.random.normal",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.style.reload_library",
"matplotlib.ticker.FormatStrFormatter",
"pandas.DataFrame",
"numpy.meshgrid",
"matplotlib.pyplot.subplots"
] |
[((608, 645), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""./style/eda.mplstyle"""'], {}), "('./style/eda.mplstyle')\n", (621, 645), True, 'import matplotlib.pyplot as plt\n'), ((977, 1020), 'numpy.linspace', 'np.linspace', (['X_AXIS[0]', 'X_AXIS[1]', 'n_points'], {}), '(X_AXIS[0], X_AXIS[1], n_points)\n', (988, 1020), True, 'import numpy as np\n'), ((1031, 1074), 'numpy.linspace', 'np.linspace', (['Y_AXIS[0]', 'Y_AXIS[1]', 'n_points'], {}), '(Y_AXIS[0], Y_AXIS[1], n_points)\n', (1042, 1074), True, 'import numpy as np\n'), ((1088, 1109), 'numpy.meshgrid', 'np.meshgrid', (['x0s', 'x1s'], {}), '(x0s, x1s)\n', (1099, 1109), True, 'import numpy as np\n'), ((1159, 1219), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': "['bill_length_mm', 'bill_depth_mm']"}), "(X, columns=['bill_length_mm', 'bill_depth_mm'])\n", (1171, 1219), True, 'import pandas as pd\n'), ((1700, 1726), 'matplotlib.pyplot.style.reload_library', 'plt.style.reload_library', ([], {}), '()\n', (1724, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1753), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['eda']"], {}), "(['eda'])\n", (1744, 1753), True, 'import matplotlib.pyplot as plt\n'), ((1768, 1782), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1780, 1782), True, 'import matplotlib.pyplot as plt\n'), ((2001, 2012), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2010, 2012), True, 'import matplotlib.pyplot as plt\n'), ((2109, 2123), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2121, 2123), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2962), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2960, 2962), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3127), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3125, 3127), True, 'import matplotlib.pyplot as plt\n'), ((4533, 4544), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4542, 4544), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1875), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1865, 1875), True, 'import numpy as np\n'), ((1875, 1901), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1891, 1901), True, 'import numpy as np\n'), ((2284, 2321), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (2313, 2321), True, 'import matplotlib as mpl\n'), ((2448, 2485), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (2477, 2485), True, 'import matplotlib as mpl\n'), ((3216, 3253), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (3245, 3253), True, 'import matplotlib as mpl\n'), ((3380, 3417), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (3409, 3417), True, 'import matplotlib as mpl\n')]
|
from ipso_phen.ipapi.base.ipt_abstract import IptBase
from ipso_phen.ipapi.tools import regions
import numpy as np
import cv2
import logging
logger = logging.getLogger(__name__)
from ipso_phen.ipapi.base import ip_common as ipc
class IptFilterContourBySize(IptBase):
def build_params(self):
self.add_enabled_checkbox()
self.add_spin_box(
name="min_threshold",
desc="Lower bound limit",
default_value=0,
minimum=0,
maximum=100000000,
hint="Only contours bigger than lower limit bound will be kept",
)
self.add_spin_box(
name="max_threshold",
desc="Upper bound limit",
default_value=100000000,
minimum=0,
maximum=100000000,
hint="Only contours smaller than lower limit bound will be kept",
)
self.add_roi_selector()
def process_wrapper(self, **kwargs):
"""
Filter contour by size:
        Keep or discard contours according to their size
Real time: False
Keyword Arguments (in parentheses, argument name):
* Activate tool (enabled): Toggle whether or not tool is active
            * Lower bound limit (min_threshold): Only contours bigger than the lower bound limit will be kept
            * Upper bound limit (max_threshold): Only contours smaller than the upper bound limit will be kept
* Name of ROI to be used (roi_names): Operation will only be applied inside of ROI
* ROI selection mode (roi_selection_mode):
"""
wrapper = self.init_wrapper(**kwargs)
if wrapper is None:
return False
res = False
try:
if self.get_value_of("enabled") == 1:
mask = self.get_mask()
if mask is None:
logger.error(f"FAIL {self.name}: mask must be initialized")
return
lt, ut = self.get_value_of("min_threshold"), self.get_value_of(
"max_threshold"
)
# Get source contours
contours = [
c
for c in ipc.get_contours(
mask=mask,
retrieve_mode=cv2.RETR_LIST,
method=cv2.CHAIN_APPROX_SIMPLE,
)
if cv2.contourArea(c, True) < 0
]
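                # Note added for clarity: cv2.contourArea(c, True) returns a *signed* area whose
                # sign depends on the contour orientation, so the comprehension above keeps only
                # contours of one orientation among those returned by ipc.get_contours (which is
                # presumably a thin wrapper around cv2.findContours, given the cv2 flags passed).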
contours.sort(key=lambda x: cv2.contourArea(x), reverse=True)
colors = ipc.build_color_steps(step_count=len(contours))
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for clr, cnt in zip(colors, contours):
cv2.drawContours(dbg_img, [cnt], 0, clr, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
wrapper.store_image(
image=dbg_img,
text="all_contours",
)
fnt = (cv2.FONT_HERSHEY_SIMPLEX, 0.6)
for cnt in contours:
area_ = cv2.contourArea(cnt)
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
if area_ > 0:
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
(255, 255, 255),
2,
)
wrapper.store_image(
image=dbg_img,
text="all_contours_with_sizes",
)
dbg_img = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
out_mask = np.zeros_like(mask)
                # Contours colored by size category (red: too small, blue: too large, white: kept)
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in contours:
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size")
                # Same classification, this time drawing contours from largest to smallest
size_cnts = np.dstack(
(np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))
)
for cnt in sorted(
contours, key=lambda x: cv2.contourArea(x), reverse=True
):
area_ = cv2.contourArea(cnt)
if area_ < lt:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)
elif area_ > ut:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)
else:
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)
wrapper.store_image(image=size_cnts, text="cnts_by_size_reversed")
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
# Discarded contours borders
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_MAROON, 4)
# Kept contours
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
cv2.drawContours(out_mask, [cnt], 0, 255, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_GREEN, -1)
else:
cv2.drawContours(out_mask, [cnt], 0, 0, -1)
cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)
dbg_img = np.dstack(
(
cv2.bitwise_and(dbg_img[:, :, 0], mask),
cv2.bitwise_and(dbg_img[:, :, 1], mask),
cv2.bitwise_and(dbg_img[:, :, 2], mask),
)
)
# Discarded sizes
for cnt in contours:
area_ = cv2.contourArea(cnt)
if not (lt < area_ < ut):
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_RED,
thickness=2,
)
# Kept sizes
for cnt in contours:
area_ = cv2.contourArea(cnt)
if lt < area_ < ut:
x, y, w, h = cv2.boundingRect(cnt)
x += w // 2 - 10
y += h // 2
cv2.putText(
dbg_img,
f"{area_}",
(x, y),
fnt[0],
fnt[1],
ipc.C_LIME,
thickness=2,
)
out_mask = cv2.bitwise_and(
out_mask,
mask,
)
# Apply ROIs if needed
rois = self.get_ipt_roi(
wrapper=wrapper,
roi_names=self.get_value_of("roi_names").replace(" ", "").split(","),
selection_mode=self.get_value_of("roi_selection_mode"),
)
if rois:
untouched_mask = regions.delete_rois(rois=rois, image=self.get_mask())
self.result = cv2.bitwise_or(
untouched_mask, regions.keep_rois(rois=rois, image=out_mask)
)
self.demo_image = cv2.bitwise_or(
dbg_img,
np.dstack((untouched_mask, untouched_mask, untouched_mask)),
)
else:
self.result = out_mask
self.demo_image = dbg_img
wrapper.store_image(image=self.result, text="filtered_contours")
wrapper.store_image(image=self.demo_image, text="tagged_contours")
res = True
else:
wrapper.store_image(wrapper.current_image, "current_image")
res = True
except Exception as e:
res = False
logger.exception(f"Filter contour by size FAILED, exception: {repr(e)}")
else:
pass
finally:
return res
@property
def name(self):
return "Filter contour by size"
@property
def package(self):
return "TPMP"
@property
def real_time(self):
return False
@property
def result_name(self):
return "mask"
@property
def output_kind(self):
return "mask"
@property
def use_case(self):
return [ipc.ToolFamily.MASK_CLEANUP]
@property
def description(self):
return """'Keep or descard contours according to their size"""
|
[
"logging.getLogger",
"numpy.dstack",
"ipso_phen.ipapi.tools.regions.keep_rois",
"cv2.drawContours",
"cv2.bitwise_and",
"cv2.contourArea",
"cv2.putText",
"ipso_phen.ipapi.base.ip_common.get_contours",
"numpy.zeros_like",
"cv2.boundingRect"
] |
[((159, 186), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (176, 186), False, 'import logging\n'), ((4349, 4368), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (4362, 4368), True, 'import numpy as np\n'), ((8412, 8443), 'cv2.bitwise_and', 'cv2.bitwise_and', (['out_mask', 'mask'], {}), '(out_mask, mask)\n', (8427, 8443), False, 'import cv2\n'), ((2910, 2954), 'cv2.drawContours', 'cv2.drawContours', (['dbg_img', '[cnt]', '(0)', 'clr', '(-1)'], {}), '(dbg_img, [cnt], 0, clr, -1)\n', (2926, 2954), False, 'import cv2\n'), ((3515, 3535), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3530, 3535), False, 'import cv2\n'), ((3570, 3591), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (3586, 3591), False, 'import cv2\n'), ((4620, 4640), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (4635, 4640), False, 'import cv2\n'), ((5401, 5421), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (5416, 5421), False, 'import cv2\n'), ((5913, 5933), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (5928, 5933), False, 'import cv2\n'), ((6170, 6190), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (6185, 6190), False, 'import cv2\n'), ((6416, 6436), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (6431, 6436), False, 'import cv2\n'), ((7202, 7222), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (7217, 7222), False, 'import cv2\n'), ((7843, 7863), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (7858, 7863), False, 'import cv2\n'), ((2276, 2369), 'ipso_phen.ipapi.base.ip_common.get_contours', 'ipc.get_contours', ([], {'mask': 'mask', 'retrieve_mode': 'cv2.RETR_LIST', 'method': 'cv2.CHAIN_APPROX_SIMPLE'}), '(mask=mask, retrieve_mode=cv2.RETR_LIST, method=cv2.\n CHAIN_APPROX_SIMPLE)\n', (2292, 2369), True, 'from ipso_phen.ipapi.base import ip_common as ipc\n'), ((2751, 2770), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (2764, 2770), True, 'import numpy as np\n'), ((2772, 2791), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (2785, 2791), True, 'import numpy as np\n'), ((2793, 2812), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (2806, 2812), True, 'import numpy as np\n'), ((3041, 3080), 'cv2.bitwise_and', 'cv2.bitwise_and', (['dbg_img[:, :, 0]', 'mask'], {}), '(dbg_img[:, :, 0], mask)\n', (3056, 3080), False, 'import cv2\n'), ((3107, 3146), 'cv2.bitwise_and', 'cv2.bitwise_and', (['dbg_img[:, :, 1]', 'mask'], {}), '(dbg_img[:, :, 1], mask)\n', (3122, 3146), False, 'import cv2\n'), ((3173, 3212), 'cv2.bitwise_and', 'cv2.bitwise_and', (['dbg_img[:, :, 2]', 'mask'], {}), '(dbg_img[:, :, 2], mask)\n', (3188, 3212), False, 'import cv2\n'), ((3723, 3799), 'cv2.putText', 'cv2.putText', (['dbg_img', 'f"""{area_}"""', '(x, y)', 'fnt[0]', 'fnt[1]', '(255, 255, 255)', '(2)'], {}), "(dbg_img, f'{area_}', (x, y), fnt[0], fnt[1], (255, 255, 255), 2)\n", (3734, 3799), False, 'import cv2\n'), ((4239, 4258), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (4252, 4258), True, 'import numpy as np\n'), ((4260, 4279), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (4273, 4279), True, 'import numpy as np\n'), ((4281, 4300), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (4294, 4300), True, 'import numpy as np\n'), ((4471, 4490), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (4484, 4490), True, 'import numpy as 
np\n'), ((4492, 4511), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (4505, 4511), True, 'import numpy as np\n'), ((4513, 4532), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (4526, 4532), True, 'import numpy as np\n'), ((4702, 4754), 'cv2.drawContours', 'cv2.drawContours', (['size_cnts', '[cnt]', '(0)', 'ipc.C_RED', '(-1)'], {}), '(size_cnts, [cnt], 0, ipc.C_RED, -1)\n', (4718, 4754), False, 'import cv2\n'), ((5156, 5175), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (5169, 5175), True, 'import numpy as np\n'), ((5177, 5196), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (5190, 5196), True, 'import numpy as np\n'), ((5198, 5217), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (5211, 5217), True, 'import numpy as np\n'), ((5483, 5535), 'cv2.drawContours', 'cv2.drawContours', (['size_cnts', '[cnt]', '(0)', 'ipc.C_RED', '(-1)'], {}), '(size_cnts, [cnt], 0, ipc.C_RED, -1)\n', (5499, 5535), False, 'import cv2\n'), ((6006, 6056), 'cv2.drawContours', 'cv2.drawContours', (['dbg_img', '[cnt]', '(0)', 'ipc.C_RED', '(-1)'], {}), '(dbg_img, [cnt], 0, ipc.C_RED, -1)\n', (6022, 6056), False, 'import cv2\n'), ((6263, 6315), 'cv2.drawContours', 'cv2.drawContours', (['dbg_img', '[cnt]', '(0)', 'ipc.C_MAROON', '(4)'], {}), '(dbg_img, [cnt], 0, ipc.C_MAROON, 4)\n', (6279, 6315), False, 'import cv2\n'), ((6503, 6548), 'cv2.drawContours', 'cv2.drawContours', (['out_mask', '[cnt]', '(0)', '(255)', '(-1)'], {}), '(out_mask, [cnt], 0, 255, -1)\n', (6519, 6548), False, 'import cv2\n'), ((6574, 6626), 'cv2.drawContours', 'cv2.drawContours', (['dbg_img', '[cnt]', '(0)', 'ipc.C_GREEN', '(-1)'], {}), '(dbg_img, [cnt], 0, ipc.C_GREEN, -1)\n', (6590, 6626), False, 'import cv2\n'), ((6679, 6722), 'cv2.drawContours', 'cv2.drawContours', (['out_mask', '[cnt]', '(0)', '(0)', '(-1)'], {}), '(out_mask, [cnt], 0, 0, -1)\n', (6695, 6722), False, 'import cv2\n'), ((6748, 6798), 'cv2.drawContours', 'cv2.drawContours', (['dbg_img', '[cnt]', '(0)', 'ipc.C_RED', '(-1)'], {}), '(dbg_img, [cnt], 0, ipc.C_RED, -1)\n', (6764, 6798), False, 'import cv2\n'), ((6885, 6924), 'cv2.bitwise_and', 'cv2.bitwise_and', (['dbg_img[:, :, 0]', 'mask'], {}), '(dbg_img[:, :, 0], mask)\n', (6900, 6924), False, 'import cv2\n'), ((6951, 6990), 'cv2.bitwise_and', 'cv2.bitwise_and', (['dbg_img[:, :, 1]', 'mask'], {}), '(dbg_img[:, :, 1], mask)\n', (6966, 6990), False, 'import cv2\n'), ((7017, 7056), 'cv2.bitwise_and', 'cv2.bitwise_and', (['dbg_img[:, :, 2]', 'mask'], {}), '(dbg_img[:, :, 2], mask)\n', (7032, 7056), False, 'import cv2\n'), ((7308, 7329), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (7324, 7329), False, 'import cv2\n'), ((7434, 7519), 'cv2.putText', 'cv2.putText', (['dbg_img', 'f"""{area_}"""', '(x, y)', 'fnt[0]', 'fnt[1]', 'ipc.C_RED'], {'thickness': '(2)'}), "(dbg_img, f'{area_}', (x, y), fnt[0], fnt[1], ipc.C_RED, thickness=2\n )\n", (7445, 7519), False, 'import cv2\n'), ((7943, 7964), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (7959, 7964), False, 'import cv2\n'), ((8069, 8154), 'cv2.putText', 'cv2.putText', (['dbg_img', 'f"""{area_}"""', '(x, y)', 'fnt[0]', 'fnt[1]', 'ipc.C_LIME'], {'thickness': '(2)'}), "(dbg_img, f'{area_}', (x, y), fnt[0], fnt[1], ipc.C_LIME,\n thickness=2)\n", (8080, 8154), False, 'import cv2\n'), ((9025, 9069), 'ipso_phen.ipapi.tools.regions.keep_rois', 'regions.keep_rois', ([], {'rois': 'rois', 'image': 'out_mask'}), '(rois=rois, image=out_mask)\n', (9042, 9069), False, 'from 
ipso_phen.ipapi.tools import regions\n'), ((9207, 9266), 'numpy.dstack', 'np.dstack', (['(untouched_mask, untouched_mask, untouched_mask)'], {}), '((untouched_mask, untouched_mask, untouched_mask))\n', (9216, 9266), True, 'import numpy as np\n'), ((2488, 2512), 'cv2.contourArea', 'cv2.contourArea', (['c', '(True)'], {}), '(c, True)\n', (2503, 2512), False, 'import cv2\n'), ((2581, 2599), 'cv2.contourArea', 'cv2.contourArea', (['x'], {}), '(x)\n', (2596, 2599), False, 'import cv2\n'), ((4818, 4871), 'cv2.drawContours', 'cv2.drawContours', (['size_cnts', '[cnt]', '(0)', 'ipc.C_BLUE', '(-1)'], {}), '(size_cnts, [cnt], 0, ipc.C_BLUE, -1)\n', (4834, 4871), False, 'import cv2\n'), ((4924, 4978), 'cv2.drawContours', 'cv2.drawContours', (['size_cnts', '[cnt]', '(0)', 'ipc.C_WHITE', '(-1)'], {}), '(size_cnts, [cnt], 0, ipc.C_WHITE, -1)\n', (4940, 4978), False, 'import cv2\n'), ((5319, 5337), 'cv2.contourArea', 'cv2.contourArea', (['x'], {}), '(x)\n', (5334, 5337), False, 'import cv2\n'), ((5599, 5652), 'cv2.drawContours', 'cv2.drawContours', (['size_cnts', '[cnt]', '(0)', 'ipc.C_BLUE', '(-1)'], {}), '(size_cnts, [cnt], 0, ipc.C_BLUE, -1)\n', (5615, 5652), False, 'import cv2\n'), ((5705, 5759), 'cv2.drawContours', 'cv2.drawContours', (['size_cnts', '[cnt]', '(0)', 'ipc.C_WHITE', '(-1)'], {}), '(size_cnts, [cnt], 0, ipc.C_WHITE, -1)\n', (5721, 5759), False, 'import cv2\n')]
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains functions for generating Qobj representation of a variety
of commonly occurring quantum operators.
"""
__all__ = ['jmat', 'spin_Jx', 'spin_Jy', 'spin_Jz', 'spin_Jm', 'spin_Jp',
'spin_J_set', 'sigmap', 'sigmam', 'sigmax', 'sigmay', 'sigmaz',
'destroy', 'create', 'qeye', 'identity', 'position', 'momentum',
'num', 'squeeze', 'squeezing', 'displace', 'commutator',
'qutrit_ops', 'qdiags', 'phase', 'qzero', 'enr_destroy',
'enr_identity', 'charge', 'tunneling']
import numbers
import numpy as np
import scipy
import scipy.sparse as sp
from qutip.qobj import Qobj
from qutip.fastsparse import fast_csr_matrix, fast_identity
from qutip.dimensions import flatten
#
# Spin operators
#
def jmat(j, *args):
"""Higher-order spin operators:
Parameters
----------
j : float
Spin of operator
args : str
Which operator to return 'x','y','z','+','-'.
If no args given, then output is ['x','y','z']
Returns
-------
jmat : qobj / ndarray
``qobj`` for requested spin operator(s).
Examples
--------
>>> jmat(1) # doctest: +SKIP
[ Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 0. 0.70710678 0. ]
[ 0.70710678 0. 0.70710678]
[ 0. 0.70710678 0. ]]
Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 0.+0.j 0.-0.70710678j 0.+0.j ]
[ 0.+0.70710678j 0.+0.j 0.-0.70710678j]
[ 0.+0.j 0.+0.70710678j 0.+0.j ]]
Quantum object: dims = [[3], [3]], \
shape = [3, 3], type = oper, isHerm = True
Qobj data =
[[ 1. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. -1.]]]
Notes
-----
If no 'args' input, then returns array of ['x','y','z'] operators.
"""
if (np.fix(2 * j) != 2 * j) or (j < 0):
raise TypeError('j must be a non-negative integer or half-integer')
if not args:
return jmat(j, 'x'), jmat(j, 'y'), jmat(j, 'z')
if args[0] == '+':
A = _jplus(j)
elif args[0] == '-':
A = _jplus(j).getH()
elif args[0] == 'x':
A = 0.5 * (_jplus(j) + _jplus(j).getH())
elif args[0] == 'y':
A = -0.5 * 1j * (_jplus(j) - _jplus(j).getH())
elif args[0] == 'z':
A = _jz(j)
else:
raise TypeError('Invalid type')
return Qobj(A)
def _jplus(j):
"""
Internal functions for generating the data representing the J-plus
operator.
"""
m = np.arange(j, -j - 1, -1, dtype=complex)
data = (np.sqrt(j * (j + 1.0) - (m + 1.0) * m))[1:]
N = m.shape[0]
ind = np.arange(1, N, dtype=np.int32)
ptr = np.array(list(range(N-1))+[N-1]*2, dtype=np.int32)
ptr[-1] = N-1
return fast_csr_matrix((data,ind,ptr), shape=(N,N))
def _jz(j):
"""
Internal functions for generating the data representing the J-z operator.
"""
N = int(2*j+1)
data = np.array([j-k for k in range(N) if (j-k)!=0], dtype=complex)
# Even shaped matrix
if (N % 2 == 0):
ind = np.arange(N, dtype=np.int32)
ptr = np.arange(N+1,dtype=np.int32)
ptr[-1] = N
# Odd shaped matrix
else:
j = int(j)
ind = np.array(list(range(j))+list(range(j+1,N)), dtype=np.int32)
ptr = np.array(list(range(j+1))+list(range(j,N)), dtype=np.int32)
ptr[-1] = N-1
return fast_csr_matrix((data,ind,ptr), shape=(N,N))
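# Illustrative check (added sketch, not part of the original QuTiP source).
# It only uses `jmat` defined above and relies on the exact ladder-operator
# identity J+ = Jx + 1j*Jy, so it should hold for any spin j.
def _example_jmat_ladder_identity(j=1):
    """Verify that jmat(j, '+') equals jmat(j, 'x') + 1j * jmat(j, 'y')."""
    jp = jmat(j, '+')
    jx, jy = jmat(j, 'x'), jmat(j, 'y')
    return (jp - (jx + 1j * jy)).norm() < 1e-12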
#
# Spin j operators:
#
def spin_Jx(j):
"""Spin-j x operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'x')
def spin_Jy(j):
"""Spin-j y operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'y')
def spin_Jz(j):
"""Spin-j z operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, 'z')
def spin_Jm(j):
"""Spin-j annihilation operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, '-')
def spin_Jp(j):
"""Spin-j creation operator
Parameters
----------
j : float
Spin of operator
Returns
-------
op : Qobj
``qobj`` representation of the operator.
"""
return jmat(j, '+')
def spin_J_set(j):
"""Set of spin-j operators (x, y, z)
Parameters
----------
j : float
Spin of operators
Returns
-------
list : list of Qobj
        list of ``qobj`` representing the spin operators.
"""
return jmat(j)
#
# Pauli spin 1/2 operators:
#
def sigmap():
"""Creation operator for Pauli spins.
Examples
--------
>>> sigmap() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
Qobj data =
[[ 0. 1.]
[ 0. 0.]]
"""
return jmat(1 / 2., '+')
def sigmam():
"""Annihilation operator for Pauli spins.
Examples
--------
>>> sigmam() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
Qobj data =
[[ 0. 0.]
[ 1. 0.]]
"""
return jmat(1 / 2., '-')
def sigmax():
"""Pauli spin 1/2 sigma-x operator
Examples
--------
>>> sigmax() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = False
Qobj data =
[[ 0. 1.]
[ 1. 0.]]
"""
return 2.0 * jmat(1.0 / 2, 'x')
def sigmay():
"""Pauli spin 1/2 sigma-y operator.
Examples
--------
>>> sigmay() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
Qobj data =
[[ 0.+0.j 0.-1.j]
[ 0.+1.j 0.+0.j]]
"""
return 2.0 * jmat(1.0 / 2, 'y')
def sigmaz():
"""Pauli spin 1/2 sigma-z operator.
Examples
--------
>>> sigmaz() # doctest: +SKIP
Quantum object: dims = [[2], [2]], \
shape = [2, 2], type = oper, isHerm = True
Qobj data =
[[ 1. 0.]
[ 0. -1.]]
"""
return 2.0 * jmat(1.0 / 2, 'z')
#
# DESTROY returns annihilation operator for N dimensional Hilbert space
# out = destroy(N), N is integer value & N>0
#
def destroy(N, offset=0):
'''Destruction (lowering) operator.
Parameters
----------
N : int
Dimension of Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Qobj for lowering operator.
Examples
--------
>>> destroy(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.00000000+0.j 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j]
[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]]
'''
if not isinstance(N, (int, np.integer)): # raise error if N not integer
raise ValueError("Hilbert space dimension must be integer value")
data = np.sqrt(np.arange(offset+1, N+offset, dtype=complex))
ind = np.arange(1,N, dtype=np.int32)
ptr = np.arange(N+1, dtype=np.int32)
ptr[-1] = N-1
return Qobj(fast_csr_matrix((data,ind,ptr),shape=(N,N)), isherm=False)
#
# create returns creation operator for N dimensional Hilbert space
# out = create(N), N is integer value & N>0
#
def create(N, offset=0):
'''Creation (raising) operator.
Parameters
----------
N : int
        Dimension of Hilbert space.
    offset : int (default 0)
        The lowest number state that is included in the finite number state
        representation of the operator.
    Returns
    -------
    oper : qobj
        Qobj for raising operator.
Examples
--------
>>> create(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 1.00000000+0.j 0.00000000+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 1.41421356+0.j 0.00000000+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.00000000+0.j 1.73205081+0.j 0.00000000+0.j]]
'''
if not isinstance(N, (int, np.integer)): # raise error if N not integer
raise ValueError("Hilbert space dimension must be integer value")
qo = destroy(N, offset=offset) # create operator using destroy function
return qo.dag()
def _implicit_tensor_dimensions(dimensions):
"""
Total flattened size and operator dimensions for operator creation routines
that automatically perform tensor products.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
First dimension of an operator which can create an implicit tensor
product. If the type is `int`, it is promoted first to `[dimensions]`.
From there, it should be one of the two-elements `dims` parameter of a
`qutip.Qobj` representing an `oper` or `super`, with possible tensor
products.
Returns
-------
size : int
Dimension of backing matrix required to represent operator.
dimensions : list
Dimension list in the form required by ``Qobj`` creation.
"""
if not isinstance(dimensions, list):
dimensions = [dimensions]
flat = flatten(dimensions)
if not all(isinstance(x, numbers.Integral) and x >= 0 for x in flat):
raise ValueError("All dimensions must be integers >= 0")
return np.prod(flat), [dimensions, dimensions]
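# Illustrative check (added sketch, not part of the original source): an int
# is promoted to a one-element list and nested dims are flattened, so
# _implicit_tensor_dimensions([2, 3]) gives size 6 and dims [[2, 3], [2, 3]].
def _example_implicit_tensor_dimensions():
    size, dims = _implicit_tensor_dimensions([2, 3])
    return size == 6 and dims == [[2, 3], [2, 3]]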
def qzero(dimensions):
"""
Zero operator.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
qzero : qobj
Zero operator Qobj.
"""
size, dimensions = _implicit_tensor_dimensions(dimensions)
# A sparse matrix with no data is equal to a zero matrix.
return Qobj(fast_csr_matrix(shape=(size, size), dtype=complex),
dims=dimensions, isherm=True)
#
# QEYE returns identity operator for a Hilbert space with dimensions dims.
# a = qeye(N), N is integer or list of integers & all elements >= 0
#
def qeye(dimensions):
"""
Identity operator.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
oper : qobj
Identity operator Qobj.
Examples
--------
>>> qeye(3) # doctest: +SKIP
Quantum object: dims = [[3], [3]], shape = (3, 3), type = oper, \
isherm = True
Qobj data =
[[ 1. 0. 0.]
[ 0. 1. 0.]
[ 0. 0. 1.]]
>>> qeye([2,2]) # doctest: +SKIP
Quantum object: dims = [[2, 2], [2, 2]], shape = (4, 4), type = oper, \
isherm = True
Qobj data =
[[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 0. 1. 0.]
[0. 0. 0. 1.]]
"""
size, dimensions = _implicit_tensor_dimensions(dimensions)
return Qobj(fast_identity(size),
dims=dimensions, isherm=True, isunitary=True)
def identity(dims):
"""Identity operator. Alternative name to :func:`qeye`.
Parameters
----------
dimensions : (int) or (list of int) or (list of list of int)
Dimension of Hilbert space. If provided as a list of ints, then the
dimension is the product over this list, but the ``dims`` property of
the new Qobj are set to this list. This can produce either `oper` or
`super` depending on the passed `dimensions`.
Returns
-------
oper : qobj
Identity operator Qobj.
"""
return qeye(dims)
def position(N, offset=0):
"""
Position operator x=1/sqrt(2)*(a+a.dag())
Parameters
----------
N : int
Number of Fock states in Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Position operator as Qobj.
"""
a = destroy(N, offset=offset)
return 1.0 / np.sqrt(2.0) * (a + a.dag())
def momentum(N, offset=0):
"""
Momentum operator p=-1j/sqrt(2)*(a-a.dag())
Parameters
----------
N : int
Number of Fock states in Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Momentum operator as Qobj.
"""
a = destroy(N, offset=offset)
return -1j / np.sqrt(2.0) * (a - a.dag())
def num(N, offset=0):
"""Quantum object for number operator.
Parameters
----------
N : int
The dimension of the Hilbert space.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper: qobj
Qobj for number operator.
Examples
--------
>>> num(4) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = True
Qobj data =
[[0 0 0 0]
[0 1 0 0]
[0 0 2 0]
[0 0 0 3]]
"""
if offset == 0:
data = np.arange(1,N, dtype=complex)
ind = np.arange(1,N, dtype=np.int32)
ptr = np.array([0]+list(range(0,N)), dtype=np.int32)
ptr[-1] = N-1
else:
data = np.arange(offset, offset + N, dtype=complex)
ind = np.arange(N, dtype=np.int32)
ptr = np.arange(N+1,dtype=np.int32)
ptr[-1] = N
return Qobj(fast_csr_matrix((data,ind,ptr), shape=(N,N)), isherm=True)
def squeeze(N, z, offset=0):
"""Single-mode Squeezing operator.
Parameters
----------
N : int
        Dimension of Hilbert space.
z : float/complex
Squeezing parameter.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : :class:`qutip.qobj.Qobj`
Squeezing operator.
Examples
--------
>>> squeeze(4, 0.25) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.98441565+0.j 0.00000000+0.j 0.17585742+0.j 0.00000000+0.j]
[ 0.00000000+0.j 0.95349007+0.j 0.00000000+0.j 0.30142443+0.j]
[-0.17585742+0.j 0.00000000+0.j 0.98441565+0.j 0.00000000+0.j]
[ 0.00000000+0.j -0.30142443+0.j 0.00000000+0.j 0.95349007+0.j]]
"""
a = destroy(N, offset=offset)
op = (1 / 2.0) * np.conj(z) * (a ** 2) - (1 / 2.0) * z * (a.dag()) ** 2
return op.expm()
def squeezing(a1, a2, z):
"""Generalized squeezing operator.
.. math::
S(z) = \\exp\\left(\\frac{1}{2}\\left(z^*a_1a_2
- za_1^\\dagger a_2^\\dagger\\right)\\right)
Parameters
----------
a1 : :class:`qutip.qobj.Qobj`
Operator 1.
a2 : :class:`qutip.qobj.Qobj`
Operator 2.
z : float/complex
Squeezing parameter.
Returns
-------
oper : :class:`qutip.qobj.Qobj`
Squeezing operator.
"""
b = 0.5 * (np.conj(z) * (a1 * a2) - z * (a1.dag() * a2.dag()))
return b.expm()
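# Illustrative check (added sketch, not in the original source): with a single
# mode, setting a1 = a2 = a reproduces the exponent used in `squeeze`, so the
# generalized operator should coincide with the single-mode squeezing operator.
def _example_squeezing_reduces_to_squeeze(N=4, z=0.25):
    """Verify that squeezing(a, a, z) equals squeeze(N, z) for one mode."""
    a = destroy(N)
    return (squeezing(a, a, z) - squeeze(N, z)).norm() < 1e-10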
def displace(N, alpha, offset=0):
"""Single-mode displacement operator.
Parameters
----------
N : int
Dimension of Hilbert space.
alpha : float/complex
Displacement amplitude.
offset : int (default 0)
The lowest number state that is included in the finite number state
representation of the operator.
Returns
-------
oper : qobj
Displacement operator.
Examples
---------
>>> displace(4,0.25) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isHerm = False
Qobj data =
[[ 0.96923323+0.j -0.24230859+0.j 0.04282883+0.j -0.00626025+0.j]
[ 0.24230859+0.j 0.90866411+0.j -0.33183303+0.j 0.07418172+0.j]
[ 0.04282883+0.j 0.33183303+0.j 0.84809499+0.j -0.41083747+0.j]
[ 0.00626025+0.j 0.07418172+0.j 0.41083747+0.j 0.90866411+0.j]]
"""
a = destroy(N, offset=offset)
D = (alpha * a.dag() - np.conj(alpha) * a).expm()
return D
def commutator(A, B, kind="normal"):
"""
Return the commutator of kind `kind` (normal, anti) of the
two operators A and B.
"""
if kind == 'normal':
return A * B - B * A
elif kind == 'anti':
return A * B + B * A
else:
raise TypeError("Unknown commutator kind '%s'" % kind)
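# Illustrative check (added sketch, not in the original source), using only
# operators defined in this module: the Pauli matrices satisfy
# [sigma_x, sigma_y] = 2j*sigma_z and {sigma_x, sigma_y} = 0.
def _example_pauli_commutators():
    normal = commutator(sigmax(), sigmay(), kind="normal") - 2j * sigmaz()
    anti = commutator(sigmax(), sigmay(), kind="anti")
    return normal.norm() < 1e-12 and anti.norm() < 1e-12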
def qutrit_ops():
"""
Operators for a three level system (qutrit).
Returns
-------
opers: array
`array` of qutrit operators.
"""
from qutip.states import qutrit_basis
one, two, three = qutrit_basis()
sig11 = one * one.dag()
sig22 = two * two.dag()
sig33 = three * three.dag()
sig12 = one * two.dag()
sig23 = two * three.dag()
sig31 = three * one.dag()
return np.array([sig11, sig22, sig33, sig12, sig23, sig31],
dtype=object)
def qdiags(diagonals, offsets, dims=None, shape=None):
"""
Constructs an operator from an array of diagonals.
Parameters
----------
diagonals : sequence of array_like
Array of elements to place along the selected diagonals.
offsets : sequence of ints
Sequence for diagonals to be set:
- k=0 main diagonal
- k>0 kth upper diagonal
- k<0 kth lower diagonal
dims : list, optional
Dimensions for operator
shape : list, tuple, optional
Shape of operator. If omitted, a square operator large enough
to contain the diagonals is generated.
See Also
--------
scipy.sparse.diags : for usage information.
Notes
-----
This function requires SciPy 0.11+.
Examples
--------
>>> qdiags(sqrt(range(1, 4)), 1) # doctest: +SKIP
Quantum object: dims = [[4], [4]], \
shape = [4, 4], type = oper, isherm = False
Qobj data =
[[ 0. 1. 0. 0. ]
[ 0. 0. 1.41421356 0. ]
[ 0. 0. 0. 1.73205081]
[ 0. 0. 0. 0. ]]
"""
data = sp.diags(diagonals, offsets, shape, format='csr', dtype=complex)
if not dims:
dims = [[], []]
if not shape:
shape = []
return Qobj(data, dims, list(shape))
def phase(N, phi0=0):
"""
Single-mode Pegg-Barnett phase operator.
Parameters
----------
N : int
Number of basis states in Hilbert space.
phi0 : float
Reference phase.
Returns
-------
oper : qobj
Phase operator with respect to reference phase.
Notes
-----
The Pegg-Barnett phase operator is Hermitian on a truncated Hilbert space.
"""
phim = phi0 + (2.0 * np.pi * np.arange(N)) / N # discrete phase angles
n = np.arange(N).reshape((N, 1))
states = np.array([np.sqrt(kk) / np.sqrt(N) * np.exp(1.0j * n * kk)
for kk in phim])
ops = np.array([np.outer(st, st.conj()) for st in states])
return Qobj(np.sum(ops, axis=0))
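# Illustrative check (added sketch, not in the original source): the phase
# states above are orthonormal, so on the truncated space the Pegg-Barnett
# operator is Hermitian with eigenvalues equal to the N discrete phase angles.
def _example_phase_is_hermitian(N=4, phi0=0.0):
    op = phase(N, phi0)
    expected = phi0 + 2.0 * np.pi * np.arange(N) / N
    return op.isherm and np.allclose(np.sort(op.eigenenergies()), np.sort(expected))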
def enr_destroy(dims, excitations):
"""
    Generate annihilation operators for modes in an excitation-number-restricted
    state space. For example, consider a system consisting of 4 modes, each
    with 5 states. The total Hilbert space size is 5**4 = 625. If we are
only interested in states that contain up to 2 excitations, we only need
to include states such as
(0, 0, 0, 0)
(0, 0, 0, 1)
(0, 0, 0, 2)
(0, 0, 1, 0)
(0, 0, 1, 1)
(0, 0, 2, 0)
...
This function creates annihilation operators for the 4 modes that act
within this state space:
a1, a2, a3, a4 = enr_destroy([5, 5, 5, 5], excitations=2)
    From this point onwards, the annihilation operators a1, ..., a4 can be
used to setup a Hamiltonian, collapse operators and expectation-value
operators, etc., following the usual pattern.
Parameters
----------
dims : list
A list of the dimensions of each subsystem of a composite quantum
system.
excitations : integer
The maximum number of excitations that are to be included in the
state space.
Returns
-------
a_ops : list of qobj
A list of annihilation operators for each mode in the composite
quantum system described by dims.
"""
from qutip.states import enr_state_dictionaries
nstates, state2idx, idx2state = enr_state_dictionaries(dims, excitations)
    a_ops = [sp.lil_matrix((nstates, nstates), dtype=complex)
for _ in range(len(dims))]
for n1, state1 in idx2state.items():
for n2, state2 in idx2state.items():
for idx, a in enumerate(a_ops):
s1 = [s for idx2, s in enumerate(state1) if idx != idx2]
s2 = [s for idx2, s in enumerate(state2) if idx != idx2]
if (state1[idx] == state2[idx] - 1) and (s1 == s2):
a_ops[idx][n1, n2] = np.sqrt(state2[idx])
return [Qobj(a, dims=[dims, dims]) for a in a_ops]
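# Illustrative usage (added sketch, not in the original source): with two
# two-level modes and at most one excitation, the restricted space contains
# only the states (0, 0), (0, 1) and (1, 0), so each operator acts on 3 states.
def _example_enr_destroy_shape():
    a1, a2 = enr_destroy([2, 2], excitations=1)
    return a1.shape[0] == 3 and a1.dims == [[2, 2], [2, 2]]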
def enr_identity(dims, excitations):
"""
Generate the identity operator for the excitation-number restricted
    state space defined by the `dims` and `excitations` arguments. See the
docstring for enr_fock for a more detailed description of these arguments.
Parameters
----------
dims : list
A list of the dimensions of each subsystem of a composite quantum
system.
excitations : integer
The maximum number of excitations that are to be included in the
state space.
Returns
-------
op : Qobj
        A Qobj instance that represents the identity operator in the
        excitation-number-restricted state space defined by `dims` and
        `excitations`.
"""
from qutip.states import enr_state_dictionaries
nstates, _, _ = enr_state_dictionaries(dims, excitations)
    data = sp.eye(nstates, nstates, dtype=complex)
return Qobj(data, dims=[dims, dims])
def charge(Nmax, Nmin=None, frac = 1):
"""
Generate the diagonal charge operator over charge states
from Nmin to Nmax.
Parameters
----------
Nmax : int
Maximum charge state to consider.
Nmin : int (default = -Nmax)
Lowest charge state to consider.
frac : float (default = 1)
Specify fractional charge if needed.
Returns
-------
C : Qobj
Charge operator over [Nmin,Nmax].
Notes
-----
.. versionadded:: 3.2
"""
if Nmin is None:
Nmin = -Nmax
diag = np.arange(Nmin, Nmax+1, dtype=float)
if frac != 1:
diag *= frac
C = sp.diags(diag, 0, format='csr', dtype=complex)
return Qobj(C, isherm=True)
def tunneling(N, m=1):
"""
Tunneling operator with elements of the form
:math:`\\sum |N><N+m| + |N+m><N|`.
Parameters
----------
N : int
Number of basis states in Hilbert space.
m : int (default = 1)
Number of excitations in tunneling event.
Returns
-------
T : Qobj
Tunneling operator.
Notes
-----
.. versionadded:: 3.2
"""
diags = [np.ones(N-m,dtype=int),np.ones(N-m,dtype=int)]
T = sp.diags(diags,[m,-m],format='csr', dtype=complex)
return Qobj(T, isherm=True)
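# Illustrative usage (added sketch, not in the original source): charge(2) is
# the diagonal operator diag(-2, -1, 0, 1, 2) on five charge states, and
# tunneling(5, 1) couples neighbouring charge states |N><N+1| + |N+1><N|.
def _example_charge_and_tunneling():
    C = charge(2)
    T = tunneling(5, 1)
    return C.isherm and T.isherm and C.shape[0] == 5 and T.shape[0] == 5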
# Break circular dependencies by a trailing import.
# Note that we use a relative import here to deal with that
# qutip.tensor is the *function* tensor, not the module.
from qutip.tensor import tensor
|
[
"numpy.prod",
"scipy.sparse.lil_matrix",
"numpy.sqrt",
"numpy.ones",
"qutip.states.enr_state_dictionaries",
"qutip.dimensions.flatten",
"qutip.qobj.Qobj",
"scipy.sparse.eye",
"numpy.fix",
"numpy.conj",
"qutip.fastsparse.fast_csr_matrix",
"qutip.states.qutrit_basis",
"qutip.fastsparse.fast_identity",
"numpy.exp",
"numpy.array",
"numpy.sum",
"scipy.sparse.diags",
"numpy.arange"
] |
[((4281, 4288), 'qutip.qobj.Qobj', 'Qobj', (['A'], {}), '(A)\n', (4285, 4288), False, 'from qutip.qobj import Qobj\n'), ((4415, 4454), 'numpy.arange', 'np.arange', (['j', '(-j - 1)', '(-1)'], {'dtype': 'complex'}), '(j, -j - 1, -1, dtype=complex)\n', (4424, 4454), True, 'import numpy as np\n'), ((4540, 4571), 'numpy.arange', 'np.arange', (['(1)', 'N'], {'dtype': 'np.int32'}), '(1, N, dtype=np.int32)\n', (4549, 4571), True, 'import numpy as np\n'), ((4662, 4709), 'qutip.fastsparse.fast_csr_matrix', 'fast_csr_matrix', (['(data, ind, ptr)'], {'shape': '(N, N)'}), '((data, ind, ptr), shape=(N, N))\n', (4677, 4709), False, 'from qutip.fastsparse import fast_csr_matrix, fast_identity\n'), ((5293, 5340), 'qutip.fastsparse.fast_csr_matrix', 'fast_csr_matrix', (['(data, ind, ptr)'], {'shape': '(N, N)'}), '((data, ind, ptr), shape=(N, N))\n', (5308, 5340), False, 'from qutip.fastsparse import fast_csr_matrix, fast_identity\n'), ((9507, 9538), 'numpy.arange', 'np.arange', (['(1)', 'N'], {'dtype': 'np.int32'}), '(1, N, dtype=np.int32)\n', (9516, 9538), True, 'import numpy as np\n'), ((9548, 9580), 'numpy.arange', 'np.arange', (['(N + 1)'], {'dtype': 'np.int32'}), '(N + 1, dtype=np.int32)\n', (9557, 9580), True, 'import numpy as np\n'), ((11755, 11774), 'qutip.dimensions.flatten', 'flatten', (['dimensions'], {}), '(dimensions)\n', (11762, 11774), False, 'from qutip.dimensions import flatten\n'), ((19729, 19743), 'qutip.states.qutrit_basis', 'qutrit_basis', ([], {}), '()\n', (19741, 19743), False, 'from qutip.states import qutrit_basis\n'), ((19931, 19997), 'numpy.array', 'np.array', (['[sig11, sig22, sig33, sig12, sig23, sig31]'], {'dtype': 'object'}), '([sig11, sig22, sig33, sig12, sig23, sig31], dtype=object)\n', (19939, 19997), True, 'import numpy as np\n'), ((21223, 21287), 'scipy.sparse.diags', 'sp.diags', (['diagonals', 'offsets', 'shape'], {'format': '"""csr"""', 'dtype': 'complex'}), "(diagonals, offsets, shape, format='csr', dtype=complex)\n", (21231, 21287), True, 'import scipy.sparse as sp\n'), ((23557, 23598), 'qutip.states.enr_state_dictionaries', 'enr_state_dictionaries', (['dims', 'excitations'], {}), '(dims, excitations)\n', (23579, 23598), False, 'from qutip.states import enr_state_dictionaries\n'), ((25062, 25103), 'qutip.states.enr_state_dictionaries', 'enr_state_dictionaries', (['dims', 'excitations'], {}), '(dims, excitations)\n', (25084, 25103), False, 'from qutip.states import enr_state_dictionaries\n'), ((25115, 25157), 'scipy.sparse.eye', 'sp.eye', (['nstates', 'nstates'], {'dtype': 'np.complex'}), '(nstates, nstates, dtype=np.complex)\n', (25121, 25157), True, 'import scipy.sparse as sp\n'), ((25169, 25198), 'qutip.qobj.Qobj', 'Qobj', (['data'], {'dims': '[dims, dims]'}), '(data, dims=[dims, dims])\n', (25173, 25198), False, 'from qutip.qobj import Qobj\n'), ((25762, 25800), 'numpy.arange', 'np.arange', (['Nmin', '(Nmax + 1)'], {'dtype': 'float'}), '(Nmin, Nmax + 1, dtype=float)\n', (25771, 25800), True, 'import numpy as np\n'), ((25846, 25892), 'scipy.sparse.diags', 'sp.diags', (['diag', '(0)'], {'format': '"""csr"""', 'dtype': 'complex'}), "(diag, 0, format='csr', dtype=complex)\n", (25854, 25892), True, 'import scipy.sparse as sp\n'), ((25904, 25924), 'qutip.qobj.Qobj', 'Qobj', (['C'], {'isherm': '(True)'}), '(C, isherm=True)\n', (25908, 25924), False, 'from qutip.qobj import Qobj\n'), ((26405, 26458), 'scipy.sparse.diags', 'sp.diags', (['diags', '[m, -m]'], {'format': '"""csr"""', 'dtype': 'complex'}), "(diags, [m, -m], format='csr', dtype=complex)\n", (26413, 26458), 
True, 'import scipy.sparse as sp\n'), ((26467, 26487), 'qutip.qobj.Qobj', 'Qobj', (['T'], {'isherm': '(True)'}), '(T, isherm=True)\n', (26471, 26487), False, 'from qutip.qobj import Qobj\n'), ((4467, 4505), 'numpy.sqrt', 'np.sqrt', (['(j * (j + 1.0) - (m + 1.0) * m)'], {}), '(j * (j + 1.0) - (m + 1.0) * m)\n', (4474, 4505), True, 'import numpy as np\n'), ((4966, 4994), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'np.int32'}), '(N, dtype=np.int32)\n', (4975, 4994), True, 'import numpy as np\n'), ((5009, 5041), 'numpy.arange', 'np.arange', (['(N + 1)'], {'dtype': 'np.int32'}), '(N + 1, dtype=np.int32)\n', (5018, 5041), True, 'import numpy as np\n'), ((9451, 9499), 'numpy.arange', 'np.arange', (['(offset + 1)', '(N + offset)'], {'dtype': 'complex'}), '(offset + 1, N + offset, dtype=complex)\n', (9460, 9499), True, 'import numpy as np\n'), ((9613, 9660), 'qutip.fastsparse.fast_csr_matrix', 'fast_csr_matrix', (['(data, ind, ptr)'], {'shape': '(N, N)'}), '((data, ind, ptr), shape=(N, N))\n', (9628, 9660), False, 'from qutip.fastsparse import fast_csr_matrix, fast_identity\n'), ((11925, 11938), 'numpy.prod', 'np.prod', (['flat'], {}), '(flat)\n', (11932, 11938), True, 'import numpy as np\n'), ((12619, 12669), 'qutip.fastsparse.fast_csr_matrix', 'fast_csr_matrix', ([], {'shape': '(size, size)', 'dtype': 'complex'}), '(shape=(size, size), dtype=complex)\n', (12634, 12669), False, 'from qutip.fastsparse import fast_csr_matrix, fast_identity\n'), ((13900, 13919), 'qutip.fastsparse.fast_identity', 'fast_identity', (['size'], {}), '(size)\n', (13913, 13919), False, 'from qutip.fastsparse import fast_csr_matrix, fast_identity\n'), ((16158, 16188), 'numpy.arange', 'np.arange', (['(1)', 'N'], {'dtype': 'complex'}), '(1, N, dtype=complex)\n', (16167, 16188), True, 'import numpy as np\n'), ((16202, 16233), 'numpy.arange', 'np.arange', (['(1)', 'N'], {'dtype': 'np.int32'}), '(1, N, dtype=np.int32)\n', (16211, 16233), True, 'import numpy as np\n'), ((16341, 16385), 'numpy.arange', 'np.arange', (['offset', '(offset + N)'], {'dtype': 'complex'}), '(offset, offset + N, dtype=complex)\n', (16350, 16385), True, 'import numpy as np\n'), ((16400, 16428), 'numpy.arange', 'np.arange', (['N'], {'dtype': 'np.int32'}), '(N, dtype=np.int32)\n', (16409, 16428), True, 'import numpy as np\n'), ((16443, 16475), 'numpy.arange', 'np.arange', (['(N + 1)'], {'dtype': 'np.int32'}), '(N + 1, dtype=np.int32)\n', (16452, 16475), True, 'import numpy as np\n'), ((16510, 16557), 'qutip.fastsparse.fast_csr_matrix', 'fast_csr_matrix', (['(data, ind, ptr)'], {'shape': '(N, N)'}), '((data, ind, ptr), shape=(N, N))\n', (16525, 16557), False, 'from qutip.fastsparse import fast_csr_matrix, fast_identity\n'), ((22128, 22147), 'numpy.sum', 'np.sum', (['ops'], {'axis': '(0)'}), '(ops, axis=0)\n', (22134, 22147), True, 'import numpy as np\n'), ((23613, 23664), 'scipy.sparse.lil_matrix', 'sp.lil_matrix', (['(nstates, nstates)'], {'dtype': 'np.complex'}), '((nstates, nstates), dtype=np.complex)\n', (23626, 23664), True, 'import scipy.sparse as sp\n'), ((24125, 24151), 'qutip.qobj.Qobj', 'Qobj', (['a'], {'dims': '[dims, dims]'}), '(a, dims=[dims, dims])\n', (24129, 24151), False, 'from qutip.qobj import Qobj\n'), ((26350, 26375), 'numpy.ones', 'np.ones', (['(N - m)'], {'dtype': 'int'}), '(N - m, dtype=int)\n', (26357, 26375), True, 'import numpy as np\n'), ((26373, 26398), 'numpy.ones', 'np.ones', (['(N - m)'], {'dtype': 'int'}), '(N - m, dtype=int)\n', (26380, 26398), True, 'import numpy as np\n'), ((3735, 3748), 'numpy.fix', 'np.fix', (['(2 * 
j)'], {}), '(2 * j)\n', (3741, 3748), True, 'import numpy as np\n'), ((15005, 15017), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (15012, 15017), True, 'import numpy as np\n'), ((15491, 15503), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (15498, 15503), True, 'import numpy as np\n'), ((21908, 21920), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (21917, 21920), True, 'import numpy as np\n'), ((17526, 17536), 'numpy.conj', 'np.conj', (['z'], {}), '(z)\n', (17533, 17536), True, 'import numpy as np\n'), ((18099, 18109), 'numpy.conj', 'np.conj', (['z'], {}), '(z)\n', (18106, 18109), True, 'import numpy as np\n'), ((21857, 21869), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (21866, 21869), True, 'import numpy as np\n'), ((21987, 22008), 'numpy.exp', 'np.exp', (['(1.0j * n * kk)'], {}), '(1.0j * n * kk)\n', (21993, 22008), True, 'import numpy as np\n'), ((19131, 19145), 'numpy.conj', 'np.conj', (['alpha'], {}), '(alpha)\n', (19138, 19145), True, 'import numpy as np\n'), ((21960, 21971), 'numpy.sqrt', 'np.sqrt', (['kk'], {}), '(kk)\n', (21967, 21971), True, 'import numpy as np\n'), ((21974, 21984), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (21981, 21984), True, 'import numpy as np\n'), ((24091, 24111), 'numpy.sqrt', 'np.sqrt', (['state2[idx]'], {}), '(state2[idx])\n', (24098, 24111), True, 'import numpy as np\n')]
|
from abc import ABCMeta, abstractmethod
import random
import json
import pickle
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import load_model
nltk.download('punkt', quiet=True)
nltk.download('wordnet', quiet=True)
class IAssistant(metaclass=ABCMeta):
@abstractmethod
def train_model(self):
""" Implemented in child class """
@abstractmethod
def request_tag(self, message):
""" Implemented in child class """
@abstractmethod
def get_tag_by_id(self, id):
""" Implemented in child class """
@abstractmethod
def request_method(self, message):
""" Implemented in child class """
@abstractmethod
def request(self, message):
""" Implemented in child class """
class GenericAssistant(IAssistant):
def __init__(self, intents, intent_methods={}, model_name="assistant_model", *, json_encoding='utf-8'):
self.intents = intents
self.intent_methods = intent_methods
self.model_name = model_name
self.json_encoding = json_encoding
if intents.endswith(".json"):
self.load_json_intents(intents)
self.lemmatizer = WordNetLemmatizer()
def load_json_intents(self, intents):
with open(intents, encoding=self.json_encoding) as f:
self.intents = json.load(f)
def train_model(self):
self.words = []
self.classes = []
documents = []
ignore_letters = ['!', '?', ',', '.']
for intent in self.intents['intents']:
for pattern in intent['patterns']:
word = nltk.word_tokenize(pattern)
self.words.extend(word)
documents.append((word, intent['tag']))
if intent['tag'] not in self.classes:
self.classes.append(intent['tag'])
self.words = [self.lemmatizer.lemmatize(w.lower()) for w in self.words if w not in ignore_letters]
self.words = sorted(list(set(self.words)))
self.classes = sorted(list(set(self.classes)))
training = []
output_empty = [0] * len(self.classes)
for doc in documents:
bag = []
word_patterns = doc[0]
word_patterns = [self.lemmatizer.lemmatize(word.lower()) for word in word_patterns]
for word in self.words:
bag.append(1) if word in word_patterns else bag.append(0)
output_row = list(output_empty)
output_row[self.classes.index(doc[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
        training = np.array(training, dtype=object)
train_x = list(training[:, 0])
train_y = list(training[:, 1])
self.model = Sequential()
self.model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(64, activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(len(train_y[0]), activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
self.hist = self.model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
def save_model(self, model_name=None):
if model_name is None:
            self.model.save(f"{self.model_name}.h5")
with open(f'{self.model_name}_words.pkl', 'wb') as f:
pickle.dump(self.words, f)
with open(f'{self.model_name}_classes.pkl', 'wb') as f:
pickle.dump(self.classes, f)
else:
            self.model.save(f"{model_name}.h5")
with open(f'{model_name}_words.pkl', 'wb') as f:
pickle.dump(self.words, f)
with open(f'{model_name}_classes.pkl', 'wb') as f:
pickle.dump(self.classes, f)
def load_model(self, model_name=None):
if model_name is None:
with open(f'{self.model_name}_words.pkl', 'rb') as f:
self.words = pickle.load(f)
with open(f'{self.model_name}_classes.pkl', 'rb') as f:
self.classes = pickle.load(f)
self.model = load_model(f'{self.model_name}.h5')
else:
with open(f'{model_name}_words.pkl', 'rb') as f:
self.words = pickle.load(f)
with open(f'{model_name}_classes.pkl', 'rb') as f:
self.classes = pickle.load(f)
self.model = load_model(f'{model_name}.h5')
def _clean_up_sentence(self, sentence):
sentence_words = nltk.word_tokenize(sentence)
sentence_words = [self.lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
def _bag_of_words(self, sentence, words):
sentence_words = self._clean_up_sentence(sentence)
bag = [0] * len(words)
for s in sentence_words:
for i, word in enumerate(words):
if word == s:
bag[i] = 1
return np.array(bag)
def _predict_class(self, sentence):
p = self._bag_of_words(sentence, self.words)
res = self.model.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.1
results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({'intent': self.classes[r[0]], 'probability': str(r[1])})
return return_list
def _get_response(self, ints, intents_json):
try:
tag = ints[0]['intent']
list_of_intents = intents_json['intents']
for i in list_of_intents:
if i['tag'] == tag:
result = random.choice(i['responses'])
break
except IndexError:
result = "I don't understand!"
return result
def request_tag(self, message):
pass
def get_tag_by_id(self, id):
pass
def request_method(self, message):
pass
def request(self, message):
ints = self._predict_class(message)
if ints[0]['intent'] in self.intent_methods.keys():
self.intent_methods[ints[0]['intent']]()
else:
return self._get_response(ints, self.intents)
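# Hedged usage sketch (added for illustration; not part of the original file).
# "intents.json" and "demo_assistant" are hypothetical names; the file is
# assumed to follow the usual chatbot-intents format:
# {"intents": [{"tag": ..., "patterns": [...], "responses": [...]}, ...]}.
if __name__ == "__main__":
    assistant = GenericAssistant("intents.json", model_name="demo_assistant")
    assistant.train_model()
    assistant.save_model()
    print(assistant.request("Hello there!"))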
|
[
"random.choice",
"pickle.dump",
"random.shuffle",
"nltk.download",
"nltk.word_tokenize",
"tensorflow.keras.layers.Dropout",
"nltk.stem.WordNetLemmatizer",
"pickle.load",
"tensorflow.keras.optimizers.SGD",
"numpy.array",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"json.load",
"tensorflow.keras.models.Sequential"
] |
[((396, 430), 'nltk.download', 'nltk.download', (['"""punkt"""'], {'quiet': '(True)'}), "('punkt', quiet=True)\n", (409, 430), False, 'import nltk\n'), ((431, 467), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {'quiet': '(True)'}), "('wordnet', quiet=True)\n", (444, 467), False, 'import nltk\n'), ((1406, 1425), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (1423, 1425), False, 'from nltk.stem import WordNetLemmatizer\n'), ((2806, 2830), 'random.shuffle', 'random.shuffle', (['training'], {}), '(training)\n', (2820, 2830), False, 'import random\n'), ((2850, 2868), 'numpy.array', 'np.array', (['training'], {}), '(training)\n', (2858, 2868), True, 'import numpy as np\n'), ((2970, 2982), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2980, 2982), False, 'from tensorflow.keras.models import Sequential\n'), ((3280, 3334), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (3283, 3334), False, 'from tensorflow.keras.optimizers import SGD\n'), ((4902, 4930), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sentence'], {}), '(sentence)\n', (4920, 4930), False, 'import nltk\n'), ((5346, 5359), 'numpy.array', 'np.array', (['bag'], {}), '(bag)\n', (5354, 5359), True, 'import numpy as np\n'), ((1558, 1570), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1567, 1570), False, 'import json\n'), ((3092, 3104), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3099, 3104), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((3129, 3157), 'tensorflow.keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (3134, 3157), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((3182, 3194), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3189, 3194), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((3467, 3484), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (3475, 3484), True, 'import numpy as np\n'), ((3486, 3503), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (3494, 3503), True, 'import numpy as np\n'), ((4512, 4547), 'tensorflow.keras.models.load_model', 'load_model', (['f"""{self.model_name}.h5"""'], {}), "(f'{self.model_name}.h5')\n", (4522, 4547), False, 'from tensorflow.keras.models import load_model\n'), ((4801, 4831), 'tensorflow.keras.models.load_model', 'load_model', (['f"""{model_name}.h5"""'], {}), "(f'{model_name}.h5')\n", (4811, 4831), False, 'from tensorflow.keras.models import load_model\n'), ((1837, 1864), 'nltk.word_tokenize', 'nltk.word_tokenize', (['pattern'], {}), '(pattern)\n', (1855, 1864), False, 'import nltk\n'), ((3763, 3789), 'pickle.dump', 'pickle.dump', (['self.words', 'f'], {}), '(self.words, f)\n', (3774, 3789), False, 'import pickle\n'), ((3874, 3902), 'pickle.dump', 'pickle.dump', (['self.classes', 'f'], {}), '(self.classes, f)\n', (3885, 3902), False, 'import pickle\n'), ((4053, 4079), 'pickle.dump', 'pickle.dump', (['self.words', 'f'], {}), '(self.words, f)\n', (4064, 4079), False, 'import pickle\n'), ((4159, 4187), 'pickle.dump', 'pickle.dump', (['self.classes', 'f'], {}), '(self.classes, f)\n', (4170, 4187), False, 'import pickle\n'), ((4358, 4372), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4369, 4372), False, 'import pickle\n'), ((4472, 4486), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4483, 4486), 
False, 'import pickle\n'), ((4652, 4666), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4663, 4666), False, 'import pickle\n'), ((4761, 4775), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4772, 4775), False, 'import pickle\n'), ((5487, 5500), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (5495, 5500), True, 'import numpy as np\n'), ((6092, 6121), 'random.choice', 'random.choice', (["i['responses']"], {}), "(i['responses'])\n", (6105, 6121), False, 'import random\n')]
|
"""
@brief Generate Fe55 images and associated darks and bias images
according to section 5.4 of the E/O document (Dec 19, 2012 version).
@author <NAME> <<EMAIL>>
"""
import os
import numpy as np
from sim_inputs import *
from sim_tools import *
def generate_Fe55_images(exptimes, nxrays, outdir, sensorid, gain=gain,
bias_level=bias_level, sys_noise=sys_noise,
dark_current=dark_current):
nexp = len(exptimes)
for i, exptime, nxray in zip(list(range(nexp)), exptimes, nxrays):
#
# Bias images
#
outfile = "Fe55_bias_%s_%02i.fits" % (sensorid, i)
bias_file = os.path.join(outdir, outfile)
bias_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=0, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
bias_segs.append(seg)
bias_output = fitsFile(bias_segs)
bias_output[0].header['GAIN'] = gain
bias_output[0].header['BIASLVL'] = bias_level
bias_output[0].header['SYSNOISE'] = sys_noise
bias_output[0].header['RDNOISE'] = read_noise
bias_output.writeto(bias_file, overwrite=True)
#
# Dark images
#
outfile = "Fe55_dark_%s_%02i.fits" % (sensorid, i)
dark_file = os.path.join(outdir, outfile)
dark_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=exptime, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
seg.add_dark_current(level=dark_current) # dark current
dark_segs.append(seg)
dark_output = fitsFile(dark_segs)
dark_output[0].header['GAIN'] = gain
dark_output[0].header['BIASLVL'] = bias_level
dark_output[0].header['SYSNOISE'] = sys_noise
dark_output[0].header['RDNOISE'] = read_noise
dark_output[0].header['DARKCURR'] = dark_current
dark_output.writeto(dark_file, overwrite=True)
#
# Fe55 exposures
#
outfile = "Fe55_exp_%s_%02i.fits" % (sensorid, i)
Fe55_file = os.path.join(outdir, outfile)
fe55_segs = []
for hdu in range(nhdu):
seg = SegmentExposure(exptime=exptime, gain=gain)
seg.add_bias(level=bias_level, sigma=sys_noise) # electronics
seg.add_bias(level=0, sigma=read_noise) # read noise
seg.add_dark_current(level=dark_current) # dark current
seg.add_Fe55_hits(nxrays=nxray)
fe55_segs.append(seg)
fe55_output = fitsFile(fe55_segs)
fe55_output[0].header['GAIN'] = gain
fe55_output[0].header['BIASLVL'] = bias_level
fe55_output[0].header['SYSNOISE'] = sys_noise
fe55_output[0].header['RDNOISE'] = read_noise
fe55_output[0].header['DARKCURR'] = dark_current
fe55_output[0].header['FE55HITS'] = nxray
fe55_output.writeto(Fe55_file, overwrite=True)
if __name__ == '__main__':
nexp = 10
exptimes = np.linspace(1, 5, nexp)
nxrays = [int(x*1000) for x in exptimes]
generate_Fe55_images(exptimes, nxrays, '.', 'xxx-xx')
|
[
"numpy.linspace",
"os.path.join"
] |
[((3210, 3233), 'numpy.linspace', 'np.linspace', (['(1)', '(5)', 'nexp'], {}), '(1, 5, nexp)\n', (3221, 3233), True, 'import numpy as np\n'), ((659, 688), 'os.path.join', 'os.path.join', (['outdir', 'outfile'], {}), '(outdir, outfile)\n', (671, 688), False, 'import os\n'), ((1406, 1435), 'os.path.join', 'os.path.join', (['outdir', 'outfile'], {}), '(outdir, outfile)\n', (1418, 1435), False, 'import os\n'), ((2293, 2322), 'os.path.join', 'os.path.join', (['outdir', 'outfile'], {}), '(outdir, outfile)\n', (2305, 2322), False, 'import os\n')]
|
import scipy.stats
import numpy as np
def f_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F-value and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
f_value : float
Sx^2 / Sy^2 as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
p_value : float
        Let F be the F-distribution with nx-1, ny-1 degrees of freedom (as used in the computation below). 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise. More extreme F = Sx^2 / Sy^2 values for alternative ox^2 > oy^2 are to the right. More extreme F values for ox^2 < oy^2 are to the left.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = np.var(sample_x, ddof=1)
sample_var_y = np.var(sample_y, ddof=1)
f_value = sample_var_x/sample_var_y
nx = len(sample_x)
ny = len(sample_y)
# compute P(F < f_value) with nx-1, ny-1 df
cdf = scipy.stats.f.cdf(f_value, nx-1, ny-1)
# More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.
# More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.
p_value = 1 - cdf if larger_varx_alt else cdf
return f_value, p_value
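# Hedged usage sketch (added for illustration; not part of the original file):
# draw two normal samples where x has the larger spread and test the
# alternative hypothesis ox^2 > oy^2.
def _example_f_test(seed=0):
    rng = np.random.default_rng(seed)
    x = rng.normal(0, 2, size=50)
    y = rng.normal(0, 1, size=60)
    f_value, p_value = f_test(x, y, larger_varx_alt=True)
    return f_value, p_value  # p_value should be small for this alternative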
def f1_test(sample_x, sample_y, larger_varx_alt):
"""
Computes the F1-value as defined in 'Fixing the F Test for Equal Variances' and corresponding p-value for a pair of samples and alternative hypothesis.
Parameters
----------
sample_x : list
A random sample x1,...,xnx. Let its (underlying) variance be ox^2 and its sample variance Sx^2.
sample_y : list
A random sample y1,...,yny. Let its (underlying) variance be oy^2 and its sample variance Sy^2.
larger_varx_alt : bool
True if alternative hypothesis is ox^2 > oy^2. False if ox^2 < oy^2.
Returns
-------
p_value : float
Let F be the F-distribution with rx, ry df as specified in equation (1) of 'Fixing the F Test for Equal Variances'. 1 - P(F < f_value) if larger_varx_alt = True, P(F < f_value) otherwise.
"""
# calculate unbiased sample variances (n-1 in the denominator)
sample_var_x = np.var(sample_x, ddof=1)
sample_var_y = np.var(sample_y, ddof=1)
f_value = sample_var_x/sample_var_y
nx = len(sample_x)
ny = len(sample_y)
xmean = np.mean(sample_x)
ymean = np.mean(sample_y)
    # compute moment, variance below equation (1) of Shoemaker paper
fourth_moment = (np.sum((sample_x - xmean)**4) +
np.sum((sample_y - ymean)**4))/(nx + ny)
pooled_var = ((nx-1)*sample_var_x + (ny-1)*sample_var_y)/(nx + ny)
# see equation (1) of Shoemaker paper
rx = 2*nx / ((fourth_moment/pooled_var**2) - ((nx - 3)/(nx - 1)))
ry = 2*ny / ((fourth_moment/pooled_var**2) - ((ny - 3)/(ny - 1)))
# compute P(F < f_value) with rx-1, ry-1 df
cdf = scipy.stats.f.cdf(f_value, rx-1, ry-1)
# More extreme f_value = Sx^2 / Sy^2 values for alternative ox^2 > oy^2. ox^2 being even bigger would be represented by larger quotient Sx^2 / Sy^2.
# More extreme f_value for ox^2 < oy^2 are to the left. ox^2 being even smaller would be represented by smaller quotient.
p_value = 1 - cdf if larger_varx_alt else cdf
return p_value
def count_five(sample_x, sample_y, center):
"""
Computes the extreme counts for samples x and y as defined in 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
Parameters
----------
sample_x : list
A random sample x1,...,xn.
sample_y : list
A random sample y1,...,ym.
center : str
Whether to use 'mean' or 'median' for centering.
Returns
-------
extreme_count_x : int
C_x computed with centering mu being sample mean if center = 'mean' and sample median if center = 'median' as defined in equation (1) of 'A Quick, Compact, Two-Sample Dispersion Test: Count Five'.
extreme_count_y : int
C_y defined analogously to C_x above.
Raises
------
ValueError
If center is neither 'mean' or 'median'.
"""
if center not in {'mean', 'median'}:
raise ValueError('Invalid center %s' % (center))
if center == 'mean':
centering_x = np.mean(sample_x)
centering_y = np.mean(sample_y)
else:
centering_x = np.median(sample_x)
centering_y = np.median(sample_y)
# compute absolute deviations from centering for x, y samples
abs_dev_x = np.abs(np.array(sample_x) - centering_x)
abs_dev_y = np.abs(np.array(sample_y) - centering_y)
# count number of X deviations greater than max Y deviation and vice versa
# see equation (1) of Count Five paper
extreme_count_x = np.sum(np.where(abs_dev_x > np.max(abs_dev_y), 1, 0))
extreme_count_y = np.sum(np.where(abs_dev_y > np.max(abs_dev_x), 1, 0))
return extreme_count_x, extreme_count_y
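# Hedged usage sketch (added for illustration; not part of the original file):
# per the Count Five paper cited above, unequal spread is flagged when an
# extreme count reaches five; here x is generated with the larger variance.
def _example_count_five(seed=0):
    rng = np.random.default_rng(seed)
    x = rng.normal(0, 3, size=40)
    y = rng.normal(0, 1, size=40)
    c_x, c_y = count_five(x, y, center='median')
    return c_x >= 5 or c_y >= 5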
|
[
"numpy.mean",
"numpy.median",
"numpy.max",
"numpy.array",
"numpy.sum",
"numpy.var"
] |
[((1099, 1123), 'numpy.var', 'np.var', (['sample_x'], {'ddof': '(1)'}), '(sample_x, ddof=1)\n', (1105, 1123), True, 'import numpy as np\n'), ((1143, 1167), 'numpy.var', 'np.var', (['sample_y'], {'ddof': '(1)'}), '(sample_y, ddof=1)\n', (1149, 1167), True, 'import numpy as np\n'), ((2649, 2673), 'numpy.var', 'np.var', (['sample_x'], {'ddof': '(1)'}), '(sample_x, ddof=1)\n', (2655, 2673), True, 'import numpy as np\n'), ((2693, 2717), 'numpy.var', 'np.var', (['sample_y'], {'ddof': '(1)'}), '(sample_y, ddof=1)\n', (2699, 2717), True, 'import numpy as np\n'), ((2816, 2833), 'numpy.mean', 'np.mean', (['sample_x'], {}), '(sample_x)\n', (2823, 2833), True, 'import numpy as np\n'), ((2846, 2863), 'numpy.mean', 'np.mean', (['sample_y'], {}), '(sample_y)\n', (2853, 2863), True, 'import numpy as np\n'), ((4713, 4730), 'numpy.mean', 'np.mean', (['sample_x'], {}), '(sample_x)\n', (4720, 4730), True, 'import numpy as np\n'), ((4753, 4770), 'numpy.mean', 'np.mean', (['sample_y'], {}), '(sample_y)\n', (4760, 4770), True, 'import numpy as np\n'), ((4803, 4822), 'numpy.median', 'np.median', (['sample_x'], {}), '(sample_x)\n', (4812, 4822), True, 'import numpy as np\n'), ((4845, 4864), 'numpy.median', 'np.median', (['sample_y'], {}), '(sample_y)\n', (4854, 4864), True, 'import numpy as np\n'), ((2954, 2985), 'numpy.sum', 'np.sum', (['((sample_x - xmean) ** 4)'], {}), '((sample_x - xmean) ** 4)\n', (2960, 2985), True, 'import numpy as np\n'), ((3007, 3038), 'numpy.sum', 'np.sum', (['((sample_y - ymean) ** 4)'], {}), '((sample_y - ymean) ** 4)\n', (3013, 3038), True, 'import numpy as np\n'), ((4955, 4973), 'numpy.array', 'np.array', (['sample_x'], {}), '(sample_x)\n', (4963, 4973), True, 'import numpy as np\n'), ((5012, 5030), 'numpy.array', 'np.array', (['sample_y'], {}), '(sample_y)\n', (5020, 5030), True, 'import numpy as np\n'), ((5219, 5236), 'numpy.max', 'np.max', (['abs_dev_y'], {}), '(abs_dev_y)\n', (5225, 5236), True, 'import numpy as np\n'), ((5295, 5312), 'numpy.max', 'np.max', (['abs_dev_x'], {}), '(abs_dev_x)\n', (5301, 5312), True, 'import numpy as np\n')]
|
import numpy as np
from kivygames.games import Game
import kivygames.games.noughtsandcrosses.c as c
class CellOccupiedError(Exception):
pass
class NoughtsAndCrosses(Game):
minPlayers = 2
maxPlayers = 2
hasAI = True
gridShape = (3, 3)
def __init__(self):
Game.__init__(self)
self.grid = np.zeros(self.gridShape, dtype="u1")
self.player = 1
def isEmpty(self, position):
return self.grid[position] == 0
async def turn(self):
await self.sendOutput("Player", self.player)
while True:
position = await self.getInput("Position", tuple, self.player)
if self.isEmpty(position):
break
await self.sendOutput("Error", "That space is already full.")
await self.sendOutput("Error", "")
self.grid[position] = self.player
await self.sendOutput("Grid", self.grid)
if c.hasPlayerWon(self.grid, self.player):
await self.sendOutput("End", f"Player {self.player} wins.")
return True
if np.count_nonzero(self.grid) == 9:
await self.sendOutput("End", f"It's a draw!")
return True
self.player = 3 - self.player
return False
def getAIInput(self, name):
if name == "Position":
return c.minimax(self.player, self.player, True, self.grid)[1]
async def game(self):
while True:
ended = await self.turn()
if ended:
break
await self.end()
|
[
"numpy.count_nonzero",
"numpy.zeros",
"kivygames.games.noughtsandcrosses.c.hasPlayerWon",
"kivygames.games.noughtsandcrosses.c.minimax",
"kivygames.games.Game.__init__"
] |
[((294, 313), 'kivygames.games.Game.__init__', 'Game.__init__', (['self'], {}), '(self)\n', (307, 313), False, 'from kivygames.games import Game\n'), ((335, 371), 'numpy.zeros', 'np.zeros', (['self.gridShape'], {'dtype': '"""u1"""'}), "(self.gridShape, dtype='u1')\n", (343, 371), True, 'import numpy as np\n'), ((926, 964), 'kivygames.games.noughtsandcrosses.c.hasPlayerWon', 'c.hasPlayerWon', (['self.grid', 'self.player'], {}), '(self.grid, self.player)\n', (940, 964), True, 'import kivygames.games.noughtsandcrosses.c as c\n'), ((1073, 1100), 'numpy.count_nonzero', 'np.count_nonzero', (['self.grid'], {}), '(self.grid)\n', (1089, 1100), True, 'import numpy as np\n'), ((1332, 1384), 'kivygames.games.noughtsandcrosses.c.minimax', 'c.minimax', (['self.player', 'self.player', '(True)', 'self.grid'], {}), '(self.player, self.player, True, self.grid)\n', (1341, 1384), True, 'import kivygames.games.noughtsandcrosses.c as c\n')]
|
"""
:mod:`meshes` -- Discretization
===============================
Everything related to meshes appropriate for the multigrid solver.
"""
# Copyright 2018-2020 The emg3d Developers.
#
# This file is part of emg3d.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy as np
from copy import deepcopy
from scipy import optimize
__all__ = ['TensorMesh', 'get_hx_h0', 'get_cell_numbers', 'get_stretched_h',
'get_domain', 'get_hx']
class TensorMesh:
"""Rudimentary mesh for multigrid calculation.
The tensor-mesh :class:`discretize.TensorMesh` is a powerful tool,
including sophisticated mesh-generation possibilities in 1D, 2D, and 3D,
plotting routines, and much more. However, in the multigrid solver we have
to generate a mesh at each level, many times over and over again, and we
only need a very limited set of attributes. This tensor-mesh class provides
all required attributes. All attributes here are the same as their
counterparts in :class:`discretize.TensorMesh` (both in name and value).
.. warning::
This is a slimmed-down version of :class:`discretize.TensorMesh`, meant
principally for internal use by the multigrid modeller. It is highly
recommended to use :class:`discretize.TensorMesh` to create the input
meshes instead of this class. There are no input-checks carried out
here, and there is only one accepted input format for `h` and `x0`.
Parameters
----------
h : list of three ndarrays
Cell widths in [x, y, z] directions.
x0 : ndarray of dimension (3, )
Origin (x, y, z).
"""
def __init__(self, h, x0):
"""Initialize the mesh."""
self.x0 = x0
# Width of cells.
self.hx = h[0]
self.hy = h[1]
self.hz = h[2]
# Cell related properties.
self.nCx = int(self.hx.size)
self.nCy = int(self.hy.size)
self.nCz = int(self.hz.size)
self.vnC = np.array([self.hx.size, self.hy.size, self.hz.size])
self.nC = int(self.vnC.prod())
self.vectorCCx = np.r_[0, self.hx[:-1].cumsum()]+self.hx*0.5+self.x0[0]
self.vectorCCy = np.r_[0, self.hy[:-1].cumsum()]+self.hy*0.5+self.x0[1]
self.vectorCCz = np.r_[0, self.hz[:-1].cumsum()]+self.hz*0.5+self.x0[2]
# Node related properties.
self.nNx = self.nCx + 1
self.nNy = self.nCy + 1
self.nNz = self.nCz + 1
self.vnN = np.array([self.nNx, self.nNy, self.nNz], dtype=int)
self.nN = int(self.vnN.prod())
self.vectorNx = np.r_[0., self.hx.cumsum()] + self.x0[0]
self.vectorNy = np.r_[0., self.hy.cumsum()] + self.x0[1]
self.vectorNz = np.r_[0., self.hz.cumsum()] + self.x0[2]
# Edge related properties.
self.vnEx = np.array([self.nCx, self.nNy, self.nNz], dtype=int)
self.vnEy = np.array([self.nNx, self.nCy, self.nNz], dtype=int)
self.vnEz = np.array([self.nNx, self.nNy, self.nCz], dtype=int)
self.nEx = int(self.vnEx.prod())
self.nEy = int(self.vnEy.prod())
self.nEz = int(self.vnEz.prod())
self.vnE = np.array([self.nEx, self.nEy, self.nEz], dtype=int)
self.nE = int(self.vnE.sum())
def __repr__(self):
"""Simple representation."""
return (f"TensorMesh: {self.nCx} x {self.nCy} x {self.nCz} "
f"({self.nC:,})")
def copy(self):
"""Return a copy of the TensorMesh."""
return TensorMesh.from_dict(self.to_dict(True))
def to_dict(self, copy=False):
"""Store the necessary information of the TensorMesh in a dict."""
out = {'hx': self.hx, 'hy': self.hy, 'hz': self.hz, 'x0': self.x0,
'__class__': self.__class__.__name__}
if copy:
return deepcopy(out)
else:
return out
@classmethod
def from_dict(cls, inp):
"""Convert dictionary into :class:`TensorMesh` instance.
Parameters
----------
inp : dict
Dictionary as obtained from :func:`TensorMesh.to_dict`.
The dictionary needs the keys `hx`, `hy`, `hz`, and `x0`.
Returns
-------
obj : :class:`TensorMesh` instance
"""
try:
return cls(h=[inp['hx'], inp['hy'], inp['hz']], x0=inp['x0'])
except KeyError as e:
print(f"* ERROR :: Variable {e} missing in `inp`.")
raise
@property
def vol(self):
"""Construct cell volumes of the 3D model as 1D array."""
if getattr(self, '_vol', None) is None:
self._vol = (self.hx[None, None, :]*self.hy[None, :, None] *
self.hz[:, None, None]).ravel()
return self._vol
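# A small usage sketch of the class above (the numbers are illustrative only):
#
#   hx = np.ones(4)*100.                     # four 100 m cells per direction
#   mesh = TensorMesh([hx, hx, hx], x0=np.array([-200., -200., -200.]))
#   print(mesh)                              # TensorMesh: 4 x 4 x 4 (64)
#   mesh.vol.sum()                           # 6.4e7 m^3, i.e. (400 m)**3
#   mesh2 = TensorMesh.from_dict(mesh.to_dict())  # round-trip via a plain dict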
def get_hx_h0(freq, res, domain, fixed=0., possible_nx=None, min_width=None,
pps=3, alpha=None, max_domain=100000., raise_error=True, verb=1,
return_info=False):
r"""Return cell widths and origin for given parameters.
Returns cell widths for the provided frequency, resistivity, domain extent,
and other parameters using a flexible amount of cells. See input parameters
for more details. A maximum of three hard/fixed boundaries can be provided
(one of which is the grid center).
The minimum cell width is calculated through :math:`\delta/\rm{pps}`, where
the skin depth is given by :math:`\delta = 503.3 \sqrt{\rho/f}`, and the
parameter `pps` stands for 'points-per-skindepth'. The minimum cell width
can be restricted with the parameter `min_width`.
The actual calculation domain adds a buffer zone around the (survey)
domain. The thickness of the buffer is six times the skin depth. The field
is basically zero after two wavelengths. A wavelength is
:math:`2\pi\delta`, hence roughly 6 times the skin depth. Taking a factor 6
gives therefore almost two wavelengths, as the field travels to the
boundary and back. The actual buffer thickness can be steered with the
`res` parameter.
One has to take into account that the air is very resistive, which has to
be considered not just in the vertical direction, but also in the
horizontal directions, as the airwave will bounce back from the sides
otherwise. In the marine case this issue reduces with increasing water
depth.
See Also
--------
get_stretched_h : Get `hx` for a fixed number `nx` and within a fixed
domain.
Parameters
----------
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
res : float or list
Resistivity (Ohm m) to calculate the skin depth. The skin depth is
used to calculate the minimum cell width and the boundary thicknesses.
Up to three resistivities can be provided:
- float: Same resistivity for everything;
- [min_width, boundaries];
- [min_width, left boundary, right boundary].
domain : list
Contains the survey-domain limits [min, max]. The actual calculation
domain consists of this domain plus a buffer zone around it, which
depends on frequency and resistivity.
fixed : list, optional
Fixed boundaries, one, two, or maximum three values. The grid is
centered around the first value. Hence it is the center location with
the smallest cell. Two more fixed boundaries can be added, at most one
on each side of the first one.
Default is 0.
possible_nx : list, optional
List of possible numbers of cells. See :func:`get_cell_numbers`.
Default is ``get_cell_numbers(500, 5, 3)``, which corresponds to
[16, 24, 32, 40, 48, 64, 80, 96, 128, 160, 192, 256, 320, 384].
min_width : float, list or None, optional
Minimum cell width restriction:
- None : No restriction;
- float : Fixed to this value, ignoring skin depth and `pps`.
- list [min, max] : Lower and upper bounds.
Default is None.
pps : int, optional
Points per skindepth; minimum cell width is calculated via
`dmin = skindepth/pps`.
Default = 3.
alpha : list, optional
Maximum alpha and step size to find a good alpha. The first value is
the maximum alpha of the survey domain, the second value is the maximum
alpha for the buffer zone, and the third value is the step size.
Default = [1, 1.5, .01], hence no stretching within the survey domain
and a maximum stretching of 1.5 in the buffer zone; step size is 0.01.
max_domain : float, optional
Maximum calculation domain from fixed[0] (usually source position).
Default is 100,000.
raise_error : bool, optional
If True, an error is raised if no suitable grid is found. Otherwise it
just prints a message and returns None's.
Default is True.
verb : int, optional
Verbosity, 0 or 1.
Default = 1.
return_info : bool
If True, a dictionary is returned with some grid info (min and max
cell width and alpha).
Returns
-------
hx : ndarray
Cell widths of mesh.
x0 : float
Origin of the mesh.
info : dict
Dictionary with mesh info; only if ``return_info=True``.
Keys:
- `dmin`: Minimum cell width;
- `dmax`: Maximum cell width;
- `amin`: Minimum alpha;
- `amax`: Maximum alpha.
"""
# Get variables with default lists:
if alpha is None:
alpha = [1, 1.5, 0.01]
if possible_nx is None:
possible_nx = get_cell_numbers(500, 5, 3)
# Cast resistivity value(s).
res = np.array(res, ndmin=1)
if res.size == 1:
res_arr = np.array([res[0], res[0], res[0]])
elif res.size == 2:
res_arr = np.array([res[0], res[1], res[1]])
else:
res_arr = np.array([res[0], res[1], res[2]])
# Cast and check fixed.
fixed = np.array(fixed, ndmin=1)
if fixed.size > 2:
# Check length.
if fixed.size > 3:
print("\n* ERROR :: Maximum three fixed boundaries permitted.\n"
f" Provided: {fixed.size}.")
raise ValueError("Wrong input for fixed")
# Sort second and third, so it doesn't matter how it was provided.
fixed = np.array([fixed[0], max(fixed[1:]), min(fixed[1:])])
# Check side.
if np.sign(np.diff(fixed[:2])) == np.sign(np.diff(fixed[::2])):
print("\n* ERROR :: 2nd and 3rd fixed boundaries have to be "
"left and right of the first one.\n "
f"Provided: [{fixed[0]}, {fixed[1]}, {fixed[2]}]")
raise ValueError("Wrong input for fixed")
# Calculate skin depth.
skind = 503.3*np.sqrt(res_arr/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Minimum cell width.
dmin = skind[0]/pps
if min_width is not None: # Respect user input.
min_width = np.array(min_width, ndmin=1)
if min_width.size == 1:
dmin = min_width
else:
dmin = np.clip(dmin, *min_width)
# Survey domain; contains all sources and receivers.
domain = np.array(domain, dtype=float)
# Calculation domain; big enough to avoid boundary effects.
# To avoid boundary effects we want the signal to travel two wavelengths
# from the source to the boundary and back to the receiver.
# => 2*pi*sd ~ 6.3*sd = one wavelength => signal is ~ 0.2 %.
# Two wavelengths we can safely assume it is zero.
#
# The air does not follow the concept of skin depth, as it is a wave rather
# than diffusion. For this is the factor `max_domain`, which restricts
# the domain in each direction to this value from the center.
# (a) Source to edges of domain.
dist_in_domain = abs(domain - fixed[0])
# (b) Two wavelengths.
two_lambda = skind[1:]*4*np.pi
# (c) Required buffer, additional to domain.
dist_buff = np.max([np.zeros(2), (two_lambda - dist_in_domain)/2], axis=0)
# (d) Add buffer to domain.
calc_domain = np.array([domain[0]-dist_buff[0], domain[1]+dist_buff[1]])
# (e) Restrict total domain to max_domain.
calc_domain[0] = max(calc_domain[0], fixed[0]-max_domain)
calc_domain[1] = min(calc_domain[1], fixed[0]+max_domain)
# Initiate flag if terminated.
finished = False
# Initiate alpha variables for survey and calculation domains.
sa, ca = 1.0, 1.0
# Loop over possible cell numbers from small to big.
for nx in np.unique(possible_nx):
# Loop over possible alphas for domain.
for sa in np.arange(1.0, alpha[0]+alpha[2]/2, alpha[2]):
# Get current stretched grid cell sizes.
thxl = dmin*sa**np.arange(nx) # Left of origin.
thxr = dmin*sa**np.arange(nx) # Right of origin.
# 0. Adjust stretching for fixed boundaries.
if fixed.size > 1: # Move mesh to first fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]+np.cumsum(thxr)]
ii = np.argmin(abs(t_nx-fixed[1]))
thxr *= abs(fixed[1]-fixed[0])/np.sum(thxr[:ii])
if fixed.size > 2: # Move mesh to second fixed boundary.
t_nx = np.r_[fixed[0], fixed[0]-np.cumsum(thxl)]
ii = np.argmin(abs(t_nx-fixed[2]))
thxl *= abs(fixed[2]-fixed[0])/np.sum(thxl[:ii])
# 1. Fill from center to left domain.
nl = np.sum((fixed[0]-np.cumsum(thxl)) > domain[0])+1
# 2. Fill from center to right domain.
nr = np.sum((fixed[0]+np.cumsum(thxr)) < domain[1])+1
# 3. Get remaining number of cells and check termination criteria.
nsdc = nl+nr # Number of domain cells.
nx_remain = nx-nsdc
# Not good, try next.
if nx_remain <= 0:
continue
# Create the current hx-array.
hx = np.r_[thxl[:nl][::-1], thxr[:nr]]
hxo = np.r_[thxl[:nl][::-1], thxr[:nr]]
# Get actual domain:
asurv_domain = [fixed[0]-np.sum(thxl[:nl]),
fixed[0]+np.sum(thxr[:nr])]
x0 = float(fixed[0]-np.sum(thxl[:nl]))
# Get actual stretching (differs in case of fixed layers).
sa_adj = np.max([hx[1:]/hx[:-1], hx[:-1]/hx[1:]])
# Loop over possible alphas for calc_domain.
for ca in np.arange(sa, alpha[1]+alpha[2]/2, alpha[2]):
# 4. Fill to left calc_domain.
thxl = hx[0]*ca**np.arange(1, nx_remain+1)
nl = np.sum((asurv_domain[0]-np.cumsum(thxl)) >
calc_domain[0])+1
# 5. Fill to right calc_domain.
thxr = hx[-1]*ca**np.arange(1, nx_remain+1)
nr = np.sum((asurv_domain[1]+np.cumsum(thxr)) <
calc_domain[1])+1
# 6. Get remaining number of cells and check termination
# criteria.
ncdc = nl+nr # Number of calc_domain cells.
nx_remain2 = nx-nsdc-ncdc
if nx_remain2 < 0: # Not good, try next.
continue
# Create hx-array.
nl += int(np.floor(nx_remain2/2)) # If uneven, add one cell
nr += int(np.ceil(nx_remain2/2)) # more on the right.
hx = np.r_[thxl[:nl][::-1], hx, thxr[:nr]]
# Calculate origin.
x0 = float(asurv_domain[0]-np.sum(thxl[:nl]))
# Mark it as finished and break out of the loop.
finished = True
break
if finished:
break
if finished:
break
# Check finished and print info about found grid.
if not finished:
# Throw message if no solution was found.
print("\n* ERROR :: No suitable grid found; relax your criteria.\n")
if raise_error:
raise ArithmeticError("No grid found!")
else:
hx, x0 = None, None
elif verb > 0:
print(f" Skin depth ", end="")
if res.size == 1:
print(f" [m] : {skind[0]:.0f}")
elif res.size == 2:
print(f"(m/l-r) [m] : {skind[0]:.0f} / {skind[1]:.0f}")
else:
print(f"(m/l/r) [m] : {skind[0]:.0f} / {skind[1]:.0f} / "
f"{skind[2]:.0f}")
print(f" Survey domain [m] : {domain[0]:.0f} - "
f"{domain[1]:.0f}")
print(f" Calculation domain [m] : {calc_domain[0]:.0f} - "
f"{calc_domain[1]:.0f}")
print(f" Final extent [m] : {x0:.0f} - "
f"{x0+np.sum(hx):.0f}")
extstr = f" Min/max cell width [m] : {min(hx):.0f} / "
alstr = f" Alpha survey"
nrstr = " Number of cells "
if not np.isclose(sa, sa_adj):
sastr = f"{sa:.3f} ({sa_adj:.3f})"
else:
sastr = f"{sa:.3f}"
print(extstr+f"{max(hxo):.0f} / {max(hx):.0f}")
print(alstr+f"/calc : {sastr} / {ca:.3f}")
print(nrstr+f"(s/c/r) : {nx} ({nsdc}/{ncdc}/{nx_remain2})")
print()
if return_info:
if not fixed.size > 1:
sa_adj = sa
info = {'dmin': dmin,
'dmax': np.nanmax(hx),
'amin': np.nanmin([ca, sa, sa_adj]),
'amax': np.nanmax([ca, sa, sa_adj])}
return hx, x0, info
else:
return hx, x0
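# A hedged usage sketch for get_hx_h0 (the parameter values are illustrative;
# the number of cells actually returned depends on them):
#
#   hx, x0, info = get_hx_h0(freq=1.0, res=[0.3, 1.0], domain=[-1000., 1000.],
#                            verb=0, return_info=True)
#   mesh = TensorMesh([hx, np.array([2000.]), np.array([2000.])],
#                     x0=np.array([x0, -1000., -1000.]))
#   info['dmin'], info['amax']  # minimum cell width and largest stretching used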
def get_cell_numbers(max_nr, max_prime=5, min_div=3):
r"""Returns 'good' cell numbers for the multigrid method.
'Good' cell numbers are numbers which can be divided by 2 as many times as
possible. At the end there will be a low prime number.
The function adds all numbers :math:`p 2^n \leq M` for :math:`p={2, 3, ...,
p_\text{max}}` and :math:`n={n_\text{min}, n_\text{min}+1, ..., \infty}`;
:math:`M, p_\text{max}, n_\text{min}` correspond to `max_nr`, `max_prime`,
and `min_div`, respectively.
Parameters
----------
max_nr : int
Maximum number of cells.
max_prime : int
        Highest permitted prime number p for p*2^n. {2, 3, 5, 7} are good upper
        limits; they keep the coarsest grid in the multigrid method from
        becoming too large.
Default is 5.
min_div : int
Minimum times the number can be divided by two.
Default is 3.
Returns
-------
numbers : array
Array containing all possible cell numbers from lowest to highest.
"""
# Primes till 20.
primes = np.array([2, 3, 5, 7, 11, 13, 17, 19])
# Sanity check; 19 is already ridiculously high.
if max_prime > primes[-1]:
print(f"* ERROR :: Highest prime is {max_prime}, "
"please use a value < 20.")
raise ValueError("Highest prime too high")
# Restrict to max_prime.
primes = primes[primes <= max_prime]
# Get possible values.
# Currently restricted to prime*2**30 (for prime=2 => 1,073,741,824 cells).
numbers = primes[:, None]*2**np.arange(min_div, 30)
# Get unique values.
numbers = np.unique(numbers)
# Restrict to max_nr and return.
return numbers[numbers <= max_nr]
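# For instance, the default `possible_nx` used by get_hx_h0 above:
#
#   get_cell_numbers(500, 5, 3)
#   # -> array([ 16,  24,  32,  40,  48,  64,  80,  96, 128, 160, 192, 256,
#   #           320, 384])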
def get_stretched_h(min_width, domain, nx, x0=0, x1=None, resp_domain=False):
"""Return cell widths for a stretched grid within the domain.
Returns `nx` cell widths within `domain`, where the minimum cell width is
`min_width`. The cells are not stretched within `x0` and `x1`, and outside
uses a power-law stretching. The actual stretching factor and the number of
    cells left and right of `x0` and `x1` are found in a minimization process.
The domain is not completely respected. The starting point of the domain
is, but the endpoint of the domain might slightly shift (this is more
likely the case for small `nx`, for big `nx` the shift should be small).
The new endpoint can be obtained with ``domain[0]+np.sum(hx)``. If you want
the domain to be respected absolutely, set ``resp_domain=True``. However,
be aware that this will introduce one stretch-factor which is different
from the other stretch factors, to accommodate the restriction. This
one-off factor is between the left- and right-side of `x0`, or, if `x1` is
provided, just after `x1`.
See Also
--------
    get_hx_h0 : Get `hx` and `x0` for a flexible number of `nx` with
given bounds.
Parameters
----------
min_width : float
Minimum cell width. If x1 is provided, the actual minimum cell width
might be smaller than min_width.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
Default is 0.
x1 : float
If provided, then no stretching is applied between `x0` and `x1`. The
non-stretched part starts at `x0` and stops at the first possible
location at or after `x1`. `x1` is restricted to `domain`. This will
        adjust `min_width` so that an integer number of cells fits within `x0`
        and `x1`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
# Cast to arrays
domain = np.array(domain, dtype=float)
x0 = np.array(x0, dtype=float)
x0 = np.clip(x0, *domain) # Restrict to model domain
min_width = np.array(min_width, dtype=float)
if x1 is not None:
x1 = np.array(x1, dtype=float)
x1 = np.clip(x1, *domain) # Restrict to model domain
# If x1 is provided (a part is not stretched)
if x1 is not None:
# Store original values
xlim_orig = domain.copy()
nx_orig = int(nx)
x0_orig = x0.copy()
h_min_orig = min_width.copy()
# Get number of non-stretched cells
n_nos = int(np.ceil((x1-x0)/min_width))
# Re-calculate min_width to fit with x0-x1-limits:
min_width = (x1-x0)/n_nos
# Subtract one cell, because the standard scheme provides one
# min_width-cell.
n_nos -= 1
# Reset x0, because the first min_width comes from normal scheme
x0 += min_width
# Reset xmax for normal scheme
domain[1] -= n_nos*min_width
# Reset nx for normal scheme
nx -= n_nos
# If there are not enough points reset to standard procedure. The limit
# of five is arbitrary. However, nx should be much bigger than five
# anyways, otherwise stretched grid doesn't make sense.
if nx <= 5:
print("Warning :: Not enough points for non-stretched part,"
"ignoring therefore `x1`.")
domain = xlim_orig
nx = nx_orig
x0 = x0_orig
x1 = None
min_width = h_min_orig
# Get stretching factor (a = 1+alpha).
if min_width == 0 or min_width > np.diff(domain)/nx:
# If min_width is bigger than the domain-extent divided by nx, no
# stretching is required at all.
alpha = 0
else:
# Wrap _get_dx into a minimization function to call with fsolve.
def find_alpha(alpha, min_width, args):
"""Find alpha such that min(hx) = min_width."""
return min(get_hx(alpha, *args))/min_width-1
# Search for best alpha, must be at least 0
args = (domain, nx, x0)
alpha = max(0, optimize.fsolve(find_alpha, 0.02, (min_width, args)))
# With alpha get actual cell spacing with `resp_domain` to respect the
# users decision.
hx = get_hx(alpha, domain, nx, x0, resp_domain)
# Add the non-stretched center if x1 is provided
if x1 is not None:
hx = np.r_[hx[: np.argmin(hx)], np.ones(n_nos)*min_width,
hx[np.argmin(hx):]]
    # Print a warning if min_width could not be respected.
if abs(hx.min() - min_width) > 0.1:
print(f"Warning :: Minimum cell width ({np.round(hx.min(), 2)} m) is "
"below `min_width`, because `nx` is too big for `domain`.")
return hx
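# A short, hedged example (illustrative values): 64 cells spanning [-500, 500],
# at least 5 m wide, not stretched between x0=0 and x1=100.
#
#   hx = get_stretched_h(5., domain=[-500., 500.], nx=64, x0=0., x1=100.)
#   hx.size                  # 64
#   -500. + np.sum(hx)       # close to 500; see the note on `resp_domain` above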
def get_domain(x0=0, freq=1, res=0.3, limits=None, min_width=None,
fact_min=0.2, fact_neg=5, fact_pos=None):
r"""Get domain extent and minimum cell width as a function of skin depth.
Returns the extent of the calculation domain and the minimum cell width as
a multiple of the skin depth, with possible user restrictions on minimum
calculation domain and range of possible minimum cell widths.
.. math::
\delta &= 503.3 \sqrt{\frac{\rho}{f}} , \\
x_\text{start} &= x_0-k_\text{neg}\delta , \\
x_\text{end} &= x_0+k_\text{pos}\delta , \\
h_\text{min} &= k_\text{min} \delta .
Parameters
----------
x0 : float
Center of the calculation domain. Normally the source location.
Default is 0.
freq : float
Frequency (Hz) to calculate the skin depth. The skin depth is a concept
defined in the frequency domain. If a negative frequency is provided,
it is assumed that the calculation is carried out in the Laplace
domain. To calculate the skin depth, the value of `freq` is then
multiplied by :math:`-2\pi`, to simulate the closest
frequency-equivalent.
Default is 1 Hz.
res : float, optional
Resistivity (Ohm m) to calculate skin depth.
Default is 0.3 Ohm m (sea water).
limits : None or list
[start, end] of model domain. This extent represents the minimum extent
of the domain. The domain is therefore only adjusted if it has to reach
outside of [start, end].
Default is None.
min_width : None, float, or list of two floats
Minimum cell width is calculated as a function of skin depth:
fact_min*sd. If `min_width` is a float, this is used. If a list of
        two values [min, max] is provided, it is used to restrain
min_width. Default is None.
fact_min, fact_neg, fact_pos : floats
The skin depth is multiplied with these factors to estimate:
- Minimum cell width (`fact_min`, default 0.2)
- Domain-start (`fact_neg`, default 5), and
- Domain-end (`fact_pos`, defaults to `fact_neg`).
Returns
-------
h_min : float
Minimum cell width.
domain : list
Start- and end-points of calculation domain.
"""
# Set fact_pos to fact_neg if not provided.
if fact_pos is None:
fact_pos = fact_neg
# Calculate the skin depth.
skind = 503.3*np.sqrt(res/abs(freq))
if freq < 0: # For Laplace-domain calculations.
skind /= np.sqrt(2*np.pi)
# Estimate minimum cell width.
h_min = fact_min*skind
if min_width is not None: # Respect user input.
if np.array(min_width).size == 1:
h_min = min_width
else:
h_min = np.clip(h_min, *min_width)
# Estimate calculation domain.
domain = [x0-fact_neg*skind, x0+fact_pos*skind]
if limits is not None: # Respect user input.
domain = [min(limits[0], domain[0]), max(limits[1], domain[1])]
return h_min, domain
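# Worked example with the documented defaults (sea water, 1 Hz): the skin depth
# is 503.3*sqrt(0.3/1) ~ 275.7 m, so
#
#   h_min, domain = get_domain()   # x0=0, freq=1, res=0.3, fact_min=0.2, fact_neg=5
#   # h_min  ~ 0.2*275.7 ~ 55 m
#   # domain ~ [-5*275.7, +5*275.7] ~ [-1378 m, +1378 m]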
def get_hx(alpha, domain, nx, x0, resp_domain=True):
r"""Return cell widths for given input.
Find the number of cells left and right of `x0`, `nl` and `nr`
respectively, for the provided alpha. For this, we solve
.. math:: \frac{x_\text{max}-x_0}{x_0-x_\text{min}} =
\frac{a^{nr}-1}{a^{nl}-1}
where :math:`a = 1+\alpha`.
Parameters
----------
alpha : float
Stretching factor `a` is given by ``a=1+alpha``.
domain : list
[start, end] of model domain.
nx : int
Number of cells.
x0 : float
Center of the grid. `x0` is restricted to `domain`.
resp_domain : bool
If False (default), then the domain-end might shift slightly to assure
that the same stretching factor is applied throughout. If set to True,
however, the domain is respected absolutely. This will introduce one
stretch-factor which is different from the other stretch factors, to
accommodate the restriction. This one-off factor is between the left-
and right-side of `x0`, or, if `x1` is provided, just after `x1`.
Returns
-------
hx : ndarray
Cell widths of mesh.
"""
if alpha <= 0.: # If alpha <= 0: equal spacing (no stretching at all)
hx = np.ones(nx)*np.diff(np.squeeze(domain))/nx
else: # Get stretched hx
a = alpha+1
# Get hx depending if x0 is on the domain boundary or not.
if np.isclose(x0, domain[0]) or np.isclose(x0, domain[1]):
# Get al a's
alr = np.diff(domain)*alpha/(a**nx-1)*a**np.arange(nx)
if x0 == domain[1]:
alr = alr[::-1]
# Calculate differences
hx = alr*np.diff(domain)/sum(alr)
else:
# Find number of elements left and right by solving:
# (xmax-x0)/(x0-xmin) = a**nr-1/(a**nl-1)
nr = np.arange(2, nx+1)
er = (domain[1]-x0)/(x0-domain[0]) - (a**nr[::-1]-1)/(a**nr-1)
nl = np.argmin(abs(np.floor(er)))+1
nr = nx-nl
# Get all a's
al = a**np.arange(nl-1, -1, -1)
ar = a**np.arange(1, nr+1)
# Calculate differences
if resp_domain:
# This version honours domain[0] and domain[1], but to achieve
# this it introduces one stretch-factor which is different from
# all the others between al to ar.
hx = np.r_[al*(x0-domain[0])/sum(al),
ar*(domain[1]-x0)/sum(ar)]
else:
# This version moves domain[1], but each stretch-factor is
# exactly the same.
fact = (x0-domain[0])/sum(al) # Take distance from al.
hx = np.r_[al, ar]*fact
# Note: this hx is equivalent as providing the following h
# to TensorMesh:
# h = [(min_width, nl-1, -a), (min_width, n_nos+1),
# (min_width, nr, a)]
return hx
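# A brief sketch (illustrative values): with x0 on the left domain edge, the
# np.isclose branch above applies, so the widths grow by the constant factor
# a = 1 + alpha while still summing to the domain length.
#
#   hx = get_hx(alpha=0.05, domain=[0., 100.], nx=10, x0=0.)
#   np.sum(hx)          # 100.0
#   hx[1:]/hx[:-1]      # all ~1.05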
|
[
"numpy.clip",
"numpy.sqrt",
"numpy.array",
"copy.deepcopy",
"numpy.nanmin",
"numpy.arange",
"numpy.diff",
"numpy.max",
"numpy.nanmax",
"numpy.argmin",
"numpy.ceil",
"numpy.ones",
"numpy.floor",
"numpy.squeeze",
"scipy.optimize.fsolve",
"numpy.isclose",
"numpy.unique",
"numpy.sum",
"numpy.zeros",
"numpy.cumsum"
] |
[((10528, 10550), 'numpy.array', 'np.array', (['res'], {'ndmin': '(1)'}), '(res, ndmin=1)\n', (10536, 10550), True, 'import numpy as np\n'), ((10807, 10831), 'numpy.array', 'np.array', (['fixed'], {'ndmin': '(1)'}), '(fixed, ndmin=1)\n', (10815, 10831), True, 'import numpy as np\n'), ((12111, 12140), 'numpy.array', 'np.array', (['domain'], {'dtype': 'float'}), '(domain, dtype=float)\n', (12119, 12140), True, 'import numpy as np\n'), ((13019, 13081), 'numpy.array', 'np.array', (['[domain[0] - dist_buff[0], domain[1] + dist_buff[1]]'], {}), '([domain[0] - dist_buff[0], domain[1] + dist_buff[1]])\n', (13027, 13081), True, 'import numpy as np\n'), ((13469, 13491), 'numpy.unique', 'np.unique', (['possible_nx'], {}), '(possible_nx)\n', (13478, 13491), True, 'import numpy as np\n'), ((19562, 19600), 'numpy.array', 'np.array', (['[2, 3, 5, 7, 11, 13, 17, 19]'], {}), '([2, 3, 5, 7, 11, 13, 17, 19])\n', (19570, 19600), True, 'import numpy as np\n'), ((20115, 20133), 'numpy.unique', 'np.unique', (['numbers'], {}), '(numbers)\n', (20124, 20133), True, 'import numpy as np\n'), ((22736, 22765), 'numpy.array', 'np.array', (['domain'], {'dtype': 'float'}), '(domain, dtype=float)\n', (22744, 22765), True, 'import numpy as np\n'), ((22775, 22800), 'numpy.array', 'np.array', (['x0'], {'dtype': 'float'}), '(x0, dtype=float)\n', (22783, 22800), True, 'import numpy as np\n'), ((22810, 22830), 'numpy.clip', 'np.clip', (['x0', '*domain'], {}), '(x0, *domain)\n', (22817, 22830), True, 'import numpy as np\n'), ((22875, 22907), 'numpy.array', 'np.array', (['min_width'], {'dtype': 'float'}), '(min_width, dtype=float)\n', (22883, 22907), True, 'import numpy as np\n'), ((2493, 2545), 'numpy.array', 'np.array', (['[self.hx.size, self.hy.size, self.hz.size]'], {}), '([self.hx.size, self.hy.size, self.hz.size])\n', (2501, 2545), True, 'import numpy as np\n'), ((2976, 3027), 'numpy.array', 'np.array', (['[self.nNx, self.nNy, self.nNz]'], {'dtype': 'int'}), '([self.nNx, self.nNy, self.nNz], dtype=int)\n', (2984, 3027), True, 'import numpy as np\n'), ((3318, 3369), 'numpy.array', 'np.array', (['[self.nCx, self.nNy, self.nNz]'], {'dtype': 'int'}), '([self.nCx, self.nNy, self.nNz], dtype=int)\n', (3326, 3369), True, 'import numpy as np\n'), ((3390, 3441), 'numpy.array', 'np.array', (['[self.nNx, self.nCy, self.nNz]'], {'dtype': 'int'}), '([self.nNx, self.nCy, self.nNz], dtype=int)\n', (3398, 3441), True, 'import numpy as np\n'), ((3462, 3513), 'numpy.array', 'np.array', (['[self.nNx, self.nNy, self.nCz]'], {'dtype': 'int'}), '([self.nNx, self.nNy, self.nCz], dtype=int)\n', (3470, 3513), True, 'import numpy as np\n'), ((3656, 3707), 'numpy.array', 'np.array', (['[self.nEx, self.nEy, self.nEz]'], {'dtype': 'int'}), '([self.nEx, self.nEy, self.nEz], dtype=int)\n', (3664, 3707), True, 'import numpy as np\n'), ((10591, 10625), 'numpy.array', 'np.array', (['[res[0], res[0], res[0]]'], {}), '([res[0], res[0], res[0]])\n', (10599, 10625), True, 'import numpy as np\n'), ((11750, 11768), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (11757, 11768), True, 'import numpy as np\n'), ((11891, 11919), 'numpy.array', 'np.array', (['min_width'], {'ndmin': '(1)'}), '(min_width, ndmin=1)\n', (11899, 11919), True, 'import numpy as np\n'), ((13560, 13609), 'numpy.arange', 'np.arange', (['(1.0)', '(alpha[0] + alpha[2] / 2)', 'alpha[2]'], {}), '(1.0, alpha[0] + alpha[2] / 2, alpha[2])\n', (13569, 13609), True, 'import numpy as np\n'), ((22944, 22969), 'numpy.array', 'np.array', (['x1'], {'dtype': 'float'}), '(x1, dtype=float)\n', 
(22952, 22969), True, 'import numpy as np\n'), ((22983, 23003), 'numpy.clip', 'np.clip', (['x1', '*domain'], {}), '(x1, *domain)\n', (22990, 23003), True, 'import numpy as np\n'), ((28143, 28161), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (28150, 28161), True, 'import numpy as np\n'), ((4310, 4323), 'copy.deepcopy', 'deepcopy', (['out'], {}), '(out)\n', (4318, 4323), False, 'from copy import deepcopy\n'), ((10668, 10702), 'numpy.array', 'np.array', (['[res[0], res[1], res[1]]'], {}), '([res[0], res[1], res[1]])\n', (10676, 10702), True, 'import numpy as np\n'), ((10731, 10765), 'numpy.array', 'np.array', (['[res[0], res[1], res[2]]'], {}), '([res[0], res[1], res[2]])\n', (10739, 10765), True, 'import numpy as np\n'), ((12014, 12039), 'numpy.clip', 'np.clip', (['dmin', '*min_width'], {}), '(dmin, *min_width)\n', (12021, 12039), True, 'import numpy as np\n'), ((12913, 12924), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (12921, 12924), True, 'import numpy as np\n'), ((15271, 15315), 'numpy.max', 'np.max', (['[hx[1:] / hx[:-1], hx[:-1] / hx[1:]]'], {}), '([hx[1:] / hx[:-1], hx[:-1] / hx[1:]])\n', (15277, 15315), True, 'import numpy as np\n'), ((15392, 15440), 'numpy.arange', 'np.arange', (['sa', '(alpha[1] + alpha[2] / 2)', 'alpha[2]'], {}), '(sa, alpha[1] + alpha[2] / 2, alpha[2])\n', (15401, 15440), True, 'import numpy as np\n'), ((18306, 18319), 'numpy.nanmax', 'np.nanmax', (['hx'], {}), '(hx)\n', (18315, 18319), True, 'import numpy as np\n'), ((18345, 18372), 'numpy.nanmin', 'np.nanmin', (['[ca, sa, sa_adj]'], {}), '([ca, sa, sa_adj])\n', (18354, 18372), True, 'import numpy as np\n'), ((18398, 18425), 'numpy.nanmax', 'np.nanmax', (['[ca, sa, sa_adj]'], {}), '([ca, sa, sa_adj])\n', (18407, 18425), True, 'import numpy as np\n'), ((20052, 20074), 'numpy.arange', 'np.arange', (['min_div', '(30)'], {}), '(min_div, 30)\n', (20061, 20074), True, 'import numpy as np\n'), ((23330, 23360), 'numpy.ceil', 'np.ceil', (['((x1 - x0) / min_width)'], {}), '((x1 - x0) / min_width)\n', (23337, 23360), True, 'import numpy as np\n'), ((24890, 24942), 'scipy.optimize.fsolve', 'optimize.fsolve', (['find_alpha', '(0.02)', '(min_width, args)'], {}), '(find_alpha, 0.02, (min_width, args))\n', (24905, 24942), False, 'from scipy import optimize\n'), ((28382, 28408), 'numpy.clip', 'np.clip', (['h_min', '*min_width'], {}), '(h_min, *min_width)\n', (28389, 28408), True, 'import numpy as np\n'), ((30129, 30154), 'numpy.isclose', 'np.isclose', (['x0', 'domain[0]'], {}), '(x0, domain[0])\n', (30139, 30154), True, 'import numpy as np\n'), ((30158, 30183), 'numpy.isclose', 'np.isclose', (['x0', 'domain[1]'], {}), '(x0, domain[1])\n', (30168, 30183), True, 'import numpy as np\n'), ((30579, 30599), 'numpy.arange', 'np.arange', (['(2)', '(nx + 1)'], {}), '(2, nx + 1)\n', (30588, 30599), True, 'import numpy as np\n'), ((11286, 11304), 'numpy.diff', 'np.diff', (['fixed[:2]'], {}), '(fixed[:2])\n', (11293, 11304), True, 'import numpy as np\n'), ((11317, 11336), 'numpy.diff', 'np.diff', (['fixed[::2]'], {}), '(fixed[::2])\n', (11324, 11336), True, 'import numpy as np\n'), ((17861, 17883), 'numpy.isclose', 'np.isclose', (['sa', 'sa_adj'], {}), '(sa, sa_adj)\n', (17871, 17883), True, 'import numpy as np\n'), ((24380, 24395), 'numpy.diff', 'np.diff', (['domain'], {}), '(domain)\n', (24387, 24395), True, 'import numpy as np\n'), ((28287, 28306), 'numpy.array', 'np.array', (['min_width'], {}), '(min_width)\n', (28295, 28306), True, 'import numpy as np\n'), ((29946, 29957), 'numpy.ones', 'np.ones', (['nx'], 
{}), '(nx)\n', (29953, 29957), True, 'import numpy as np\n'), ((30791, 30816), 'numpy.arange', 'np.arange', (['(nl - 1)', '(-1)', '(-1)'], {}), '(nl - 1, -1, -1)\n', (30800, 30816), True, 'import numpy as np\n'), ((30835, 30855), 'numpy.arange', 'np.arange', (['(1)', '(nr + 1)'], {}), '(1, nr + 1)\n', (30844, 30855), True, 'import numpy as np\n'), ((13689, 13702), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (13698, 13702), True, 'import numpy as np\n'), ((13750, 13763), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (13759, 13763), True, 'import numpy as np\n'), ((14074, 14091), 'numpy.sum', 'np.sum', (['thxr[:ii]'], {}), '(thxr[:ii])\n', (14080, 14091), True, 'import numpy as np\n'), ((14326, 14343), 'numpy.sum', 'np.sum', (['thxl[:ii]'], {}), '(thxl[:ii])\n', (14332, 14343), True, 'import numpy as np\n'), ((15052, 15069), 'numpy.sum', 'np.sum', (['thxl[:nl]'], {}), '(thxl[:nl])\n', (15058, 15069), True, 'import numpy as np\n'), ((15108, 15125), 'numpy.sum', 'np.sum', (['thxr[:nr]'], {}), '(thxr[:nr])\n', (15114, 15125), True, 'import numpy as np\n'), ((15159, 15176), 'numpy.sum', 'np.sum', (['thxl[:nl]'], {}), '(thxl[:nl])\n', (15165, 15176), True, 'import numpy as np\n'), ((16229, 16253), 'numpy.floor', 'np.floor', (['(nx_remain2 / 2)'], {}), '(nx_remain2 / 2)\n', (16237, 16253), True, 'import numpy as np\n'), ((16306, 16329), 'numpy.ceil', 'np.ceil', (['(nx_remain2 / 2)'], {}), '(nx_remain2 / 2)\n', (16313, 16329), True, 'import numpy as np\n'), ((25211, 25225), 'numpy.ones', 'np.ones', (['n_nos'], {}), '(n_nos)\n', (25218, 25225), True, 'import numpy as np\n'), ((29966, 29984), 'numpy.squeeze', 'np.squeeze', (['domain'], {}), '(domain)\n', (29976, 29984), True, 'import numpy as np\n'), ((30263, 30276), 'numpy.arange', 'np.arange', (['nx'], {}), '(nx)\n', (30272, 30276), True, 'import numpy as np\n'), ((30399, 30414), 'numpy.diff', 'np.diff', (['domain'], {}), '(domain)\n', (30406, 30414), True, 'import numpy as np\n'), ((15519, 15546), 'numpy.arange', 'np.arange', (['(1)', '(nx_remain + 1)'], {}), '(1, nx_remain + 1)\n', (15528, 15546), True, 'import numpy as np\n'), ((15738, 15765), 'numpy.arange', 'np.arange', (['(1)', '(nx_remain + 1)'], {}), '(1, nx_remain + 1)\n', (15747, 15765), True, 'import numpy as np\n'), ((16491, 16508), 'numpy.sum', 'np.sum', (['thxl[:nl]'], {}), '(thxl[:nl])\n', (16497, 16508), True, 'import numpy as np\n'), ((25195, 25208), 'numpy.argmin', 'np.argmin', (['hx'], {}), '(hx)\n', (25204, 25208), True, 'import numpy as np\n'), ((25259, 25272), 'numpy.argmin', 'np.argmin', (['hx'], {}), '(hx)\n', (25268, 25272), True, 'import numpy as np\n'), ((30228, 30243), 'numpy.diff', 'np.diff', (['domain'], {}), '(domain)\n', (30235, 30243), True, 'import numpy as np\n'), ((30704, 30716), 'numpy.floor', 'np.floor', (['er'], {}), '(er)\n', (30712, 30716), True, 'import numpy as np\n'), ((13959, 13974), 'numpy.cumsum', 'np.cumsum', (['thxr'], {}), '(thxr)\n', (13968, 13974), True, 'import numpy as np\n'), ((14211, 14226), 'numpy.cumsum', 'np.cumsum', (['thxl'], {}), '(thxl)\n', (14220, 14226), True, 'import numpy as np\n'), ((14429, 14444), 'numpy.cumsum', 'np.cumsum', (['thxl'], {}), '(thxl)\n', (14438, 14444), True, 'import numpy as np\n'), ((14547, 14562), 'numpy.cumsum', 'np.cumsum', (['thxr'], {}), '(thxr)\n', (14556, 14562), True, 'import numpy as np\n'), ((17689, 17699), 'numpy.sum', 'np.sum', (['hx'], {}), '(hx)\n', (17695, 17699), True, 'import numpy as np\n'), ((15590, 15605), 'numpy.cumsum', 'np.cumsum', (['thxl'], {}), '(thxl)\n', (15599, 15605), 
True, 'import numpy as np\n'), ((15809, 15824), 'numpy.cumsum', 'np.cumsum', (['thxr'], {}), '(thxr)\n', (15818, 15824), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils import data
from torch import optim
import torchvision.models as models
from torch.autograd import Variable
import torchvision as tv
import random
import math
import time
from datetime import datetime
import os
import argparse
import subprocess
from util.LFUtil import *
import numpy as np
from networks.LFMNet import LFMNet
def main(args=None):
# # Arguments
# parser = argparse.ArgumentParser()
# # Number of epochs
# parser.add_argument('--epochs', type=int, default=1000)
# # Validate every n percentage of the data
# parser.add_argument('--valEvery', type=float, default=0.25)
# # Image indices to use for training and validation
# parser.add_argument('--imagesToUse', nargs='+', type=int, default=list(range(0,5,1)))
# # List of GPUs to use: 0 1 2 for example
# parser.add_argument('--GPUs', nargs='+', type=int, default=None)
# # Batch size
# parser.add_argument('--batchSize', type=int, default=128)
# # Perentage of the data to use for validation, from 0 to 1
# parser.add_argument('--validationSplit', type=float, default=0.1)
# # Bias initialization value
# parser.add_argument('--biasVal', type=float, default=0.1)
# # Learning rate
# parser.add_argument('--learningRate', type=float, default=0.001)
# # Use bias flag
# parser.add_argument('--useBias', type=str2bool, default=True)
# # Use skip connections flag
# parser.add_argument('--useSkipCon', type=str2bool, default=False)
# # User selected random seed
# parser.add_argument('--randomSeed', type=int, default=None)
# # fov of input or neighboarhood around lenslet to reconstruct
# parser.add_argument('--fovInput', type=int, default=9)
# # nT number of lenslets to reconstruct simultaneously use at training time
# parser.add_argument('--neighShape', type=int, default=3)
# # Flag to use shallow or large U-net
# parser.add_argument('--useShallowUnet', type=str2bool, default=True)
# # Lower threshold of GT stacks, to get rid of autofluorescence
# parser.add_argument('--ths', type=float, default=0.03)
# # Path to dataset
# parser.add_argument('--datasetPath', nargs='?', default="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5")
# # Path to directory where models and tensorboard logs are stored
# parser.add_argument('--outputPath', nargs='?', default="runs/")
# # Prefix for current output folder
# parser.add_argument('--outputPrefix', nargs='?', default="")
# # Path to model in case of continuing a training
# parser.add_argument('--checkpointPath', nargs='?', default=None)
# args = parser.parse_args()
nImgs = len(args.imagesToUse)
# Setup multithreading
num_workers = getThreads()
if num_workers!=0:
torch.set_num_threads(num_workers)
if not torch.cuda.is_available():
print("GPU initialization error")
exit(-1)
if torch.cuda.is_available():
print ("Cuda is available")
device_id = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device_id)
print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
"%.1fGb total memory.\n" %
(torch.cuda.device_count(),
device_id,
gpu_properties.name,
gpu_properties.major,
gpu_properties.minor,
gpu_properties.total_memory / 1e9))
# Select GPUs to use
args.GPUs = list(range(torch.cuda.device_count())) if args.GPUs is None else args.GPUs
print('Using GPUs: ' + str(args.GPUs))
device_ids = args.GPUs
# Set common random seed
if args.randomSeed is not None:
np.random.seed(args.randomSeed)
torch.manual_seed(args.randomSeed)
# Load checkpoint if provided
if args.checkpointPath is not None:
checkpointPath = args.checkpointPath
checkpoint = torch.load(checkpointPath)
# overwrite args
args = checkpoint['args']
args.checkpointPath = checkpointPath
# set Device to use
device = torch.device("cuda:"+str(device_ids[0]) if torch.cuda.is_available() else "cpu")
# Create unique label
today = datetime.now()
# Get commit number
# label = subprocess.check_output(["git", "describe", "--always"]).strip()
#specific to MBL lab workstation
label = subprocess.check_output(["C:/Program Files/git/bin/git", "describe", "--always"]).strip()
comment = today.strftime('%Y_%m_%d__%H%M%S') + "_"+ str(args.useBias) +"B_"+str(args.biasVal)+"bias_" + str(nImgs) + \
"I_"+ str(args.batchSize)+"BS_"+str(args.useSkipCon)+"Sk_" + str(args.fovInput) + "FOV_" + str(args.neighShape) + "nT_" \
+ str(args.ths) + "ths_" + str(label.decode("utf-8") ) + "_commit__" + args.outputPrefix
# Create output folder
save_folder = args.outputPath + "/" + comment
# If asked to continue a training, save in the same folder
if args.checkpointPath is not None:
save_folder = os.path.split(args.checkpointPath)[0]
print(save_folder)
# Create summary writer to log stuff
writer = SummaryWriter(log_dir=save_folder)
writer.add_text('Description',comment,0)
writer.flush()
# Load dataset
all_data = Dataset(args.datasetPath, args.randomSeed, \
fov=args.fovInput, neighShape=args.neighShape, img_indices=args.imagesToUse, get_full_imgs=False, center_region=None)
# Split validation and testing
train_size = int((1 - args.validationSplit) * len(all_data))
test_size = len(all_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(all_data, [train_size, test_size])
# Create data loaders
train_dataset = data.DataLoader(train_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
test_dataset = data.DataLoader(test_dataset, batch_size=args.batchSize,
shuffle=True, num_workers=num_workers, pin_memory=True)
validate_every = np.round(len(train_dataset)*args.valEvery)
# Get Dataset information
nDepths = all_data.get_n_depths()
volShape, LFshape = all_data.__shape__()
LFshape = LFshape[0:4]
lateralTile = int(math.sqrt(nDepths))
# Find normalization values
maxInputTrain, maxVolumeTrain = all_data.get_max()
maxInputTest, maxVolumeTest = all_data.get_max()
# Create network
net = LFMNet(nDepths, args.useBias, args.useSkipCon, LFshape, LFfov=args.fovInput, use_small_unet=args.useShallowUnet).to(device)
optimizer = optim.Adam(net.parameters(), lr=args.learningRate)
lossFunction = nn.L1Loss()
# Create SSIM criteria
ssim = SSIM()
ssim.eval()
# Init bias and weights if needed
if args.useBias:
def bias_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
if m.bias is not None:
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
nn.init.constant_(m.bias.data, args.biasVal)
nn.init.kaiming_normal_(m.weight)
net.apply(bias_init)
# Load network from checkpoint
if args.checkpointPath is not None:
net.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epochStart = checkpoint['epoch']
epochs = args.epochs + epochStart
train_loss = checkpoint['loss']
# Start distributed data parallel, as it's faster than DataParallel
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '1234'+str(device_ids[0])
torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1)
# Move network to distributed data parallel
net = nn.parallel.DistributedDataParallel(net, device_ids=args.GPUs, output_device=args.GPUs[0]).to(device)
# timers
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
global_it_counter = 0
# define indices to grab for tensorboard visualization
indices_to_show = torch.randperm(test_size)[0:8]
# Init arrays to store losses
train_losses, test_losses = [], []
test_loss = 0
    # Keep the epoch loaded from a checkpoint (if any); otherwise start at 0.
    if args.checkpointPath is None:
        epochStart = 0
# Start training
for epoch in range(epochStart, args.epochs):
net.train()
torch.set_grad_enabled(True)
torch.cuda.empty_cache()
train_loss = 0
print('Training')
global_it_counter = 0
for nBatch,(inputs,labels) in enumerate(train_dataset):
# compute current iteration
curr_it = epoch*len(train_dataset) + nBatch
# start timer
start.record()
print('ep: ' + str(epoch) + ' ' + str(nBatch+1) + '/' + str(len(train_dataset)) + ' currIt: ' + str(curr_it))
optimizer.zero_grad()
# load data to gpu and normalize from 0 to 1
inputGPU = inputs.float().to(device) / maxInputTest
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
if args.ths!=0:
outputsGT = imadjust(outputsGT, args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
# Predict
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
loss.backward()
train_loss += loss.item() / nDepths
optimizer.step()
global_it_counter += inputs.shape[0]
# Record training time
end.record()
torch.cuda.synchronize()
end_time = start.elapsed_time(end)
# Compute time per sample
elapsed_time = end_time/inputs.shape[0]
# Check if validation is required
if nBatch%validate_every==0:
print(comment)
# Write training images to tensorboard
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# Select some images in the batch for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:4]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
gt = outputsGT[0,:,:,:,:].sum(3).repeat(3,1,1)
gt /= gt.max()
# Write to tensorboard
writer.add_image('z_proj_train',gt,curr_it)
writer.add_image('images_train_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_train', gridPred, curr_it)
writer.add_image('outputRGB_train_GT', gridGT, curr_it)
writer.add_image('input_train', gridInput, curr_it)
writer.add_scalar('Loss/train', train_loss/global_it_counter, curr_it)
writer.add_scalar('times/train', elapsed_time, curr_it)
# Restart
train_loss = 0.0
global_it_counter = 0
print('Validating')
net.eval()
with torch.no_grad():
avg_psnr = 0
avg_ssim = 0
test_loss = 0
start.record()
for nBatch,(inputs,labels) in enumerate(test_dataset):
inputGPU = inputs.float().to(device) / maxInputTest
outputsGT = labels.float().to(device) / maxVolumeTrain
# Threshold GT to get rid of autofluorescence
outputsGT = imadjust(outputsGT,args.ths,outputsGT.max(), outputsGT.min(), outputsGT.max())
outputsVol = net(inputGPU)
loss = lossFunction(outputsGT,outputsVol)
test_loss += loss.item() / nDepths
# Compute PSNR
lossMSE = nn.functional.mse_loss(outputsVol.to(device).detach(), outputsGT.to(device).detach())
avg_psnr += 10 * math.log10(1 / lossMSE.item())
# Compute ssim
avg_ssim += ssim(outputsVol[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device), outputsGT[:,0,:,:,:].permute(0,3,1,2).contiguous().detach().to(device)).sum()
end.record()
torch.cuda.synchronize()
lastBatchSize = min(outputsGT.shape[0],4)
gridOut2 = torch.cat((outputsGT[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach(), outputsVol[0:lastBatchSize, :, :, :, :].sum(2).cpu().data.detach()), dim=0)
gridOut2 = tv.utils.make_grid(gridOut2, normalize=True, scale_each=False)
# process some for showing
indices_to_display = torch.randperm(inputGPU.shape[0])[0:lastBatchSize]
outputsGT = F.interpolate(outputsGT[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
outputsVol = F.interpolate(outputsVol[indices_to_display, :, :, :, :],[LFshape[0]*2,LFshape[1]*2,volShape[2]])
inputGPU = inputGPU[indices_to_display,:,:,:,:,:]
currPred = convert3Dto2DTiles(outputsVol, [lateralTile, lateralTile])
currGT = convert3Dto2DTiles(outputsGT, [lateralTile, lateralTile])
inputGrid = LF2Spatial(inputGPU, inputGPU.shape[2:])
gridPred = tv.utils.make_grid(currPred,normalize=True, scale_each=False)
gridGT = tv.utils.make_grid(currGT,normalize=True, scale_each=False)
gridInput = tv.utils.make_grid(inputGrid,normalize=True, scale_each=False)
# Write to tensorboard
writer.add_image('images_val_YZ_projection', gridOut2, curr_it)
writer.add_image('outputRGB_test', gridPred, curr_it)
writer.add_image('outputRGB_test_GT', gridGT, curr_it)
writer.add_image('input_test', gridInput, curr_it)
writer.add_scalar('Loss/test', test_loss/len(test_dataset), curr_it)
writer.add_scalar('Loss/psnr_val', avg_psnr/len(test_dataset), curr_it)
writer.add_scalar('Loss/ssim_val', avg_ssim/len(test_dataset), curr_it)
writer.add_scalar('LearningRate', args.learningRate, curr_it)
writer.add_scalar('times/val', start.elapsed_time(end)/test_size, curr_it)
net.train()
if epoch%2==0:
torch.save({
'epoch': epoch,
'args' : args,
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': train_loss,
'dataset_path': args.datasetPath},
save_folder + '/model_'+str(epoch))
print(f"Epoch {epoch + 1}/{args.epochs}.. "
f"Train loss: {train_loss / len(train_dataset):.7f}.. "
f"Test loss: {test_loss / len(test_dataset):.7f}.. ")
if __name__ == '__main__':
main()
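# main() expects an argparse-style namespace; a hedged way to drive it directly
# (field values are the defaults listed in the commented-out parser above, not
# a tested configuration):
#
#   import argparse
#   args = argparse.Namespace(
#       epochs=1000, valEvery=0.25, imagesToUse=list(range(0, 5, 1)), GPUs=None,
#       batchSize=128, validationSplit=0.1, biasVal=0.1, learningRate=0.001,
#       useBias=True, useSkipCon=False, randomSeed=None, fovInput=9,
#       neighShape=3, useShallowUnet=True, ths=0.03,
#       datasetPath="BrainLFMConfocalDataset/Brain_40x_64Depths_362imgs.h5",
#       outputPath="runs/", outputPrefix="", checkpointPath=None)
#   main(args)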
|
[
"torch.randperm",
"torch.nn.init.constant_",
"torch.nn.L1Loss",
"math.sqrt",
"torch.cuda.device_count",
"torch.cuda.synchronize",
"torch.cuda.is_available",
"torch.nn.functional.interpolate",
"torchvision.utils.make_grid",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.init.kaiming_normal_",
"torch.set_num_threads",
"os.path.split",
"numpy.random.seed",
"torch.cuda.current_device",
"networks.LFMNet.LFMNet",
"torch.cuda.Event",
"subprocess.check_output",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.random_split",
"torch.no_grad",
"torch.cuda.empty_cache",
"torch.manual_seed",
"torch.load",
"datetime.datetime.now",
"torch.utils.data.DataLoader",
"torch.set_grad_enabled",
"torch.distributed.init_process_group",
"torch.cuda.get_device_properties"
] |
[((3063, 3088), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3086, 3088), False, 'import torch\n'), ((4383, 4397), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4395, 4397), False, 'from datetime import datetime\n'), ((5320, 5354), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'save_folder'}), '(log_dir=save_folder)\n', (5333, 5354), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((5805, 5869), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['all_data', '[train_size, test_size]'], {}), '(all_data, [train_size, test_size])\n', (5834, 5869), False, 'import torch\n'), ((5916, 6033), 'torch.utils.data.DataLoader', 'data.DataLoader', (['train_dataset'], {'batch_size': 'args.batchSize', 'shuffle': '(True)', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(train_dataset, batch_size=args.batchSize, shuffle=True,\n num_workers=num_workers, pin_memory=True)\n', (5931, 6033), False, 'from torch.utils import data\n'), ((6085, 6201), 'torch.utils.data.DataLoader', 'data.DataLoader', (['test_dataset'], {'batch_size': 'args.batchSize', 'shuffle': '(True)', 'num_workers': 'num_workers', 'pin_memory': '(True)'}), '(test_dataset, batch_size=args.batchSize, shuffle=True,\n num_workers=num_workers, pin_memory=True)\n', (6100, 6201), False, 'from torch.utils import data\n'), ((6860, 6871), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (6869, 6871), True, 'import torch.nn as nn\n'), ((8325, 8361), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (8341, 8361), False, 'import torch\n'), ((8372, 8408), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (8388, 8408), False, 'import torch\n'), ((2914, 2948), 'torch.set_num_threads', 'torch.set_num_threads', (['num_workers'], {}), '(num_workers)\n', (2935, 2948), False, 'import torch\n'), ((2961, 2986), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2984, 2986), False, 'import torch\n'), ((3146, 3173), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (3171, 3173), False, 'import torch\n'), ((3199, 3242), 'torch.cuda.get_device_properties', 'torch.cuda.get_device_properties', (['device_id'], {}), '(device_id)\n', (3231, 3242), False, 'import torch\n'), ((3878, 3909), 'numpy.random.seed', 'np.random.seed', (['args.randomSeed'], {}), '(args.randomSeed)\n', (3892, 3909), True, 'import numpy as np\n'), ((3918, 3952), 'torch.manual_seed', 'torch.manual_seed', (['args.randomSeed'], {}), '(args.randomSeed)\n', (3935, 3952), False, 'import torch\n'), ((4094, 4120), 'torch.load', 'torch.load', (['checkpointPath'], {}), '(checkpointPath)\n', (4104, 4120), False, 'import torch\n'), ((6458, 6476), 'math.sqrt', 'math.sqrt', (['nDepths'], {}), '(nDepths)\n', (6467, 6476), False, 'import math\n'), ((7842, 7867), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (7865, 7867), False, 'import torch\n'), ((8054, 8128), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'rank': '(0)', 'world_size': '(1)'}), "(backend='nccl', rank=0, world_size=1)\n", (8090, 8128), False, 'import torch\n'), ((8516, 8541), 'torch.randperm', 'torch.randperm', (['test_size'], {}), '(test_size)\n', (8530, 8541), False, 'import torch\n'), ((8756, 8784), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (8778, 8784), False, 
'import torch\n'), ((8793, 8817), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (8815, 8817), False, 'import torch\n'), ((4306, 4331), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4329, 4331), False, 'import torch\n'), ((4551, 4636), 'subprocess.check_output', 'subprocess.check_output', (["['C:/Program Files/git/bin/git', 'describe', '--always']"], {}), "(['C:/Program Files/git/bin/git', 'describe',\n '--always'])\n", (4574, 4636), False, 'import subprocess\n'), ((5204, 5238), 'os.path.split', 'os.path.split', (['args.checkpointPath'], {}), '(args.checkpointPath)\n', (5217, 5238), False, 'import os\n'), ((6650, 6766), 'networks.LFMNet.LFMNet', 'LFMNet', (['nDepths', 'args.useBias', 'args.useSkipCon', 'LFshape'], {'LFfov': 'args.fovInput', 'use_small_unet': 'args.useShallowUnet'}), '(nDepths, args.useBias, args.useSkipCon, LFshape, LFfov=args.fovInput,\n use_small_unet=args.useShallowUnet)\n', (6656, 6766), False, 'from networks.LFMNet import LFMNet\n'), ((7900, 7925), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (7923, 7925), False, 'import torch\n'), ((9994, 10018), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10016, 10018), False, 'import torch\n'), ((3670, 3695), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3693, 3695), False, 'import torch\n'), ((7208, 7241), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {}), '(m.weight)\n', (7231, 7241), True, 'import torch.nn as nn\n'), ((7308, 7352), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias.data', 'args.biasVal'], {}), '(m.bias.data, args.biasVal)\n', (7325, 7352), True, 'import torch.nn as nn\n'), ((7369, 7402), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {}), '(m.weight)\n', (7392, 7402), True, 'import torch.nn as nn\n'), ((8196, 8290), 'torch.nn.parallel.DistributedDataParallel', 'nn.parallel.DistributedDataParallel', (['net'], {'device_ids': 'args.GPUs', 'output_device': 'args.GPUs[0]'}), '(net, device_ids=args.GPUs,\n output_device=args.GPUs[0])\n', (8231, 8290), True, 'import torch.nn as nn\n'), ((10596, 10658), 'torchvision.utils.make_grid', 'tv.utils.make_grid', (['gridOut2'], {'normalize': '(True)', 'scale_each': '(False)'}), '(gridOut2, normalize=True, scale_each=False)\n', (10614, 10658), True, 'import torchvision as tv\n'), ((10825, 10933), 'torch.nn.functional.interpolate', 'F.interpolate', (['outputsGT[indices_to_display, :, :, :, :]', '[LFshape[0] * 2, LFshape[1] * 2, volShape[2]]'], {}), '(outputsGT[indices_to_display, :, :, :, :], [LFshape[0] * 2, \n LFshape[1] * 2, volShape[2]])\n', (10838, 10933), True, 'import torch.nn.functional as F\n'), ((10951, 11060), 'torch.nn.functional.interpolate', 'F.interpolate', (['outputsVol[indices_to_display, :, :, :, :]', '[LFshape[0] * 2, LFshape[1] * 2, volShape[2]]'], {}), '(outputsVol[indices_to_display, :, :, :, :], [LFshape[0] * 2, \n LFshape[1] * 2, volShape[2]])\n', (10964, 11060), True, 'import torch.nn.functional as F\n'), ((11380, 11442), 'torchvision.utils.make_grid', 'tv.utils.make_grid', (['currPred'], {'normalize': '(True)', 'scale_each': '(False)'}), '(currPred, normalize=True, scale_each=False)\n', (11398, 11442), True, 'import torchvision as tv\n'), ((11467, 11527), 'torchvision.utils.make_grid', 'tv.utils.make_grid', (['currGT'], {'normalize': '(True)', 'scale_each': '(False)'}), '(currGT, normalize=True, scale_each=False)\n', (11485, 11527), True, 'import torchvision as 
tv\n'), ((11555, 11618), 'torchvision.utils.make_grid', 'tv.utils.make_grid', (['inputGrid'], {'normalize': '(True)', 'scale_each': '(False)'}), '(inputGrid, normalize=True, scale_each=False)\n', (11573, 11618), True, 'import torchvision as tv\n'), ((3397, 3422), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3420, 3422), False, 'import torch\n'), ((7147, 7191), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias.data', 'args.biasVal'], {}), '(m.bias.data, args.biasVal)\n', (7164, 7191), True, 'import torch.nn as nn\n'), ((10758, 10791), 'torch.randperm', 'torch.randperm', (['inputGPU.shape[0]'], {}), '(inputGPU.shape[0])\n', (10772, 10791), False, 'import torch\n'), ((12471, 12486), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12484, 12486), False, 'import torch\n'), ((13730, 13754), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (13752, 13754), False, 'import torch\n'), ((14051, 14113), 'torchvision.utils.make_grid', 'tv.utils.make_grid', (['gridOut2'], {'normalize': '(True)', 'scale_each': '(False)'}), '(gridOut2, normalize=True, scale_each=False)\n', (14069, 14113), True, 'import torchvision as tv\n'), ((14285, 14393), 'torch.nn.functional.interpolate', 'F.interpolate', (['outputsGT[indices_to_display, :, :, :, :]', '[LFshape[0] * 2, LFshape[1] * 2, volShape[2]]'], {}), '(outputsGT[indices_to_display, :, :, :, :], [LFshape[0] * 2, \n LFshape[1] * 2, volShape[2]])\n', (14298, 14393), True, 'import torch.nn.functional as F\n'), ((14415, 14524), 'torch.nn.functional.interpolate', 'F.interpolate', (['outputsVol[indices_to_display, :, :, :, :]', '[LFshape[0] * 2, LFshape[1] * 2, volShape[2]]'], {}), '(outputsVol[indices_to_display, :, :, :, :], [LFshape[0] * 2, \n LFshape[1] * 2, volShape[2]])\n', (14428, 14524), True, 'import torch.nn.functional as F\n'), ((14865, 14927), 'torchvision.utils.make_grid', 'tv.utils.make_grid', (['currPred'], {'normalize': '(True)', 'scale_each': '(False)'}), '(currPred, normalize=True, scale_each=False)\n', (14883, 14927), True, 'import torchvision as tv\n'), ((14956, 15016), 'torchvision.utils.make_grid', 'tv.utils.make_grid', (['currGT'], {'normalize': '(True)', 'scale_each': '(False)'}), '(currGT, normalize=True, scale_each=False)\n', (14974, 15016), True, 'import torchvision as tv\n'), ((15048, 15111), 'torchvision.utils.make_grid', 'tv.utils.make_grid', (['inputGrid'], {'normalize': '(True)', 'scale_each': '(False)'}), '(inputGrid, normalize=True, scale_each=False)\n', (15066, 15111), True, 'import torchvision as tv\n'), ((14202, 14235), 'torch.randperm', 'torch.randperm', (['inputGPU.shape[0]'], {}), '(inputGPU.shape[0])\n', (14216, 14235), False, 'import torch\n')]
|
import pygame
import time
import numpy as np
import sys
gray = (150, 150, 150)
white = (255, 255, 255)
black = (0, 0, 0, )
red_block = (255, 0, 0)
red_border = (76, 0, 19)
block_color = (255, 128, 0)
border_color = (165,42,42)
screen = None
SIDE = 50
BORDER = 5
MARGIN = 5
LINE = 1
h_switch = True
def __draw_horizontal_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_red_block(x,y):
global screen, h_switch
pygame.draw.rect(screen, red_border, pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, red_block, pygame.Rect(MARGIN + y*SIDE + h_switch*BORDER, MARGIN + x*SIDE + BORDER,
SIDE - BORDER, SIDE - 2*BORDER))
h_switch = not h_switch
def __draw_vertical_block(x,y):
global screen
pygame.draw.rect(screen, border_color, pygame.Rect(MARGIN + y*SIDE, MARGIN + x*SIDE, SIDE, 2*SIDE))
pygame.draw.rect(screen, block_color, pygame.Rect(MARGIN + y*SIDE + BORDER, MARGIN + x*SIDE + BORDER,
SIDE - 2*BORDER, 2*SIDE - 2*BORDER))
## Render function for the unblockme_class
def render_unblockme(game_object):
matrix = game_object.internal_state
k, h, _ = game_object.shape
global screen
if screen is None:
pygame.init()
screen = pygame.display.set_mode((2*MARGIN+k*SIDE, 2*MARGIN+h*SIDE))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.display.quit()
pygame.quit()
sys.exit(0)
screen.fill(black)
# first we draw the background
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
#draw the exit on the outer border
if selected_block[0] == 0:
if y == 0:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
else:
pygame.draw.rect(screen, white, pygame.Rect(y*SIDE+MARGIN,x*SIDE+MARGIN, SIDE+MARGIN, SIDE))
# Draw the background with the grid pattern
pygame.draw.rect(screen, gray , pygame.Rect(MARGIN + y*SIDE,MARGIN + x*SIDE, SIDE, SIDE))
pygame.draw.rect(screen, white, pygame.Rect(MARGIN + y*SIDE + LINE,MARGIN + x*SIDE + LINE,
SIDE - 2*LINE, SIDE - 2*LINE))
# then we draw the blocks in the grid
for x in range(0,k):
for y in range(0,h):
cell = matrix[x,y,1:]
selected_block = np.where(cell == 1)[0]
if len(selected_block) != 0:
if selected_block[-1] == 1:
__draw_horizontal_block(x,y)
elif selected_block[-1] == 2:
if (x == 0 or not (matrix[x-1,y,1:] == cell).all() ) and \
(x != k-1 and (matrix[x+1,y,1:] == cell).all() ):
__draw_vertical_block(x,y)
elif selected_block[-1] == 0:
__draw_red_block(x,y)
pygame.display.update()
time.sleep(0.1)
if __name__ == "__main__":
from unblockme_class import *
matrix, goal = get_example()
game = unblock_me(matrix, goal)
render_unblockme(game)
|
[
"pygame.init",
"pygame.quit",
"pygame.event.get",
"numpy.where",
"pygame.display.set_mode",
"time.sleep",
"pygame.display.quit",
"sys.exit",
"pygame.display.update",
"pygame.Rect"
] |
[((1755, 1773), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1771, 1773), False, 'import pygame\n'), ((3536, 3559), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3557, 3559), False, 'import pygame\n'), ((3564, 3579), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3574, 3579), False, 'import time\n'), ((412, 473), 'pygame.Rect', 'pygame.Rect', (['(MARGIN + y * SIDE)', '(MARGIN + x * SIDE)', 'SIDE', 'SIDE'], {}), '(MARGIN + y * SIDE, MARGIN + x * SIDE, SIDE, SIDE)\n', (423, 473), False, 'import pygame\n'), ((513, 629), 'pygame.Rect', 'pygame.Rect', (['(MARGIN + y * SIDE + h_switch * BORDER)', '(MARGIN + x * SIDE + BORDER)', '(SIDE - BORDER)', '(SIDE - 2 * BORDER)'], {}), '(MARGIN + y * SIDE + h_switch * BORDER, MARGIN + x * SIDE +\n BORDER, SIDE - BORDER, SIDE - 2 * BORDER)\n', (524, 629), False, 'import pygame\n'), ((800, 861), 'pygame.Rect', 'pygame.Rect', (['(MARGIN + y * SIDE)', '(MARGIN + x * SIDE)', 'SIDE', 'SIDE'], {}), '(MARGIN + y * SIDE, MARGIN + x * SIDE, SIDE, SIDE)\n', (811, 861), False, 'import pygame\n'), ((899, 1015), 'pygame.Rect', 'pygame.Rect', (['(MARGIN + y * SIDE + h_switch * BORDER)', '(MARGIN + x * SIDE + BORDER)', '(SIDE - BORDER)', '(SIDE - 2 * BORDER)'], {}), '(MARGIN + y * SIDE + h_switch * BORDER, MARGIN + x * SIDE +\n BORDER, SIDE - BORDER, SIDE - 2 * BORDER)\n', (910, 1015), False, 'import pygame\n'), ((1183, 1248), 'pygame.Rect', 'pygame.Rect', (['(MARGIN + y * SIDE)', '(MARGIN + x * SIDE)', 'SIDE', '(2 * SIDE)'], {}), '(MARGIN + y * SIDE, MARGIN + x * SIDE, SIDE, 2 * SIDE)\n', (1194, 1248), False, 'import pygame\n'), ((1287, 1401), 'pygame.Rect', 'pygame.Rect', (['(MARGIN + y * SIDE + BORDER)', '(MARGIN + x * SIDE + BORDER)', '(SIDE - 2 * BORDER)', '(2 * SIDE - 2 * BORDER)'], {}), '(MARGIN + y * SIDE + BORDER, MARGIN + x * SIDE + BORDER, SIDE - \n 2 * BORDER, 2 * SIDE - 2 * BORDER)\n', (1298, 1401), False, 'import pygame\n'), ((1646, 1659), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1657, 1659), False, 'import pygame\n'), ((1677, 1748), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(2 * MARGIN + k * SIDE, 2 * MARGIN + h * SIDE)'], {}), '((2 * MARGIN + k * SIDE, 2 * MARGIN + h * SIDE))\n', (1700, 1748), False, 'import pygame\n'), ((1825, 1846), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (1844, 1846), False, 'import pygame\n'), ((1859, 1872), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1870, 1872), False, 'import pygame\n'), ((1885, 1896), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1893, 1896), False, 'import sys\n'), ((2073, 2092), 'numpy.where', 'np.where', (['(cell == 1)'], {}), '(cell == 1)\n', (2081, 2092), True, 'import numpy as np\n'), ((2615, 2676), 'pygame.Rect', 'pygame.Rect', (['(MARGIN + y * SIDE)', '(MARGIN + x * SIDE)', 'SIDE', 'SIDE'], {}), '(MARGIN + y * SIDE, MARGIN + x * SIDE, SIDE, SIDE)\n', (2626, 2676), False, 'import pygame\n'), ((2717, 2818), 'pygame.Rect', 'pygame.Rect', (['(MARGIN + y * SIDE + LINE)', '(MARGIN + x * SIDE + LINE)', '(SIDE - 2 * LINE)', '(SIDE - 2 * LINE)'], {}), '(MARGIN + y * SIDE + LINE, MARGIN + x * SIDE + LINE, SIDE - 2 *\n LINE, SIDE - 2 * LINE)\n', (2728, 2818), False, 'import pygame\n'), ((3027, 3046), 'numpy.where', 'np.where', (['(cell == 1)'], {}), '(cell == 1)\n', (3035, 3046), True, 'import numpy as np\n'), ((2318, 2379), 'pygame.Rect', 'pygame.Rect', (['(y * SIDE)', '(x * SIDE + MARGIN)', '(SIDE + MARGIN)', 'SIDE'], {}), '(y * SIDE, x * SIDE + MARGIN, SIDE + MARGIN, SIDE)\n', (2329, 2379), False, 'import 
pygame\n'), ((2454, 2524), 'pygame.Rect', 'pygame.Rect', (['(y * SIDE + MARGIN)', '(x * SIDE + MARGIN)', '(SIDE + MARGIN)', 'SIDE'], {}), '(y * SIDE + MARGIN, x * SIDE + MARGIN, SIDE + MARGIN, SIDE)\n', (2465, 2524), False, 'import pygame\n')]
|
# -*- coding: utf-8 -*-
"""Test GUI component."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
#from contextlib import contextmanager
from pytest import yield_fixture, fixture, raises
import numpy as np
from numpy.testing import assert_array_equal as ae
from .. import supervisor as _supervisor
from ..supervisor import (Supervisor,
TaskLogger,
ClusterView,
SimilarityView,
ActionCreator,
)
from phy.gui import GUI
from phy.gui.widgets import Barrier
from phy.gui.qt import qInstallMessageHandler
from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready
from phy.utils.context import Context
from phylib.utils import connect, Bunch, emit
def handler(msg_type, msg_log_context, msg_string):
pass
qInstallMessageHandler(handler)
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@yield_fixture
def gui(tempdir, qtbot):
# NOTE: mock patch show box exec_
_supervisor._show_box = lambda _: _
gui = GUI(position=(200, 100), size=(500, 500), config_dir=tempdir)
gui.set_default_actions()
gui.show()
qtbot.waitForWindowShown(gui)
yield gui
qtbot.wait(5)
gui.close()
del gui
qtbot.wait(5)
@fixture
def supervisor(qtbot, gui, cluster_ids, cluster_groups, cluster_labels,
similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
s = Supervisor(
spike_clusters,
cluster_groups=cluster_groups,
cluster_labels=cluster_labels,
similarity=similarity,
context=Context(tempdir),
sort=('id', 'desc'),
)
s.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=s.cluster_view)
connect(b('similarity_view'), event='ready', sender=s.similarity_view)
b.wait()
return s
#------------------------------------------------------------------------------
# Test tasks
#------------------------------------------------------------------------------
@fixture
def tl():
class MockClusterView(object):
_selected = [0]
def select(self, cl, callback=None, **kwargs):
self._selected = cl
callback({'selected': cl, 'next': cl[-1] + 1})
def next(self, callback=None):
callback({'selected': [self._selected[-1] + 1], 'next': self._selected[-1] + 2})
def previous(self, callback=None): # pragma: no cover
callback({'selected': [self._selected[-1] - 1], 'next': self._selected[-1]})
class MockSimilarityView(MockClusterView):
pass
class MockSupervisor(object):
def merge(self, cluster_ids, to, callback=None):
callback(Bunch(deleted=cluster_ids, added=[to]))
def split(self, old_cluster_ids, new_cluster_ids, callback=None):
callback(Bunch(deleted=old_cluster_ids, added=new_cluster_ids))
def move(self, which, group, callback=None):
callback(Bunch(metadata_changed=which, metadata_value=group))
def undo(self, callback=None):
callback(Bunch())
def redo(self, callback=None):
callback(Bunch())
out = TaskLogger(MockClusterView(), MockSimilarityView(), MockSupervisor())
return out
def test_task_1(tl):
assert tl.last_state(None) is None
def test_task_2(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.process()
assert tl.last_state() == ([0], 1, None, None)
def test_task_3(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
def test_task_merge(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'merge', [0, 100], 1000)
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
tl.enqueue(tl.supervisor, 'undo')
tl.process()
assert tl.last_state() == ([0], 1, [100], 101)
tl.enqueue(tl.supervisor, 'redo')
tl.process()
assert tl.last_state() == ([1000], 1001, None, None)
def test_task_split(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'split', [0, 100], [1000, 1001])
tl.process()
assert tl.last_state() == ([1000, 1001], 1002, None, None)
def test_task_move_1(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.supervisor, 'move', [0], 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_best(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'best', 'good')
tl.process()
assert tl.last_state() == ([1], 2, None, None)
def test_task_move_similar(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'similar', 'good')
tl.process()
assert tl.last_state() == ([0], 1, [101], 102)
def test_task_move_all(tl):
tl.enqueue(tl.cluster_view, 'select', [0])
tl.enqueue(tl.similarity_view, 'select', [100])
tl.enqueue(tl.supervisor, 'move', 'all', 'good')
tl.process()
assert tl.last_state() == ([1], 2, [101], 102)
#------------------------------------------------------------------------------
# Test cluster and similarity views
#------------------------------------------------------------------------------
@fixture
def data():
_data = [{"id": i,
"n_spikes": 100 - 10 * i,
"group": {2: 'noise', 3: 'noise', 5: 'mua', 8: 'good'}.get(i, None),
"is_masked": i in (2, 3, 5),
} for i in range(10)]
return _data
def test_cluster_view_1(qtbot, gui, data):
cv = ClusterView(gui, data=data)
_wait_until_table_ready(qtbot, cv)
cv.sort_by('n_spikes', 'asc')
cv.select([1])
qtbot.wait(10)
assert cv.state == {'current_sort': ('n_spikes', 'asc'), 'selected': [1]}
cv.set_state({'current_sort': ('id', 'desc'), 'selected': [2]})
assert cv.state == {'current_sort': ('id', 'desc'), 'selected': [2]}
def test_similarity_view_1(qtbot, gui, data):
sv = SimilarityView(gui, data=data)
_wait_until_table_ready(qtbot, sv)
@connect(sender=sv)
def on_request_similar_clusters(sender, cluster_id):
return [{'id': id} for id in (100 + cluster_id, 110 + cluster_id, 102 + cluster_id)]
sv.reset([5])
_assert(sv.get_ids, [105, 115, 107])
def test_cluster_view_extra_columns(qtbot, gui, data):
for cl in data:
cl['my_metrics'] = cl['id'] * 1000
cv = ClusterView(gui, data=data, columns=['id', 'n_spikes', 'my_metrics'])
_wait_until_table_ready(qtbot, cv)
#------------------------------------------------------------------------------
# Test ActionCreator
#------------------------------------------------------------------------------
def test_action_creator_1(qtbot, gui):
ac = ActionCreator()
ac.attach(gui)
gui.show()
#------------------------------------------------------------------------------
# Test GUI component
#------------------------------------------------------------------------------
def _select(supervisor, cluster_ids, similar=None):
supervisor.task_logger.enqueue(supervisor.cluster_view, 'select', cluster_ids)
if similar is not None:
supervisor.task_logger.enqueue(supervisor.similarity_view, 'select', similar)
supervisor.task_logger.process()
supervisor.block()
supervisor.task_logger.show_history()
assert supervisor.task_logger.last_state()[0] == cluster_ids
assert supervisor.task_logger.last_state()[2] == similar
def _assert_selected(supervisor, sel):
assert supervisor.selected == sel
def test_select(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
def test_supervisor_busy(qtbot, supervisor):
_select(supervisor, [30], [20])
o = object()
emit('is_busy', o, True)
assert supervisor._is_busy
# The action fails while the supervisor is busy.
with raises(RuntimeError):
emit('action', supervisor.action_creator, 'merge')
emit('is_busy', o, False)
assert not supervisor._is_busy
# The action succeeds because the supervisor is no longer busy.
emit('action', supervisor.action_creator, 'merge')
supervisor.block()
assert not supervisor._is_busy
def test_supervisor_cluster_metrics(
qtbot, gui, cluster_ids, cluster_groups, similarity, tempdir):
spike_clusters = np.repeat(cluster_ids, 2)
def my_metrics(cluster_id):
return cluster_id ** 2
cluster_metrics = {'my_metrics': my_metrics}
mc = Supervisor(spike_clusters,
cluster_groups=cluster_groups,
cluster_metrics=cluster_metrics,
similarity=similarity,
context=Context(tempdir),
)
mc.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=mc.cluster_view)
connect(b('similarity_view'), event='ready', sender=mc.similarity_view)
b.wait()
assert 'my_metrics' in mc.columns
def test_supervisor_select_1(qtbot, supervisor):
# WARNING: always use actions in tests, because this doesn't call
    # the supervisor method directly, but raises an event, enqueues the task,
    # and calls TaskLogger.process(), which handles the cascade of callbacks.
supervisor.select_actions.select([0])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.task_logger.show_history()
def test_supervisor_color(qtbot, supervisor):
supervisor.view_actions.colormap_linear()
supervisor.view_actions.color_field_n_spikes()
supervisor.view_actions.toggle_categorical_colormap(False)
supervisor.view_actions.toggle_logarithmic_colormap(True)
def test_supervisor_select_2(qtbot, supervisor):
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [30])
def test_supervisor_select_order(qtbot, supervisor):
_select(supervisor, [1, 0])
_assert_selected(supervisor, [1, 0])
_select(supervisor, [0, 1])
_assert_selected(supervisor, [0, 1])
def test_supervisor_edge_cases(supervisor):
# Empty selection at first.
ae(supervisor.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])
_select(supervisor, [0])
supervisor.undo()
supervisor.block()
supervisor.redo()
supervisor.block()
# Merge.
supervisor.merge()
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([])
supervisor.block()
_assert_selected(supervisor, [0])
supervisor.merge([10])
supervisor.block()
_assert_selected(supervisor, [0])
# Split.
supervisor.split([])
supervisor.block()
_assert_selected(supervisor, [0])
# Move.
supervisor.move('ignored', [])
supervisor.block()
supervisor.save()
def test_supervisor_save(qtbot, gui, supervisor):
emit('request_save', gui)
def test_supervisor_skip(qtbot, gui, supervisor):
# yield [0, 1, 2, 10, 11, 20, 30]
# # i, g, N, i, g, N, N
expected = [30, 20, 11, 2, 1]
for clu in expected:
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [clu])
def test_supervisor_sort(qtbot, supervisor):
supervisor.sort('id', 'desc')
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
supervisor.select_actions.sort_by_n_spikes()
qtbot.wait(50)
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
def test_supervisor_filter(qtbot, supervisor):
supervisor.filter('5 <= id && id <= 20')
qtbot.wait(50)
_cl = []
supervisor.cluster_view.get_ids(lambda cluster_ids: _cl.extend(cluster_ids))
qtbot.wait(50)
assert _cl == [20, 11, 10]
def test_supervisor_merge_1(qtbot, supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.actions.redo()
supervisor.block()
supervisor.task_logger.show_history()
_assert_selected(supervisor, [31])
assert supervisor.is_dirty()
def test_supervisor_merge_event(qtbot, supervisor):
_select(supervisor, [30], [20])
_l = []
@connect(sender=supervisor)
def on_select(sender, cluster_ids):
_l.append(cluster_ids)
supervisor.actions.merge()
supervisor.block()
# After a merge, there should be only one select event.
assert len(_l) == 1
def test_supervisor_merge_move(qtbot, supervisor):
"""Check that merge then move selects the next cluster in the original
cluster view, not the updated cluster view."""
_select(supervisor, [20, 11], [])
_assert_selected(supervisor, [20, 11])
supervisor.actions.merge()
supervisor.block()
_assert_selected(supervisor, [31])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move('good', 'all')
supervisor.block()
_assert_selected(supervisor, [2])
def test_supervisor_split_0(qtbot, supervisor):
_select(supervisor, [1, 2])
_assert_selected(supervisor, [1, 2])
supervisor.actions.split([1, 2])
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [1, 2])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_1(supervisor):
supervisor.select_actions.select([1, 2])
supervisor.block()
@connect(sender=supervisor)
def on_request_split(sender):
return [1, 2]
supervisor.actions.split()
supervisor.block()
_assert_selected(supervisor, [31, 32, 33])
def test_supervisor_split_2(gui, similarity):
spike_clusters = np.array([0, 0, 1])
supervisor = Supervisor(spike_clusters,
similarity=similarity,
)
supervisor.attach(gui)
b = Barrier()
connect(b('cluster_view'), event='ready', sender=supervisor.cluster_view)
connect(b('similarity_view'), event='ready', sender=supervisor.similarity_view)
b.wait()
supervisor.actions.split([0])
supervisor.block()
_assert_selected(supervisor, [2, 3])
def test_supervisor_state(tempdir, qtbot, gui, supervisor):
supervisor.select(1)
cv = supervisor.cluster_view
assert supervisor.state.cluster_view.current_sort == ('id', 'desc')
assert supervisor.state.cluster_view.selected == [1]
cv.sort_by('id')
assert supervisor.state.cluster_view.current_sort == ('id', 'asc')
cv.set_state({'current_sort': ('n_spikes', 'desc')})
assert supervisor.state.cluster_view.current_sort == ('n_spikes', 'desc')
cv.sort_by('id', 'desc')
assert supervisor.all_cluster_ids == [30, 20, 11, 10, 2, 1, 0]
def test_supervisor_label(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
supervisor.label("my_field", 1.23, cluster_ids=30)
supervisor.block()
assert 'my_field' in supervisor.fields
assert supervisor.get_labels('my_field')[20] == 3.14
assert supervisor.get_labels('my_field')[30] == 1.23
def test_supervisor_label_cluster_1(supervisor):
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Same value for the old clusters.
l = supervisor.get_labels('my_field')
assert l[20] == l[30] == 3.14
up = supervisor.merge()
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_2(supervisor):
_select(supervisor, [20])
supervisor.label("my_field", 3.14)
supervisor.block()
# One of the parents.
l = supervisor.get_labels('my_field')
assert l[20] == 3.14
assert l[30] is None
up = supervisor.merge([20, 30])
supervisor.block()
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_label_cluster_3(supervisor):
# Conflict: largest cluster wins.
_select(supervisor, [20, 30])
supervisor.label("my_field", 3.14)
supervisor.block()
# Create merged cluster from 20 and 30.
up = supervisor.merge()
new = up.added[0]
supervisor.block()
    # It got the label of its parents.
assert supervisor.get_labels('my_field')[new] == 3.14
# Now, we label a smaller cluster.
supervisor.label("my_field", 2.718, cluster_ids=[10])
# We merge the large and small cluster together.
up = supervisor.merge(up.added + [10])
supervisor.block()
# The new cluster should have the value of the first, merged big cluster, i.e. 3.14.
assert supervisor.get_labels('my_field')[up.added[0]] == 3.14
def test_supervisor_move_1(supervisor):
_select(supervisor, [20])
_assert_selected(supervisor, [20])
assert not supervisor.move('', '')
supervisor.actions.move('noise', 'all')
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [11])
def test_supervisor_move_2(supervisor):
_select(supervisor, [20], [10])
_assert_selected(supervisor, [20, 10])
supervisor.actions.move('noise', 10)
supervisor.block()
_assert_selected(supervisor, [20, 2])
supervisor.actions.undo()
supervisor.block()
_assert_selected(supervisor, [20, 10])
supervisor.actions.redo()
supervisor.block()
_assert_selected(supervisor, [20, 2])
def test_supervisor_move_3(qtbot, supervisor):
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.actions.move_best_to_noise()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.actions.move_best_to_mua()
supervisor.block()
_assert_selected(supervisor, [11])
supervisor.actions.move_best_to_good()
supervisor.block()
_assert_selected(supervisor, [2])
    assert supervisor.cluster_meta.get('group', 30) == 'noise'
    assert supervisor.cluster_meta.get('group', 20) == 'mua'
    assert supervisor.cluster_meta.get('group', 11) == 'good'
def test_supervisor_move_4(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_similar_to_noise()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.actions.move_similar_to_mua()
supervisor.block()
_assert_selected(supervisor, [30, 2])
supervisor.actions.move_similar_to_good()
supervisor.block()
_assert_selected(supervisor, [30, 1])
    assert supervisor.cluster_meta.get('group', 20) == 'noise'
    assert supervisor.cluster_meta.get('group', 11) == 'mua'
    assert supervisor.cluster_meta.get('group', 2) == 'good'
def test_supervisor_move_5(supervisor):
_select(supervisor, [30], [20])
_assert_selected(supervisor, [30, 20])
supervisor.actions.move_all_to_noise()
supervisor.block()
_assert_selected(supervisor, [11, 2])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [11, 1])
supervisor.actions.move_all_to_mua()
supervisor.block()
_assert_selected(supervisor, [2])
supervisor.actions.move_all_to_good()
supervisor.block()
_assert_selected(supervisor, [])
supervisor.cluster_meta.get('group', 30) == 'noise'
supervisor.cluster_meta.get('group', 20) == 'noise'
supervisor.cluster_meta.get('group', 11) == 'mua'
supervisor.cluster_meta.get('group', 10) == 'mua'
supervisor.cluster_meta.get('group', 2) == 'good'
supervisor.cluster_meta.get('group', 1) == 'good'
def test_supervisor_reset(qtbot, supervisor):
supervisor.select_actions.select([10, 11])
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 20])
supervisor.select_actions.next()
supervisor.block()
_assert_selected(supervisor, [30, 11])
supervisor.select_actions.previous()
supervisor.block()
_assert_selected(supervisor, [30, 20])
def test_supervisor_nav(qtbot, supervisor):
supervisor.select_actions.reset_wizard()
supervisor.block()
_assert_selected(supervisor, [30])
supervisor.select_actions.next_best()
supervisor.block()
_assert_selected(supervisor, [20])
supervisor.select_actions.previous_best()
supervisor.block()
_assert_selected(supervisor, [30])
|
[
"phylib.utils.emit",
"phy.gui.tests.test_widgets._assert",
"phylib.utils.Bunch",
"phy.gui.qt.qInstallMessageHandler",
"numpy.repeat",
"phy.utils.context.Context",
"phy.gui.tests.test_widgets._wait_until_table_ready",
"phy.gui.GUI",
"numpy.array",
"pytest.raises",
"phylib.utils.connect",
"phy.gui.widgets.Barrier",
"numpy.testing.assert_array_equal"
] |
[((978, 1009), 'phy.gui.qt.qInstallMessageHandler', 'qInstallMessageHandler', (['handler'], {}), '(handler)\n', (1000, 1009), False, 'from phy.gui.qt import qInstallMessageHandler\n'), ((1313, 1374), 'phy.gui.GUI', 'GUI', ([], {'position': '(200, 100)', 'size': '(500, 500)', 'config_dir': 'tempdir'}), '(position=(200, 100), size=(500, 500), config_dir=tempdir)\n', (1316, 1374), False, 'from phy.gui import GUI\n'), ((1673, 1698), 'numpy.repeat', 'np.repeat', (['cluster_ids', '(2)'], {}), '(cluster_ids, 2)\n', (1682, 1698), True, 'import numpy as np\n'), ((1948, 1957), 'phy.gui.widgets.Barrier', 'Barrier', ([], {}), '()\n', (1955, 1957), False, 'from phy.gui.widgets import Barrier\n'), ((6187, 6221), 'phy.gui.tests.test_widgets._wait_until_table_ready', '_wait_until_table_ready', (['qtbot', 'cv'], {}), '(qtbot, cv)\n', (6210, 6221), False, 'from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready\n'), ((6607, 6641), 'phy.gui.tests.test_widgets._wait_until_table_ready', '_wait_until_table_ready', (['qtbot', 'sv'], {}), '(qtbot, sv)\n', (6630, 6641), False, 'from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready\n'), ((6648, 6666), 'phylib.utils.connect', 'connect', ([], {'sender': 'sv'}), '(sender=sv)\n', (6655, 6666), False, 'from phylib.utils import connect, Bunch, emit\n'), ((6840, 6876), 'phy.gui.tests.test_widgets._assert', '_assert', (['sv.get_ids', '[105, 115, 107]'], {}), '(sv.get_ids, [105, 115, 107])\n', (6847, 6876), False, 'from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready\n'), ((7082, 7116), 'phy.gui.tests.test_widgets._wait_until_table_ready', '_wait_until_table_ready', (['qtbot', 'cv'], {}), '(qtbot, cv)\n', (7105, 7116), False, 'from phy.gui.tests.test_widgets import _assert, _wait_until_table_ready\n'), ((8363, 8387), 'phylib.utils.emit', 'emit', (['"""is_busy"""', 'o', '(True)'], {}), "('is_busy', o, True)\n", (8367, 8387), False, 'from phylib.utils import connect, Bunch, emit\n'), ((8568, 8593), 'phylib.utils.emit', 'emit', (['"""is_busy"""', 'o', '(False)'], {}), "('is_busy', o, False)\n", (8572, 8593), False, 'from phylib.utils import connect, Bunch, emit\n'), ((8702, 8752), 'phylib.utils.emit', 'emit', (['"""action"""', 'supervisor.action_creator', '"""merge"""'], {}), "('action', supervisor.action_creator, 'merge')\n", (8706, 8752), False, 'from phylib.utils import connect, Bunch, emit\n'), ((8942, 8967), 'numpy.repeat', 'np.repeat', (['cluster_ids', '(2)'], {}), '(cluster_ids, 2)\n', (8951, 8967), True, 'import numpy as np\n'), ((9361, 9370), 'phy.gui.widgets.Barrier', 'Barrier', ([], {}), '()\n', (9368, 9370), False, 'from phy.gui.widgets import Barrier\n'), ((10697, 10761), 'numpy.testing.assert_array_equal', 'ae', (['supervisor.clustering.cluster_ids', '[0, 1, 2, 10, 11, 20, 30]'], {}), '(supervisor.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])\n', (10699, 10761), True, 'from numpy.testing import assert_array_equal as ae\n'), ((11408, 11433), 'phylib.utils.emit', 'emit', (['"""request_save"""', 'gui'], {}), "('request_save', gui)\n", (11412, 11433), False, 'from phylib.utils import connect, Bunch, emit\n'), ((12914, 12940), 'phylib.utils.connect', 'connect', ([], {'sender': 'supervisor'}), '(sender=supervisor)\n', (12921, 12940), False, 'from phylib.utils import connect, Bunch, emit\n'), ((14265, 14291), 'phylib.utils.connect', 'connect', ([], {'sender': 'supervisor'}), '(sender=supervisor)\n', (14272, 14291), False, 'from phylib.utils import connect, Bunch, emit\n'), ((14519, 14538), 'numpy.array', 
'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (14527, 14538), True, 'import numpy as np\n'), ((14701, 14710), 'phy.gui.widgets.Barrier', 'Barrier', ([], {}), '()\n', (14708, 14710), False, 'from phy.gui.widgets import Barrier\n'), ((8482, 8502), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (8488, 8502), False, 'from pytest import yield_fixture, fixture, raises\n'), ((8512, 8562), 'phylib.utils.emit', 'emit', (['"""action"""', 'supervisor.action_creator', '"""merge"""'], {}), "('action', supervisor.action_creator, 'merge')\n", (8516, 8562), False, 'from phylib.utils import connect, Bunch, emit\n'), ((1869, 1885), 'phy.utils.context.Context', 'Context', (['tempdir'], {}), '(tempdir)\n', (1876, 1885), False, 'from phy.utils.context import Context\n'), ((9294, 9310), 'phy.utils.context.Context', 'Context', (['tempdir'], {}), '(tempdir)\n', (9301, 9310), False, 'from phy.utils.context import Context\n'), ((2989, 3027), 'phylib.utils.Bunch', 'Bunch', ([], {'deleted': 'cluster_ids', 'added': '[to]'}), '(deleted=cluster_ids, added=[to])\n', (2994, 3027), False, 'from phylib.utils import connect, Bunch, emit\n'), ((3125, 3178), 'phylib.utils.Bunch', 'Bunch', ([], {'deleted': 'old_cluster_ids', 'added': 'new_cluster_ids'}), '(deleted=old_cluster_ids, added=new_cluster_ids)\n', (3130, 3178), False, 'from phylib.utils import connect, Bunch, emit\n'), ((3255, 3306), 'phylib.utils.Bunch', 'Bunch', ([], {'metadata_changed': 'which', 'metadata_value': 'group'}), '(metadata_changed=which, metadata_value=group)\n', (3260, 3306), False, 'from phylib.utils import connect, Bunch, emit\n'), ((3369, 3376), 'phylib.utils.Bunch', 'Bunch', ([], {}), '()\n', (3374, 3376), False, 'from phylib.utils import connect, Bunch, emit\n'), ((3439, 3446), 'phylib.utils.Bunch', 'Bunch', ([], {}), '()\n', (3444, 3446), False, 'from phylib.utils import connect, Bunch, emit\n')]
|
# The MIT License (MIT)
# Copyright (c) 2021 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import xarray as xr
def is_empty_cube(cube: xr.Dataset) -> bool:
return len(cube.data_vars) == 0
def strip_cube(cube: xr.Dataset) -> xr.Dataset:
drop_vars = [k for k, v in cube.data_vars.items()
if len(v.shape) < 3
or np.product(v.shape) == 0
or v.shape[-2] < 2
or v.shape[-1] < 2]
if drop_vars:
return cube.drop_vars(drop_vars)
return cube
|
[
"numpy.product"
] |
[((1433, 1452), 'numpy.product', 'np.product', (['v.shape'], {}), '(v.shape)\n', (1443, 1452), True, 'import numpy as np\n')]
|