code | apis | extract_api
---|---|---|
import tensorflow as tf
import numpy as np
def img2mse(x, y):
return tf.reduce_mean(tf.square(x - y))
def mse2psnr(x):
return -10.*tf.math.log(x)/tf.math.log(10.)
def variance_weighted_loss(tof, gt, c=1.):
# `tof` is already the predicted tof map (callers pass outputs['tof_map'])
tof_std = tof[..., -1:]
tof = tof[..., :2]
gt = gt[..., :2]
mse = tf.reduce_mean(tf.square(tof - gt) / (2 * tf.square(tof_std)))
return (mse + c * tf.reduce_mean(tf.math.log(tof_std)))
def tof_loss_variance(target_tof, outputs, tof_weight):
img_loss = variance_weighted_loss(outputs['tof_map'], target_tof) * tof_weight
img_loss0 = 0.0
if 'tof_map0' in outputs:
img_loss0 = variance_weighted_loss(outputs['tof_map0'], target_tof) * tof_weight
return img_loss, img_loss0
def tof_loss_default(target_tof, outputs, tof_weight):
img_loss = img2mse(outputs['tof_map'][..., :2], target_tof[..., :2]) * tof_weight
img_loss0 = 0.0
if 'tof_map0' in outputs:
img_loss0 = img2mse(outputs['tof_map0'][..., :2], target_tof[..., :2]) * tof_weight
return img_loss, img_loss0
def color_loss_default(target_color, outputs, color_weight):
img_loss = img2mse(outputs['color_map'], target_color) * color_weight
img_loss0 = 0.0
if 'color_map0' in outputs:
img_loss0 = img2mse(outputs['color_map0'], target_color) * color_weight
return img_loss, img_loss0
def disparity_loss_default(target_depth, outputs, disp_weight, near, far):
target_disp = 1. / np.clip(target_depth, near, far)
img_loss = img2mse(outputs['disp_map'], target_disp) * disp_weight
img_loss0 = 0.0
if 'disp_map0' in outputs:
img_loss0 = img2mse(outputs['disp_map0'], target_disp) * disp_weight
return img_loss, img_loss0
def depth_loss_default(target_depth, outputs, depth_weight):
img_loss = img2mse(outputs['depth_map'], target_depth) * depth_weight
img_loss0 = 0.0
if 'depth_map0' in outputs:
img_loss0 = img2mse(outputs['depth_map0'], target_depth) * depth_weight
return img_loss, img_loss0
def empty_space_loss(outputs):
loss = tf.reduce_mean(tf.abs(outputs['acc_map']))
if 'acc_map0' in outputs:
loss += tf.reduce_mean(tf.abs(outputs['acc_map0']))
return loss
def make_pose_loss(model, key):
def loss_fn(_):
return tf.reduce_mean(tf.square(
tf.abs(model.poses[key][1:] - model.poses[key][:-1])
))
return loss_fn
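# --- Hedged usage sketch (added; not part of the original file) ---
# Shows how the loss helpers above could be combined into a single training objective.
# The `outputs` dict keys ('color_map', 'tof_map', 'disp_map', plus the '*0' coarse
# variants) are taken from the functions above; the weights, near/far bounds and the
# choice of tof_loss_default over tof_loss_variance are illustrative assumptions.
def total_loss_example(outputs, target_color, target_tof, target_depth,
                       color_weight=1., tof_weight=1., disp_weight=0.1,
                       near=0.1, far=10.):
    c_loss, c_loss0 = color_loss_default(target_color, outputs, color_weight)
    t_loss, t_loss0 = tof_loss_default(target_tof, outputs, tof_weight)
    d_loss, d_loss0 = disparity_loss_default(target_depth, outputs, disp_weight, near, far)
    return c_loss + c_loss0 + t_loss + t_loss0 + d_loss + d_loss0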
|
[
"tensorflow.math.log",
"tensorflow.abs",
"numpy.clip",
"tensorflow.square"
] |
[((89, 105), 'tensorflow.square', 'tf.square', (['(x - y)'], {}), '(x - y)\n', (98, 105), True, 'import tensorflow as tf\n'), ((156, 173), 'tensorflow.math.log', 'tf.math.log', (['(10.0)'], {}), '(10.0)\n', (167, 173), True, 'import tensorflow as tf\n'), ((1490, 1522), 'numpy.clip', 'np.clip', (['target_depth', 'near', 'far'], {}), '(target_depth, near, far)\n', (1497, 1522), True, 'import numpy as np\n'), ((2134, 2160), 'tensorflow.abs', 'tf.abs', (["outputs['acc_map']"], {}), "(outputs['acc_map'])\n", (2140, 2160), True, 'import tensorflow as tf\n'), ((141, 155), 'tensorflow.math.log', 'tf.math.log', (['x'], {}), '(x)\n', (152, 155), True, 'import tensorflow as tf\n'), ((345, 364), 'tensorflow.square', 'tf.square', (['(tof - gt)'], {}), '(tof - gt)\n', (354, 364), True, 'import tensorflow as tf\n'), ((2224, 2251), 'tensorflow.abs', 'tf.abs', (["outputs['acc_map0']"], {}), "(outputs['acc_map0'])\n", (2230, 2251), True, 'import tensorflow as tf\n'), ((372, 390), 'tensorflow.square', 'tf.square', (['tof_std'], {}), '(tof_std)\n', (381, 390), True, 'import tensorflow as tf\n'), ((430, 450), 'tensorflow.math.log', 'tf.math.log', (['tof_std'], {}), '(tof_std)\n', (441, 450), True, 'import tensorflow as tf\n'), ((2376, 2428), 'tensorflow.abs', 'tf.abs', (['(model.poses[key][1:] - model.poses[key][:-1])'], {}), '(model.poses[key][1:] - model.poses[key][:-1])\n', (2382, 2428), True, 'import tensorflow as tf\n')]
|
'''
Title: Time Series Deconfounder: Estimating Treatment Effects over Time in the Presence of Hidden Confounders
Authors: <NAME>, <NAME>, <NAME>
International Conference on Machine Learning (ICML) 2020
Last Updated Date: July 20th 2020
Code Author: <NAME> (<EMAIL>)
'''
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
import numpy as np
import keras
from tqdm import tqdm
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, DropoutWrapper
from tensorflow.python.ops import rnn
from utils.predictive_checks_utils import compute_test_statistic_all_timesteps
from utils.rnn_utils import AutoregressiveLSTMCell, compute_sequence_length
class FactorModel:
def __init__(self, params, hyperparams):
self.num_treatments = params['num_treatments']
self.num_covariates = params['num_covariates']
self.num_confounders = params['num_confounders']
self.max_sequence_length = params['max_sequence_length']
self.num_epochs = params['num_epochs']
self.rnn_hidden_units = hyperparams['rnn_hidden_units']
self.fc_hidden_units = hyperparams['fc_hidden_units']
self.learning_rate = hyperparams['learning_rate']
self.batch_size = hyperparams['batch_size']
self.rnn_keep_prob = hyperparams['rnn_keep_prob']
tf.compat.v1.reset_default_graph()
self.previous_covariates = tf.compat.v1.placeholder(tf.float32, [None, self.max_sequence_length - 1, self.num_covariates])
self.previous_treatments = tf.compat.v1.placeholder(tf.float32, [None, self.max_sequence_length - 1, self.num_treatments])
self.trainable_init_input = tf.compat.v1.get_variable(name='trainable_init_input',
shape=[self.batch_size, 1,
self.num_covariates + self.num_treatments], trainable=True)
self.current_covariates = tf.placeholder(tf.float32, [None, self.max_sequence_length, self.num_covariates])
self.target_treatments = tf.placeholder(tf.float32, [None, self.max_sequence_length, self.num_treatments])
def build_confounders(self, trainable_state=True):
previous_covariates_and_treatments = tf.concat([self.previous_covariates, self.previous_treatments],
axis=-1)
self.rnn_input = tf.concat([self.trainable_init_input, previous_covariates_and_treatments], axis=1)
self.sequence_length = compute_sequence_length(self.rnn_input)
rnn_cell = DropoutWrapper(LSTMCell(self.rnn_hidden_units, state_is_tuple=False),
output_keep_prob=self.rnn_keep_prob,
state_keep_prob=self.rnn_keep_prob, variational_recurrent=True,
dtype=tf.float32)
autoregressive_cell = AutoregressiveLSTMCell(rnn_cell, self.num_confounders)
if trainable_state:
init_state = tf.get_variable(name='init_cell',
shape=[self.batch_size, autoregressive_cell.state_size],
trainable=True)
else:
init_state = autoregressive_cell.zero_state(self.batch_size, dtype=tf.float32)
rnn_output, _ = rnn.dynamic_rnn(
autoregressive_cell,
self.rnn_input,
initial_state=init_state,
dtype=tf.float32,
sequence_length=self.sequence_length)
# Flatten to apply same weights to all time steps.
rnn_output = tf.reshape(rnn_output, [-1, self.num_confounders])
hidden_confounders = rnn_output
covariates = tf.reshape(self.current_covariates, [-1, self.num_covariates])
self.multitask_input = tf.concat([covariates, hidden_confounders], axis=-1)
self.hidden_confounders = tf.reshape(hidden_confounders,
[-1, self.max_sequence_length, self.num_confounders])
def build_treatment_assignments(self):
self.treatment_prob_predictions = dict()
for treatment in range(self.num_treatments):
treatment_network_layer = tf.layers.dense(self.multitask_input, self.fc_hidden_units,
name='treatment_network_%s' % str(treatment),
activation=tf.nn.leaky_relu)
treatment_output = tf.layers.dense(treatment_network_layer, 1, activation=tf.nn.sigmoid,
name='treatment_output_%s' % str(treatment))
self.treatment_prob_predictions[treatment] = treatment_output
self.treatment_prob_predictions = tf.concat(list(self.treatment_prob_predictions.values()), axis=-1)
return self.treatment_prob_predictions
def build_network(self):
self.build_confounders()
self.treatment_prob_predictions = self.build_treatment_assignments()
return self.treatment_prob_predictions
def gen_epoch(self, dataset):
dataset_size = dataset['previous_covariates'].shape[0]
num_batches = int(dataset_size / self.batch_size) + 1
for i in range(num_batches):
if (i == num_batches - 1):
batch_samples = range(dataset_size - self.batch_size, dataset_size)
else:
batch_samples = range(i * self.batch_size, (i + 1) * self.batch_size)
batch_previous_covariates = dataset['previous_covariates'][batch_samples, :, :]
batch_previous_treatments = dataset['previous_treatments'][batch_samples, :, :]
batch_current_covariates = dataset['covariates'][batch_samples, :, :]
batch_target_treatments = dataset['treatments'][batch_samples, :, :].astype(np.int32)
yield (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
batch_target_treatments)
def eval_network(self, dataset):
validation_losses = []
for (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
batch_target_treatments) in self.gen_epoch(dataset):
feed_dict = self.build_feed_dictionary(batch_previous_covariates, batch_previous_treatments,
batch_current_covariates, batch_target_treatments)
validation_loss = self.sess.run([self.loss], feed_dict=feed_dict)
validation_losses.append(validation_loss)
validation_loss = np.mean(np.array(validation_losses))
return validation_loss
def compute_test_statistic(self, num_samples, target_treatments, feed_dict, predicted_mask):
test_statistic = np.zeros(shape=(self.max_sequence_length,))
for sample_idx in range(num_samples):
[treatment_probability] = self.sess.run(
[self.treatment_prob_predictions], feed_dict=feed_dict)
treatment_probability = np.reshape(treatment_probability, newshape=(
self.batch_size, self.max_sequence_length, self.num_treatments))
test_statistic_sequence = compute_test_statistic_all_timesteps(target_treatments,
treatment_probability,
self.max_sequence_length, predicted_mask)
test_statistic += test_statistic_sequence
test_statistic = test_statistic / num_samples
return test_statistic
def eval_predictive_checks(self, dataset):
num_replications = 50
num_samples = 50
p_values_over_time = np.zeros(shape=(self.max_sequence_length,))
steps = 0
for (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
batch_target_treatments) in self.gen_epoch(dataset):
feed_dict = self.build_feed_dictionary(batch_previous_covariates, batch_previous_treatments,
batch_current_covariates, batch_target_treatments)
mask = tf.sign(tf.reduce_max(tf.abs(self.rnn_input), axis=2))
[seq_lengths, predicted_mask] = self.sess.run([self.sequence_length, mask], feed_dict=feed_dict)
steps = steps + 1
""" Compute test statistics for replicas """
test_statistic_replicas = np.zeros(shape=(num_replications, self.max_sequence_length))
for replication_idx in range(num_replications):
[treatment_replica, treatment_prob_pred] = self.sess.run(
[self.treatment_realizations, self.treatment_prob_predictions], feed_dict=feed_dict)
treatment_replica = np.reshape(treatment_replica, newshape=(
self.batch_size, self.max_sequence_length, self.num_treatments))
test_statistic_replicas[replication_idx] = self.compute_test_statistic(num_samples, treatment_replica,
feed_dict, predicted_mask)
""" Compute test statistic for target """
test_statistic_target = self.compute_test_statistic(num_samples, batch_target_treatments, feed_dict,
predicted_mask)
probability = np.mean(np.less(test_statistic_replicas, test_statistic_target).astype(np.int32), axis=0)
p_values_over_time += probability
p_values_over_time = p_values_over_time / steps
return p_values_over_time
def train(self, dataset_train, dataset_val, verbose=False):
self.treatment_prob_predictions = self.build_network()
self.treatment_realizations = tf.distributions.Bernoulli(probs=self.treatment_prob_predictions).sample()
self.loss = self.compute_loss(self.target_treatments, self.treatment_prob_predictions)
optimizer = self.get_optimizer()
# Setup tensorflow
tf_device = 'gpu'
if tf_device == "cpu":
tf_config = tf.compat.v1.ConfigProto(log_device_placement=False, device_count={'GPU': 0})
else:
tf_config = tf.compat.v1.ConfigProto(log_device_placement=False, device_count={'GPU': 1})
tf_config.gpu_options.allow_growth = True
self.sess = tf.compat.v1.Session(config=tf_config)
self.sess.run(tf.compat.v1.global_variables_initializer())
self.sess.run(tf.compat.v1.local_variables_initializer())
for epoch in tqdm(range(self.num_epochs)):
for (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
batch_target_treatments) in self.gen_epoch(dataset_train):
feed_dict = self.build_feed_dictionary(batch_previous_covariates, batch_previous_treatments,
batch_current_covariates, batch_target_treatments)
_, training_loss = self.sess.run([optimizer, self.loss], feed_dict=feed_dict)
if (verbose):
logging.info(
"Epoch {} out of {}: Summary| Training loss = {}".format(
(epoch + 1), self.num_epochs, training_loss))
if ((epoch + 1) % 100 == 0):
validation_loss = self.eval_network(dataset_val)
logging.info(
"Epoch {} out of {}: Summary| Validation loss = {}".format(epoch, self.num_epochs, validation_loss))
def build_feed_dictionary(self, batch_previous_covariates, batch_previous_treatments,
batch_current_covariates, batch_target_treatments):
feed_dict = {self.previous_covariates: batch_previous_covariates,
self.previous_treatments: batch_previous_treatments,
self.current_covariates: batch_current_covariates,
self.target_treatments: batch_target_treatments}
return feed_dict
def compute_loss(self, target_treatments, treatment_predictions):
target_treatments_reshape = tf.reshape(target_treatments, [-1, self.num_treatments])
mask = tf.sign(tf.reduce_max(tf.abs(self.rnn_input), axis=2))
flat_mask = tf.reshape(mask, [-1, 1])
cross_entropy = - tf.reduce_sum((target_treatments_reshape * tf.math.log(
tf.clip_by_value(treatment_predictions, 1e-10, 1.0)) + (1 - target_treatments_reshape) * (tf.math.log(
tf.clip_by_value(1 - treatment_predictions, 1e-10, 1.0)))) * flat_mask, axis=0)
self.mask = mask
cross_entropy /= tf.reduce_sum(tf.cast(self.sequence_length, tf.float32), axis=0)
return tf.reduce_mean(cross_entropy)
def get_optimizer(self):
optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
return optimizer
def compute_hidden_confounders(self, dataset):
dataset_size = dataset['covariates'].shape[0]
hidden_confounders = np.zeros(
shape=(dataset_size, self.max_sequence_length, self.num_confounders))
num_batches = int(dataset_size / self.batch_size) + 1
batch_id = 0
num_samples = 50
for (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
batch_target_treatments) in self.gen_epoch(dataset):
feed_dict = self.build_feed_dictionary(batch_previous_covariates, batch_previous_treatments,
batch_current_covariates, batch_target_treatments)
total_predicted_hidden_confounders = np.zeros(
shape=(self.batch_size, self.max_sequence_length, self.num_confounders))
for sample in range(num_samples):
predicted_hidden_confounders, predicted_treatment_probs = self.sess.run(
[self.hidden_confounders, self.treatment_prob_predictions], feed_dict=feed_dict)
total_predicted_hidden_confounders += predicted_hidden_confounders
total_predicted_hidden_confounders /= num_samples
if (batch_id == num_batches - 1):
batch_samples = range(dataset_size - self.batch_size, dataset_size)
else:
batch_samples = range(batch_id * self.batch_size, (batch_id + 1) * self.batch_size)
batch_id += 1
hidden_confounders[batch_samples] = total_predicted_hidden_confounders
return hidden_confounders
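# --- Hedged usage sketch (added; not part of the original file) ---
# Wires FactorModel together using only the dict keys the class itself reads
# (params/hyperparams keys from __init__, dataset keys from gen_epoch()).
# All concrete values below are illustrative assumptions.
if __name__ == '__main__':
    params = {'num_treatments': 3, 'num_covariates': 5, 'num_confounders': 1,
              'max_sequence_length': 30, 'num_epochs': 100}
    hyperparams = {'rnn_hidden_units': 64, 'fc_hidden_units': 32, 'learning_rate': 0.001,
                   'batch_size': 64, 'rnn_keep_prob': 0.9}
    model = FactorModel(params, hyperparams)
    # dataset_train / dataset_val are dicts of numpy arrays keyed by 'previous_covariates',
    # 'previous_treatments', 'covariates' and 'treatments', as consumed by gen_epoch():
    # model.train(dataset_train, dataset_val, verbose=True)
    # hidden_confounders = model.compute_hidden_confounders(dataset_train)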
|
[
"tensorflow.clip_by_value",
"tensorflow.distributions.Bernoulli",
"tensorflow.reshape",
"tensorflow.get_variable",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.abs",
"utils.predictive_checks_utils.compute_test_statistic_all_timesteps",
"tensorflow.compat.v1.placeholder",
"tensorflow.concat",
"tensorflow.placeholder",
"tensorflow.compat.v1.Session",
"tensorflow.cast",
"numpy.reshape",
"numpy.less",
"tensorflow.compat.v1.get_variable",
"tensorflow.reduce_mean",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.python.ops.rnn.dynamic_rnn",
"utils.rnn_utils.AutoregressiveLSTMCell",
"logging.basicConfig",
"utils.rnn_utils.compute_sequence_length",
"numpy.zeros",
"tensorflow.compat.v1.local_variables_initializer",
"numpy.array",
"tensorflow.compat.v1.reset_default_graph",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.train.AdamOptimizer",
"logging.getLogger"
] |
[((287, 362), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s:%(message)s', level=logging.INFO)\n", (306, 362), False, 'import logging\n'), ((363, 382), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (380, 382), False, 'import logging\n'), ((1387, 1421), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (1419, 1421), True, 'import tensorflow as tf\n'), ((1457, 1556), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[None, self.max_sequence_length - 1, self.num_covariates]'], {}), '(tf.float32, [None, self.max_sequence_length - 1,\n self.num_covariates])\n', (1481, 1556), True, 'import tensorflow as tf\n'), ((1588, 1687), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32', '[None, self.max_sequence_length - 1, self.num_treatments]'], {}), '(tf.float32, [None, self.max_sequence_length - 1,\n self.num_treatments])\n', (1612, 1687), True, 'import tensorflow as tf\n'), ((1720, 1866), 'tensorflow.compat.v1.get_variable', 'tf.compat.v1.get_variable', ([], {'name': '"""trainable_init_input"""', 'shape': '[self.batch_size, 1, self.num_covariates + self.num_treatments]', 'trainable': '(True)'}), "(name='trainable_init_input', shape=[self.\n batch_size, 1, self.num_covariates + self.num_treatments], trainable=True)\n", (1745, 1866), True, 'import tensorflow as tf\n'), ((2008, 2094), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.max_sequence_length, self.num_covariates]'], {}), '(tf.float32, [None, self.max_sequence_length, self.\n num_covariates])\n', (2022, 2094), True, 'import tensorflow as tf\n'), ((2123, 2209), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.max_sequence_length, self.num_treatments]'], {}), '(tf.float32, [None, self.max_sequence_length, self.\n num_treatments])\n', (2137, 2209), True, 'import tensorflow as tf\n'), ((2306, 2378), 'tensorflow.concat', 'tf.concat', (['[self.previous_covariates, self.previous_treatments]'], {'axis': '(-1)'}), '([self.previous_covariates, self.previous_treatments], axis=-1)\n', (2315, 2378), True, 'import tensorflow as tf\n'), ((2459, 2545), 'tensorflow.concat', 'tf.concat', (['[self.trainable_init_input, previous_covariates_and_treatments]'], {'axis': '(1)'}), '([self.trainable_init_input, previous_covariates_and_treatments],\n axis=1)\n', (2468, 2545), True, 'import tensorflow as tf\n'), ((2573, 2612), 'utils.rnn_utils.compute_sequence_length', 'compute_sequence_length', (['self.rnn_input'], {}), '(self.rnn_input)\n', (2596, 2612), False, 'from utils.rnn_utils import AutoregressiveLSTMCell, compute_sequence_length\n'), ((2955, 3009), 'utils.rnn_utils.AutoregressiveLSTMCell', 'AutoregressiveLSTMCell', (['rnn_cell', 'self.num_confounders'], {}), '(rnn_cell, self.num_confounders)\n', (2977, 3009), False, 'from utils.rnn_utils import AutoregressiveLSTMCell, compute_sequence_length\n'), ((3384, 3523), 'tensorflow.python.ops.rnn.dynamic_rnn', 'rnn.dynamic_rnn', (['autoregressive_cell', 'self.rnn_input'], {'initial_state': 'init_state', 'dtype': 'tf.float32', 'sequence_length': 'self.sequence_length'}), '(autoregressive_cell, self.rnn_input, initial_state=\n init_state, dtype=tf.float32, sequence_length=self.sequence_length)\n', (3399, 3523), False, 'from tensorflow.python.ops import rnn\n'), ((3661, 3711), 'tensorflow.reshape', 'tf.reshape', (['rnn_output', '[-1, self.num_confounders]'], 
{}), '(rnn_output, [-1, self.num_confounders])\n', (3671, 3711), True, 'import tensorflow as tf\n'), ((3774, 3836), 'tensorflow.reshape', 'tf.reshape', (['self.current_covariates', '[-1, self.num_covariates]'], {}), '(self.current_covariates, [-1, self.num_covariates])\n', (3784, 3836), True, 'import tensorflow as tf\n'), ((3868, 3920), 'tensorflow.concat', 'tf.concat', (['[covariates, hidden_confounders]'], {'axis': '(-1)'}), '([covariates, hidden_confounders], axis=-1)\n', (3877, 3920), True, 'import tensorflow as tf\n'), ((3956, 4045), 'tensorflow.reshape', 'tf.reshape', (['hidden_confounders', '[-1, self.max_sequence_length, self.num_confounders]'], {}), '(hidden_confounders, [-1, self.max_sequence_length, self.\n num_confounders])\n', (3966, 4045), True, 'import tensorflow as tf\n'), ((6847, 6890), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.max_sequence_length,)'}), '(shape=(self.max_sequence_length,))\n', (6855, 6890), True, 'import numpy as np\n'), ((7809, 7852), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.max_sequence_length,)'}), '(shape=(self.max_sequence_length,))\n', (7817, 7852), True, 'import numpy as np\n'), ((10504, 10542), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (10524, 10542), True, 'import tensorflow as tf\n'), ((12285, 12341), 'tensorflow.reshape', 'tf.reshape', (['target_treatments', '[-1, self.num_treatments]'], {}), '(target_treatments, [-1, self.num_treatments])\n', (12295, 12341), True, 'import tensorflow as tf\n'), ((12433, 12458), 'tensorflow.reshape', 'tf.reshape', (['mask', '[-1, 1]'], {}), '(mask, [-1, 1])\n', (12443, 12458), True, 'import tensorflow as tf\n'), ((12881, 12910), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (12895, 12910), True, 'import tensorflow as tf\n'), ((13185, 13263), 'numpy.zeros', 'np.zeros', ([], {'shape': '(dataset_size, self.max_sequence_length, self.num_confounders)'}), '(shape=(dataset_size, self.max_sequence_length, self.num_confounders))\n', (13193, 13263), True, 'import numpy as np\n'), ((2648, 2701), 'tensorflow.contrib.rnn.LSTMCell', 'LSTMCell', (['self.rnn_hidden_units'], {'state_is_tuple': '(False)'}), '(self.rnn_hidden_units, state_is_tuple=False)\n', (2656, 2701), False, 'from tensorflow.contrib.rnn import LSTMCell, DropoutWrapper\n'), ((3064, 3174), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""init_cell"""', 'shape': '[self.batch_size, autoregressive_cell.state_size]', 'trainable': '(True)'}), "(name='init_cell', shape=[self.batch_size,\n autoregressive_cell.state_size], trainable=True)\n", (3079, 3174), True, 'import tensorflow as tf\n'), ((6663, 6690), 'numpy.array', 'np.array', (['validation_losses'], {}), '(validation_losses)\n', (6671, 6690), True, 'import numpy as np\n'), ((7100, 7213), 'numpy.reshape', 'np.reshape', (['treatment_probability'], {'newshape': '(self.batch_size, self.max_sequence_length, self.num_treatments)'}), '(treatment_probability, newshape=(self.batch_size, self.\n max_sequence_length, self.num_treatments))\n', (7110, 7213), True, 'import numpy as np\n'), ((7265, 7389), 'utils.predictive_checks_utils.compute_test_statistic_all_timesteps', 'compute_test_statistic_all_timesteps', (['target_treatments', 'treatment_probability', 'self.max_sequence_length', 'predicted_mask'], {}), '(target_treatments,\n treatment_probability, self.max_sequence_length, predicted_mask)\n', (7301, 7389), False, 'from utils.predictive_checks_utils import 
compute_test_statistic_all_timesteps\n'), ((8549, 8609), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_replications, self.max_sequence_length)'}), '(shape=(num_replications, self.max_sequence_length))\n', (8557, 8609), True, 'import numpy as np\n'), ((10235, 10312), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'log_device_placement': '(False)', 'device_count': "{'GPU': 0}"}), "(log_device_placement=False, device_count={'GPU': 0})\n", (10259, 10312), True, 'import tensorflow as tf\n'), ((10351, 10428), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'log_device_placement': '(False)', 'device_count': "{'GPU': 1}"}), "(log_device_placement=False, device_count={'GPU': 1})\n", (10375, 10428), True, 'import tensorflow as tf\n'), ((10565, 10608), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (10606, 10608), True, 'import tensorflow as tf\n'), ((10632, 10674), 'tensorflow.compat.v1.local_variables_initializer', 'tf.compat.v1.local_variables_initializer', ([], {}), '()\n', (10672, 10674), True, 'import tensorflow as tf\n'), ((12814, 12855), 'tensorflow.cast', 'tf.cast', (['self.sequence_length', 'tf.float32'], {}), '(self.sequence_length, tf.float32)\n', (12821, 12855), True, 'import tensorflow as tf\n'), ((13802, 13888), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.batch_size, self.max_sequence_length, self.num_confounders)'}), '(shape=(self.batch_size, self.max_sequence_length, self.\n num_confounders))\n', (13810, 13888), True, 'import numpy as np\n'), ((8886, 8995), 'numpy.reshape', 'np.reshape', (['treatment_replica'], {'newshape': '(self.batch_size, self.max_sequence_length, self.num_treatments)'}), '(treatment_replica, newshape=(self.batch_size, self.\n max_sequence_length, self.num_treatments))\n', (8896, 8995), True, 'import numpy as np\n'), ((9914, 9979), 'tensorflow.distributions.Bernoulli', 'tf.distributions.Bernoulli', ([], {'probs': 'self.treatment_prob_predictions'}), '(probs=self.treatment_prob_predictions)\n', (9940, 9979), True, 'import tensorflow as tf\n'), ((12380, 12402), 'tensorflow.abs', 'tf.abs', (['self.rnn_input'], {}), '(self.rnn_input)\n', (12386, 12402), True, 'import tensorflow as tf\n'), ((12961, 13003), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (12983, 13003), True, 'import tensorflow as tf\n'), ((8281, 8303), 'tensorflow.abs', 'tf.abs', (['self.rnn_input'], {}), '(self.rnn_input)\n', (8287, 8303), True, 'import tensorflow as tf\n'), ((9529, 9584), 'numpy.less', 'np.less', (['test_statistic_replicas', 'test_statistic_target'], {}), '(test_statistic_replicas, test_statistic_target)\n', (9536, 9584), True, 'import numpy as np\n'), ((12554, 12605), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['treatment_predictions', '(1e-10)', '(1.0)'], {}), '(treatment_predictions, 1e-10, 1.0)\n', (12570, 12605), True, 'import tensorflow as tf\n'), ((12669, 12724), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1 - treatment_predictions)', '(1e-10)', '(1.0)'], {}), '(1 - treatment_predictions, 1e-10, 1.0)\n', (12685, 12724), True, 'import tensorflow as tf\n')]
|
from collections import namedtuple
from datasets import VUPDataset, NUPDataset, MLMDataset
import numpy as np
from data_utils import read_dataset
from models.VUPScorer import VUPScorer
from models.NUPScorer import NUPScorer
from models.MLMScorer import MLMScorer
import argparse
import json
from tqdm.auto import tqdm
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def calc_minmax(model, X_data):
scores = []
with torch.no_grad():
for x in tqdm(X_data):
score = model.predict(x)
scores.append(score)
score_dict = {}
keys = scores[0].keys()
for k in keys:
arr = []
for score in scores:
arr.append(score[k]) # score of each metric
# min_s = min(arr)
# max_s = max(arr)
min_s = np.quantile(arr, 0.25).item()
max_s = np.quantile(arr, 0.75).item()
score_dict[k] = {
'min': min_s,
'max': max_s
}
return score_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculating min and max of MLM for normalization')
parser.add_argument('--weight-path', type=str, default='./checkpoints', help='Path to directory that stores the weight')
parser.add_argument('--data-path', type=str, required=True, help='Path to the directory of training set')
parser.add_argument('--output-path', type=str, default='mlm_minmax_score.json', help='Output path for the min max values')
args = parser.parse_args()
xdata = read_dataset(args.data_path)
model = MLMScorer.load_from_checkpoint(checkpoint_path=args.weight_path).to(device)
model.eval()
print ('[!] loading model complete')
scores = calc_minmax(model, xdata)
print ('[!] normalizing complete')
with open(args.output_path, 'w') as f:
f.write(json.dumps(scores, indent=4))
f.close()
print ('[!] complete')
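# --- Hedged usage sketch (added; not part of the original file) ---
# The JSON written above maps each metric key to its 'min'/'max' values (here the 25th
# and 75th quantiles). A downstream scorer could apply clipped min-max normalization
# roughly as below; the function name and clipping behaviour are assumptions.
def normalize_scores(score_dict, minmax):
    out = {}
    for k, v in score_dict.items():
        lo, hi = minmax[k]['min'], minmax[k]['max']
        out[k] = 0.0 if hi == lo else min(max((v - lo) / (hi - lo), 0.0), 1.0)
    return out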
|
[
"numpy.quantile",
"argparse.ArgumentParser",
"data_utils.read_dataset",
"models.MLMScorer.MLMScorer.load_from_checkpoint",
"json.dumps",
"tqdm.auto.tqdm",
"torch.cuda.is_available",
"torch.no_grad"
] |
[((1130, 1223), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculating min and max of MLM for normalizatiion"""'}), "(description=\n 'Calculating min and max of MLM for normalizatiion')\n", (1153, 1223), False, 'import argparse\n'), ((1625, 1653), 'data_utils.read_dataset', 'read_dataset', (['args.data_path'], {}), '(args.data_path)\n', (1637, 1653), False, 'from data_utils import read_dataset\n'), ((437, 462), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (460, 462), False, 'import torch\n'), ((534, 549), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (547, 549), False, 'import torch\n'), ((568, 580), 'tqdm.auto.tqdm', 'tqdm', (['X_data'], {}), '(X_data)\n', (572, 580), False, 'from tqdm.auto import tqdm\n'), ((1667, 1731), 'models.MLMScorer.MLMScorer.load_from_checkpoint', 'MLMScorer.load_from_checkpoint', ([], {'checkpoint_path': 'args.weight_path'}), '(checkpoint_path=args.weight_path)\n', (1697, 1731), False, 'from models.MLMScorer import MLMScorer\n'), ((1940, 1968), 'json.dumps', 'json.dumps', (['scores'], {'indent': '(4)'}), '(scores, indent=4)\n', (1950, 1968), False, 'import json\n'), ((900, 922), 'numpy.quantile', 'np.quantile', (['arr', '(0.25)'], {}), '(arr, 0.25)\n', (911, 922), True, 'import numpy as np\n'), ((946, 968), 'numpy.quantile', 'np.quantile', (['arr', '(0.75)'], {}), '(arr, 0.75)\n', (957, 968), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
from __future__ import print_function, division
from glob import glob
import astropy.io.fits as pyfits
import sys, os
from os import path, remove
from astropy import log
from astropy.table import Table
from subprocess import check_call
import argparse
import re
import numpy as np
# from nicer.values import *
# Array of DET_IDs that are used
IDS = np.array(
    [
        0, 1, 2, 3, 4, 5, 6, 7,
        10, 11, 12, 13, 14, 15, 16, 17,
        20, 21, 22, 23, 24, 25, 26, 27,
        30, 31, 32, 33, 34, 35, 36, 37,
        40, 41, 42, 43, 44, 45, 46, 47,
        50, 51, 52, 53, 54, 55, 56, 57,
        60, 61, 62, 63, 64, 65, 66, 67,
    ]
)
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(
description="Compute deadtime correction to an EXPOSURE defined by a GTI extension, for a single OBSID."
)
parser.add_argument("obsdir", help="Directory containing the raw data for this OBSID")
parser.add_argument(
"gtifile",
help="FITS file containing a GTI extension to be used. Can be an event file, PHA file or any FITS file with a 'GTI' extension.",
)
parser.add_argument(
"--mask", help="Mask particular FPMs", nargs="+", type=int, default=[]
)
parser.add_argument("--plot", help="Plot deadtime per FPM", action="store_true")
args = parser.parse_args()
# The GTI file is assumed to apply to all FPMs. This is normally the case since the user
# is operating on a merged event file whose GTI is the AND of all the individual MPU GTIs;
# they may then make additional GTI selections that are more restrictive than that.
# So, we can go over each MPU file and apply the GTI before counting up the deadtime.
# Get the names of all the individual MPU files
gstr = path.join(args.obsdir, "xti/event_uf/ni*mpu?_uf.evt*")
log.debug("Glob string {}".format(gstr))
ufiles = glob(gstr)
ufiles.sort()
log.info(
"Reading unfiltered events from these files :\n\t{}".format("\n\t".join(ufiles))
)
if len(ufiles) != 7:
log.error("Did not find 7 MPU files!")
fpm_deadtime = np.zeros(len(IDS))
t_mpu = -1
log.info("Mask {}".format(args.mask))
for i, det_id in enumerate(IDS):
if det_id in args.mask:
continue
mpu = det_id // 10
log.debug("{} DET_ID {} MPU {} File {}".format(i, det_id, mpu, ufiles[mpu]))
# Only read the raw MPU file once per MPU since all the FPMs for this MPU are in this file
if mpu != t_mpu:
cmd = "niextract-events {0} eventsout={1} timefile='{2}[GTI]' clobber=yes".format(
ufiles[mpu], "tmp.evt", args.gtifile
)
st = check_call(cmd, shell=True)
if st != 0:
log.error("niextract-events failed!")
t = Table.read("tmp.evt", hdu=1)
t_mpu = mpu
dets = t["DET_ID"]
if not np.any(dets == det_id):
fpm_deadtime[i] = 0.0
else:
fpm_deadtime[i] = (t["DEADTIME"][dets == det_id]).sum()
gtitable = Table.read("{}".format(args.gtifile), hdu="GTI")
exp = (gtitable["STOP"] - gtitable["START"]).sum()
log.debug("exp {}".format(exp))
active = np.where(fpm_deadtime > 0)[0]
if not np.any(fpm_deadtime > 0):
deadtime = 0.0
mindead = 0.0
maxdead = 0.0
stddead = 0.0
else:
deadtime = fpm_deadtime[active].mean()
mindead = fpm_deadtime[active].min()
maxdead = fpm_deadtime[active].max()
stddead = fpm_deadtime[active].std()
if args.plot:
if exp > 0:
plt.plot(IDS, 100 * fpm_deadtime / exp, "s")
plt.xlabel("DET_ID")
plt.ylabel("Deadtime %")
plt.title(t.meta["OBS_ID"])
# plt.savefig("deadtimeplots/{0}_deadtimes.png".format(t.meta["OBS_ID"]))
plt.show()
if exp == 0.0:
percent_frac = 0.0
else:
percent_frac = 100.0 * deadtime / exp
print(
"\nFile {} Exposure {:12.5f}, Mean Deadtime {:12.5f} ({:.3f} %) -> Livetime {:12.5f}".format(
args.gtifile, exp, deadtime, percent_frac, exp - deadtime
)
)
print(
"Deadtime Statistics for {} FPM: Min {:12.5f} Max {:12.5f} Std {:12.5f}".format(
len(active), mindead, maxdead, stddead
)
)
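# --- Hedged usage note (added; not part of the original script) ---
# Example invocation (script name, OBSID directory and GTI file are hypothetical);
# --mask takes one or more DET_IDs to exclude:
#   python nicer_deadtime.py ./1234567890 merged_cleaned.evt --mask 14 34 --plot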
|
[
"matplotlib.pyplot.title",
"astropy.table.Table.read",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.any",
"numpy.where",
"numpy.array",
"astropy.log.error",
"glob.glob",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"subprocess.check_call"
] |
[((372, 611), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 20, 21, 22, 23, 24,\n 25, 26, 27, 30, 31, 32, 33, 34, 35, 36, 37, 40, 41, 42, 43, 44, 45, 46,\n 47, 50, 51, 52, 53, 54, 55, 56, 57, 60, 61, 62, 63, 64, 65, 66, 67]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 20, 21, \n 22, 23, 24, 25, 26, 27, 30, 31, 32, 33, 34, 35, 36, 37, 40, 41, 42, 43,\n 44, 45, 46, 47, 50, 51, 52, 53, 54, 55, 56, 57, 60, 61, 62, 63, 64, 65,\n 66, 67])\n', (380, 611), True, 'import numpy as np\n'), ((1119, 1258), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute deadtime correction to an EXPOSURE defined by a GTI extension, for a single OBSID."""'}), "(description=\n 'Compute deadtime correction to an EXPOSURE defined by a GTI extension, for a single OBSID.'\n )\n", (1142, 1258), False, 'import argparse\n'), ((2125, 2179), 'os.path.join', 'path.join', (['args.obsdir', '"""xti/event_uf/ni*mpu?_uf.evt*"""'], {}), "(args.obsdir, 'xti/event_uf/ni*mpu?_uf.evt*')\n", (2134, 2179), False, 'from os import path, remove\n'), ((2230, 2240), 'glob.glob', 'glob', (['gstr'], {}), '(gstr)\n', (2234, 2240), False, 'from glob import glob\n'), ((2377, 2415), 'astropy.log.error', 'log.error', (['"""Did not find 7 MPU files!"""'], {}), "('Did not find 7 MPU files!')\n", (2386, 2415), False, 'from astropy import log\n'), ((3434, 3460), 'numpy.where', 'np.where', (['(fpm_deadtime > 0)'], {}), '(fpm_deadtime > 0)\n', (3442, 3460), True, 'import numpy as np\n'), ((3471, 3495), 'numpy.any', 'np.any', (['(fpm_deadtime > 0)'], {}), '(fpm_deadtime > 0)\n', (3477, 3495), True, 'import numpy as np\n'), ((3829, 3849), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""DET_ID"""'], {}), "('DET_ID')\n", (3839, 3849), True, 'import matplotlib.pyplot as plt\n'), ((3854, 3878), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Deadtime %"""'], {}), "('Deadtime %')\n", (3864, 3878), True, 'import matplotlib.pyplot as plt\n'), ((3883, 3910), 'matplotlib.pyplot.title', 'plt.title', (["t.meta['OBS_ID']"], {}), "(t.meta['OBS_ID'])\n", (3892, 3910), True, 'import matplotlib.pyplot as plt\n'), ((3993, 4003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4001, 4003), True, 'import matplotlib.pyplot as plt\n'), ((2960, 2987), 'subprocess.check_call', 'check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (2970, 2987), False, 'from subprocess import check_call\n'), ((3070, 3098), 'astropy.table.Table.read', 'Table.read', (['"""tmp.evt"""'], {'hdu': '(1)'}), "('tmp.evt', hdu=1)\n", (3080, 3098), False, 'from astropy.table import Table\n'), ((3153, 3175), 'numpy.any', 'np.any', (['(dets == det_id)'], {}), '(dets == det_id)\n', (3159, 3175), True, 'import numpy as np\n'), ((3780, 3824), 'matplotlib.pyplot.plot', 'plt.plot', (['IDS', '(100 * fpm_deadtime / exp)', '"""s"""'], {}), "(IDS, 100 * fpm_deadtime / exp, 's')\n", (3788, 3824), True, 'import matplotlib.pyplot as plt\n'), ((3020, 3057), 'astropy.log.error', 'log.error', (['"""niextract-events failed!"""'], {}), "('niextract-events failed!')\n", (3029, 3057), False, 'from astropy import log\n')]
|
import tensorflow as tf
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageColor
import cv2
import time
from styx_msgs.msg import TrafficLight
class TLClassifier(object):
def __init__(self):
self.current_light = TrafficLight.UNKNOWN
SSD_GRAPH_FILE = './frozen_inference_graph.pb'
self.detection_graph = self.load_graph(SSD_GRAPH_FILE)
# The input placeholder for the image.
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
# The classification of the object (integer id).
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
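# NOTE (added): get_classification() below reads self.category_index, which this
# __init__ never sets. A minimal placeholder mapping is sketched here; the real
# id -> name mapping depends on the labels the frozen graph was trained with, so
# these entries are assumptions.
self.category_index = {1: {'name': 'Green'}, 2: {'name': 'Red'}, 3: {'name': 'Yellow'}}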
def load_graph(self,graph_file):
"""Loads a frozen inference graph"""
graph = tf.Graph()
with graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_file, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return graph
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#TODO implement light color prediction
image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
with tf.Session(graph=self.detection_graph) as sess:
# Actual detection.
(boxes, scores, classes) = sess.run([self.detection_boxes, self.detection_scores, self.detection_classes],
feed_dict={self.image_tensor: image_np})
# Remove unnecessary dimensions
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes)
# confidence_cutoff = 0.8
# # Filter boxes with a confidence score less than `confidence_cutoff`
# boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes)
min_score_thresh = .5
count = 0
count1 = 0
# print(scores)
for i in range(boxes.shape[0]):
if scores is None or scores[i] > min_score_thresh:
count1 += 1
class_name = self.category_index[int(classes[i])]['name']
# Traffic light thing
if class_name == 'Red':
count += 1
# print(count)
if count < count1 - count:
self.current_light = TrafficLight.GREEN
else:
self.current_light = TrafficLight.RED
return self.current_light
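# --- Hedged usage sketch (added; not part of the original file) ---
# Minimal driver showing how the classifier is typically called; the image path and
# BGR -> RGB conversion are illustrative assumptions.
# classifier = TLClassifier()
# frame = cv2.cvtColor(cv2.imread('sample_camera_frame.png'), cv2.COLOR_BGR2RGB)
# state = classifier.get_classification(frame)
# print(state == TrafficLight.RED)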
|
[
"numpy.asarray",
"tensorflow.Session",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.GraphDef"
] |
[((1342, 1352), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1350, 1352), True, 'import tensorflow as tf\n'), ((1413, 1426), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1424, 1426), True, 'import tensorflow as tf\n'), ((2063, 2096), 'numpy.asarray', 'np.asarray', (['image'], {'dtype': 'np.uint8'}), '(image, dtype=np.uint8)\n', (2073, 2096), True, 'import numpy as np\n'), ((2114, 2152), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (2124, 2152), True, 'import tensorflow as tf\n'), ((2484, 2501), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (2494, 2501), True, 'import numpy as np\n'), ((2523, 2541), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (2533, 2541), True, 'import numpy as np\n'), ((2564, 2583), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (2574, 2583), True, 'import numpy as np\n'), ((1444, 1476), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['graph_file', '"""rb"""'], {}), "(graph_file, 'rb')\n", (1458, 1476), True, 'import tensorflow as tf\n'), ((1610, 1652), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1629, 1652), True, 'import tensorflow as tf\n')]
|
'''
Created on Feb. 25, 2020
@author: cefect
helper functions w/ Qgis api
'''
#==============================================================================
# imports------------
#==============================================================================
#python
import os, configparser, logging, inspect, copy, datetime, re
import pandas as pd
import numpy as np
#qgis
from qgis.core import *
from qgis.analysis import QgsNativeAlgorithms
from qgis.gui import QgisInterface
from PyQt5.QtCore import QVariant, QMetaType
from PyQt5.QtWidgets import QProgressBar
"""throws depceciationWarning"""
import processing
#==============================================================================
# customs
#==============================================================================
mod_logger = logging.getLogger('Q') #get the root logger
from hlpr.exceptions import QError as Error
import hlpr.basic as basic
from hlpr.basic import get_valid_filename
#==============================================================================
# globals
#==============================================================================
fieldn_max_d = {'SpatiaLite':50, 'ESRI Shapefile':10, 'Memory storage':50, 'GPKG':50}
npc_pytype_d = {'?':bool,
'b':int,
'd':float,
'e':float,
'f':float,
'q':int,
'h':int,
'l':int,
'i':int,
'g':float,
'U':str,
'B':int,
'L':int,
'Q':int,
'H':int,
'I':int,
'O':str, #this is the catchall 'object'
}
type_qvar_py_d = {10:str, 2:int, 135:float, 6:float, 4:int, 1:bool, 16:datetime.datetime, 12:str} #QVariant.types to pythonic types
#parameters for lots of statistic algos
stat_pars_d = {'First': 0, 'Last': 1, 'Count': 2, 'Sum': 3, 'Mean': 4, 'Median': 5,
'St dev (pop)': 6, 'Minimum': 7, 'Maximum': 8, 'Range': 9, 'Minority': 10,
'Majority': 11, 'Variety': 12, 'Q1': 13, 'Q3': 14, 'IQR': 15}
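# --- Hedged usage sketch (added; not part of the original module) ---
# The lookup dicts above translate between numpy dtype codes, QVariant type codes and
# native Python types, e.g. when building QgsFields from a pandas DataFrame. A rough
# illustration (helper name is an assumption; npc_pytype_d keys are numpy dtype.char codes):
# def np_to_pytype(ser):
#     """map a pandas Series' numpy dtype.char onto a native python type"""
#     return npc_pytype_d[ser.dtype.char]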
#==============================================================================
# classes -------------
#==============================================================================
class Qcoms(basic.ComWrkr): #baseclass for working w/ pyqgis outside the native console
driverName = 'SpatiaLite' #default data creation driver type
out_dName = driverName #default output driver/file type
q_hndls = ['crs', 'crsid', 'algo_init', 'qap', 'vlay_drivers']
algo_init = False #flag indicating whether the algos have been initialized
qap = None
mstore = None
def __init__(self,
feedback=None,
#init controls
                 init_q_d = {}, #container of initialized objects
crsid = 'EPSG:4326', #default crsID if no init_q_d is passed
**kwargs
):
""""
#=======================================================================
# plugin use
#=======================================================================
QprojPlugs don't execute super cascade
#=======================================================================
# Qgis inheritance
#=======================================================================
for single standalone runs
all the handles will be generated and Qgis instanced
for console runs
handles should be passed to avoid re-instancing Qgis
for session standalone runs
handles passed
for swapping crs
run set_crs() on the session prior to spawning the child
"""
#=======================================================================
# defaults
#=======================================================================
if feedback is None:
"""by default, building our own feedbacker
passed to ComWrkr.setup_feedback()
"""
feedback = MyFeedBackQ()
#=======================================================================
# cascade
#=======================================================================
super().__init__(
feedback = feedback,
            **kwargs) #initialize the baseclass
log = self.logger
#=======================================================================
# attachments
#=======================================================================
self.fieldn_max_d=fieldn_max_d
self.crsid=crsid
#=======================================================================
# Qgis setup COMMON
#=======================================================================
"""both Plugin and StandAlone runs should call these"""
self.qproj = QgsProject.instance()
"""
each worker will have their own store
used to wipe any intermediate layers
"""
self.mstore = QgsMapLayerStore() #build a new map store
#do your own init (standalone runs)
if len(init_q_d) == 0:
self._init_standalone()
else:
#check everything is there
miss_l = set(self.q_hndls).difference(init_q_d.keys())
assert len(miss_l)==0, 'init_q_d missing handles: %s'%miss_l
#set the handles
for k,v in init_q_d.items():
setattr(self, k, v)
self._upd_qd()
self.proj_checks()
#=======================================================================
# attach inputs
#=======================================================================
self.logger.debug('Qcoms.__init__ finished w/ out_dir: \n %s'%self.out_dir)
return
#==========================================================================
# standalone methods-----------
#==========================================================================
def _init_standalone(self, #setup for qgis runs
crsid = None,
):
"""
WARNING! do not call twice (phantom crash)
"""
log = self.logger.getChild('_init_standalone')
if crsid is None: crsid = self.crsid
#=======================================================================
# #crs
#=======================================================================
crs = QgsCoordinateReferenceSystem(crsid)
assert isinstance(crs, QgsCoordinateReferenceSystem), 'bad crs type'
assert crs.isValid()
self.crs = crs
self.qproj.setCrs(crs)
log.info('crs set to \'%s\''%self.crs.authid())
#=======================================================================
# setup qgis
#=======================================================================
self.qap = self.init_qgis()
self.algo_init = self.init_algos()
self.set_vdrivers()
#=======================================================================
# wrap
#=======================================================================
self._upd_qd()
log.debug('Qproj._init_standalone finished')
return
def _upd_qd(self): #set a fresh parameter set
self.init_q_d = {k:getattr(self, k) for k in self.q_hndls}
def init_qgis(self, #instantiate qgis
gui = False):
"""
WARNING: need to hold this app somewhere. call in the module you're working in (scripts)
"""
log = self.logger.getChild('init_qgis')
try:
QgsApplication.setPrefixPath(r'C:/OSGeo4W64/apps/qgis-ltr', True)
app = QgsApplication([], gui)
# Update prefix path
#app.setPrefixPath(r"C:\OSGeo4W64\apps\qgis", True)
app.initQgis()
#logging.debug(QgsApplication.showSettings())
""" was throwing unicode error"""
log.info(u' QgsApplication.initQgis. version: %s, release: %s'%(
Qgis.QGIS_VERSION.encode('utf-8'), Qgis.QGIS_RELEASE_NAME.encode('utf-8')))
return app
except:
raise Error('QGIS failed to initiate')
def init_algos(self): #initiilize processing and add providers
"""
crashing without raising an Exception
"""
log = self.logger.getChild('init_algos')
if not isinstance(self.qap, QgsApplication):
raise Error('qgis has not been properly initlized yet')
from processing.core.Processing import Processing
Processing.initialize() #crashing without raising an Exception
QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())
assert not self.feedback is None, 'instance needs a feedback method for algos to work'
log.info('processing initialized w/ feedback: \'%s\''%(type(self.feedback).__name__))
return True
def set_vdrivers(self):
log = self.logger.getChild('set_vdrivers')
#build vector drivers list by extension
"""couldnt find a good built-in to link extensions with drivers"""
vlay_drivers = {'SpatiaLite':'sqlite', 'OGR':'shp'}
#vlay_drivers = {'sqlite':'SpatiaLite', 'shp':'OGR','csv':'delimitedtext'}
for ext in QgsVectorFileWriter.supportedFormatExtensions():
dname = QgsVectorFileWriter.driverForExtension(ext)
if not dname in vlay_drivers.keys():
vlay_drivers[dname] = ext
#add in missing/duplicated
for vdriver in QgsVectorFileWriter.ogrDriverList():
if not vdriver.driverName in vlay_drivers.keys():
vlay_drivers[vdriver.driverName] ='?'
self.vlay_drivers = vlay_drivers
log.debug('built driver:extensions dict: \n %s'%vlay_drivers)
return
def set_crs(self, #load, build, and set the project crs
crsid = None, #integer
crs = None, #QgsCoordinateReferenceSystem
logger=None,
):
#=======================================================================
# setup and defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('set_crs')
if crsid is None:
crsid = self.crsid
#=======================================================================
# if not isinstance(crsid, int):
# raise IOError('expected integer for crs')
#=======================================================================
#=======================================================================
# build it
#=======================================================================
if crs is None:
crs = QgsCoordinateReferenceSystem(crsid)
assert isinstance(crs, QgsCoordinateReferenceSystem)
self.crs=crs #overwrite
if not self.crs.isValid():
raise IOError('CRS built from %s is invalid'%self.crs.authid())
#=======================================================================
# attach to project
#=======================================================================
self.qproj.setCrs(self.crs)
self.crsid = self.crs.authid()
if not self.qproj.crs().description() == self.crs.description():
raise Error('qproj crs does not match sessions')
log.info('crs set to EPSG: %s, \'%s\''%(self.crs.authid(), self.crs.description()))
self._upd_qd()
self.proj_checks(logger=log)
return self.crs
def proj_checks(self,
logger=None):
#log = self.logger.getChild('proj_checks')
if not self.driverName in self.vlay_drivers:
raise Error('unrecognized driver name')
if not self.out_dName in self.vlay_drivers:
raise Error('unrecognized driver name')
assert self.algo_init
assert not self.feedback is None
assert not self.progressBar is None
#=======================================================================
# crs checks
#=======================================================================
assert isinstance(self.crs, QgsCoordinateReferenceSystem)
assert self.crs.isValid()
assert self.crs.authid()==self.qproj.crs().authid(), 'crs mismatch'
assert self.crs.authid() == self.crsid, 'crs mismatch'
assert not self.crs.authid()=='', 'got empty CRS!'
#=======================================================================
# handle checks
#=======================================================================
assert isinstance(self.init_q_d, dict)
miss_l = set(self.q_hndls).difference(self.init_q_d.keys())
assert len(miss_l)==0, 'init_q_d missing handles: %s'%miss_l
for k,v in self.init_q_d.items():
assert getattr(self, k) == v, k
#log.info('project passed all checks')
return
def print_qt_version(self):
import inspect
from PyQt5 import Qt
vers = ['%s = %s' % (k,v) for k,v in vars(Qt).items() if k.lower().find('version') >= 0 and not inspect.isbuiltin(v)]
print('\n'.join(sorted(vers)))
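# --- Hedged usage sketch (added; not part of the original module) ---
# Standalone pattern implied by __init__()/_init_standalone(); constructor kwargs other
# than crsid/init_q_d come from hlpr.basic.ComWrkr, and the file path is hypothetical.
# wrkr = Qcoms(crsid='EPSG:4326') # no init_q_d passed -> _init_standalone()
# vlay = wrkr.load_vlay(r'C:\data\finv.gpkg')
# child = Qcoms(init_q_d=wrkr.init_q_d) # re-use the parent's QGIS handles in a child worker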
#===========================================================================
# LOAD/WRITE LAYERS-----------
#===========================================================================
def load_vlay(self,
fp,
logger=None,
providerLib='ogr',
aoi_vlay = None,
          allow_none=True, #control check in saveselectedfeatures
addSpatialIndex=True,
):
assert os.path.exists(fp), 'requested file does not exist: %s'%fp
if logger is None: logger = self.logger
log = logger.getChild('load_vlay')
basefn = os.path.splitext(os.path.split(fp)[1])[0]
log.debug('loading from %s'%fp)
vlay_raw = QgsVectorLayer(fp,basefn,providerLib)
#=======================================================================
# # checks
#=======================================================================
if not isinstance(vlay_raw, QgsVectorLayer):
raise IOError
#check if this is valid
if not vlay_raw.isValid():
raise Error('loaded vlay \'%s\' is not valid. \n \n did you initilize?'%vlay_raw.name())
#check if it has geometry
if vlay_raw.wkbType() == 100:
raise Error('loaded vlay has NoGeometry')
assert isinstance(self.mstore, QgsMapLayerStore)
"""only add intermediate layers to store
self.mstore.addMapLayer(vlay_raw)"""
if not vlay_raw.crs()==self.qproj.crs():
log.warning('crs mismatch: \n %s\n %s'%(
vlay_raw.crs(), self.qproj.crs()))
#=======================================================================
# aoi slice
#=======================================================================
if isinstance(aoi_vlay, QgsVectorLayer):
log.info('slicing by aoi %s'%aoi_vlay.name())
vlay = self.selectbylocation(vlay_raw, aoi_vlay, allow_none=allow_none,
logger=log, result_type='layer')
#check for no selection
if vlay is None:
return None
vlay.setName(vlay_raw.name()) #reset the name
#clear original from memory
self.mstore.addMapLayer(vlay_raw)
self.mstore.removeMapLayers([vlay_raw])
else:
vlay = vlay_raw
#=======================================================================
# clean------
#=======================================================================
#spatial index
if addSpatialIndex and (not vlay_raw.hasSpatialIndex()==QgsFeatureSource.SpatialIndexPresent):
self.createspatialindex(vlay_raw, logger=log)
#=======================================================================
# wrap
#=======================================================================
dp = vlay.dataProvider()
log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n %s'
%(vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()), dp.featureCount(), fp))
return vlay
def load_rlay(self, fp,
aoi_vlay = None,
logger=None):
if logger is None: logger = self.logger
log = logger.getChild('load_rlay')
assert os.path.exists(fp), 'requested file does not exist: %s'%fp
assert QgsRasterLayer.isValidRasterFileName(fp), \
'requested file is not a valid raster file type: %s'%fp
basefn = os.path.splitext(os.path.split(fp)[1])[0]
#Import a Raster Layer
log.debug('QgsRasterLayer(%s, %s)'%(fp, basefn))
rlayer = QgsRasterLayer(fp, basefn)
"""
hanging for some reason...
QgsRasterLayer(C:\LS\03_TOOLS\CanFlood\_git\tutorials\1\haz_rast\haz_1000.tif, haz_1000)
"""
#=======================================================================
# rlayer = QgsRasterLayer(r'C:\LS\03_TOOLS\CanFlood\_git\tutorials\1\haz_rast\haz_1000.tif',
# 'haz_1000')
#=======================================================================
#===========================================================================
# check
#===========================================================================
assert isinstance(rlayer, QgsRasterLayer), 'failed to get a QgsRasterLayer'
assert rlayer.isValid(), "Layer failed to load!"
if not rlayer.crs() == self.qproj.crs():
log.warning('loaded layer \'%s\' crs mismatch!'%rlayer.name())
log.debug('loaded \'%s\' from \n %s'%(rlayer.name(), fp))
#=======================================================================
# aoi
#=======================================================================
if not aoi_vlay is None:
log.debug('clipping w/ %s'%aoi_vlay.name())
assert isinstance(aoi_vlay, QgsVectorLayer)
rlay2 = self.cliprasterwithpolygon(rlayer,aoi_vlay, logger=log, layname=rlayer.name())
#clean up
mstore = QgsMapLayerStore() #build a new store
mstore.addMapLayers([rlayer]) #add the layers to the store
mstore.removeAllMapLayers() #remove all the layers
else:
rlay2 = rlayer
return rlay2
def write_rlay(self, #make a local copy of the passed raster layer
rlayer, #raster layer to make a local copy of
extent = 'layer', #write extent control
#'layer': use the current extent (default)
#'mapCanvas': use the current map Canvas
#QgsRectangle: use passed extents
resolution = 'raw', #resolution for output
opts = ["COMPRESS=LZW"], #QgsRasterFileWriter.setCreateOptions
out_dir = None, #directory for outputs
newLayerName = None,
logger=None,
):
"""
because processing tools only work on local copies
#=======================================================================
# coordinate transformation
#=======================================================================
NO CONVERSION HERE!
can't get native API to work. use gdal_warp instead
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
if out_dir is None: out_dir = self.out_dir
if newLayerName is None: newLayerName = rlayer.name()
newFn = get_valid_filename('%s.tif'%newLayerName) #clean it
out_fp = os.path.join(out_dir, newFn)
log = logger.getChild('write_rlay')
log.debug('on \'%s\' w/ \n crs:%s \n extents:%s\n xUnits:%.4f'%(
rlayer.name(), rlayer.crs(), rlayer.extent(), rlayer.rasterUnitsPerPixelX()))
#=======================================================================
# precheck
#=======================================================================
assert isinstance(rlayer, QgsRasterLayer)
assert os.path.exists(out_dir)
if os.path.exists(out_fp):
msg = 'requested file already exists! and overwrite=%s \n %s'%(
self.overwrite, out_fp)
if self.overwrite:
log.warning(msg)
else:
raise Error(msg)
#=======================================================================
# extract info from layer
#=======================================================================
"""consider loading the layer and duplicating the renderer?
renderer = rlayer.renderer()"""
provider = rlayer.dataProvider()
#build projector
projector = QgsRasterProjector()
#projector.setCrs(provider.crs(), provider.crs())
#build and configure pipe
pipe = QgsRasterPipe()
if not pipe.set(provider.clone()): #Insert a new known interface in default place
raise Error("Cannot set pipe provider")
if not pipe.insert(2, projector): #insert interface at specified index and connect
raise Error("Cannot set pipe projector")
#pipe = rlayer.pipe()
#coordinate transformation
"""see note"""
transformContext = self.qproj.transformContext()
#=======================================================================
# extents
#=======================================================================
if extent == 'layer':
extent = rlayer.extent()
elif extent=='mapCanvas':
assert isinstance(self.iface, QgisInterface), 'bad key for StandAlone?'
#get the extent, transformed to the current CRS
extent = QgsCoordinateTransform(
self.qproj.crs(),
rlayer.crs(),
transformContext
).transformBoundingBox(self.iface.mapCanvas().extent())
assert isinstance(extent, QgsRectangle), 'expected extent=QgsRectangle. got \"%s\''%extent
#expect the requested extent to be LESS THAN what we have in the raw raster
assert rlayer.extent().width()>=extent.width(), 'passed extents too wide'
assert rlayer.extent().height()>=extent.height(), 'passed extents too tall'
#=======================================================================
# resolution
#=======================================================================
#use the resolution of the raw file
if resolution == 'raw':
"""this respects the calculated extents"""
nRows = int(extent.height()/rlayer.rasterUnitsPerPixelY())
nCols = int(extent.width()/rlayer.rasterUnitsPerPixelX())
else:
"""dont think theres any decent API support for the GUI behavior"""
raise Error('not implemented')
#=======================================================================
# #build file writer
#=======================================================================
file_writer = QgsRasterFileWriter(out_fp)
#file_writer.Mode(1) #???
if not opts is None:
file_writer.setCreateOptions(opts)
log.debug('writing to file w/ \n %s'%(
{'nCols':nCols, 'nRows':nRows, 'extent':extent, 'crs':rlayer.crs()}))
#execute write
error = file_writer.writeRaster( pipe, nCols, nRows, extent, rlayer.crs(), transformContext)
log.info('wrote to file \n %s'%out_fp)
#=======================================================================
# wrap
#=======================================================================
if not error == QgsRasterFileWriter.NoError:
raise Error(error)
assert os.path.exists(out_fp)
assert QgsRasterLayer.isValidRasterFileName(out_fp), \
'requested file is not a valid raster file type: %s'%out_fp
return out_fp
def vlay_write(self, #write a VectorLayer
vlay,
out_fp=None,
driverName='GPKG',
fileEncoding = "CP1250",
opts = QgsVectorFileWriter.SaveVectorOptions(), #empty options object
overwrite=None,
logger=None):
"""
help(QgsVectorFileWriter.SaveVectorOptions)
QgsVectorFileWriter.SaveVectorOptions.driverName='GPKG'
opt2 = QgsVectorFileWriter.BoolOption(QgsVectorFileWriter.CreateOrOverwriteFile)
help(QgsVectorFileWriter)
"""
#==========================================================================
# defaults
#==========================================================================
if logger is None: logger=self.logger
log = logger.getChild('vlay_write')
if overwrite is None: overwrite=self.overwrite
if out_fp is None: out_fp = os.path.join(self.out_dir, '%s.gpkg'%vlay.name())
#===========================================================================
# assemble options
#===========================================================================
opts.driverName = driverName
opts.fileEncoding = fileEncoding
#===========================================================================
# checks
#===========================================================================
#file extension
fhead, ext = os.path.splitext(out_fp)
if not 'gpkg' in ext:
raise Error('unexpected extension: %s'%ext)
if os.path.exists(out_fp):
msg = 'requested file path already exists!. overwrite=%s \n %s'%(
overwrite, out_fp)
if overwrite:
log.warning(msg)
os.remove(out_fp) #workaround... should be a way to overwrite with the QgsVectorFileWriter
else:
raise Error(msg)
if vlay.dataProvider().featureCount() == 0:
raise Error('\'%s\' has no features!'%(
vlay.name()))
if not vlay.isValid():
raise Error('passed invalid layer')
#=======================================================================
# write
#=======================================================================
error = QgsVectorFileWriter.writeAsVectorFormatV2(
vlay, out_fp,
QgsCoordinateTransformContext(),
opts,
)
#=======================================================================
# wrap and check
#=======================================================================
if error[0] == QgsVectorFileWriter.NoError:
log.info('layer \' %s \' written to: \n %s'%(vlay.name(),out_fp))
return out_fp
raise Error('FAILURE on writing layer \' %s \' with code:\n %s \n %s'%(vlay.name(),error, out_fp))
def load_dtm(self, #convenience loader for assigning the correct attribute
fp,
logger=None,
**kwargs):
if logger is None: logger=self.logger
log=logger.getChild('load_dtm')
self.dtm_rlay = self.load_rlay(fp, logger=log, **kwargs)
return self.dtm_rlay
#==========================================================================
# GENERIC METHODS-----------------
#==========================================================================
def vlay_new_df2(self, #build a vlay from a df
df_raw,
geo_d = None, #container of geometry objects {fid: QgsGeometry}
crs=None,
gkey = None, #data field linking with geo_d (if None.. uses df index)
layname='df',
index = False, #whether to include the index as a field
logger=None,
):
"""
performance enhancement over vlay_new_df
simpler, clearer
although less versatile
"""
#=======================================================================
# setup
#=======================================================================
if crs is None: crs = self.qproj.crs()
if logger is None: logger = self.logger
log = logger.getChild('vlay_new_df')
#=======================================================================
# index fix
#=======================================================================
df = df_raw.copy()
if index:
if not df.index.name is None:
coln = df.index.name
df.index.name = None
else:
coln = 'index'
df[coln] = df.index
#=======================================================================
# precheck
#=======================================================================
#make sure none of the field names exceed the driver limitations
max_len = self.fieldn_max_d[self.driverName]
#check lengths
boolcol = df_raw.columns.str.len() >= max_len
if np.any(boolcol):
log.warning('passed %i columns which exceed the max length=%i for driver \'%s\'.. truncating: \n %s'%(
boolcol.sum(), max_len, self.driverName, df_raw.columns[boolcol].tolist()))
df.columns = df.columns.str.slice(start=0, stop=max_len-1)
#make sure the columns are unique
assert df.columns.is_unique, 'got duplicated column names: \n %s'%(df.columns.tolist())
#check datatypes
assert np.array_equal(df.columns, df.columns.astype(str)), 'got non-string column names'
#check the geometry
if not geo_d is None:
assert isinstance(geo_d, dict)
if not gkey is None:
assert gkey in df_raw.columns
#assert 'int' in df_raw[gkey].dtype.name
#check gkey match
l = set(df_raw[gkey].drop_duplicates()).difference(geo_d.keys())
assert len(l)==0, 'missing %i \'%s\' keys in geo_d: %s'%(len(l), gkey, l)
#against index
else:
#check gkey match
l = set(df_raw.index).difference(geo_d.keys())
assert len(l)==0, 'missing %i (of %i) fid keys in geo_d: %s'%(len(l), len(df_raw), l)
#===========================================================================
# assemble the fields
#===========================================================================
#column name and python type
fields_d = {coln:np_to_pytype(col.dtype) for coln, col in df.items()}
#fields container
qfields = fields_build_new(fields_d = fields_d, logger=log)
#=======================================================================
# assemble the features
#=======================================================================
#convert form of data
feats_d = dict()
for fid, row in df.iterrows():
feat = QgsFeature(qfields, fid)
#loop and add data
for fieldn, value in row.items():
#skip null values
if pd.isnull(value): continue
#get the index for this field
findx = feat.fieldNameIndex(fieldn)
#get the qfield
qfield = feat.fields().at(findx)
#make the type match
ndata = qtype_to_pytype(value, qfield.type(), logger=log)
#set the attribute
if not feat.setAttribute(findx, ndata):
raise Error('failed to setAttribute')
#setgeometry
if not geo_d is None:
if gkey is None:
gobj = geo_d[fid]
else:
gobj = geo_d[row[gkey]]
feat.setGeometry(gobj)
#store it
feats_d[fid]=feat
log.debug('built %i \'%s\' features'%(
len(feats_d),
QgsWkbTypes.geometryDisplayString(feat.geometry().type()),
))
#=======================================================================
# get the geo type
#=======================================================================\
if not geo_d is None:
gtype = QgsWkbTypes().displayString(next(iter(geo_d.values())).wkbType())
else:
gtype='None'
#===========================================================================
# buidl the new layer
#===========================================================================
vlay = vlay_new_mlay(gtype,
crs,
layname,
qfields,
list(feats_d.values()),
logger=log,
)
self.createspatialindex(vlay, logger=log)
#=======================================================================
# post check
#=======================================================================
if not geo_d is None:
if vlay.wkbType() == 100:
raise Error('constructed layer has NoGeometry')
return vlay
def check_aoi(self, #special checks for AOI layers
vlay):
assert isinstance(vlay, QgsVectorLayer)
assert 'Polygon' in QgsWkbTypes().displayString(vlay.wkbType())
assert vlay.dataProvider().featureCount()==1
assert vlay.crs() == self.qproj.crs(), 'aoi CRS (%s) does not match project (%s)'%(vlay.crs(), self.qproj.crs())
return
#==========================================================================
# ALGOS--------------
#==========================================================================
def deletecolumn(self,
in_vlay,
fieldn_l, #list of field names
invert=False, #whether to invert selected field names
layname = None,
logger=None,
):
#=======================================================================
# presets
#=======================================================================
algo_nm = 'qgis:deletecolumn'
if logger is None: logger=self.logger
log = logger.getChild('deletecolumn')
self.vlay = in_vlay
#=======================================================================
# field manipulations
#=======================================================================
fieldn_l = self._field_handlr(in_vlay, fieldn_l, invert=invert, logger=log)
if len(fieldn_l) == 0:
log.debug('no fields requested to drop... skipping')
return self.vlay
#=======================================================================
# assemble pars
#=======================================================================
#assemble pars
ins_d = { 'COLUMN' : fieldn_l,
'INPUT' : in_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT'}
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
#===========================================================================
# post formatting
#===========================================================================
if layname is None:
layname = '%s_delf'%self.vlay.name()
res_vlay.setName(layname) #reset the name
return res_vlay
def joinattributesbylocation(self,
#data definitions
vlay,
join_vlay, #layer from which to extract attribute values onto the bottom vlay
jlay_fieldn_l, #list of field names to extract from the join_vlay
selected_only = False,
jvlay_selected_only = False, #only consider selected features on the join layer
#algo controls
prefix = '',
method=0, #one-to-many
predicate_l = ['intersects'],#list of geometric search predicates
discard_nomatch = False, #Discard records which could not be joined
#data expectations
join_nullvs = True, #allow null values on jlay_fieldn_l on join_vlay
join_df = None, #if join_nullvs=FALSE, data to check for nulls (skips making a vlay_get_fdf)
allow_field_rename = False, #allow joiner fields to be renamed when mapped onto the main
allow_none = False,
#geometry expectations
expect_all_hits = False, #whether every main feature intersects a join feature
expect_j_overlap = False, #whether to expect the join_vlay to be overlapping
expect_m_overlap = False, #whether to expect the main vlay to have overlaps
logger=None,
):
"""
TODO: really need to clean this up...
discard_nomatch:
TRUE: two resulting layers have no features in common
FALSE: in layer retains all non matchers, out layer only has the non-matchers?
METHOD: Join type
- 0: Create separate feature for each located feature (one-to-many)
- 1: Take attributes of the first located feature only (one-to-one)
"""
#=======================================================================
# presets
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('joinattributesbylocation')
self.vlay = vlay
algo_nm = 'qgis:joinattributesbylocation'
predicate_d = {'intersects':0,'contains':1,'equals':2,'touches':3,'overlaps':4,'within':5, 'crosses':6}
jlay_fieldn_l = self._field_handlr(join_vlay,
jlay_fieldn_l,
invert=False)
#=======================================================================
# jgeot = vlay_get_bgeo_type(join_vlay)
# mgeot = vlay_get_bgeo_type(self.vlay)
#=======================================================================
mfcnt = self.vlay.dataProvider().featureCount()
#jfcnt = join_vlay.dataProvider().featureCount()
mfnl = vlay_fieldnl(self.vlay)
expect_overlaps = expect_j_overlap or expect_m_overlap
#=======================================================================
# geometry expectation prechecks
#=======================================================================
"""should take any geo
if not (jgeot == 'polygon' or mgeot == 'polygon'):
raise Error('one of the layres has to be a polygon')
if not jgeot=='polygon':
if expect_j_overlap:
raise Error('join vlay is not a polygon, expect_j_overlap should =False')
if not mgeot=='polygon':
if expect_m_overlap:
raise Error('main vlay is not a polygon, expect_m_overlap should =False')
if expect_all_hits:
if discard_nomatch:
raise Error('discard_nomatch should =FALSE if you expect all hits')
if allow_none:
raise Error('expect_all_hits=TRUE and allow_none=TRUE')
#method checks
if method==0:
if not jgeot == 'polygon':
raise Error('passed method 1:m but jgeot != polygon')
if not expect_j_overlap:
if not method==0:
raise Error('for expect_j_overlap=False, method must = 0 (1:m) for validation')
"""
#=======================================================================
# data expectation checks
#=======================================================================
#make sure none of the joiner fields are already on the layer
if len(mfnl)>0: #see if there are any fields on the main
l = basic.linr(jlay_fieldn_l, mfnl, result_type='matching')
if len(l) > 0:
#w/a prefix
if not prefix=='':
log.debug('%i fields on the joiner \'%s\' are already on \'%s\'... prefixing w/ \'%s\': \n %s'%(
len(l), join_vlay.name(), self.vlay.name(), prefix, l))
else:
log.debug('%i fields on the joiner \'%s\' are already on \'%s\'... renaming w/ auto-suffix: \n %s'%(
len(l), join_vlay.name(), self.vlay.name(), l))
if not allow_field_rename:
raise Error('%i field names overlap: %s'%(len(l), l))
#make sure that the joiner attributes are not null
if not join_nullvs:
if jvlay_selected_only:
raise Error('not implemented')
#pull the data
if join_df is None:
join_df = vlay_get_fdf(join_vlay, fieldn_l=jlay_fieldn_l, db_f=self.db_f, logger=log)
#slice to the columns of interest
join_df = join_df.loc[:, jlay_fieldn_l]
#check for nulls
booldf = join_df.isna()
if np.any(booldf):
raise Error('got %i nulls on \'%s\' field %s data'%(
booldf.sum().sum(), join_vlay.name(), jlay_fieldn_l))
#=======================================================================
# assemble pars
#=======================================================================
#convert predicate to code
pred_code_l = [predicate_d[name] for name in predicate_l]
#selection flags
if selected_only:
"""WARNING! This will limit the output to only these features
(despite the DISCARD_NONMATCHING flag)"""
main_input = self._get_sel_obj(self.vlay)
else:
main_input = self.vlay
if jvlay_selected_only:
join_input = self._get_sel_obj(join_vlay)
else:
join_input = join_vlay
#assemble pars
ins_d = { 'DISCARD_NONMATCHING' : discard_nomatch,
'INPUT' : main_input,
'JOIN' : join_input,
'JOIN_FIELDS' : jlay_fieldn_l,
'METHOD' : method,
'OUTPUT' : 'TEMPORARY_OUTPUT',
#'NON_MATCHING' : 'TEMPORARY_OUTPUT', #not working as expected. see get_misses
'PREDICATE' : pred_code_l,
'PREFIX' : prefix}
log.info('extracting %i fields from %i feats from \'%s\' to \'%s\' join fields: %s'%
(len(jlay_fieldn_l), join_vlay.dataProvider().featureCount(),
join_vlay.name(), self.vlay.name(), jlay_fieldn_l))
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay, join_cnt = res_d['OUTPUT'], res_d['JOINED_COUNT']
log.debug('got results: \n %s'%res_d)
#===========================================================================
# post checks
#===========================================================================
hit_fcnt = res_vlay.dataProvider().featureCount()
if not expect_overlaps:
if not discard_nomatch:
if not hit_fcnt == mfcnt:
raise Error('in and out fcnts dont match')
else:
pass
#log.debug('expect_overlaps=False, unable to check fcnts')
#all misses
if join_cnt == 0:
log.warning('got no joins from \'%s\' to \'%s\''%(
self.vlay.name(), join_vlay.name()))
if not allow_none:
raise Error('got no joins!')
if discard_nomatch:
if not hit_fcnt == 0:
raise Error('no joins but got some hits')
#some hits
else:
#check there are no nulls
if discard_nomatch and not join_nullvs:
#get data on first joiner
fid_val_ser = vlay_get_fdata(res_vlay, jlay_fieldn_l[0], logger=log, fmt='ser')
if np.any(fid_val_ser.isna()):
raise Error('discard=True and join null=FALSE but got %i (of %i) null \'%s\' values in the result'%(
fid_val_ser.isna().sum(), len(fid_val_ser), fid_val_ser.name
))
#=======================================================================
# get the new field names
#=======================================================================
new_fn_l = set(vlay_fieldnl(res_vlay)).difference(vlay_fieldnl(self.vlay))
#=======================================================================
# wrap
#=======================================================================
log.debug('finished joining %i fields from %i (of %i) feats from \'%s\' to \'%s\' join fields: %s'%
(len(new_fn_l), join_cnt, self.vlay.dataProvider().featureCount(),
join_vlay.name(), self.vlay.name(), new_fn_l))
return res_vlay, new_fn_l, join_cnt
def joinbylocationsummary(self,
vlay, #polygon layer to sample from
join_vlay, #layer from which to extract attribute values onto the bottom vlay
jlay_fieldn_l, #list of field names to extract from the join_vlay
jvlay_selected_only = False, #only consider selected features on the join layer
predicate_l = ['intersects'],#list of geometric search predicates
smry_l = ['sum'], #data summaries to apply
discard_nomatch = False, #Discard records which could not be joined
use_raw_fn=False, #whether to convert names back to the originals
layname=None,
):
"""
WARNING: This resets the fids
discard_nomatch:
TRUE: two resulting layers have no features in common
FALSE: in layer retains all non matchers, out layer only has the non-matchers?
"""
"""
view(join_vlay)
"""
#=======================================================================
# presets
#=======================================================================
algo_nm = 'qgis:joinbylocationsummary'
predicate_d = {'intersects':0,'contains':1,'equals':2,'touches':3,'overlaps':4,'within':5, 'crosses':6}
summaries_d = {'count':0, 'unique':1, 'min':2, 'max':3, 'range':4, 'sum':5, 'mean':6}
log = self.logger.getChild('joinbylocationsummary')
#=======================================================================
# defaults
#=======================================================================
if isinstance(jlay_fieldn_l, set):
jlay_fieldn_l = list(jlay_fieldn_l)
#convert predicate to code
pred_code_l = [predicate_d[pred_name] for pred_name in predicate_l]
#convert summaries to code
sum_code_l = [summaries_d[smry_str] for smry_str in smry_l]
if layname is None: layname = '%s_jsmry'%vlay.name()
#=======================================================================
# prechecks
#=======================================================================
if not isinstance(jlay_fieldn_l, list):
raise Error('expected a list')
#check requested join fields
fn_l = [f.name() for f in join_vlay.fields()]
s = set(jlay_fieldn_l).difference(fn_l)
assert len(s)==0, 'requested join fields not on layer: %s'%s
#check crs
assert join_vlay.crs().authid() == vlay.crs().authid()
#=======================================================================
# assemble pars
#=======================================================================
main_input=vlay
if jvlay_selected_only:
join_input = self._get_sel_obj(join_vlay)
else:
join_input = join_vlay
#assemble pars
ins_d = { 'DISCARD_NONMATCHING' : discard_nomatch,
'INPUT' : main_input,
'JOIN' : join_input,
'JOIN_FIELDS' : jlay_fieldn_l,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'PREDICATE' : pred_code_l,
'SUMMARIES' : sum_code_l,
}
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
#===========================================================================
# post formatting
#===========================================================================
res_vlay.setName(layname) #reset the name
#get new field names
nfn_l = set([f.name() for f in res_vlay.fields()]).difference([f.name() for f in vlay.fields()])
"""
view(res_vlay)
"""
#=======================================================================
# post check
#=======================================================================
for fn in nfn_l:
rser = vlay_get_fdata(res_vlay, fieldn=fn, logger=log, fmt='ser')
if rser.isna().all().all():
log.warning('%s \'%s\' got all nulls'%(vlay.name(), fn))
#=======================================================================
# rename fields
#=======================================================================
if use_raw_fn:
assert len(smry_l)==1, 'rename only allowed for single sample stat'
rnm_d = {s:s.replace('_%s'%smry_l[0],'') for s in nfn_l}
s = set(rnm_d.values()).symmetric_difference(jlay_fieldn_l)
assert len(s)==0, 'failed to convert field names'
res_vlay = vlay_rename_fields(res_vlay, rnm_d, logger=log)
nfn_l = jlay_fieldn_l
log.info('sampled \'%s\' w/ \'%s\' (%i hits) and \'%s\'to get %i new fields \n %s'%(
join_vlay.name(), vlay.name(), res_vlay.dataProvider().featureCount(),
smry_l, len(nfn_l), nfn_l))
return res_vlay, nfn_l
def joinattributestable(self, #join csv data to a vector layer
vlay, table_fp, fieldNm,
method = 1, #join type
#- 0: Create separate feature for each matching feature (one-to-many)
#- 1: Take attributes of the first matching feature only (one-to-one)
csv_params = {'encoding':'System',
'type':'csv',
'maxFields':'10000',
'detectTypes':'yes',
'geomType':'none',
'subsetIndex':'no',
'watchFile':'no'},
logger=None,
layname=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
if layname is None:
layname = '%s_j'%vlay.name()
algo_nm = 'native:joinattributestable'
log = self.logger.getChild('joinattributestable')
#=======================================================================
# prechecks
#=======================================================================
assert isinstance(vlay, QgsVectorLayer)
assert os.path.exists(table_fp)
assert fieldNm in [f.name() for f in vlay.fields()], 'vlay missing link field %s'%fieldNm
#=======================================================================
# setup table layer
#=======================================================================
uriW = QgsDataSourceUri()
for pName, pValue in csv_params.items():
uriW.setParam(pName, pValue)
table_uri = r'file:///' + table_fp.replace('\\','/') +'?'+ str(uriW.encodedUri(), 'utf-8')
table_vlay = QgsVectorLayer(table_uri,'table',"delimitedtext")
assert fieldNm in [f.name() for f in table_vlay.fields()], 'table missing link field %s'%fieldNm
#=======================================================================
# assemble pars
#=======================================================================
ins_d = { 'DISCARD_NONMATCHING' : True,
'FIELD' : fieldNm, 'FIELDS_TO_COPY' : [],
'FIELD_2' : fieldNm,
'INPUT' : vlay,
'INPUT_2' : table_vlay,
'METHOD' : method,
'OUTPUT' : 'TEMPORARY_OUTPUT', 'PREFIX' : '' }
#=======================================================================
# execute
#=======================================================================
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
res_vlay.setName(layname) #reset the name
log.debug('finished w/ %i feats'%res_vlay.dataProvider().featureCount())
return res_vlay
def cliprasterwithpolygon(self,
rlay_raw,
poly_vlay,
layname = None,
#output = 'TEMPORARY_OUTPUT',
logger = None,
):
"""
clipping a raster layer with a polygon mask using gdalwarp
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('cliprasterwithpolygon')
if layname is None:
layname = '%s_clipd'%rlay_raw.name()
algo_nm = 'gdal:cliprasterbymasklayer'
#=======================================================================
# precheck
#=======================================================================
assert isinstance(rlay_raw, QgsRasterLayer)
assert isinstance(poly_vlay, QgsVectorLayer)
assert 'Poly' in QgsWkbTypes().displayString(poly_vlay.wkbType())
assert rlay_raw.crs() == poly_vlay.crs()
#=======================================================================
# run algo
#=======================================================================
ins_d = { 'ALPHA_BAND' : False,
'CROP_TO_CUTLINE' : True,
'DATA_TYPE' : 0,
'EXTRA' : '',
'INPUT' : rlay_raw,
'KEEP_RESOLUTION' : True,
'MASK' : poly_vlay,
'MULTITHREADING' : False,
'NODATA' : None,
'OPTIONS' : '',
'OUTPUT' : 'TEMPORARY_OUTPUT',
'SET_RESOLUTION' : False,
'SOURCE_CRS' : None,
'TARGET_CRS' : None,
'X_RESOLUTION' : None,
'Y_RESOLUTION' : None,
}
log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']):
"""failing intermittently"""
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def cliprasterwithpolygon2(self, #with saga
rlay_raw,
poly_vlay,
ofp = None,
layname = None,
#output = 'TEMPORARY_OUTPUT',
logger = None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('cliprasterwithpolygon')
if layname is None:
if not ofp is None:
layname = os.path.splitext(os.path.split(ofp)[1])[0]
else:
layname = '%s_clipd'%rlay_raw.name()
if ofp is None:
ofp = os.path.join(self.out_dir,layname+'.sdat')
if os.path.exists(ofp):
msg = 'requested filepath exists: %s'%ofp
if self.overwrite:
log.warning('DELETING '+msg)
os.remove(ofp)
else:
raise Error(msg)
algo_nm = 'saga:cliprasterwithpolygon'
#=======================================================================
# precheck
#=======================================================================
if os.path.exists(ofp):
msg = 'requested filepath exists: %s'%ofp
if self.overwrite:
log.warning(msg)
else:
raise Error(msg)
if not os.path.exists(os.path.dirname(ofp)):
os.makedirs(os.path.dirname(ofp))
#assert QgsRasterLayer.isValidRasterFileName(ofp), 'invalid filename: %s'%ofp
assert 'Poly' in QgsWkbTypes().displayString(poly_vlay.wkbType())
assert rlay_raw.crs() == poly_vlay.crs()
#=======================================================================
# run algo
#=======================================================================
ins_d = { 'INPUT' : rlay_raw,
'OUTPUT' : ofp,
'POLYGONS' : poly_vlay }
log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']):
"""failing intermittently"""
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def srastercalculator(self,
formula,
rlay_d, #container of raster layers to perform calculations on
logger=None,
layname=None,
ofp=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('srastercalculator')
assert 'a' in rlay_d
if layname is None:
if not ofp is None:
layname = os.path.splitext(os.path.split(ofp)[1])[0]
else:
layname = '%s_calc'%rlay_d['a'].name()
if ofp is None:
ofp = os.path.join(self.out_dir, layname+'.sdat')
if not os.path.exists(os.path.dirname(ofp)):
log.info('building basedir: %s'%os.path.dirname(ofp))
os.makedirs(os.path.dirname(ofp))
if os.path.exists(ofp):
msg = 'requested filepath exists: %s'%ofp
if self.overwrite:
log.warning(msg)
os.remove(ofp)
else:
raise Error(msg)
#=======================================================================
# execute
#=======================================================================
algo_nm = 'saga:rastercalculator'
ins_d = { 'FORMULA' : formula,
'GRIDS' : rlay_d.pop('a'),
'RESAMPLING' : 3,
'RESULT' : ofp,
'TYPE' : 7,
'USE_NODATA' : False,
'XGRIDS' : list(rlay_d.values())}
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['RESULT']):
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['RESULT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def grastercalculator(self, #GDAL raster calculator
formula,
rlay_d, #container of raster layers to perform calculations on
nodata=0,
logger=None,
layname=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('grastercalculator')
algo_nm = 'gdal:rastercalculator'
if layname is None:
layname = '%s_calc'%rlay_d['A'].name()
#=======================================================================
# prechecks
#=======================================================================
assert 'A' in rlay_d
#=======================================================================
# populate
#=======================================================================
for rtag in ('A', 'B', 'C', 'D', 'E', 'F'):
#set dummy placeholders for missing rasters
if not rtag in rlay_d:
rlay_d[rtag] = None
#check what the user passed
else:
assert isinstance(rlay_d[rtag], QgsRasterLayer), 'passed bad %s'%rtag
assert rtag in formula, 'formula is missing a reference to \'%s\''%rtag
#=======================================================================
# execute
#=======================================================================
ins_d = { 'BAND_A' : 1, 'BAND_B' : -1, 'BAND_C' : -1, 'BAND_D' : -1, 'BAND_E' : -1, 'BAND_F' : -1,
'EXTRA' : '',
'FORMULA' : formula,
'INPUT_A' : rlay_d['A'], 'INPUT_B' : rlay_d['B'], 'INPUT_C' : rlay_d['C'],
'INPUT_D' : rlay_d['D'], 'INPUT_E' : rlay_d['E'], 'INPUT_F' : rlay_d['F'],
'NO_DATA' : nodata,
'OPTIONS' : '',
'OUTPUT' : 'TEMPORARY_OUTPUT',
'RTYPE' : 5 }
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
assert os.path.exists(res_d['OUTPUT']), 'failed to get result'
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def qrastercalculator(self, #QGIS native raster calculator
formula,
ref_layer = None, #reference layer
logger=None,
layname=None,
):
"""executes the algorhithim... better to use the constructor directly
QgsRasterCalculator"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('qrastercalculator')
algo_nm = 'qgis:rastercalculator'
if layname is None:
if ref_layer is None:
layname = 'qrastercalculator'
else:
layname = '%s_calc'%ref_layer.name()
#=======================================================================
# execute
#=======================================================================
"""
formula = '\'haz_100yr_cT2@1\'-\'dtm_cT1@1\''
"""
ins_d = { 'CELLSIZE' : 0,
'CRS' : None,
'EXPRESSION' : formula,
'EXTENT' : None,
'LAYERS' : [ref_layer], #referecnce layer
'OUTPUT' : 'TEMPORARY_OUTPUT' }
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']):
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
def addgeometrycolumns(self, #add geometry data as columns
vlay,
layname=None,
logger=None,
):
if logger is None: logger=self.logger
log = logger.getChild('addgeometrycolumns')
algo_nm = 'qgis:exportaddgeometrycolumns'
#=======================================================================
# assemble pars
#=======================================================================
#assemble pars
ins_d = { 'CALC_METHOD' : 0, #use layer's crs
'INPUT' : vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT'}
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
#===========================================================================
# post formatting
#===========================================================================
if layname is None:
layname = '%s_gcol'%self.vlay.name()
res_vlay.setName(layname) #reset the name
return res_vlay
def buffer(self, vlay,
distance, #buffer distance to apply
dissolve = False,
end_cap_style = 0,
join_style = 0,
miter_limit = 2,
segments = 5,
logger=None,
layname=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
if layname is None:
layname = '%s_buf'%vlay.name()
algo_nm = 'native:buffer'
log = self.logger.getChild('buffer')
distance = float(distance)
#=======================================================================
# prechecks
#=======================================================================
if distance==0 or np.isnan(distance):
raise Error('got no buffer!')
#=======================================================================
# build ins
#=======================================================================
"""
distance = 3.0
dcopoy = copy.copy(distance)
"""
ins_d = {
'INPUT': vlay,
'DISSOLVE' : dissolve,
'DISTANCE' : distance,
'END_CAP_STYLE' : end_cap_style,
'JOIN_STYLE' : join_style,
'MITER_LIMIT' : miter_limit,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'SEGMENTS' : segments}
#=======================================================================
# execute
#=======================================================================
log.debug('executing \'native:buffer\' with ins_d: \n %s'%ins_d)
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
res_vlay.setName(layname) #reset the name
log.debug('finished')
return res_vlay
def selectbylocation(self, #select features (from the main layer) by geometric relation with comp_vlay
vlay, #vlay to select features from
comp_vlay, #vlay to compare
result_type = 'select',
method= 'new', #Modify current selection by
pred_l = ['intersect'], #list of geometry predicate names
#expectations
allow_none = False,
logger = None,
):
#=======================================================================
# setups and defaults
#=======================================================================
if logger is None: logger=self.logger
algo_nm = 'native:selectbylocation'
log = logger.getChild('selectbylocation')
#===========================================================================
# #set parameter translation dictoinaries
#===========================================================================
meth_d = {'new':0}
pred_d = {
'are within':6,
'intersect':0,
'overlap':5,
}
#predicate (name to value)
pred_l = [pred_d[pred_nm] for pred_nm in pred_l]
#=======================================================================
# setup
#=======================================================================
ins_d = {
'INPUT' : vlay,
'INTERSECT' : comp_vlay,
'METHOD' : meth_d[method],
'PREDICATE' : pred_l }
log.debug('executing \'%s\' on \'%s\' with: \n %s'
%(algo_nm, vlay.name(), ins_d))
#===========================================================================
# #execute
#===========================================================================
_ = processing.run(algo_nm, ins_d, feedback=self.feedback)
#=======================================================================
# check
#=======================================================================
fcnt = vlay.selectedFeatureCount()
if fcnt == 0:
msg = 'No features selected!'
if allow_none:
log.warning(msg)
else:
raise Error(msg)
#=======================================================================
# wrap
#=======================================================================
log.debug('selected %i (of %i) features from %s'
%(vlay.selectedFeatureCount(),vlay.dataProvider().featureCount(), vlay.name()))
return self._get_sel_res(vlay, result_type=result_type, logger=log, allow_none=allow_none)
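#usage sketch (assumption, not from the original): pull the features of a
#hypothetical 'finv_vlay' that intersect the aoi as a new memory layer
# sel_vlay = self.selectbylocation(finv_vlay, aoi_vlay,
# result_type='layer', pred_l=['intersect'])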
def saveselectedfeatures(self,#generate a memory layer from the current selection
vlay,
logger=None,
allow_none = False,
layname=None):
#===========================================================================
# setups and defaults
#===========================================================================
if logger is None: logger = self.logger
log = logger.getChild('saveselectedfeatures')
algo_nm = 'native:saveselectedfeatures'
if layname is None:
layname = '%s_sel'%vlay.name()
#=======================================================================
# precheck
#=======================================================================
fcnt = vlay.selectedFeatureCount()
if fcnt == 0:
msg = 'No features selected!'
if allow_none:
log.warning(msg)
return None
else:
raise Error(msg)
log.debug('on \'%s\' with %i feats selected'%(
vlay.name(), vlay.selectedFeatureCount()))
#=======================================================================
# # build inputs
#=======================================================================
ins_d = {'INPUT' : vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT'}
log.debug('\'native:saveselectedfeatures\' on \'%s\' with: \n %s'
%(vlay.name(), ins_d))
#execute
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
assert isinstance(res_vlay, QgsVectorLayer)
#===========================================================================
# wrap
#===========================================================================
res_vlay.setName(layname) #reset the name
return res_vlay
def polygonfromlayerextent(self,
vlay,
round_to=0, #adds a buffer to the result?
logger=None,
layname=None):
"""
This algorithm takes a map layer and generates a new vector layer with the
minimum bounding box (rectangle polygon with N-S orientation) that covers the input layer.
Optionally, the extent can be enlarged to a rounded value.
"""
#===========================================================================
# setups and defaults
#===========================================================================
if logger is None: logger = self.logger
log = logger.getChild('polygonfromlayerextent')
algo_nm = 'qgis:polygonfromlayerextent'
if layname is None:
layname = '%s_exts'%vlay.name()
#=======================================================================
# precheck
#=======================================================================
#=======================================================================
# # build inputs
#=======================================================================
ins_d = {'INPUT' : vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'ROUND_TO':round_to}
log.debug('\'%s\' on \'%s\' with: \n %s'
%(algo_nm, vlay.name(), ins_d))
#execute
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
assert isinstance(res_vlay, QgsVectorLayer)
#===========================================================================
# wrap
#===========================================================================
res_vlay.setName(layname) #reset the name
return res_vlay
def fixgeometries(self, vlay,
logger=None,
layname=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
if layname is None:
layname = '%s_fix'%vlay.name()
algo_nm = 'native:fixgeometries'
log = self.logger.getChild('fixgeometries')
#=======================================================================
# build ins
#=======================================================================
"""
distance = 3.0
dcopoy = copy.copy(distance)
"""
ins_d = {
'INPUT': vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
}
#=======================================================================
# execute
#=======================================================================
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
res_vlay = res_d['OUTPUT']
res_vlay.setName(layname) #reset the name
log.debug('finished')
return res_vlay
def createspatialindex(self,
in_vlay,
logger=None,
):
#=======================================================================
# presets
#=======================================================================
algo_nm = 'qgis:createspatialindex'
if logger is None: logger=self.logger
log = self.logger.getChild('createspatialindex')
#=======================================================================
# assemble pars
#=======================================================================
#assemble pars
ins_d = { 'INPUT' : in_vlay }
log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
#===========================================================================
# post formatting
#===========================================================================
#=======================================================================
# if layname is None:
# layname = '%s_si'%self.vlay.name()
#
# res_vlay.setName(layname) #reset the name
#=======================================================================
return
def warpreproject(self, #repojrect a raster
rlay_raw,
crsOut = None, #crs to re-project to
layname = None,
options = 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9',
output = 'TEMPORARY_OUTPUT',
logger = None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('warpreproject')
if layname is None:
layname = '%s_rproj'%rlay_raw.name()
algo_nm = 'gdal:warpreproject'
if crsOut is None: crsOut = self.crs #just take the project's
#=======================================================================
# precheck
#=======================================================================
"""the algo accepts 'None'... but not sure why we'd want to do this"""
assert isinstance(crsOut, QgsCoordinateReferenceSystem), 'bad crs type'
assert isinstance(rlay_raw, QgsRasterLayer)
assert rlay_raw.crs() != crsOut, 'layer already on this CRS!'
#=======================================================================
# run algo
#=======================================================================
ins_d = {
'DATA_TYPE' : 0,
'EXTRA' : '',
'INPUT' : rlay_raw,
'MULTITHREADING' : False,
'NODATA' : None,
'OPTIONS' : options,
'OUTPUT' : output,
'RESAMPLING' : 0,
'SOURCE_CRS' : None,
'TARGET_CRS' : crsOut,
'TARGET_EXTENT' : None,
'TARGET_EXTENT_CRS' : None,
'TARGET_RESOLUTION' : None,
}
log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
log.debug('finished w/ \n %s'%res_d)
if not os.path.exists(res_d['OUTPUT']):
"""failing intermittently"""
raise Error('failed to get a result')
res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
#=======================================================================
# #post check
#=======================================================================
assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
assert res_rlay.isValid()
assert rlay_raw.bandCount()==res_rlay.bandCount(), 'band count mismatch'
res_rlay.setName(layname) #reset the name
log.debug('finished w/ %s'%res_rlay.name())
return res_rlay
#===========================================================================
# ALGOS - CUSTOM--------
#===========================================================================
def vlay_pts_dist(self, #get the distance between points in a given order
vlay_raw,
ifn = 'fid', #fieldName to index by
request = None,
result = 'vlay_append', #result type
logger=None):
#===========================================================================
# defaults
#===========================================================================
if logger is None: logger=self.logger
log = logger.getChild('vlay_pts_dist')
if request is None:
request = QgsFeatureRequest(
).addOrderBy(ifn, ascending=True
).setSubsetOfAttributes([ifn], vlay_raw.fields())
#===========================================================================
# precheck
#===========================================================================
assert 'Point' in QgsWkbTypes().displayString(vlay_raw.wkbType()), 'passed bad geo type'
#see if indexer is unique
ifn_d = vlay_get_fdata(vlay_raw, fieldn=ifn, logger=log)
assert len(set(ifn_d.values()))==len(ifn_d)
#===========================================================================
# loop and calc
#===========================================================================
d = dict()
first, geo_prev = True, None
for i, feat in enumerate(vlay_raw.getFeatures(request)):
assert not feat.attribute(ifn) in d, 'indexer is not unique!'
geo = feat.geometry()
if first:
first=False
else:
d[feat.attribute(ifn)] = geo.distance(geo_prev)
geo_prev = geo
log.info('got %i distances using \"%s\''%(len(d), ifn))
#===========================================================================
# check
#===========================================================================
assert len(d) == (vlay_raw.dataProvider().featureCount() -1)
#===========================================================================
# results typing
#===========================================================================
if result == 'dict': return d
elif result == 'vlay_append':
#data manip
ncoln = '%s_dist'%ifn
df_raw = vlay_get_fdf(vlay_raw, logger=log)
df = df_raw.join(pd.Series(d, name=ncoln), on=ifn)
assert df[ncoln].isna().sum()==1, 'expected 1 null'
#reassemble
geo_d = vlay_get_fdata(vlay_raw, geo_obj=True, logger=log)
return self.vlay_new_df2(df, geo_d=geo_d, logger=log,
layname='%s_%s'%(vlay_raw.name(), ncoln))
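#usage sketch (assumption, not from the original): distances between consecutive
#points of a hypothetical cross-section layer, keyed by fid
# dist_d = self.vlay_pts_dist(xs_pts_vlay, ifn='fid', result='dict')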
#==========================================================================
# privates----------
#==========================================================================
def _field_handlr(self, #common handling for fields
vlay, #layer to check for field presence
fieldn_l, #list of fields to handle
invert = False,
logger=None,
):
if logger is None: logger=self.logger
log = logger.getChild('_field_handlr')
#=======================================================================
# all flag
#=======================================================================
if isinstance(fieldn_l, str):
if fieldn_l == 'all':
fieldn_l = vlay_fieldnl(vlay)
log.debug('user passed \'all\', retrieved %i fields: \n %s'%(
len(fieldn_l), fieldn_l))
else:
raise Error('unrecognized fieldn_l\'%s\''%fieldn_l)
#=======================================================================
# type setting
#=======================================================================
if isinstance(fieldn_l, tuple) or isinstance(fieldn_l, np.ndarray) or isinstance(fieldn_l, set):
fieldn_l = list(fieldn_l)
#=======================================================================
# checking
#=======================================================================
if not isinstance(fieldn_l, list):
raise Error('expected a list for fields, instead got \n %s'%fieldn_l)
#vlay_check(vlay, exp_fieldns=fieldn_l)
#=======================================================================
# #handle inversions
#=======================================================================
if invert:
big_fn_s = set(vlay_fieldnl(vlay)) #get all the fields
#get the difference
fieldn_l = list(big_fn_s.difference(set(fieldn_l)))
log.debug('inverted selection from %i to %i fields'%
(len(big_fn_s), len(fieldn_l)))
return fieldn_l
def _get_sel_obj(self, vlay): #get the processing object for algos with selections
log = self.logger.getChild('_get_sel_obj')
assert isinstance(vlay, QgsVectorLayer)
if vlay.selectedFeatureCount() == 0:
raise Error('Nothing selected on \'%s\'. expects some pre-selection'%(vlay.name()))
#handle project layer store
if self.qproj.mapLayer(vlay.id()) is None:
#layer not on project yet. add it
if self.qproj.addMapLayer(vlay, False) is None:
raise Error('failed to add map layer \'%s\''%vlay.name())
log.debug('based on %i selected features from \'%s\''%(len(vlay.selectedFeatureIds()), vlay.name()))
return QgsProcessingFeatureSourceDefinition(source=vlay.id(),
selectedFeaturesOnly=True,
featureLimit=-1,
geometryCheck=QgsFeatureRequest.GeometryAbortOnInvalid)
def _get_sel_res(self, #handler for returning selection like results
vlay, #result layer (with selection on it
result_type='select',
#expectations
allow_none = False,
logger=None
):
#=======================================================================
# setup
#=======================================================================
if logger is None: logger = self.logger
log = logger.getChild('_get_sel_res')
#=======================================================================
# precheck
#=======================================================================
if vlay.selectedFeatureCount() == 0:
if not allow_none:
raise Error('nothing selected')
return None
#log.debug('user specified \'%s\' for result_type'%result_type)
#=======================================================================
# by handles
#=======================================================================
if result_type == 'select':
#log.debug('user specified \'select\', doing nothing with %i selected'%vlay.selectedFeatureCount())
result = None
elif result_type == 'fids':
result = vlay.selectedFeatureIds() #get the selected feature ids
elif result_type == 'feats':
result = {feat.id(): feat for feat in vlay.getSelectedFeatures()}
elif result_type == 'layer':
result = self.saveselectedfeatures(vlay, logger=log)
else:
raise Error('unexpected result_type kwarg')
return result
def _in_out_checking(self,res_vlay,
):
"""placeholder"""
def __exit__(self, #destructor
*args,**kwargs):
self.mstore.removeAllMapLayers()
super().__exit__(*args,**kwargs) #call the base class exit
class MyFeedBackQ(QgsProcessingFeedback):
"""
wrapper for easier reporting and extended progress
Dialogs:
built by QprojPlug.qproj_setup()
Qworkers:
built by Qcoms.__init__()
"""
def __init__(self,
logger=mod_logger):
self.logger=logger.getChild('FeedBack')
super().__init__()
def setProgressText(self, text):
self.logger.debug(text)
def pushInfo(self, info):
self.logger.info(info)
def pushCommandInfo(self, info):
self.logger.info(info)
def pushDebugInfo(self, info):
self.logger.info(info)
def pushConsoleInfo(self, info):
self.logger.info(info)
def reportError(self, error, fatalError=False):
self.logger.error(error)
def upd_prog(self, #advanced progress handling
prog_raw, #pass None to reset
method='raw', #whether to append value to the progress
):
#=======================================================================
# defaults
#=======================================================================
#get the current progress
progress = self.progress()
#===================================================================
# prechecks
#===================================================================
#make sure we have some slots connected
"""not sure how to do this"""
#=======================================================================
# reseting
#=======================================================================
if prog_raw is None:
"""
            would be nice to reset the progressBar... but that would be complicated
"""
self.setProgress(0)
return
#=======================================================================
# setting
#=======================================================================
if method=='append':
prog = min(progress + prog_raw, 100)
elif method=='raw':
prog = prog_raw
        elif method == 'portion':
            rem_prog = 100-progress
            prog = progress + rem_prog*(prog_raw/100)
        else:
            raise Error('unrecognized method kwarg: \'%s\''%method)
        assert prog<=100
#===================================================================
# emit signalling
#===================================================================
self.setProgress(prog)
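        #=======================================================================
        # illustrative note (not executed): with method='portion', prog_raw is
        # treated as a percentage of the *remaining* progress. e.g. at
        # progress=50, upd_prog(50, method='portion') advances to
        # 50 + 50*(50/100) = 75
        #=======================================================================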
#==============================================================================
# FUNCTIONS----------
#==============================================================================
def init_q(gui=False):
try:
QgsApplication.setPrefixPath(r'C:/OSGeo4W64/apps/qgis-ltr', True)
app = QgsApplication([], gui)
# Update prefix path
#app.setPrefixPath(r"C:\OSGeo4W64\apps\qgis", True)
app.initQgis()
#logging.debug(QgsApplication.showSettings())
""" was throwing unicode error"""
print(u' QgsApplication.initQgis. version: %s, release: %s'%(
Qgis.QGIS_VERSION.encode('utf-8'), Qgis.QGIS_RELEASE_NAME.encode('utf-8')))
return app
except:
raise Error('QGIS failed to initiate')
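#==============================================================================
# illustrative usage (not executed; the prefix path above is machine-specific
# and assumes a standard OSGeo4W install):
#
# app = init_q(gui=False)
# ...do processing work...
# app.exitQgis()
#==============================================================================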
def vlay_check( #helper to check various expectations on the layer
vlay,
exp_fieldns = None, #raise error if these field names are OUT
uexp_fieldns = None, #raise error if these field names are IN
real_atts = None, #list of field names to check if attribute value are all real
bgeot = None, #basic geo type checking
fcnt = None, #feature count checking. accepts INT or QgsVectorLayer
fkey = None, #optional secondary key to check
mlay = False, #check if its a memory layer or not
chk_valid = False, #check layer validty
logger = mod_logger,
db_f = False,
):
#=======================================================================
# prechecks
#=======================================================================
if vlay is None:
raise Error('got passed an empty vlay')
if not isinstance(vlay, QgsVectorLayer):
raise Error('unexpected type: %s'%type(vlay))
log = logger.getChild('vlay_check')
checks_l = []
#=======================================================================
# expected field names
#=======================================================================
if not basic.is_null(exp_fieldns): #robust null checking
skip=False
if isinstance(exp_fieldns, str):
if exp_fieldns=='all':
skip=True
if not skip:
fnl = basic.linr(exp_fieldns, vlay_fieldnl(vlay),
'expected field names', vlay.name(),
result_type='missing', logger=log, fancy_log=db_f)
if len(fnl)>0:
raise Error('%s missing expected fields: %s'%(
vlay.name(), fnl))
checks_l.append('exp_fieldns=%i'%len(exp_fieldns))
#=======================================================================
# unexpected field names
#=======================================================================
if not basic.is_null(uexp_fieldns): #robust null checking
#fields on the layer
if len(vlay_fieldnl(vlay))>0:
fnl = basic.linr(uexp_fieldns, vlay_fieldnl(vlay),
'un expected field names', vlay.name(),
result_type='matching', logger=log, fancy_log=db_f)
if len(fnl)>0:
raise Error('%s contains unexpected fields: %s'%(
vlay.name(), fnl))
#no fields on the layer
else:
pass
checks_l.append('uexp_fieldns=%i'%len(uexp_fieldns))
#=======================================================================
# null value check
#=======================================================================
#==========================================================================
# if not real_atts is None:
#
# #pull this data
# df = vlay_get_fdf(vlay, fieldn_l = real_atts, logger=log)
#
# #check for nulls
# if np.any(df.isna()):
# raise Error('%s got %i nulls on %i expected real fields: %s'%(
# vlay.name(), df.isna().sum().sum(), len(real_atts), real_atts))
#
#
# checks_l.append('real_atts=%i'%len(real_atts))
#==========================================================================
#=======================================================================
# basic geometry type
#=======================================================================
#==========================================================================
# if not bgeot is None:
# bgeot_lay = vlay_get_bgeo_type(vlay)
#
# if not bgeot == bgeot_lay:
# raise Error('basic geometry type expectation \'%s\' does not match layers \'%s\''%(
# bgeot, bgeot_lay))
#
# checks_l.append('bgeot=%s'%bgeot)
#==========================================================================
#=======================================================================
# feature count
#=======================================================================
if not fcnt is None:
if isinstance(fcnt, QgsVectorLayer):
fcnt=fcnt.dataProvider().featureCount()
if not fcnt == vlay.dataProvider().featureCount():
raise Error('\'%s\'s feature count (%i) does not match %i'%(
vlay.name(), vlay.dataProvider().featureCount(), fcnt))
checks_l.append('fcnt=%i'%fcnt)
#=======================================================================
# fkey
#=======================================================================
#==============================================================================
# if isinstance(fkey, str):
# fnl = vlay_fieldnl(vlay)
#
# if not fkey in fnl:
# raise Error('fkey \'%s\' not in the fields'%fkey)
#
# fkeys_ser = vlay_get_fdata(vlay, fkey, logger=log, fmt='ser').sort_values()
#
# if not np.issubdtype(fkeys_ser.dtype, np.number):
# raise Error('keys are non-numeric. type: %s'%fkeys_ser.dtype)
#
# if not fkeys_ser.is_unique:
# raise Error('\'%s\' keys are not unique'%fkey)
#
# if not fkeys_ser.is_monotonic:
# raise Error('fkeys are not monotonic')
#
# if np.any(fkeys_ser.isna()):
# raise Error('fkeys have nulls')
#
# checks_l.append('fkey \'%s\'=%i'%(fkey, len(fkeys_ser)))
#==============================================================================
#=======================================================================
# storage type
#=======================================================================
if mlay:
if not 'Memory' in vlay.dataProvider().storageType():
raise Error('\"%s\' unexpected storage type: %s'%(
vlay.name(), vlay.dataProvider().storageType()))
checks_l.append('mlay')
#=======================================================================
# validty
#=======================================================================
#==========================================================================
# if chk_valid:
# vlay_chk_validty(vlay, chk_geo=True)
#
# checks_l.append('validity')
#==========================================================================
#=======================================================================
# wrap
#=======================================================================
log.debug('\'%s\' passed %i checks: %s'%(
vlay.name(), len(checks_l), checks_l))
return
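#==============================================================================
# illustrative usage (not executed; field names and feature count are
# hypothetical):
#
# vlay_check(vlay, exp_fieldns=['xid', 'f0'], fcnt=100, mlay=False)
#==============================================================================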
def load_vlay( #load a layer from a file
fp,
providerLib='ogr',
logger=mod_logger):
"""
what are we using this for?
    see instance method
"""
log = logger.getChild('load_vlay')
assert os.path.exists(fp), 'requested file does not exist: %s'%fp
basefn = os.path.splitext(os.path.split(fp)[1])[0]
#Import a Raster Layer
vlay_raw = QgsVectorLayer(fp,basefn,providerLib)
#check if this is valid
if not vlay_raw.isValid():
        log.error('loaded vlay \'%s\' is not valid. \n \n did you initialize?'%vlay_raw.name())
raise Error('vlay loading produced an invalid layer')
#check if it has geometry
if vlay_raw.wkbType() == 100:
log.error('loaded vlay has NoGeometry')
raise Error('no geo')
#==========================================================================
# report
#==========================================================================
vlay = vlay_raw
dp = vlay.dataProvider()
log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n %s'
%(vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()), dp.featureCount(), fp))
return vlay
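#==============================================================================
# illustrative usage (not executed; the file path is hypothetical):
#
# vlay = load_vlay(r'C:\data\finv.gpkg', providerLib='ogr')
#==============================================================================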
def vlay_write( #write a VectorLayer
vlay, out_fp,
driverName='GPKG',
fileEncoding = "CP1250",
opts = QgsVectorFileWriter.SaveVectorOptions(), #empty options object
overwrite=False,
logger=mod_logger):
"""
help(QgsVectorFileWriter.SaveVectorOptions)
QgsVectorFileWriter.SaveVectorOptions.driverName='GPKG'
opt2 = QgsVectorFileWriter.BoolOption(QgsVectorFileWriter.CreateOrOverwriteFile)
help(QgsVectorFileWriter)
TODO: Move this back onto Qcoms
"""
#==========================================================================
# defaults
#==========================================================================
log = logger.getChild('vlay_write')
#===========================================================================
# assemble options
#===========================================================================
opts.driverName = driverName
opts.fileEncoding = fileEncoding
#===========================================================================
# checks
#===========================================================================
#file extension
fhead, ext = os.path.splitext(out_fp)
if not 'gpkg' in ext:
raise Error('unexpected extension: %s'%ext)
if os.path.exists(out_fp):
msg = 'requested file path already exists!. overwrite=%s \n %s'%(
overwrite, out_fp)
if overwrite:
log.warning(msg)
            os.remove(out_fp) #workaround... should be a way to overwrite with the QgsVectorFileWriter
else:
raise Error(msg)
if vlay.dataProvider().featureCount() == 0:
raise Error('\'%s\' has no features!'%(
vlay.name()))
if not vlay.isValid():
        raise Error('passed invalid layer')
error = QgsVectorFileWriter.writeAsVectorFormatV2(
vlay, out_fp,
QgsCoordinateTransformContext(),
opts,
)
#=======================================================================
# wrap and check
#=======================================================================
if error[0] == QgsVectorFileWriter.NoError:
log.info('layer \' %s \' written to: \n %s'%(vlay.name(),out_fp))
return out_fp
raise Error('FAILURE on writing layer \' %s \' with code:\n %s \n %s'%(vlay.name(),error, out_fp))
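#==============================================================================
# illustrative usage (not executed; the output path is hypothetical and must
# be a .gpkg for the default GPKG driver):
#
# vlay_write(vlay, r'C:\temp\result.gpkg', overwrite=True)
#==============================================================================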
def vlay_get_fdf( #pull all the feature data and place into a df
vlay,
          fmt='df', #result format key.
#dict: {fid:{fieldname:value}}
#df: index=fids, columns=fieldnames
#limiters
          request = None, #request to pull data. for more customized requests.
fieldn_l = None, #or field name list. for generic requests
#modifiers
          reindex = None, #optional field name to reindex df by
#expectations
expect_all_real = False, #whether to expect all real results
allow_none = False,
db_f = False,
logger=mod_logger,
feedback=MyFeedBackQ()):
"""
performance improvement
    Warning: requests with getFeatures aren't working as expected for memory layers
this could be combined with vlay_get_feats()
also see vlay_get_fdata() (for a single column)
RETURNS
a dictionary in the Qgis attribute dictionary format:
key: generally feat.id()
value: a dictionary of {field name: attribute value}
"""
#===========================================================================
# setups and defaults
#===========================================================================
log = logger.getChild('vlay_get_fdf')
assert isinstance(vlay, QgsVectorLayer)
all_fnl = [fieldn.name() for fieldn in vlay.fields().toList()]
if fieldn_l is None: #use all the fields
fieldn_l = all_fnl
else:
vlay_check(vlay, fieldn_l, logger=logger, db_f=db_f)
if allow_none:
if expect_all_real:
            raise Error('can\'t allow none and expect all reals')
#===========================================================================
# prechecks
#===========================================================================
if not reindex is None:
if not reindex in fieldn_l:
raise Error('requested reindexer \'%s\' is not a field name'%reindex)
if not vlay.dataProvider().featureCount()>0:
raise Error('no features!')
if len(fieldn_l) == 0:
raise Error('no fields!')
if fmt=='dict' and not (len(fieldn_l)==len(all_fnl)):
raise Error('dict results dont respect field slicing')
assert hasattr(feedback, 'setProgress')
#===========================================================================
# build the request
#===========================================================================
feedback.setProgress(2)
if request is None:
"""WARNING: this doesnt seem to be slicing the fields.
see Alg().deletecolumns()
but this will re-key things
request = QgsFeatureRequest().setSubsetOfAttributes(fieldn_l,vlay.fields())"""
request = QgsFeatureRequest()
#never want geometry
request = request.setFlags(QgsFeatureRequest.NoGeometry)
log.debug('extracting data from \'%s\' on fields: %s'%(vlay.name(), fieldn_l))
#===========================================================================
# loop through each feature and extract the data
#===========================================================================
fid_attvs = dict() #{fid : {fieldn:value}}
fcnt = vlay.dataProvider().featureCount()
for indxr, feat in enumerate(vlay.getFeatures(request)):
#zip values
fid_attvs[feat.id()] = feat.attributes()
feedback.setProgress((indxr/fcnt)*90)
#===========================================================================
# post checks
#===========================================================================
if not len(fid_attvs) == vlay.dataProvider().featureCount():
log.debug('data result length does not match feature count')
if not request.filterType()==3: #check if a filter fids was passed
"""todo: add check to see if the fiter request length matches tresult"""
raise Error('no filter and data length mismatch')
#check the field lengthes
if not len(all_fnl) == len(feat.attributes()):
raise Error('field length mismatch')
#empty check 1
if len(fid_attvs) == 0:
log.warning('failed to get any data on layer \'%s\' with request'%vlay.name())
if not allow_none:
raise Error('no data found!')
else:
if fmt == 'dict':
return dict()
elif fmt == 'df':
return pd.DataFrame()
else:
raise Error('unexpected fmt type')
#===========================================================================
# result formatting
#===========================================================================
log.debug('got %i data elements for \'%s\''%(
len(fid_attvs), vlay.name()))
if fmt == 'dict':
return fid_attvs
elif fmt=='df':
#build the dict
df_raw = pd.DataFrame.from_dict(fid_attvs, orient='index', columns=all_fnl)
#handle column slicing and Qnulls
"""if the requester worked... we probably wouldnt have to do this"""
df = df_raw.loc[:, tuple(fieldn_l)].replace(NULL, np.nan)
feedback.setProgress(95)
if isinstance(reindex, str):
"""
reindex='zid'
view(df)
"""
#try and add the index (fids) as a data column
try:
df = df.join(pd.Series(df.index,index=df.index, name='fid'))
except:
log.debug('failed to preserve the fids.. column already there?')
            #re-index by the passed key... should copy the fids over to 'index'
df = df.set_index(reindex, drop=True)
log.debug('reindexed data by \'%s\''%reindex)
return df
else:
raise Error('unrecognized fmt kwarg')
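#==============================================================================
# illustrative usage (not executed; field names are hypothetical):
#
# df = vlay_get_fdf(vlay, fmt='df')                     #index=fids, columns=field names
# df = vlay_get_fdf(vlay, fieldn_l=['xid', 'f0'], reindex='xid')
#==============================================================================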
def vlay_get_fdata( #get data for a single field from all the features
vlay,
fieldn = None, #get a field name. 'None' returns a dictionary of np.nan
geopropn = None, #get a geometry property
geo_obj = False, #whether to just get the geometry object
request = None, #additional requester (limiting fids). fieldn still required. additional flags added
selected= False, #whether to limit data to just those selected features
fmt = 'dict', #format to return results in
                    #'singleton': expect and provide a unitary value
rekey = None, #field name to rekey dictionary like results by
expect_all_real = False, #whether to expect all real results
dropna = False, #whether to drop nulls from the results
allow_none = False,
logger = mod_logger, db_f=False):
"""
TODO: combine this with vlay_get_fdatas
consider combining with vlay_get_feats
I'm not sure how this will handle requests w/ expressions
"""
log = logger.getChild('vlay_get_fdata')
if request is None:
request = QgsFeatureRequest()
#===========================================================================
# prechecks
#===========================================================================
if geo_obj:
if fmt == 'df': raise IOError
if not geopropn is None: raise IOError
if dropna:
if expect_all_real:
            raise Error('can\'t expect_all_reals AND dropna')
if allow_none:
if expect_all_real:
            raise Error('can\'t allow none and expect all reals')
vlay_check(vlay, exp_fieldns=[fieldn], logger=log, db_f=db_f)
#===========================================================================
# build the request
#===========================================================================
#no geometry
if (geopropn is None) and (not geo_obj):
if fieldn is None:
raise Error('no field name provided')
request = request.setFlags(QgsFeatureRequest.NoGeometry)
request = request.setSubsetOfAttributes([fieldn],vlay.fields())
else:
        request = request.setNoAttributes() #don't get any attributes
#===========================================================================
# selection limited
#===========================================================================
if selected:
"""
        todo: check if there is already a fid filter placed on the requester
"""
log.debug('limiting data pull to %i selected features on \'%s\''%(
vlay.selectedFeatureCount(), vlay.name()))
sfids = vlay.selectedFeatureIds()
request = request.setFilterFids(sfids)
#===========================================================================
    # loop through and collect the data
#===========================================================================
#if db_f: req_log(request, logger=log)
d = dict() #empty container for results
for feat in vlay.getFeatures(request):
#=======================================================================
# get geometry
#=======================================================================
if geo_obj:
d[feat.id()] = feat.geometry()
#=======================================================================
# get a geometry property
#=======================================================================
elif not geopropn is None:
geo = feat.geometry()
func = getattr(geo, geopropn) #get the method
d[feat.id()] = func() #call the method and store
#=======================================================================
# field request
#=======================================================================
else:
#empty shortcut
if qisnull(feat.attribute(fieldn)):
d[feat.id()] = np.nan
else: #pull real data
d[feat.id()] = feat.attribute(fieldn)
log.debug('retrieved %i attributes from features on \'%s\''%(
len(d), vlay.name()))
#===========================================================================
# null handling
#===========================================================================
if selected:
if not len(d) == vlay.selectedFeatureCount():
raise Error('failed to get data matching %i selected features'%(
vlay.selectedFeatureCount()))
if expect_all_real:
boolar = pd.isnull(np.array(list(d.values())))
if np.any(boolar):
raise Error('got %i nulls'%boolar.sum())
if dropna:
"""faster to use dfs?"""
log.debug('dropping nulls from %i'%len(d))
d2 = dict()
for k, v in d.items():
            if pd.isnull(v): #np.isnan fails on non-numeric values
continue
d2[k] = v
d = d2 #reset
#===========================================================================
# post checks
#===========================================================================
if len(d) == 0:
log.warning('got no results! from \'%s\''%(
vlay.name()))
if not allow_none:
raise Error('allow_none=FALSE and no results')
"""
view(vlay)
"""
#===========================================================================
# rekey
#===========================================================================
if isinstance(rekey, str):
assert fmt=='dict'
d, _ = vlay_key_convert(vlay, d, rekey, id1_type='fid', logger=log)
#===========================================================================
# results
#===========================================================================
if fmt == 'dict':
return d
elif fmt == 'df':
return pd.DataFrame(pd.Series(d, name=fieldn))
elif fmt == 'singleton':
if not len(d)==1:
raise Error('expected singleton')
return next(iter(d.values()))
elif fmt == 'ser':
return pd.Series(d, name=fieldn)
else:
raise IOError
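#==============================================================================
# illustrative usage (not executed; the field name 'xid' is hypothetical):
#
# d = vlay_get_fdata(vlay, fieldn='xid')                #{fid: value}
# geo_d = vlay_get_fdata(vlay, geo_obj=True)            #{fid: QgsGeometry}
# area_d = vlay_get_fdata(vlay, geopropn='area')        #{fid: geometry.area()}
#==============================================================================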
def vlay_new_mlay(#create a new mlay
gtype, #"Point", "LineString", "Polygon", "MultiPoint", "MultiLineString", or "MultiPolygon".
crs,
layname,
qfields,
feats_l,
logger=mod_logger,
):
#=======================================================================
# defaults
#=======================================================================
log = logger.getChild('vlay_new_mlay')
#=======================================================================
# prechecks
#=======================================================================
if not isinstance(layname, str):
        raise Error('expected a string for layname, instead got %s'%type(layname))
if gtype=='None':
log.warning('constructing mlay w/ \'None\' type')
#=======================================================================
# assemble into new layer
#=======================================================================
#initilzie the layer
    EPSG_code=int(crs.authid().split(":")[1]) #get the coordinate reference system of the input layer
uri = gtype+'?crs=epsg:'+str(EPSG_code)+'&index=yes'
vlaym = QgsVectorLayer(uri, layname, "memory")
# add fields
if not vlaym.dataProvider().addAttributes(qfields):
raise Error('failed to add fields')
vlaym.updateFields()
#add feats
if not vlaym.dataProvider().addFeatures(feats_l):
raise Error('failed to addFeatures')
vlaym.updateExtents()
#=======================================================================
# checks
#=======================================================================
if vlaym.wkbType() == 100:
msg = 'constructed layer \'%s\' has NoGeometry'%vlaym.name()
if gtype == 'None':
log.debug(msg)
else:
raise Error(msg)
log.debug('constructed \'%s\''%vlaym.name())
return vlaym
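#==============================================================================
# illustrative usage (not executed): assembling a memory layer from pre-built
# QgsFields and QgsFeature objects (e.g. from feats_build()):
#
# vlay = vlay_new_mlay('Point', crs, 'my_layer', qfields, list(feats_d.values()))
#==============================================================================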
def vlay_new_df(#build a vlay from a df
df_raw,
crs,
geo_d = None, #container of geometry objects {fid: QgsGeometry}
geo_fn_tup = None, #if geo_d=None, tuple of field names to search for coordinate data
layname='df_layer',
allow_fid_mismatch = False,
              infer_dtypes = True, #whether to refresh the dtyping in the df
driverName = 'GPKG',
#expectations
expect_unique_colns = True,
logger=mod_logger, db_f = False,
):
"""
todo: migrate off this
"""
#=======================================================================
# setup
#=======================================================================
log = logger.getChild('vlay_new_df')
    log.warning('Deprecate me')
#=======================================================================
# precheck
#=======================================================================
df = df_raw.copy()
max_len=50
#check lengths
boolcol = df_raw.columns.str.len() >= max_len
if np.any(boolcol):
        log.warning('passed %i columns which exceed the max length %i for driver \'%s\'.. truncating: \n %s'%(
boolcol.sum(), max_len, driverName, df_raw.columns.values[boolcol]))
df.columns = df.columns.str.slice(start=0, stop=max_len-1)
#make sure the columns are unique
if not df.columns.is_unique:
"""
this can happen especially when some field names are super long and have their unique parts truncated
"""
boolcol = df.columns.duplicated(keep='first')
log.warning('got %i duplicated columns: \n %s'%(
boolcol.sum(), df.columns[boolcol].values))
if expect_unique_colns:
raise Error('got non unique columns')
#drop the duplicates
log.warning('dropping second duplicate column')
df = df.loc[:, ~boolcol]
#===========================================================================
# assemble the features
#===========================================================================
"""this does its own index check"""
feats_d = feats_build(df, logger=log, geo_d = geo_d,infer_dtypes=infer_dtypes,
geo_fn_tup = geo_fn_tup,
allow_fid_mismatch=allow_fid_mismatch, db_f=db_f)
#=======================================================================
# get the geo type
#=======================================================================
if not geo_d is None:
#pull geometry type from first feature
gtype = QgsWkbTypes().displayString(next(iter(geo_d.values())).wkbType())
elif not geo_fn_tup is None:
gtype = 'Point'
else:
gtype = 'None'
#===========================================================================
    # build the new layer
#===========================================================================
vlay = vlay_new_mlay(gtype, #no geo
crs,
layname,
list(feats_d.values())[0].fields(),
list(feats_d.values()),
logger=log,
)
#=======================================================================
# post check
#=======================================================================
if db_f:
if vlay.wkbType() == 100:
raise Error('constructed layer has NoGeometry')
#vlay_chk_validty(vlay, chk_geo=True, logger=log)
return vlay
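#==============================================================================
# illustrative usage (not executed; the coordinate column names 'x' and 'y'
# are hypothetical):
#
# vlay = vlay_new_df(df, crs, geo_fn_tup=('x', 'y'), layname='from_df')
#==============================================================================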
def vlay_fieldnl(vlay):
return [field.name() for field in vlay.fields()]
def feats_build( #build a set of features from the passed data
data, #data from which to build features from (either df or qvlayd)
geo_d = None, #container of geometry objects {fid: QgsGeometry}
geo_fn_tup = None, #if geo_d=None, tuple of field names to search for coordinate data
                 allow_fid_mismatch = False, #whether to raise an error if the fids set on the layer don't match the data
                 infer_dtypes = True, #whether to refresh the dtyping in the df
logger=mod_logger, db_f=False):
log = logger.getChild('feats_build')
#===========================================================================
# precheck
#===========================================================================
#geometry input logic
if (not geo_d is None) and (not geo_fn_tup is None):
raise Error('todo: implement non geo layers')
#index match
if isinstance(geo_d, dict):
#get the data fid_l
if isinstance(data, pd.DataFrame):
dfid_l = data.index.tolist()
elif isinstance(data, dict):
dfid_l = list(data.keys())
else:
raise Error('unexpected type')
if not basic.linr(dfid_l, list(geo_d.keys()),'feat_data', 'geo_d',
sort_values=True, result_type='exact', logger=log):
raise Error('passed geo_d and data indexes dont match')
#overrides
if geo_fn_tup:
geofn_hits = 0
sub_field_match = False #dropping geometry fields
else:
sub_field_match = True
log.debug('for %i data type %s'%(
len(data), type(data)))
#===========================================================================
# data conversion
#===========================================================================
if isinstance(data, pd.DataFrame):
#check the index (this will be the fids)
if not data.index.dtype.char == 'q':
raise Error('expected integer index')
fid_ar = data.index.values
#infer types
if infer_dtypes:
data = data.infer_objects()
#convert the data
qvlayd = df_to_qvlayd(data)
#=======================================================================
# build fields container from data
#=======================================================================
"""we need to convert numpy types to pytypes.
        these are later converted to Qtypes"""
fields_d = dict()
for coln, col in data.items():
if not geo_fn_tup is None:
if coln in geo_fn_tup:
geofn_hits +=1
continue #skip this one
#set the type for this name
fields_d[coln] = np_to_pytype(col.dtype, logger=log)
qfields = fields_build_new(fields_d = fields_d, logger=log)
#=======================================================================
# some checks
#=======================================================================
if db_f:
            #calc the expectation
if geo_fn_tup is None:
exp_cnt= len(data.columns)
else:
exp_cnt = len(data.columns) - len(geo_fn_tup)
if not exp_cnt == len(fields_d):
raise Error('only generated %i fields from %i columns'%(
len(data.columns), len(fields_d)))
#check we got them all
if not exp_cnt == len(qfields):
raise Error('failed to create all the fields')
"""
for field in qfields:
print(field)
qfields.toList()
new_qfield = QgsField(fname, qtype, typeName=QMetaType.typeName(QgsField(fname, qtype).type()))
"""
else:
fid_ar = np.array(list(data.keys()))
#set the data
qvlayd = data
#===========================================================================
# build fields container from data
#===========================================================================
#slice out geometry data if there
sub_d1 = list(qvlayd.values())[0] #just get the first
sub_d2 = dict()
for fname, value in sub_d1.items():
if not geo_fn_tup is None:
if fname in geo_fn_tup:
geofn_hits +=1
continue #skip this one
sub_d2[fname] = value
#build the fields from this sample data
qfields = fields_build_new(samp_d = sub_d2, logger=log)
#check for geometry field names
if not geo_fn_tup is None:
if not geofn_hits == len(geo_fn_tup):
            log.error('missing some geometry field names from the data')
raise IOError
#===========================================================================
# extract geometry
#===========================================================================
if geo_d is None:
#check for nulls
if db_f:
chk_df= pd.DataFrame.from_dict(qvlayd, orient='index')
if chk_df.loc[:, geo_fn_tup].isna().any().any():
raise Error('got some nulls on the geometry fields: %s'%geo_fn_tup)
geo_d = dict()
for fid, sub_d in copy.copy(qvlayd).items():
#get the xy
xval, yval = sub_d.pop(geo_fn_tup[0]), sub_d.pop(geo_fn_tup[1])
#build the geometry
geo_d[fid] = QgsGeometry.fromPointXY(QgsPointXY(xval,yval))
#add the cleaned data back in
qvlayd[fid] = sub_d
#===========================================================================
# check geometry
#===========================================================================
if db_f:
        #precheck geometry validity
for fid, geo in geo_d.items():
if not geo.isGeosValid():
raise Error('got invalid geometry on %i'%fid)
#===========================================================================
    # loop through and build features
#===========================================================================
feats_d = dict()
for fid, sub_d in qvlayd.items():
#=======================================================================
# #log.debug('assembling feature %i'%fid)
# #=======================================================================
# # assmble geometry data
# #=======================================================================
# if isinstance(geo_d, dict):
# geo = geo_d[fid]
#
# elif not geo_fn_tup is None:
# xval = sub_d[geo_fn_tup[0]]
# yval = sub_d[geo_fn_tup[1]]
#
# if pd.isnull(xval) or pd.isnull(yval):
# log.error('got null geometry values')
# raise IOError
#
# geo = QgsGeometry.fromPointXY(QgsPointXY(xval,yval))
# #Point(xval, yval) #make the point geometry
#
# else:
# geo = None
#=======================================================================
#=======================================================================
        # build the feature
#=======================================================================
#=======================================================================
# feats_d[fid] = feat_build(fid, sub_d, qfields=qfields, geometry=geo,
# sub_field_match = sub_field_match, #because we are excluding the geometry from the fields
# logger=log, db_f=db_f)
#=======================================================================
feat = QgsFeature(qfields, fid)
for fieldn, value in sub_d.items():
"""
cut out feat_build() for simplicity
"""
#skip null values
if pd.isnull(value): continue
#get the index for this field
findx = feat.fieldNameIndex(fieldn)
#get the qfield
qfield = feat.fields().at(findx)
#make the type match
ndata = qtype_to_pytype(value, qfield.type(), logger=log)
#set the attribute
if not feat.setAttribute(findx, ndata):
raise Error('failed to setAttribute')
#setgeometry
feat.setGeometry(geo_d[fid])
        #store it
feats_d[fid]=feat
#===========================================================================
# checks
#===========================================================================
if db_f:
#fid match
nfid_ar = np.array(list(feats_d.keys()))
if not np.array_equal(nfid_ar, fid_ar):
log.warning('fid mismatch')
if not allow_fid_mismatch:
raise Error('fid mismatch')
#feature validty
for fid, feat in feats_d.items():
if not feat.isValid():
raise Error('invalid feat %i'%feat.id())
if not feat.geometry().isGeosValid():
raise Error('got invalid geometry on feat \'%s\''%(feat.id()))
"""
feat.geometry().type()
"""
log.debug('built %i \'%s\' features'%(
len(feats_d),
QgsWkbTypes.geometryDisplayString(feat.geometry().type()),
))
return feats_d
def fields_build_new( #build qfields from different data containers
samp_d = None, #sample data from which to build qfields {fname: value}
fields_d = None, #direct data from which to build qfields {fname: pytype}
fields_l = None, #list of QgsField objects
logger=mod_logger):
log = logger.getChild('fields_build_new')
#===========================================================================
    # build the fields_d
#===========================================================================
    if (fields_d is None) and (fields_l is None): #only if we have nothing better to start with
if samp_d is None:
log.error('got no data to build fields on!')
raise IOError
fields_l = []
for fname, value in samp_d.items():
if pd.isnull(value):
                log.error('for field \'%s\' got null value'%fname)
raise IOError
elif inspect.isclass(value):
raise IOError
fields_l.append(field_new(fname, pytype=type(value)))
log.debug('built %i fields from sample data'%len(fields_l))
#===========================================================================
    # build the fields set
#===========================================================================
elif fields_l is None:
fields_l = []
for fname, ftype in fields_d.items():
fields_l.append(field_new(fname, pytype=ftype))
log.debug('built %i fields from explicit name/type'%len(fields_l))
#check it
if not len(fields_l) == len(fields_d):
raise Error('length mismatch')
elif fields_d is None: #check we have the other
raise IOError
#===========================================================================
# build the Qfields
#===========================================================================
Qfields = QgsFields()
fail_msg_d = dict()
for indx, field in enumerate(fields_l):
if not Qfields.append(field):
fail_msg_d[indx] = ('%i failed to append field \'%s\''%(indx, field.name()), field)
#report
if len(fail_msg_d)>0:
for indx, (msg, field) in fail_msg_d.items():
log.error(msg)
raise Error('failed to write %i fields'%len(fail_msg_d))
"""
field.name()
field.constraints().constraintDescription()
field.length()
"""
#check it
if not len(Qfields) == len(fields_l):
raise Error('length mismatch')
return Qfields
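#==============================================================================
# illustrative usage (not executed; field names are hypothetical):
#
# qfields = fields_build_new(fields_d={'xid': int, 'f0': float, 'name': str})
#==============================================================================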
def field_new(fname,
pytype=str,
driverName = 'SpatiaLite', #desired driver (to check for field name length limitations)
              fname_trunc = True, #whether to truncate field names that exceed the limit
logger=mod_logger): #build a QgsField
#===========================================================================
# precheck
#===========================================================================
if not isinstance(fname, str):
raise IOError('expected string for fname')
    #vector layer field name limitation
max_len = fieldn_max_d[driverName]
"""
fname = 'somereallylongname'
"""
if len(fname) >max_len:
log = logger.getChild('field_new')
        log.warning('got %i (>%i) characters for passed field name \'%s\'. truncating'%(len(fname), max_len, fname))
if fname_trunc:
fname = fname[:max_len]
else:
raise Error('field name too long')
qtype = ptype_to_qtype(pytype)
"""
#check this type
QMetaType.typeName(QgsField(fname, qtype).type())
QVariant.String
QVariant.Int
QMetaType.typeName(new_qfield.type())
"""
#new_qfield = QgsField(fname, qtype)
new_qfield = QgsField(fname, qtype, typeName=QMetaType.typeName(QgsField(fname, qtype).type()))
return new_qfield
def vlay_get_bgeo_type(vlay,
match_flags=re.IGNORECASE,
):
gstr = QgsWkbTypes().displayString(vlay.wkbType()).lower()
for gtype in ('polygon', 'point', 'line'):
if re.search(gtype, gstr, flags=match_flags):
return gtype
raise Error('failed to match')
def vlay_rename_fields(
vlay_raw,
rnm_d, #field name conversions to apply {old FieldName:newFieldName}
logger=None,
feedback=None,
):
"""
todo: replace with coms.hp.Qproj.vlay_rename_fields
"""
if logger is None: logger=mod_logger
log=logger.getChild('vlay_rename_fields')
#get a working layer
vlay_raw.selectAll()
vlay = processing.run('native:saveselectedfeatures',
{'INPUT' : vlay_raw, 'OUTPUT' : 'TEMPORARY_OUTPUT'},
feedback=feedback)['OUTPUT']
#get fieldname index conversion for layer
fni_d = {f.name():vlay.dataProvider().fieldNameIndex(f.name()) for f in vlay.fields()}
#check it
for k in rnm_d.keys():
assert k in fni_d.keys(), 'requested field \'%s\' not on layer'%k
#re-index rename request
fiRn_d = {fni_d[k]:v for k,v in rnm_d.items()}
#apply renames
if not vlay.dataProvider().renameAttributes(fiRn_d):
raise Error('failed to rename')
vlay.updateFields()
#check it
fn_l = [f.name() for f in vlay.fields()]
s = set(rnm_d.values()).difference(fn_l)
assert len(s)==0, 'failed to rename %i fields: %s'%(len(s), s)
vlay.setName(vlay_raw.name())
log.debug('applied renames to \'%s\' \n %s'%(vlay.name(), rnm_d))
return vlay
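#==============================================================================
# illustrative usage (not executed; field names are hypothetical):
#
# vlay = vlay_rename_fields(vlay_raw, {'old_name': 'new_name'})
#==============================================================================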
def vlay_key_convert(#convert a list of ids in one form to another
vlay,
        id1_objs, #list of ids (or dict keyed by ids) to get conversion of
id_fieldn, #field name for field type ids
        id1_type = 'field', #type of ids passed in the id_l (result will return a dict of the opposite type)
#'field': keys in id1_objs are values from some field (on the vlay)
#'fid': keys in id1_objs are fids (on the vlay)
fid_fval_d = None, #optional pre-calced data (for performance improvement)
logger=mod_logger,
db_f = False, #extra checks
):
log = logger.getChild('vlay_key_convert')
#===========================================================================
# handle variable inputs
#===========================================================================
if isinstance(id1_objs, dict):
id1_l = list(id1_objs.keys())
elif isinstance(id1_objs, list):
id1_l = id1_objs
else:
raise Error('unrecognized id1_objs type')
#===========================================================================
# extract the fid to fval conversion
#===========================================================================
if fid_fval_d is None:
#limit the pull by id1s
if id1_type == 'fid':
request = QgsFeatureRequest().setFilterFids(id1_l)
log.debug('pulling \'fid_fval_d\' from %i fids'%(len(id1_l)))
#by field values
elif id1_type == 'field': #limit by field value
raise Error(' not implemented')
#build an expression so we only query features with values matching the id1_l
#===================================================================
# qexp = exp_vals_in_field(id1_l, id_fieldn, qfields = vlay.fields(), logger=log)
# request = QgsFeatureRequest(qexp)
#
# log.debug('pulling \'fid_fval_d\' from %i \'%s\' fvals'%(
# len(id1_l), id_fieldn))
#===================================================================
else:
raise Error('unrecognized id1_type')
fid_fval_d = vlay_get_fdata(vlay, fieldn=id_fieldn, request =request, logger=log,
expect_all_real=True, fmt='dict')
#no need
else:
log.debug('using passed \'fid_fval_d\' with %i'%len(fid_fval_d))
#check them
if db_f:
#log.debug('\'fid_fval_d\': \n %s'%fid_fval_d)
for dname, l in (
('keys', list(fid_fval_d.keys())),
('values', list(fid_fval_d.values()))
):
if not len(np.unique(np.array(l))) == len(l):
raise Error('got non unique \'%s\' on fid_fval_d'%dname)
#===========================================================================
# swap keys
#===========================================================================
if id1_type == 'fid':
        id1_id2_d = fid_fval_d #no flip necessary
elif id1_type == 'field': #limit by field value
log.debug('swapping keys')
id1_id2_d = dict(zip(
fid_fval_d.values(), fid_fval_d.keys()
))
else:
raise Error('unrecognized id1_type')
#=======================================================================
# #make conversion
#=======================================================================
#for dictionaries
if isinstance(id1_objs, dict):
res_objs = dict()
for id1, val in id1_objs.items():
res_objs[id1_id2_d[id1]] = val
log.debug('got converted DICT results with %i'%len(res_objs))
#for lists
elif isinstance(id1_objs, list):
res_objs = [id1_id2_d[id1] for id1 in id1_objs]
log.debug('got converted LIST results with %i'%len(res_objs))
else:
raise Error('unrecognized id1_objs type')
    return res_objs, fid_fval_d #converted objects, conversion dict ONLY FOR THESE OBJECTS!
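#==============================================================================
# illustrative usage (not executed; 'xid' is a hypothetical id field): re-key
# a fid-keyed result dictionary by that field's values
#
# res_d, fid_fval_d = vlay_key_convert(vlay, d, 'xid', id1_type='fid')
#==============================================================================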
#==============================================================================
# type checks-----------------
#==============================================================================
def qisnull(obj):
if obj is None:
return True
if isinstance(obj, QVariant):
if obj.isNull():
return True
else:
return False
if pd.isnull(obj):
return True
else:
return False
def is_qtype_match(obj, qtype_code, logger=mod_logger): #check if the object matches the qtype code
log = logger.getChild('is_qtype_match')
#get pythonic type for this code
try:
py_type = type_qvar_py_d[qtype_code]
except:
if not qtype_code in type_qvar_py_d.keys():
log.error('passed qtype_code \'%s\' not in dict from \'%s\''%(qtype_code, type(obj)))
raise IOError
if not isinstance(obj, py_type):
#log.debug('passed object of type \'%s\' does not match Qvariant.type \'%s\''%(type(obj), QMetaType.typeName(qtype_code)))
return False
else:
return True
#==============================================================================
# type conversions----------------
#==============================================================================
def np_to_pytype(npdobj, logger=mod_logger):
if not isinstance(npdobj, np.dtype):
raise Error('not passed a numpy type')
try:
return npc_pytype_d[npdobj.char]
except Exception as e:
log = logger.getChild('np_to_pytype')
if not npdobj.char in npc_pytype_d.keys():
log.error('passed npdtype \'%s\' not found in the conversion dictionary'%npdobj.name)
        raise Error('failed to convert w/ \n %s'%e)
def qtype_to_pytype( #convert object to the pythonic type that matches the passed qtype code
obj,
qtype_code, #qtupe code (qfield.type())
logger=mod_logger):
if is_qtype_match(obj, qtype_code): #no conversion needed
return obj
#===========================================================================
# shortcut for nulls
#===========================================================================
if qisnull(obj):
return None
#get pythonic type for this code
py_type = type_qvar_py_d[qtype_code]
try:
return py_type(obj)
except:
#datetime
if qtype_code == 16:
return obj.toPyDateTime()
log = logger.getChild('qtype_to_pytype')
if obj is None:
log.error('got NONE type')
elif isinstance(obj, QVariant):
log.error('got a Qvariant object')
else:
log.error('unable to map object \'%s\' of type \'%s\' to type \'%s\''
%(obj, type(obj), py_type))
"""
QMetaType.typeName(obj)
"""
raise IOError
def ptype_to_qtype(py_type, logger=mod_logger): #get the qtype corresponding to the passed pytype
"""useful for buildign Qt objects
really, this is a reverse
py_type=str
"""
if not inspect.isclass(py_type):
logger.error('got unexpected type \'%s\''%type(py_type))
raise Error('bad type')
#build a QVariant object from this python type class, then return its type
try:
qv = QVariant(py_type())
except:
logger.error('failed to build QVariant from \'%s\''%type(py_type))
raise IOError
"""
#get the type
QMetaType.typeName(qv.type())
"""
return qv.type()
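#==============================================================================
# illustrative usage (not executed; 'xid' is a hypothetical field name):
#
# qtype = ptype_to_qtype(int)          #QVariant type code for python ints
# qfield = QgsField('xid', qtype)
#==============================================================================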
def df_to_qvlayd( #convert a data frame into the layer data structure (keyed by index)
df, #data to convert. df index should match fid index
logger=mod_logger):
log = logger.getChild('df_to_qvlayd')
d = dict() #data dictionary in qgis structure
#prechecks
if not df.index.is_unique:
log.error('got passed non-unique index')
raise IOError
#===========================================================================
# loop and fill
#===========================================================================
for fid, row in df.iterrows():
#=======================================================================
# build sub data
#=======================================================================
sub_d = dict() #sub data structure
for fieldn, value in row.items():
sub_d[fieldn] = value
#=======================================================================
# add the sub into the main
#=======================================================================
d[fid] = sub_d
if not len(df) == len(d):
log.error('got length mismatch')
raise IOError
log.debug('converted df %s into qvlayd'%str(df.shape))
return d
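#==============================================================================
# illustrative note: for a DataFrame with index [0, 1] and a single column
# 'f0', df_to_qvlayd() returns {0: {'f0': v0}, 1: {'f0': v1}}, i.e. keyed
# first by fid, then by field name
#==============================================================================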
def view(#view the vector data (or just a df) as a html frame
obj, logger=mod_logger,
    #**gfd_kwargs, #kwargs to pass to vlay_get_fdatas() 'doesn't work well with the requester'
):
if isinstance(obj, pd.DataFrame) or isinstance(obj, pd.Series):
df = obj
elif isinstance(obj, QgsVectorLayer):
"""this will index the viewed frame by fid"""
df = vlay_get_fdf(obj)
else:
raise Error('got unexpected object type: %s'%type(obj))
basic.view(df)
logger.info('viewer closed')
return
if __name__ == '__main__':
print('???')
|
[
"os.remove",
"hlpr.exceptions.QError",
"processing.run",
"numpy.isnan",
"hlpr.basic.linr",
"os.path.join",
"pandas.DataFrame",
"hlpr.basic.view",
"processing.core.Processing.Processing.initialize",
"inspect.isclass",
"os.path.dirname",
"os.path.exists",
"inspect.isbuiltin",
"hlpr.basic.is_null",
"re.search",
"pandas.DataFrame.from_dict",
"hlpr.basic.get_valid_filename",
"qgis.analysis.QgsNativeAlgorithms",
"pandas.Series",
"copy.copy",
"pandas.isnull",
"numpy.any",
"numpy.array",
"os.path.splitext",
"numpy.array_equal",
"os.path.split",
"logging.getLogger"
] |
[((816, 838), 'logging.getLogger', 'logging.getLogger', (['"""Q"""'], {}), "('Q')\n", (833, 838), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((110896, 110914), 'os.path.exists', 'os.path.exists', (['fp'], {}), '(fp)\n', (110910, 110914), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((113188, 113212), 'os.path.splitext', 'os.path.splitext', (['out_fp'], {}), '(out_fp)\n', (113204, 113212), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((113308, 113330), 'os.path.exists', 'os.path.exists', (['out_fp'], {}), '(out_fp)\n', (113322, 113330), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((131175, 131190), 'numpy.any', 'np.any', (['boolcol'], {}), '(boolcol)\n', (131181, 131190), True, 'import numpy as np\n'), ((149062, 149086), 'hlpr.exceptions.QError', 'Error', (['"""failed to match"""'], {}), "('failed to match')\n", (149067, 149086), True, 'from hlpr.exceptions import QError as Error\n'), ((155113, 155127), 'pandas.isnull', 'pd.isnull', (['obj'], {}), '(obj)\n', (155122, 155127), True, 'import pandas as pd\n'), ((160390, 160404), 'hlpr.basic.view', 'basic.view', (['df'], {}), '(df)\n', (160400, 160404), True, 'import hlpr.basic as basic\n'), ((9318, 9341), 'processing.core.Processing.Processing.initialize', 'Processing.initialize', ([], {}), '()\n', (9339, 9341), False, 'from processing.core.Processing import Processing\n'), ((14988, 15006), 'os.path.exists', 'os.path.exists', (['fp'], {}), '(fp)\n', (15002, 15006), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((18180, 18198), 'os.path.exists', 'os.path.exists', (['fp'], {}), '(fp)\n', (18194, 18198), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((21880, 21923), 'hlpr.basic.get_valid_filename', 'get_valid_filename', (["('%s.tif' % newLayerName)"], {}), "('%s.tif' % newLayerName)\n", (21898, 21923), False, 'from hlpr.basic import get_valid_filename\n'), ((21958, 21986), 'os.path.join', 'os.path.join', (['out_dir', 'newFn'], {}), '(out_dir, newFn)\n', (21970, 21986), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((22477, 22500), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (22491, 22500), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((22521, 22543), 'os.path.exists', 'os.path.exists', (['out_fp'], {}), '(out_fp)\n', (22535, 22543), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((26497, 26519), 'os.path.exists', 'os.path.exists', (['out_fp'], {}), '(out_fp)\n', (26511, 26519), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((28222, 28246), 'os.path.splitext', 'os.path.splitext', (['out_fp'], {}), '(out_fp)\n', (28238, 28246), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((28362, 28384), 'os.path.exists', 'os.path.exists', (['out_fp'], {}), '(out_fp)\n', (28376, 28384), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((32229, 32244), 'numpy.any', 'np.any', (['boolcol'], {}), '(boolcol)\n', (32235, 32244), True, 'import numpy as np\n'), ((38884, 38938), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (38898, 38938), False, 'import processing\n'), ((47656, 47710), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, 
feedback=self.feedback)\n', (47670, 47710), False, 'import processing\n'), ((54019, 54073), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (54033, 54073), False, 'import processing\n'), ((57463, 57487), 'os.path.exists', 'os.path.exists', (['table_fp'], {}), '(table_fp)\n', (57477, 57487), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((59020, 59074), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (59034, 59074), False, 'import processing\n'), ((61586, 61640), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (61600, 61640), False, 'import processing\n'), ((63362, 63381), 'os.path.exists', 'os.path.exists', (['ofp'], {}), '(ofp)\n', (63376, 63381), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((63869, 63888), 'os.path.exists', 'os.path.exists', (['ofp'], {}), '(ofp)\n', (63883, 63888), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((64860, 64914), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (64874, 64914), False, 'import processing\n'), ((66819, 66838), 'os.path.exists', 'os.path.exists', (['ofp'], {}), '(ofp)\n', (66833, 66838), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((67699, 67753), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (67713, 67753), False, 'import processing\n'), ((71038, 71092), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (71052, 71092), False, 'import processing\n'), ((71174, 71205), 'os.path.exists', 'os.path.exists', (["res_d['OUTPUT']"], {}), "(res_d['OUTPUT'])\n", (71188, 71205), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((73371, 73425), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (73385, 73425), False, 'import processing\n'), ((74982, 75036), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (74996, 75036), False, 'import processing\n'), ((77481, 77535), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (77495, 77535), False, 'import processing\n'), ((79779, 79833), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (79793, 79833), False, 'import processing\n'), ((82420, 82474), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (82434, 82474), False, 'import processing\n'), ((84455, 84509), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (84469, 84509), False, 'import processing\n'), ((86125, 86179), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (86139, 86179), False, 
'import processing\n'), ((87160, 87214), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (87174, 87214), False, 'import processing\n'), ((90006, 90060), 'processing.run', 'processing.run', (['algo_nm', 'ins_d'], {'feedback': 'self.feedback'}), '(algo_nm, ins_d, feedback=self.feedback)\n', (90020, 90060), False, 'import processing\n'), ((104352, 104385), 'hlpr.exceptions.QError', 'Error', (['"""got passed an empty vlay"""'], {}), "('got passed an empty vlay')\n", (104357, 104385), True, 'from hlpr.exceptions import QError as Error\n'), ((104751, 104777), 'hlpr.basic.is_null', 'basic.is_null', (['exp_fieldns'], {}), '(exp_fieldns)\n', (104764, 104777), True, 'import hlpr.basic as basic\n'), ((105634, 105661), 'hlpr.basic.is_null', 'basic.is_null', (['uexp_fieldns'], {}), '(uexp_fieldns)\n', (105647, 105661), True, 'import hlpr.basic as basic\n'), ((111275, 111322), 'hlpr.exceptions.QError', 'Error', (['"""vlay loading produced an invalid layer"""'], {}), "('vlay loading produced an invalid layer')\n", (111280, 111322), True, 'from hlpr.exceptions import QError as Error\n'), ((111454, 111469), 'hlpr.exceptions.QError', 'Error', (['"""no geo"""'], {}), "('no geo')\n", (111459, 111469), True, 'from hlpr.exceptions import QError as Error\n'), ((113258, 113297), 'hlpr.exceptions.QError', 'Error', (["('unexpected extension: %s' % ext)"], {}), "('unexpected extension: %s' % ext)\n", (113263, 113297), True, 'from hlpr.exceptions import QError as Error\n'), ((113816, 113845), 'hlpr.exceptions.QError', 'Error', (['"""passed invalid layer"""'], {}), "('passed invalid layer')\n", (113821, 113845), True, 'from hlpr.exceptions import QError as Error\n'), ((116861, 116882), 'hlpr.exceptions.QError', 'Error', (['"""no features!"""'], {}), "('no features!')\n", (116866, 116882), True, 'from hlpr.exceptions import QError as Error\n'), ((116925, 116944), 'hlpr.exceptions.QError', 'Error', (['"""no fields!"""'], {}), "('no fields!')\n", (116930, 116944), True, 'from hlpr.exceptions import QError as Error\n'), ((117022, 117070), 'hlpr.exceptions.QError', 'Error', (['"""dict results dont respect field slicing"""'], {}), "('dict results dont respect field slicing')\n", (117027, 117070), True, 'from hlpr.exceptions import QError as Error\n'), ((118984, 119014), 'hlpr.exceptions.QError', 'Error', (['"""field length mismatch"""'], {}), "('field length mismatch')\n", (118989, 119014), True, 'from hlpr.exceptions import QError as Error\n'), ((125870, 125884), 'numpy.any', 'np.any', (['boolar'], {}), '(boolar)\n', (125876, 125884), True, 'import numpy as np\n'), ((129086, 129115), 'hlpr.exceptions.QError', 'Error', (['"""failed to add fields"""'], {}), "('failed to add fields')\n", (129091, 129115), True, 'from hlpr.exceptions import QError as Error\n'), ((129258, 129288), 'hlpr.exceptions.QError', 'Error', (['"""failed to addFeatures"""'], {}), "('failed to addFeatures')\n", (129263, 129288), True, 'from hlpr.exceptions import QError as Error\n'), ((135177, 135216), 'hlpr.exceptions.QError', 'Error', (['"""todo: implement non geo layers"""'], {}), "('todo: implement non geo layers')\n", (135182, 135216), True, 'from hlpr.exceptions import QError as Error\n'), ((147271, 147295), 'hlpr.exceptions.QError', 'Error', (['"""length mismatch"""'], {}), "('length mismatch')\n", (147276, 147295), True, 'from hlpr.exceptions import QError as Error\n'), ((148974, 149015), 're.search', 're.search', (['gtype', 'gstr'], {'flags': 'match_flags'}), 
'(gtype, gstr, flags=match_flags)\n', (148983, 149015), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((149499, 149618), 'processing.run', 'processing.run', (['"""native:saveselectedfeatures"""', "{'INPUT': vlay_raw, 'OUTPUT': 'TEMPORARY_OUTPUT'}"], {'feedback': 'feedback'}), "('native:saveselectedfeatures', {'INPUT': vlay_raw, 'OUTPUT':\n 'TEMPORARY_OUTPUT'}, feedback=feedback)\n", (149513, 149618), False, 'import processing\n'), ((150091, 150116), 'hlpr.exceptions.QError', 'Error', (['"""failed to rename"""'], {}), "('failed to rename')\n", (150096, 150116), True, 'from hlpr.exceptions import QError as Error\n'), ((156141, 156173), 'hlpr.exceptions.QError', 'Error', (['"""not passed a numpy type"""'], {}), "('not passed a numpy type')\n", (156146, 156173), True, 'from hlpr.exceptions import QError as Error\n'), ((158023, 158047), 'inspect.isclass', 'inspect.isclass', (['py_type'], {}), '(py_type)\n', (158038, 158047), False, 'import inspect\n'), ((158128, 158145), 'hlpr.exceptions.QError', 'Error', (['"""bad type"""'], {}), "('bad type')\n", (158133, 158145), True, 'from hlpr.exceptions import QError as Error\n'), ((9188, 9237), 'hlpr.exceptions.QError', 'Error', (['"""qgis has not been properly initlized yet"""'], {}), "('qgis has not been properly initlized yet')\n", (9193, 9237), True, 'from hlpr.exceptions import QError as Error\n'), ((9442, 9463), 'qgis.analysis.QgsNativeAlgorithms', 'QgsNativeAlgorithms', ([], {}), '()\n', (9461, 9463), False, 'from qgis.analysis import QgsNativeAlgorithms\n'), ((12404, 12446), 'hlpr.exceptions.QError', 'Error', (['"""qproj crs does not match sessions"""'], {}), "('qproj crs does not match sessions')\n", (12409, 12446), True, 'from hlpr.exceptions import QError as Error\n'), ((12844, 12877), 'hlpr.exceptions.QError', 'Error', (['"""unrecognized driver name"""'], {}), "('unrecognized driver name')\n", (12849, 12877), True, 'from hlpr.exceptions import QError as Error\n'), ((12957, 12990), 'hlpr.exceptions.QError', 'Error', (['"""unrecognized driver name"""'], {}), "('unrecognized driver name')\n", (12962, 12990), True, 'from hlpr.exceptions import QError as Error\n'), ((15895, 15930), 'hlpr.exceptions.QError', 'Error', (['"""loaded vlay has NoGeometry"""'], {}), "('loaded vlay has NoGeometry')\n", (15900, 15930), True, 'from hlpr.exceptions import QError as Error\n'), ((23450, 23483), 'hlpr.exceptions.QError', 'Error', (['"""Cannot set pipe provider"""'], {}), "('Cannot set pipe provider')\n", (23455, 23483), True, 'from hlpr.exceptions import QError as Error\n'), ((23607, 23641), 'hlpr.exceptions.QError', 'Error', (['"""Cannot set pipe projector"""'], {}), "('Cannot set pipe projector')\n", (23612, 23641), True, 'from hlpr.exceptions import QError as Error\n'), ((25473, 25497), 'hlpr.exceptions.QError', 'Error', (['"""not implemented"""'], {}), "('not implemented')\n", (25478, 25497), True, 'from hlpr.exceptions import QError as Error\n'), ((26460, 26472), 'hlpr.exceptions.QError', 'Error', (['error'], {}), '(error)\n', (26465, 26472), True, 'from hlpr.exceptions import QError as Error\n'), ((28304, 28343), 'hlpr.exceptions.QError', 'Error', (["('unexpected extension: %s' % ext)"], {}), "('unexpected extension: %s' % ext)\n", (28309, 28343), True, 'from hlpr.exceptions import QError as Error\n'), ((28930, 28959), 'hlpr.exceptions.QError', 'Error', (['"""passed invalid layer"""'], {}), "('passed invalid layer')\n", (28935, 28959), True, 'from hlpr.exceptions import QError as Error\n'), ((44563, 44618), 'hlpr.basic.linr', 
'basic.linr', (['jlay_fieldn_l', 'mfnl'], {'result_type': '"""matching"""'}), "(jlay_fieldn_l, mfnl, result_type='matching')\n", (44573, 44618), True, 'import hlpr.basic as basic\n'), ((45887, 45901), 'numpy.any', 'np.any', (['booldf'], {}), '(booldf)\n', (45893, 45901), True, 'import numpy as np\n'), ((52846, 52870), 'hlpr.exceptions.QError', 'Error', (['"""expected a list"""'], {}), "('expected a list')\n", (52851, 52870), True, 'from hlpr.exceptions import QError as Error\n'), ((61722, 61753), 'os.path.exists', 'os.path.exists', (["res_d['OUTPUT']"], {}), "(res_d['OUTPUT'])\n", (61736, 61753), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((61814, 61845), 'hlpr.exceptions.QError', 'Error', (['"""failed to get a result"""'], {}), "('failed to get a result')\n", (61819, 61845), True, 'from hlpr.exceptions import QError as Error\n'), ((63295, 63340), 'os.path.join', 'os.path.join', (['self.out_dir', "(layname + '.sdat')"], {}), "(self.out_dir, layname + '.sdat')\n", (63307, 63340), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((64996, 65027), 'os.path.exists', 'os.path.exists', (["res_d['OUTPUT']"], {}), "(res_d['OUTPUT'])\n", (65010, 65027), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((65088, 65119), 'hlpr.exceptions.QError', 'Error', (['"""failed to get a result"""'], {}), "('failed to get a result')\n", (65093, 65119), True, 'from hlpr.exceptions import QError as Error\n'), ((66573, 66618), 'os.path.join', 'os.path.join', (['self.out_dir', "(layname + '.sdat')"], {}), "(self.out_dir, layname + '.sdat')\n", (66585, 66618), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((67835, 67866), 'os.path.exists', 'os.path.exists', (["res_d['RESULT']"], {}), "(res_d['RESULT'])\n", (67849, 67866), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((67886, 67917), 'hlpr.exceptions.QError', 'Error', (['"""failed to get a result"""'], {}), "('failed to get a result')\n", (67891, 67917), True, 'from hlpr.exceptions import QError as Error\n'), ((73507, 73538), 'os.path.exists', 'os.path.exists', (["res_d['RESULT']"], {}), "(res_d['RESULT'])\n", (73521, 73538), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((73558, 73589), 'hlpr.exceptions.QError', 'Error', (['"""failed to get a result"""'], {}), "('failed to get a result')\n", (73563, 73589), True, 'from hlpr.exceptions import QError as Error\n'), ((76502, 76520), 'numpy.isnan', 'np.isnan', (['distance'], {}), '(distance)\n', (76510, 76520), True, 'import numpy as np\n'), ((76540, 76563), 'hlpr.exceptions.QError', 'Error', (['"""got no buffer!"""'], {}), "('got no buffer!')\n", (76545, 76563), True, 'from hlpr.exceptions import QError as Error\n'), ((90142, 90173), 'os.path.exists', 'os.path.exists', (["res_d['OUTPUT']"], {}), "(res_d['OUTPUT'])\n", (90156, 90173), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((90234, 90265), 'hlpr.exceptions.QError', 'Error', (['"""failed to get a result"""'], {}), "('failed to get a result')\n", (90239, 90265), True, 'from hlpr.exceptions import QError as Error\n'), ((95755, 95826), 'hlpr.exceptions.QError', 'Error', (['("""expected a list for fields, instead got \n %s""" % fieldn_l)'], {}), '("""expected a list for fields, instead got \n %s""" % fieldn_l)\n', (95760, 95826), True, 'from hlpr.exceptions import QError as Error\n'), ((103305, 103337), 'hlpr.exceptions.QError', 'Error', (['"""QGIS failed to initiate"""'], 
{}), "('QGIS failed to initiate')\n", (103310, 103337), True, 'from hlpr.exceptions import QError as Error\n'), ((113503, 113520), 'os.remove', 'os.remove', (['out_fp'], {}), '(out_fp)\n', (113512, 113520), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((113625, 113635), 'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (113630, 113635), True, 'from hlpr.exceptions import QError as Error\n'), ((116409, 116454), 'hlpr.exceptions.QError', 'Error', (['"""cant allow none and expect all reals"""'], {}), "('cant allow none and expect all reals')\n", (116414, 116454), True, 'from hlpr.exceptions import QError as Error\n'), ((116729, 116792), 'hlpr.exceptions.QError', 'Error', (['("requested reindexer \'%s\' is not a field name" % reindex)'], {}), '("requested reindexer \'%s\' is not a field name" % reindex)\n', (116734, 116792), True, 'from hlpr.exceptions import QError as Error\n'), ((118836, 118879), 'hlpr.exceptions.QError', 'Error', (['"""no filter and data length mismatch"""'], {}), "('no filter and data length mismatch')\n", (118841, 118879), True, 'from hlpr.exceptions import QError as Error\n'), ((119195, 119218), 'hlpr.exceptions.QError', 'Error', (['"""no data found!"""'], {}), "('no data found!')\n", (119200, 119218), True, 'from hlpr.exceptions import QError as Error\n'), ((119864, 119930), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['fid_attvs'], {'orient': '"""index"""', 'columns': 'all_fnl'}), "(fid_attvs, orient='index', columns=all_fnl)\n", (119886, 119930), True, 'import pandas as pd\n'), ((120829, 120860), 'hlpr.exceptions.QError', 'Error', (['"""unrecognized fmt kwarg"""'], {}), "('unrecognized fmt kwarg')\n", (120834, 120860), True, 'from hlpr.exceptions import QError as Error\n'), ((122471, 122512), 'hlpr.exceptions.QError', 'Error', (['"""cant expect_all_reals AND dropna"""'], {}), "('cant expect_all_reals AND dropna')\n", (122476, 122512), True, 'from hlpr.exceptions import QError as Error\n'), ((122583, 122628), 'hlpr.exceptions.QError', 'Error', (['"""cant allow none and expect all reals"""'], {}), "('cant allow none and expect all reals')\n", (122588, 122628), True, 'from hlpr.exceptions import QError as Error\n'), ((123013, 123044), 'hlpr.exceptions.QError', 'Error', (['"""no field name provided"""'], {}), "('no field name provided')\n", (123018, 123044), True, 'from hlpr.exceptions import QError as Error\n'), ((126131, 126142), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (126139, 126142), True, 'import numpy as np\n'), ((126558, 126598), 'hlpr.exceptions.QError', 'Error', (['"""allow_none=FALSE and no results"""'], {}), "('allow_none=FALSE and no results')\n", (126563, 126598), True, 'from hlpr.exceptions import QError as Error\n'), ((129737, 129747), 'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (129742, 129747), True, 'from hlpr.exceptions import QError as Error\n'), ((131992, 132023), 'hlpr.exceptions.QError', 'Error', (['"""got non unique columns"""'], {}), "('got non unique columns')\n", (131997, 132023), True, 'from hlpr.exceptions import QError as Error\n'), ((133969, 134010), 'hlpr.exceptions.QError', 'Error', (['"""constructed layer has NoGeometry"""'], {}), "('constructed layer has NoGeometry')\n", (133974, 134010), True, 'from hlpr.exceptions import QError as Error\n'), ((135699, 135748), 'hlpr.exceptions.QError', 'Error', (['"""passed geo_d and data indexes dont match"""'], {}), "('passed geo_d and data indexes dont match')\n", (135704, 135748), True, 'from hlpr.exceptions import 
QError as Error\n'), ((136337, 136368), 'hlpr.exceptions.QError', 'Error', (['"""expected integer index"""'], {}), "('expected integer index')\n", (136342, 136368), True, 'from hlpr.exceptions import QError as Error\n'), ((139700, 139746), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['qvlayd'], {'orient': '"""index"""'}), "(qvlayd, orient='index')\n", (139722, 139746), True, 'import pandas as pd\n'), ((142812, 142828), 'pandas.isnull', 'pd.isnull', (['value'], {}), '(value)\n', (142821, 142828), True, 'import pandas as pd\n'), ((143742, 143773), 'numpy.array_equal', 'np.array_equal', (['nfid_ar', 'fid_ar'], {}), '(nfid_ar, fid_ar)\n', (143756, 143773), True, 'import numpy as np\n'), ((145411, 145427), 'pandas.isnull', 'pd.isnull', (['value'], {}), '(value)\n', (145420, 145427), True, 'import pandas as pd\n'), ((148300, 148328), 'hlpr.exceptions.QError', 'Error', (['"""field name too long"""'], {}), "('field name too long')\n", (148305, 148328), True, 'from hlpr.exceptions import QError as Error\n'), ((151481, 151516), 'hlpr.exceptions.QError', 'Error', (['"""unrecognized id1_objs type"""'], {}), "('unrecognized id1_objs type')\n", (151486, 151516), True, 'from hlpr.exceptions import QError as Error\n'), ((153854, 153884), 'hlpr.exceptions.QError', 'Error', (['"""unrecognized id1_type"""'], {}), "('unrecognized id1_type')\n", (153859, 153884), True, 'from hlpr.exceptions import QError as Error\n'), ((154560, 154595), 'hlpr.exceptions.QError', 'Error', (['"""unrecognized id1_objs type"""'], {}), "('unrecognized id1_objs type')\n", (154565, 154595), True, 'from hlpr.exceptions import QError as Error\n'), ((156488, 156534), 'hlpr.exceptions.QError', 'Error', (['("""failed oto convert w/ \n %s""" % e)'], {}), '("""failed oto convert w/ \n %s""" % e)\n', (156493, 156534), True, 'from hlpr.exceptions import QError as Error\n'), ((8870, 8902), 'hlpr.exceptions.QError', 'Error', (['"""QGIS failed to initiate"""'], {}), "('QGIS failed to initiate')\n", (8875, 8902), True, 'from hlpr.exceptions import QError as Error\n'), ((22768, 22778), 'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (22773, 22778), True, 'from hlpr.exceptions import QError as Error\n'), ((28577, 28594), 'os.remove', 'os.remove', (['out_fp'], {}), '(out_fp)\n', (28586, 28594), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((28707, 28717), 'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (28712, 28717), True, 'from hlpr.exceptions import QError as Error\n'), ((34513, 34529), 'pandas.isnull', 'pd.isnull', (['value'], {}), '(value)\n', (34522, 34529), True, 'import pandas as pd\n'), ((36710, 36751), 'hlpr.exceptions.QError', 'Error', (['"""constructed layer has NoGeometry"""'], {}), "('constructed layer has NoGeometry')\n", (36715, 36751), True, 'from hlpr.exceptions import QError as Error\n'), ((45464, 45488), 'hlpr.exceptions.QError', 'Error', (['"""not implmeneted"""'], {}), "('not implmeneted')\n", (45469, 45488), True, 'from hlpr.exceptions import QError as Error\n'), ((48631, 48653), 'hlpr.exceptions.QError', 'Error', (['"""got no joins!"""'], {}), "('got no joins!')\n", (48636, 48653), True, 'from hlpr.exceptions import QError as Error\n'), ((63528, 63542), 'os.remove', 'os.remove', (['ofp'], {}), '(ofp)\n', (63537, 63542), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((63583, 63593), 'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (63588, 63593), True, 'from hlpr.exceptions import QError as Error\n'), ((64048, 64058), 
'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (64053, 64058), True, 'from hlpr.exceptions import QError as Error\n'), ((64102, 64122), 'os.path.dirname', 'os.path.dirname', (['ofp'], {}), '(ofp)\n', (64117, 64122), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((64149, 64169), 'os.path.dirname', 'os.path.dirname', (['ofp'], {}), '(ofp)\n', (64164, 64169), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((66660, 66680), 'os.path.dirname', 'os.path.dirname', (['ofp'], {}), '(ofp)\n', (66675, 66680), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((66773, 66793), 'os.path.dirname', 'os.path.dirname', (['ofp'], {}), '(ofp)\n', (66788, 66793), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((66974, 66988), 'os.remove', 'os.remove', (['ofp'], {}), '(ofp)\n', (66983, 66988), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((67029, 67039), 'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (67034, 67039), True, 'from hlpr.exceptions import QError as Error\n'), ((80247, 80257), 'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (80252, 80257), True, 'from hlpr.exceptions import QError as Error\n'), ((81860, 81870), 'hlpr.exceptions.QError', 'Error', (['msg'], {}), '(msg)\n', (81865, 81870), True, 'from hlpr.exceptions import QError as Error\n'), ((95113, 95158), 'hlpr.exceptions.QError', 'Error', (['("unrecognized fieldn_l\'%s\'" % fieldn_l)'], {}), '("unrecognized fieldn_l\'%s\'" % fieldn_l)\n', (95118, 95158), True, 'from hlpr.exceptions import QError as Error\n'), ((98522, 98547), 'hlpr.exceptions.QError', 'Error', (['"""nothing selected"""'], {}), "('nothing selected')\n", (98527, 98547), True, 'from hlpr.exceptions import QError as Error\n'), ((110991, 111008), 'os.path.split', 'os.path.split', (['fp'], {}), '(fp)\n', (111004, 111008), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((127276, 127301), 'pandas.Series', 'pd.Series', (['d'], {'name': 'fieldn'}), '(d, name=fieldn)\n', (127285, 127301), True, 'import pandas as pd\n'), ((135491, 135515), 'hlpr.exceptions.QError', 'Error', (['"""unexpected type"""'], {}), "('unexpected type')\n", (135496, 135515), True, 'from hlpr.exceptions import QError as Error\n'), ((138059, 138099), 'hlpr.exceptions.QError', 'Error', (['"""failed to create all the fields"""'], {}), "('failed to create all the fields')\n", (138064, 138099), True, 'from hlpr.exceptions import QError as Error\n'), ((139843, 139906), 'hlpr.exceptions.QError', 'Error', (["('got some nulls on the geometry fields: %s' % geo_fn_tup)"], {}), "('got some nulls on the geometry fields: %s' % geo_fn_tup)\n", (139848, 139906), True, 'from hlpr.exceptions import QError as Error\n'), ((139977, 139994), 'copy.copy', 'copy.copy', (['qvlayd'], {}), '(qvlayd)\n', (139986, 139994), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((140651, 140692), 'hlpr.exceptions.QError', 'Error', (["('got invalid geometry on %i' % fid)"], {}), "('got invalid geometry on %i' % fid)\n", (140656, 140692), True, 'from hlpr.exceptions import QError as Error\n'), ((143263, 143294), 'hlpr.exceptions.QError', 'Error', (['"""failed to setAttribute"""'], {}), "('failed to setAttribute')\n", (143268, 143294), True, 'from hlpr.exceptions import QError as Error\n'), ((143889, 143910), 'hlpr.exceptions.QError', 'Error', (['"""fid mismatch"""'], {}), "('fid mismatch')\n", (143894, 143910), True, 'from 
hlpr.exceptions import QError as Error\n'), ((145550, 145572), 'inspect.isclass', 'inspect.isclass', (['value'], {}), '(value)\n', (145565, 145572), False, 'import inspect\n'), ((146312, 146336), 'hlpr.exceptions.QError', 'Error', (['"""length mismatch"""'], {}), "('length mismatch')\n", (146317, 146336), True, 'from hlpr.exceptions import QError as Error\n'), ((152063, 152088), 'hlpr.exceptions.QError', 'Error', (['""" not implemented"""'], {}), "(' not implemented')\n", (152068, 152088), True, 'from hlpr.exceptions import QError as Error\n'), ((152646, 152676), 'hlpr.exceptions.QError', 'Error', (['"""unrecognized id1_type"""'], {}), "('unrecognized id1_type')\n", (152651, 152676), True, 'from hlpr.exceptions import QError as Error\n'), ((153309, 153359), 'hlpr.exceptions.QError', 'Error', (['("got non unique \'%s\' on fid_fval_d" % dname)'], {}), '("got non unique \'%s\' on fid_fval_d" % dname)\n', (153314, 153359), True, 'from hlpr.exceptions import QError as Error\n'), ((15190, 15207), 'os.path.split', 'os.path.split', (['fp'], {}), '(fp)\n', (15203, 15207), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((18410, 18427), 'os.path.split', 'os.path.split', (['fp'], {}), '(fp)\n', (18423, 18427), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((35016, 35047), 'hlpr.exceptions.QError', 'Error', (['"""failed to setAttribute"""'], {}), "('failed to setAttribute')\n", (35021, 35047), True, 'from hlpr.exceptions import QError as Error\n'), ((48260, 48296), 'hlpr.exceptions.QError', 'Error', (['"""in and out fcnts dont match"""'], {}), "('in and out fcnts dont match')\n", (48265, 48296), True, 'from hlpr.exceptions import QError as Error\n'), ((48763, 48798), 'hlpr.exceptions.QError', 'Error', (['"""no joins but got some hits"""'], {}), "('no joins but got some hits')\n", (48768, 48798), True, 'from hlpr.exceptions import QError as Error\n'), ((66727, 66747), 'os.path.dirname', 'os.path.dirname', (['ofp'], {}), '(ofp)\n', (66742, 66747), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((93665, 93689), 'pandas.Series', 'pd.Series', (['d'], {'name': 'ncoln'}), '(d, name=ncoln)\n', (93674, 93689), True, 'import pandas as pd\n'), ((119348, 119362), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (119360, 119362), True, 'import pandas as pd\n'), ((119403, 119431), 'hlpr.exceptions.QError', 'Error', (['"""unexpected fmt type"""'], {}), "('unexpected fmt type')\n", (119408, 119431), True, 'from hlpr.exceptions import QError as Error\n'), ((127376, 127403), 'hlpr.exceptions.QError', 'Error', (['"""expected singleton"""'], {}), "('expected singleton')\n", (127381, 127403), True, 'from hlpr.exceptions import QError as Error\n'), ((127486, 127511), 'pandas.Series', 'pd.Series', (['d'], {'name': 'fieldn'}), '(d, name=fieldn)\n', (127495, 127511), True, 'import pandas as pd\n'), ((14408, 14428), 'inspect.isbuiltin', 'inspect.isbuiltin', (['v'], {}), '(v)\n', (14425, 14428), False, 'import inspect\n'), ((99505, 99542), 'hlpr.exceptions.QError', 'Error', (['"""unexpected result_type kwarg"""'], {}), "('unexpected result_type kwarg')\n", (99510, 99542), True, 'from hlpr.exceptions import QError as Error\n'), ((120407, 120454), 'pandas.Series', 'pd.Series', (['df.index'], {'index': 'df.index', 'name': '"""fid"""'}), "(df.index, index=df.index, name='fid')\n", (120416, 120454), True, 'import pandas as pd\n'), ((63142, 63160), 'os.path.split', 'os.path.split', (['ofp'], {}), '(ofp)\n', (63155, 63160), False, 'import os, 
configparser, logging, inspect, copy, datetime, re\n'), ((66418, 66436), 'os.path.split', 'os.path.split', (['ofp'], {}), '(ofp)\n', (66431, 66436), False, 'import os, configparser, logging, inspect, copy, datetime, re\n'), ((153262, 153273), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (153270, 153273), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# 6a-render-model3.py - investigate delauney triangulation for
# individual image surface mesh generation.
# for all the images in the fitted group, generate a 2d polygon
# surface fit. Then project the individual images onto this surface
# and generate an AC3D model.
#
# Note: insufficient image overlap (or long linear image match chains)
# is not good.  Ideally we would have a nice mesh of match pairs for
# best results.
#
# this script can also project onto the SRTM surface, or a flat ground
# elevation plane.
import argparse
import cv2
import pickle
import math
import numpy as np
import os.path
import scipy.interpolate
import scipy.spatial
from props import getNode
from lib import groups
from lib import panda3d
from lib import project
from lib import srtm
from lib import transformations
mesh_steps = 8 # 1 = corners only
r2d = 180 / math.pi
tolerance = 0.5
parser = argparse.ArgumentParser(description='Set the initial camera poses.')
parser.add_argument('project', help='project directory')
parser.add_argument('--group', type=int, default=0, help='group index')
parser.add_argument('--texture-resolution', type=int, default=512, help='texture resolution (should be 2**n, so numbers like 256, 512, 1024, etc.')
parser.add_argument('--srtm', action='store_true', help='use srtm elevation')
parser.add_argument('--ground', type=float, help='force ground elevation in meters')
parser.add_argument('--direct', action='store_true', help='use direct pose')
args = parser.parse_args()
proj = project.ProjectMgr(args.project)
proj.load_images_info()
# lookup ned reference
ref_node = getNode("/config/ned_reference", True)
ref = [ ref_node.getFloat('lat_deg'),
ref_node.getFloat('lon_deg'),
ref_node.getFloat('alt_m') ]
# setup SRTM ground interpolator
srtm.initialize( ref, 6000, 6000, 30 )
width, height = proj.cam.get_image_params()
print("Loading optimized match points ...")
matches = pickle.load( open( os.path.join(proj.analysis_dir, "matches_grouped"), "rb" ) )
# load the group connections within the image set
group_list = groups.load(proj.analysis_dir)
# initialize temporary structures for vanity stats
for image in proj.image_list:
image.sum_values = 0.0
image.sum_count = 0.0
image.max_z = -9999.0
image.min_z = 9999.0
image.pool_xy = []
image.pool_z = []
image.pool_uv = []
image.fit_xy = []
image.fit_z = []
image.fit_uv = []
image.fit_edge = []
# sort through points to build a global list of feature coordinates
# and a per-image list of feature coordinates
print('Reading feature locations from optimized match points ...')
raw_points = []
raw_values = []
for match in matches:
if match[1] == args.group and len(match[2:]) > 2: # used by current group
ned = match[0]
raw_points.append( [ned[1], ned[0]] )
raw_values.append( ned[2] )
for m in match[2:]:
if proj.image_list[m[0]].name in group_list[args.group]:
image = proj.image_list[ m[0] ]
image.pool_xy.append( [ned[1], ned[0]] )
image.pool_z.append( -ned[2] )
image.pool_uv.append( m[1] )
z = -ned[2]
image.sum_values += z
image.sum_count += 1
if z < image.min_z:
image.min_z = z
#print(min_z, match)
if z > image.max_z:
image.max_z = z
#print(max_z, match)
K = proj.cam.get_K(optimized=True)
dist_coeffs = np.array(proj.cam.get_dist_coeffs(optimized=True))
def undistort(uv_orig):
# convert the point into the proper format for opencv
uv_raw = np.zeros((1,1,2), dtype=np.float32)
uv_raw[0][0] = (uv_orig[0], uv_orig[1])
# do the actual undistort
uv_new = cv2.undistortPoints(uv_raw, K, dist_coeffs, P=K)
# print(uv_orig, type(uv_new), uv_new)
return uv_new[0][0]
# cull points from the per-image pool that project outside the grid boundaries
for image in proj.image_list:
    size = len(image.pool_uv)
    for i in reversed(range(size)): # iterate in reverse order so deletion is safe
        uv_new = undistort(image.pool_uv[i])
        if uv_new[0] < 0 or uv_new[0] >= width or uv_new[1] < 0 or uv_new[1] >= height:
            print("out of range")
            # actually cull the out-of-range point from the pool
            del image.pool_xy[i]
            del image.pool_z[i]
            del image.pool_uv[i]
print('Generating Delaunay mesh and interpolator ...')
print(len(raw_points))
global_tri_list = scipy.spatial.Delaunay(np.array(raw_points))
interp = scipy.interpolate.LinearNDInterpolator(global_tri_list, raw_values)
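# Added note: LinearNDInterpolator returns NaN for query points that fall
# outside the convex hull of raw_points, which is why every lookup below is
# guarded with np.isnan().  A query (coordinate names are hypothetical) looks
# like:
#   z = interp([[east, north]])[0]   # NaN if (east, north) is off the mesh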
def intersect2d(ned, v, avg_ground):
p = ned[:] # copy
# sanity check (always assume camera pose is above ground!)
if v[2] <= 0.0:
return p
eps = 0.01
count = 0
#print("start:", p)
#print("vec:", v)
#print("ned:", ned)
tmp = interp([p[1], p[0]])[0]
if not np.isnan(tmp):
surface = tmp
else:
print("Notice: starting vector intersect with avg ground elev:", avg_ground)
surface = avg_ground
error = abs(p[2] - surface)
#print("p=%s surface=%s error=%s" % (p, surface, error))
while error > eps and count < 25:
d_proj = -(ned[2] - surface)
factor = d_proj / v[2]
n_proj = v[0] * factor
e_proj = v[1] * factor
#print(" proj = %s %s" % (n_proj, e_proj))
p = [ ned[0] + n_proj, ned[1] + e_proj, ned[2] + d_proj ]
#print(" new p:", p)
tmp = interp([p[1], p[0]])[0]
if not np.isnan(tmp):
surface = tmp
error = abs(p[2] - surface)
#print(" p=%s surface=%.2f error = %.3f" % (p, surface, error))
count += 1
#print("surface:", surface)
#if np.isnan(surface):
# #print(" returning nans")
# return [np.nan, np.nan, np.nan]
dy = ned[0] - p[0]
dx = ned[1] - p[1]
dz = ned[2] - p[2]
dist = math.sqrt(dx*dx+dy*dy)
angle = math.atan2(-dz, dist) * r2d # relative to horizon
if angle < 30:
print(" returning high angle nans:", angle)
return [np.nan, np.nan, np.nan]
else:
return p
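# Added note: intersect2d() walks the camera ray by repeatedly dropping the
# current guess onto the interpolated surface and re-projecting along the view
# vector until the height error is below eps (or 25 iterations).  Rays that
# meet the surface at less than 30 degrees above the horizon are rejected as
# NaNs, presumably because grazing intersections are unreliable.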
def intersect_vectors(ned, v_list, avg_ground):
pt_list = []
for v in v_list:
p = intersect2d(ned, v.flatten(), avg_ground)
pt_list.append(p)
return pt_list
for image in proj.image_list:
if image.sum_count > 0:
image.z_avg = image.sum_values / float(image.sum_count)
# print(image.name, 'avg elev:', image.z_avg)
else:
image.z_avg = 0
# compute the uv grid for each image and project each point out into
# ned space, then intersect each vector with the srtm / ground /
# Delaunay surface.
#for group in group_list:
if True:
group = group_list[args.group]
#if len(group) < 3:
# continue
for name in group:
image = proj.findImageByName(name)
print(image.name, image.z_avg)
# scale the K matrix if we have scaled the images
K = proj.cam.get_K(optimized=True)
IK = np.linalg.inv(K)
grid_list = []
u_list = np.linspace(0, width, mesh_steps + 1)
v_list = np.linspace(0, height, mesh_steps + 1)
# horizontal edges
for u in u_list:
grid_list.append( [u, 0] )
grid_list.append( [u, height] )
# vertical edges (minus corners)
for v in v_list[1:-1]:
grid_list.append( [0, v] )
grid_list.append( [width, v] )
#print('grid_list:', grid_list)
distorted_uv = proj.redistort(grid_list, optimized=True)
distorted_uv = grid_list
if args.direct:
proj_list = project.projectVectors( IK, image.get_body2ned(),
image.get_cam2body(),
grid_list )
else:
#print(image.get_body2ned(opt=True))
proj_list = project.projectVectors( IK,
image.get_body2ned(opt=True),
image.get_cam2body(),
grid_list )
#print 'proj_list:', proj_list
if args.direct:
ned, ypr, quat = image.get_camera_pose()
else:
ned, ypr, quat = image.get_camera_pose(opt=True)
#print('cam orig:', image.camera_pose['ned'], 'optimized:', ned)
if args.ground:
pts_ned = project.intersectVectorsWithGroundPlane(ned,
args.ground,
proj_list)
elif args.srtm:
pts_ned = srtm.interpolate_vectors(ned, proj_list)
else:
# intersect with our polygon surface approximation
pts_ned = intersect_vectors(ned, proj_list, -image.z_avg)
#print(image.name, "pts_3d (ned):\n", pts_ned)
# convert ned to xyz and stash the result for each image
image.grid_list = []
for p in pts_ned:
image.fit_xy.append([p[1], p[0]])
image.fit_z.append(-p[2])
image.fit_edge.append(True)
image.fit_uv = distorted_uv
print('len:', len(image.fit_xy), len(image.fit_z), len(image.fit_uv))
# Triangle fit algorithm
group = group_list[args.group]
#if len(group) < 3:
# continue
for name in group:
image = proj.findImageByName(name)
print(image.name, image.z_avg)
done = False
dist_uv = []
while not done:
tri_list = scipy.spatial.Delaunay(np.array(image.fit_xy))
interp = scipy.interpolate.LinearNDInterpolator(tri_list, image.fit_z)
# find the point in the pool furthest from the triangulated surface
next_index = None
max_error = 0.0
for i, pt in enumerate(image.pool_xy):
z = interp(image.pool_xy[i])[0]
if not np.isnan(z):
error = abs(z - image.pool_z[i])
if error > max_error:
max_error = error
next_index = i
if max_error > tolerance:
print("adding index:", next_index, "error:", max_error)
image.fit_xy.append(image.pool_xy[next_index])
image.fit_z.append(image.pool_z[next_index])
image.fit_uv.append(image.pool_uv[next_index])
image.fit_edge.append(False)
del image.pool_xy[next_index]
del image.pool_z[next_index]
del image.pool_uv[next_index]
else:
print("finished")
done = True
image.fit_uv.extend(proj.undistort_uvlist(image, dist_uv))
print(name, 'len:', len(image.fit_xy), len(image.fit_z), len(image.fit_uv))
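# Added note: the loop above is a greedy mesh refinement.  Starting from the
# image-edge points, it re-triangulates the current fit, finds the pooled
# feature with the largest vertical error against that surface, promotes it
# from the pool into the fit, and repeats until every remaining error is
# within `tolerance` (0.5 here, assuming the NED heights are in meters).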
# generate the panda3d egg models
dir_node = getNode('/config/directories', True)
img_src_dir = dir_node.getString('images_source')
panda3d.generate_from_fit(proj, group_list[args.group], src_dir=img_src_dir,
analysis_dir=proj.analysis_dir,
resolution=args.texture_resolution)
|
[
"lib.project.intersectVectorsWithGroundPlane",
"cv2.undistortPoints",
"argparse.ArgumentParser",
"math.sqrt",
"math.atan2",
"lib.srtm.interpolate_vectors",
"lib.panda3d.generate_from_fit",
"lib.groups.load",
"numpy.zeros",
"numpy.isnan",
"lib.project.ProjectMgr",
"numpy.array",
"numpy.linalg.inv",
"numpy.linspace",
"props.getNode",
"lib.srtm.initialize"
] |
[((905, 973), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Set the initial camera poses."""'}), "(description='Set the initial camera poses.')\n", (928, 973), False, 'import argparse\n'), ((1527, 1559), 'lib.project.ProjectMgr', 'project.ProjectMgr', (['args.project'], {}), '(args.project)\n', (1545, 1559), False, 'from lib import project\n'), ((1619, 1657), 'props.getNode', 'getNode', (['"""/config/ned_reference"""', '(True)'], {}), "('/config/ned_reference', True)\n", (1626, 1657), False, 'from props import getNode\n'), ((1807, 1843), 'lib.srtm.initialize', 'srtm.initialize', (['ref', '(6000)', '(6000)', '(30)'], {}), '(ref, 6000, 6000, 30)\n', (1822, 1843), False, 'from lib import srtm\n'), ((2090, 2120), 'lib.groups.load', 'groups.load', (['proj.analysis_dir'], {}), '(proj.analysis_dir)\n', (2101, 2120), False, 'from lib import groups\n'), ((10779, 10815), 'props.getNode', 'getNode', (['"""/config/directories"""', '(True)'], {}), "('/config/directories', True)\n", (10786, 10815), False, 'from props import getNode\n'), ((10866, 11014), 'lib.panda3d.generate_from_fit', 'panda3d.generate_from_fit', (['proj', 'group_list[args.group]'], {'src_dir': 'img_src_dir', 'analysis_dir': 'proj.analysis_dir', 'resolution': 'args.texture_resolution'}), '(proj, group_list[args.group], src_dir=img_src_dir,\n analysis_dir=proj.analysis_dir, resolution=args.texture_resolution)\n', (10891, 11014), False, 'from lib import panda3d\n'), ((3703, 3740), 'numpy.zeros', 'np.zeros', (['(1, 1, 2)'], {'dtype': 'np.float32'}), '((1, 1, 2), dtype=np.float32)\n', (3711, 3740), True, 'import numpy as np\n'), ((3826, 3874), 'cv2.undistortPoints', 'cv2.undistortPoints', (['uv_raw', 'K', 'dist_coeffs'], {'P': 'K'}), '(uv_raw, K, dist_coeffs, P=K)\n', (3845, 3874), False, 'import cv2\n'), ((4450, 4470), 'numpy.array', 'np.array', (['raw_points'], {}), '(raw_points)\n', (4458, 4470), True, 'import numpy as np\n'), ((5861, 5889), 'math.sqrt', 'math.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (5870, 5889), False, 'import math\n'), ((4856, 4869), 'numpy.isnan', 'np.isnan', (['tmp'], {}), '(tmp)\n', (4864, 4869), True, 'import numpy as np\n'), ((5896, 5917), 'math.atan2', 'math.atan2', (['(-dz)', 'dist'], {}), '(-dz, dist)\n', (5906, 5917), False, 'import math\n'), ((6976, 6992), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (6989, 6992), True, 'import numpy as np\n'), ((7034, 7071), 'numpy.linspace', 'np.linspace', (['(0)', 'width', '(mesh_steps + 1)'], {}), '(0, width, mesh_steps + 1)\n', (7045, 7071), True, 'import numpy as np\n'), ((7089, 7127), 'numpy.linspace', 'np.linspace', (['(0)', 'height', '(mesh_steps + 1)'], {}), '(0, height, mesh_steps + 1)\n', (7100, 7127), True, 'import numpy as np\n'), ((5477, 5490), 'numpy.isnan', 'np.isnan', (['tmp'], {}), '(tmp)\n', (5485, 5490), True, 'import numpy as np\n'), ((8427, 8495), 'lib.project.intersectVectorsWithGroundPlane', 'project.intersectVectorsWithGroundPlane', (['ned', 'args.ground', 'proj_list'], {}), '(ned, args.ground, proj_list)\n', (8466, 8495), False, 'from lib import project\n'), ((9561, 9583), 'numpy.array', 'np.array', (['image.fit_xy'], {}), '(image.fit_xy)\n', (9569, 9583), True, 'import numpy as np\n'), ((8666, 8706), 'lib.srtm.interpolate_vectors', 'srtm.interpolate_vectors', (['ned', 'proj_list'], {}), '(ned, proj_list)\n', (8690, 8706), False, 'from lib import srtm\n'), ((9900, 9911), 'numpy.isnan', 'np.isnan', (['z'], {}), '(z)\n', (9908, 9911), True, 'import numpy as np\n')]
|
import numpy as np
def sherman_morrison_row(e, inv, vec):
ratio = np.einsum("ij,ij->i", vec, inv[:, :, e])
tmp = np.einsum("ek,ekj->ej", vec, inv)
invnew = (
inv
- np.einsum("ki,kj->kij", inv[:, :, e], tmp) / ratio[:, np.newaxis, np.newaxis]
)
invnew[:, :, e] = inv[:, :, e] / ratio[:, np.newaxis]
return ratio, invnew
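# Illustrative sanity check (added for exposition, not part of the original
# module): sherman_morrison_row() should reproduce a direct np.linalg.inv and
# the determinant ratio when row `e` of each matrix is replaced.  All names
# and sizes below are hypothetical example values.
def _check_sherman_morrison_row(nconf=2, n=4, e=1, seed=0):
    rng = np.random.RandomState(seed)
    mo = rng.rand(nconf, n, n)          # batch of square MO matrices
    vec = rng.rand(nconf, n)            # new row for electron e
    inv = np.linalg.inv(mo)
    ratio, invnew = sherman_morrison_row(e, inv, vec)
    mo_new = mo.copy()
    mo_new[:, e, :] = vec               # replace row e in every walker
    assert np.allclose(invnew, np.linalg.inv(mo_new))
    assert np.allclose(ratio, np.linalg.det(mo_new) / np.linalg.det(mo))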
class PySCFSlaterUHF:
"""A wave function object has a state defined by a reference configuration of electrons.
The functions recompute() and updateinternals() change the state of the object, and
the rest compute and return values from that state. """
def __init__(self, mol, mf):
self.occ = np.asarray(mf.mo_occ > 0.9)
self.parameters = {}
# Determine if we're initializing from an RHF or UHF object...
if len(mf.mo_occ.shape) == 2:
self.parameters["mo_coeff_alpha"] = mf.mo_coeff[0][:, self.occ[0]]
self.parameters["mo_coeff_beta"] = mf.mo_coeff[1][:, self.occ[1]]
else:
self.parameters["mo_coeff_alpha"] = mf.mo_coeff[
:, np.asarray(mf.mo_occ > 0.9)
]
self.parameters["mo_coeff_beta"] = mf.mo_coeff[
:, np.asarray(mf.mo_occ > 1.1)
]
self._coefflookup = ("mo_coeff_alpha", "mo_coeff_beta")
self._mol = mol
self._nelec = tuple(mol.nelec)
def recompute(self, configs):
"""This computes the value from scratch. Returns the logarithm of the wave function as
(phase,logdet). If the wf is real, phase will be +/- 1."""
mycoords = configs.reshape(
(configs.shape[0] * configs.shape[1], configs.shape[2])
)
ao = self._mol.eval_gto("GTOval_sph", mycoords).reshape(
(configs.shape[0], configs.shape[1], -1)
)
self._aovals = ao
self._dets = []
self._inverse = []
for s in [0, 1]:
if s == 0:
mo = ao[:, 0 : self._nelec[0], :].dot(
self.parameters[self._coefflookup[s]]
)
else:
mo = ao[:, self._nelec[0] : self._nelec[0] + self._nelec[1], :].dot(
self.parameters[self._coefflookup[s]]
)
# This could be done faster; we are doubling our effort here.
self._dets.append(np.linalg.slogdet(mo))
self._inverse.append(np.linalg.inv(mo))
return self.value()
def updateinternals(self, e, epos, mask=None):
"""Update any internals given that electron e moved to epos. mask is a Boolean array
which allows us to update only certain walkers"""
s = int(e >= self._nelec[0])
if mask is None:
mask = [True] * epos.shape[0]
eeff = e - s * self._nelec[0]
ao = self._mol.eval_gto("GTOval_sph", epos)
mo = ao.dot(self.parameters[self._coefflookup[s]])
ratio, self._inverse[s][mask, :, :] = sherman_morrison_row(
eeff, self._inverse[s][mask, :, :], mo[mask, :]
)
self._updateval(ratio, s, mask)
### not state-changing functions
def value(self):
"""Return logarithm of the wave function as noted in recompute()"""
return self._dets[0][0] * self._dets[1][0], self._dets[0][1] + self._dets[1][1]
def _updateval(self, ratio, s, mask):
self._dets[s][0][mask] *= np.sign(ratio) # will not work for complex!
self._dets[s][1][mask] += np.log(np.abs(ratio))
def _testrow(self, e, vec):
"""vec is a nconfig,nmo vector which replaces row e"""
s = int(e >= self._nelec[0])
ratio = np.einsum(
"ij,ij->i", vec, self._inverse[s][:, :, e - s * self._nelec[0]]
)
return ratio
def _testcol(self, i, s, vec):
"""vec is a nconfig,nmo vector which replaces column i"""
ratio = np.einsum("ij,ij->i", vec, self._inverse[s][:, i, :])
return ratio
def gradient(self, e, epos):
""" Compute the gradient of the log wave function
Note that this can be called even if the internals have not been updated for electron e,
if epos differs from the current position of electron e."""
s = int(e >= self._nelec[0])
aograd = self._mol.eval_gto("GTOval_ip_sph", epos)
mograd = aograd.dot(self.parameters[self._coefflookup[s]])
ratios = [self._testrow(e, x) for x in mograd]
return np.asarray(ratios) / self.testvalue(e, epos)[np.newaxis, :]
def laplacian(self, e, epos):
""" Compute the laplacian Psi/ Psi. """
s = int(e >= self._nelec[0])
# aograd=self._mol.eval_gto('GTOval_sph_deriv2',epos)
aolap = np.sum(self._mol.eval_gto("GTOval_sph_deriv2", epos)[[4, 7, 9]], axis=0)
molap = aolap.dot(self.parameters[self._coefflookup[s]])
ratios = self._testrow(e, molap)
return ratios / self.testvalue(e, epos)
def testvalue(self, e, epos):
""" return the ratio between the current wave function and the wave function if
electron e's position is replaced by epos"""
s = int(e >= self._nelec[0])
ao = self._mol.eval_gto("GTOval_sph", epos)
mo = ao.dot(self.parameters[self._coefflookup[s]])
return self._testrow(e, mo)
def pgradient(self):
"""Compute the parameter gradient of Psi.
Returns d_p \Psi/\Psi as a dictionary of numpy arrays,
which correspond to the parameter dictionary.
"""
d = {}
for parm in self.parameters:
s = 0
if "beta" in parm:
s = 1
# Get AOs for our spin channel only
ao = self._aovals[
:, s * self._nelec[0] : self._nelec[s] + s * self._nelec[0], :
] # (config, electron, ao)
pgrad_shape = (ao.shape[0],) + self.parameters[parm].shape
pgrad = np.zeros(pgrad_shape)
# Compute derivatives w.r.t MO coefficients
for i in range(self._nelec[s]): # MO loop
for j in range(ao.shape[2]): # AO loop
vec = ao[:, :, j]
pgrad[:, j, i] = self._testcol(i, s, vec) # nconfig
d[parm] = np.array(pgrad) # Returns config, coeff
return d
def test():
from pyscf import lib, gto, scf
import pyqmc.testwf as testwf
mol = gto.M(atom="Li 0. 0. 0.; H 0. 0. 1.5", basis="cc-pvtz", unit="bohr", spin=0)
for mf in [scf.RHF(mol).run(), scf.ROHF(mol).run(), scf.UHF(mol).run()]:
print("")
nconf = 10
nelec = np.sum(mol.nelec)
slater = PySCFSlaterUHF(mol, mf)
configs = np.random.randn(nconf, nelec, 3)
print("testing internals:", testwf.test_updateinternals(slater, configs))
for delta in [1e-3, 1e-4, 1e-5, 1e-6, 1e-7]:
print(
"delta",
delta,
"Testing gradient",
testwf.test_wf_gradient(slater, configs, delta=delta),
)
print(
"delta",
delta,
"Testing laplacian",
testwf.test_wf_laplacian(slater, configs, delta=delta),
)
print(
"delta",
delta,
"Testing pgradient",
testwf.test_wf_pgradient(slater, configs, delta=delta),
)
if __name__ == "__main__":
test()
|
[
"numpy.sum",
"numpy.abs",
"numpy.random.randn",
"numpy.asarray",
"numpy.einsum",
"numpy.zeros",
"pyqmc.testwf.test_updateinternals",
"pyqmc.testwf.test_wf_gradient",
"pyscf.gto.M",
"pyscf.scf.RHF",
"numpy.array",
"numpy.linalg.slogdet",
"numpy.linalg.inv",
"numpy.sign",
"pyscf.scf.ROHF",
"pyscf.scf.UHF",
"pyqmc.testwf.test_wf_pgradient",
"pyqmc.testwf.test_wf_laplacian"
] |
[((76, 116), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'vec', 'inv[:, :, e]'], {}), "('ij,ij->i', vec, inv[:, :, e])\n", (85, 116), True, 'import numpy as np\n'), ((128, 161), 'numpy.einsum', 'np.einsum', (['"""ek,ekj->ej"""', 'vec', 'inv'], {}), "('ek,ekj->ej', vec, inv)\n", (137, 161), True, 'import numpy as np\n'), ((6563, 6639), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""Li 0. 0. 0.; H 0. 0. 1.5"""', 'basis': '"""cc-pvtz"""', 'unit': '"""bohr"""', 'spin': '(0)'}), "(atom='Li 0. 0. 0.; H 0. 0. 1.5', basis='cc-pvtz', unit='bohr', spin=0)\n", (6568, 6639), False, 'from pyscf import lib, gto, scf\n'), ((700, 727), 'numpy.asarray', 'np.asarray', (['(mf.mo_occ > 0.9)'], {}), '(mf.mo_occ > 0.9)\n', (710, 727), True, 'import numpy as np\n'), ((3496, 3510), 'numpy.sign', 'np.sign', (['ratio'], {}), '(ratio)\n', (3503, 3510), True, 'import numpy as np\n'), ((3752, 3826), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'vec', 'self._inverse[s][:, :, e - s * self._nelec[0]]'], {}), "('ij,ij->i', vec, self._inverse[s][:, :, e - s * self._nelec[0]])\n", (3761, 3826), True, 'import numpy as np\n'), ((3995, 4048), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'vec', 'self._inverse[s][:, i, :]'], {}), "('ij,ij->i', vec, self._inverse[s][:, i, :])\n", (4004, 4048), True, 'import numpy as np\n'), ((6774, 6791), 'numpy.sum', 'np.sum', (['mol.nelec'], {}), '(mol.nelec)\n', (6780, 6791), True, 'import numpy as np\n'), ((6853, 6885), 'numpy.random.randn', 'np.random.randn', (['nconf', 'nelec', '(3)'], {}), '(nconf, nelec, 3)\n', (6868, 6885), True, 'import numpy as np\n'), ((202, 244), 'numpy.einsum', 'np.einsum', (['"""ki,kj->kij"""', 'inv[:, :, e]', 'tmp'], {}), "('ki,kj->kij', inv[:, :, e], tmp)\n", (211, 244), True, 'import numpy as np\n'), ((3583, 3596), 'numpy.abs', 'np.abs', (['ratio'], {}), '(ratio)\n', (3589, 3596), True, 'import numpy as np\n'), ((4572, 4590), 'numpy.asarray', 'np.asarray', (['ratios'], {}), '(ratios)\n', (4582, 4590), True, 'import numpy as np\n'), ((6074, 6095), 'numpy.zeros', 'np.zeros', (['pgrad_shape'], {}), '(pgrad_shape)\n', (6082, 6095), True, 'import numpy as np\n'), ((6402, 6417), 'numpy.array', 'np.array', (['pgrad'], {}), '(pgrad)\n', (6410, 6417), True, 'import numpy as np\n'), ((6923, 6967), 'pyqmc.testwf.test_updateinternals', 'testwf.test_updateinternals', (['slater', 'configs'], {}), '(slater, configs)\n', (6950, 6967), True, 'import pyqmc.testwf as testwf\n'), ((2430, 2451), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['mo'], {}), '(mo)\n', (2447, 2451), True, 'import numpy as np\n'), ((2487, 2504), 'numpy.linalg.inv', 'np.linalg.inv', (['mo'], {}), '(mo)\n', (2500, 2504), True, 'import numpy as np\n'), ((6656, 6668), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (6663, 6668), False, 'from pyscf import lib, gto, scf\n'), ((6676, 6689), 'pyscf.scf.ROHF', 'scf.ROHF', (['mol'], {}), '(mol)\n', (6684, 6689), False, 'from pyscf import lib, gto, scf\n'), ((6697, 6709), 'pyscf.scf.UHF', 'scf.UHF', (['mol'], {}), '(mol)\n', (6704, 6709), False, 'from pyscf import lib, gto, scf\n'), ((7147, 7200), 'pyqmc.testwf.test_wf_gradient', 'testwf.test_wf_gradient', (['slater', 'configs'], {'delta': 'delta'}), '(slater, configs, delta=delta)\n', (7170, 7200), True, 'import pyqmc.testwf as testwf\n'), ((7342, 7396), 'pyqmc.testwf.test_wf_laplacian', 'testwf.test_wf_laplacian', (['slater', 'configs'], {'delta': 'delta'}), '(slater, configs, delta=delta)\n', (7366, 7396), True, 'import pyqmc.testwf as testwf\n'), ((7538, 7592), 'pyqmc.testwf.test_wf_pgradient', 
'testwf.test_wf_pgradient', (['slater', 'configs'], {'delta': 'delta'}), '(slater, configs, delta=delta)\n', (7562, 7592), True, 'import pyqmc.testwf as testwf\n'), ((1127, 1154), 'numpy.asarray', 'np.asarray', (['(mf.mo_occ > 0.9)'], {}), '(mf.mo_occ > 0.9)\n', (1137, 1154), True, 'import numpy as np\n'), ((1251, 1278), 'numpy.asarray', 'np.asarray', (['(mf.mo_occ > 1.1)'], {}), '(mf.mo_occ > 1.1)\n', (1261, 1278), True, 'import numpy as np\n')]
|
import os
import numpy as np
from pmesh.pm import ParticleMesh
from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog,\
BigFileMesh, FFTPower
from nbodykit import setup_logging
from mpi4py import MPI
import HImodels
# enable logging, we have some clue what's going on.
setup_logging('info')
#
#Global, fixed things
scratchyf = '/global/cscratch1/sd/yfeng1/m3127/'
scratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/'
project = '/project/projectdirs/m3127/H1mass/'
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]
#Parameters, box size, number of mesh cells, simulation, ...
bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres'
#bs,nc,ncsim, sim, prefic = 1024, 1024, 10240, 'highres/%d-9100-fixed'%ncsim, 'highres'
# It's useful to have my rank for printing...
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
comm = pm.comm
#Which model to use
HImodel = HImodels.ModelA
ofolder = '../data/outputs/'
def calc_bias(aa,h1mesh,suff):
'''Compute the bias(es) for the HI'''
if rank==0:
print("Processing a={:.4f}...".format(aa))
print('Reading DM mesh...')
if ncsim == 10240:
dm = BigFileMesh(scratchyf+sim+'/fastpm_%0.4f/'%aa+\
'/1-mesh/N%04d'%nc,'').paint()
else:
dm = BigFileMesh(project+sim+'/fastpm_%0.4f/'%aa+\
'/dmesh_N%04d/1/'%nc,'').paint()
dm /= dm.cmean()
if rank==0: print('Computing DM P(k)...')
pkmm = FFTPower(dm,mode='1d').power
k,pkmm= pkmm['k'],pkmm['power'] # Ignore shotnoise.
if rank==0: print('Done.')
#
pkh1h1 = FFTPower(h1mesh,mode='1d').power
pkh1h1 = pkh1h1['power']-pkh1h1.attrs['shotnoise']
pkh1mm = FFTPower(h1mesh,second=dm,mode='1d').power['power']
if rank==0: print('Done.')
# Compute the biases.
b1x = np.abs(pkh1mm/(pkmm+1e-10))
b1a = np.abs(pkh1h1/(pkmm+1e-10))**0.5
if rank==0: print("Finishing processing a={:.4f}.".format(aa))
return(k,b1x,b1a,np.abs(pkmm))
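# Added note: the two estimators above are the standard cross- and auto-bias,
#   b1x(k) = |P_HI,m(k)| / P_mm(k)          from the HI-matter cross power
#   b1a(k) = sqrt(|P_HI,HI(k)| / P_mm(k))   from the shot-noise-subtracted HI auto power
# The 1e-10 added to the denominators only guards against division by zero.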
#
if __name__=="__main__":
#satsuff='-m1_5p0min-alpha_0p8-16node'
suff='-m1_00p3mh-alpha-0p8-subvol'
outfolder = ofolder + suff[1:] + "/modelA/"
try: os.makedirs(outfolder)
except : pass
if rank==0:
print('Starting')
for aa in alist:
if rank == 0: print('\n ############## Redshift = %0.2f ############## \n'%(1/aa-1))
halocat = BigFileCatalog(scratchyf + sim+ '/fastpm_%0.4f//'%aa, dataset='LL-0.200')
mp = halocat.attrs['MassTable'][1]*1e10##
halocat['Mass'] = halocat['Length'].compute() * mp
cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff)
satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff)
rsdfac = read_conversions(scratchyf + sim+'/fastpm_%0.4f/'%aa)
#
HImodelz = HImodel(aa)
los = [0,0,1]
halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat)
halocat['RSDpos'], cencat['RSDpos'], satcat['RSDpos'] = HImodelz.assignrsd(rsdfac, halocat, cencat, satcat, los=los)
h1mesh = HImodelz.createmesh(bs, nc, halocat, cencat, satcat, mode='galaxies', position='RSDpos', weight='HImass')
kk,b1x,b1a,pkmm = calc_bias(aa,h1mesh,suff)
#
if rank==0:
fout = open(outfolder + "HI_bias_{:6.4f}.txt".format(aa),"w")
fout.write("# Mcut={:12.4e}Msun/h.\n".format(mcut))
fout.write("# {:>8s} {:>10s} {:>10s} {:>15s}\n".\
format("k","b1_x","b1_a","Pkmm"))
for i in range(1,kk.size):
fout.write("{:10.5f} {:10.5f} {:10.5f} {:15.5e}\n".\
format(kk[i],b1x[i],b1a[i],pkmm[i]))
fout.close()
#
|
[
"nbodykit.lab.BigFileCatalog",
"numpy.abs",
"nbodykit.setup_logging",
"nbodykit.lab.FFTPower",
"pmesh.pm.ParticleMesh",
"nbodykit.lab.BigFileMesh"
] |
[((308, 329), 'nbodykit.setup_logging', 'setup_logging', (['"""info"""'], {}), "('info')\n", (321, 329), False, 'from nbodykit import setup_logging\n'), ((933, 977), 'pmesh.pm.ParticleMesh', 'ParticleMesh', ([], {'BoxSize': 'bs', 'Nmesh': '[nc, nc, nc]'}), '(BoxSize=bs, Nmesh=[nc, nc, nc])\n', (945, 977), False, 'from pmesh.pm import ParticleMesh\n'), ((1983, 2014), 'numpy.abs', 'np.abs', (['(pkh1mm / (pkmm + 1e-10))'], {}), '(pkh1mm / (pkmm + 1e-10))\n', (1989, 2014), True, 'import numpy as np\n'), ((1626, 1649), 'nbodykit.lab.FFTPower', 'FFTPower', (['dm'], {'mode': '"""1d"""'}), "(dm, mode='1d')\n", (1634, 1649), False, 'from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog, BigFileMesh, FFTPower\n'), ((1763, 1790), 'nbodykit.lab.FFTPower', 'FFTPower', (['h1mesh'], {'mode': '"""1d"""'}), "(h1mesh, mode='1d')\n", (1771, 1790), False, 'from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog, BigFileMesh, FFTPower\n'), ((2021, 2052), 'numpy.abs', 'np.abs', (['(pkh1h1 / (pkmm + 1e-10))'], {}), '(pkh1h1 / (pkmm + 1e-10))\n', (2027, 2052), True, 'import numpy as np\n'), ((2142, 2154), 'numpy.abs', 'np.abs', (['pkmm'], {}), '(pkmm)\n', (2148, 2154), True, 'import numpy as np\n'), ((2558, 2634), 'nbodykit.lab.BigFileCatalog', 'BigFileCatalog', (["(scratchyf + sim + '/fastpm_%0.4f//' % aa)"], {'dataset': '"""LL-0.200"""'}), "(scratchyf + sim + '/fastpm_%0.4f//' % aa, dataset='LL-0.200')\n", (2572, 2634), False, 'from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog, BigFileMesh, FFTPower\n'), ((2758, 2826), 'nbodykit.lab.BigFileCatalog', 'BigFileCatalog', (["(scratchcm + sim + '/fastpm_%0.4f/cencat' % aa + suff)"], {}), "(scratchcm + sim + '/fastpm_%0.4f/cencat' % aa + suff)\n", (2772, 2826), False, 'from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog, BigFileMesh, FFTPower\n'), ((2838, 2906), 'nbodykit.lab.BigFileCatalog', 'BigFileCatalog', (["(scratchcm + sim + '/fastpm_%0.4f/satcat' % aa + suff)"], {}), "(scratchcm + sim + '/fastpm_%0.4f/satcat' % aa + suff)\n", (2852, 2906), False, 'from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog, BigFileMesh, FFTPower\n'), ((1864, 1902), 'nbodykit.lab.FFTPower', 'FFTPower', (['h1mesh'], {'second': 'dm', 'mode': '"""1d"""'}), "(h1mesh, second=dm, mode='1d')\n", (1872, 1902), False, 'from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog, BigFileMesh, FFTPower\n'), ((1309, 1388), 'nbodykit.lab.BigFileMesh', 'BigFileMesh', (["(scratchyf + sim + '/fastpm_%0.4f/' % aa + '/1-mesh/N%04d' % nc)", '""""""'], {}), "(scratchyf + sim + '/fastpm_%0.4f/' % aa + '/1-mesh/N%04d' % nc, '')\n", (1320, 1388), False, 'from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog, BigFileMesh, FFTPower\n'), ((1442, 1521), 'nbodykit.lab.BigFileMesh', 'BigFileMesh', (["(project + sim + '/fastpm_%0.4f/' % aa + '/dmesh_N%04d/1/' % nc)", '""""""'], {}), "(project + sim + '/fastpm_%0.4f/' % aa + '/dmesh_N%04d/1/' % nc, '')\n", (1453, 1521), False, 'from nbodykit.lab import BigFileCatalog, MultipleSpeciesCatalog, BigFileMesh, FFTPower\n')]
|
import torch
import argparse
import os
import random
import numpy as np
from tensorboardX import SummaryWriter
from misc.utils import set_log, visualize
from torch.optim import SGD, Adam
from torch.nn.modules.loss import MSELoss
from inner_loop import InnerLoop
from omniglot_net import OmniglotNet
from score import *
from misc.batch_sampler import BatchSampler
from misc.replay_buffer import ReplayBuffer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class MetaLearner(object):
def __init__(self, log, tb_writer, args):
super(self.__class__, self).__init__()
self.log = log
self.tb_writer = tb_writer
self.args = args
self.loss_fn = MSELoss()
self.net = OmniglotNet(self.loss_fn, args).to(device)
self.fast_net = InnerLoop(self.loss_fn, args).to(device)
self.opt = Adam(self.net.parameters(), lr=args.meta_lr)
self.sampler = BatchSampler(args)
self.memory = ReplayBuffer()
def meta_update(self, episode_i, ls):
in_ = episode_i.observations[:, :, 0]
target = episode_i.rewards[:, :, 0]
# We use a dummy forward / backward pass to get the correct grads into self.net
loss, out = forward_pass(self.net, in_, target)
# Unpack the list of grad dicts
gradients = {k: sum(d[k] for d in ls) for k in ls[0].keys()}
# Register a hook on each parameter in the net that replaces the current dummy grad
# with our grads accumulated across the meta-batch
hooks = []
for (k, v) in self.net.named_parameters():
def get_closure():
key = k
def replace_grad(grad):
return gradients[key]
return replace_grad
hooks.append(v.register_hook(get_closure()))
# Compute grads for current step, replace with summed gradients as defined by hook
self.opt.zero_grad()
loss.backward()
# Update the net parameters with the accumulated gradient according to optimizer
self.opt.step()
# Remove the hooks before next training phase
for h in hooks:
h.remove()
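    # Illustrative aside (added for exposition, not part of the original class):
    # the register_hook trick used in meta_update can be seen in isolation on a
    # single parameter -- the hook discards whatever gradient autograd produced
    # and substitutes a precomputed tensor.  All values here are hypothetical.
    def _hook_demo(self):
        w = torch.nn.Parameter(torch.ones(2))
        fixed_grad = torch.tensor([5.0, -3.0])
        handle = w.register_hook(lambda grad: fixed_grad)
        (w * 2).sum().backward()   # plain autograd would give grad = [2., 2.]
        handle.remove()
        return w.grad              # tensor([ 5., -3.]): the hook's replacement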
def test(self, i_task, episode_i_):
predictions_ = []
for i_agent in range(self.args.n_agent):
test_net = OmniglotNet(self.loss_fn, self.args).to(device)
# Make a test net with same parameters as our current net
test_net.copy_weights(self.net)
test_opt = SGD(test_net.parameters(), lr=self.args.fast_lr)
episode_i = self.memory.storage[i_task - 1]
# Train on the train examples, using the same number of updates as in training
for i in range(self.args.fast_num_update):
in_ = episode_i.observations[:, :, i_agent]
target = episode_i.rewards[:, :, i_agent]
loss, _ = forward_pass(test_net, in_, target)
print("loss {} at {}".format(loss, i_task))
test_opt.zero_grad()
loss.backward()
test_opt.step()
# Evaluate the trained model on train and val examples
tloss, _ = evaluate(test_net, episode_i, i_agent)
vloss, prediction_ = evaluate(test_net, episode_i_, i_agent)
mtr_loss = tloss / 10.
mval_loss = vloss / 10.
print('-------------------------')
print('Meta train:', mtr_loss)
print('Meta val:', mval_loss)
print('-------------------------')
del test_net
predictions_.append(prediction_)
visualize(episode_i, episode_i_, predictions_, i_task, self.args)
def train(self):
for i_task in range(10000):
# Sample episode from current task
self.sampler.reset_task(i_task)
episodes = self.sampler.sample()
# Add to memory
self.memory.add(i_task, episodes)
# Evaluate on test tasks
if len(self.memory) > 1:
self.test(i_task, episodes)
# Collect a meta batch update
if len(self.memory) > 2:
meta_grads = []
for i in range(self.args.meta_batch_size):
if i == 0:
episodes_i = self.memory.storage[i_task - 1]
episodes_i_ = self.memory.storage[i_task]
else:
episodes_i, episodes_i_ = self.memory.sample()
self.fast_net.copy_weights(self.net)
for i_agent in range(self.args.n_agent):
meta_grad = self.fast_net.forward(episodes_i, episodes_i_, i_agent)
meta_grads.append(meta_grad)
# Perform the meta update
self.meta_update(episodes_i, meta_grads)
def main(args):
# Create dir
if not os.path.exists("./logs"):
os.makedirs("./logs")
if not os.path.exists("./pytorch_models"):
os.makedirs("./pytorch_models")
# Set logs
tb_writer = SummaryWriter('./logs/tb_{0}'.format(args.log_name))
log = set_log(args)
# Set seeds
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if device == torch.device("cuda"):
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Set the gpu
learner = MetaLearner(log, tb_writer, args)
learner.train()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
# General
parser.add_argument(
"--policy-type", type=str,
choices=["discrete", "continuous", "normal"],
help="Policy type available only for discrete, normal, and continuous")
parser.add_argument(
"--learner-type", type=str,
choices=["meta", "finetune"],
help="Learner type available only for meta, finetune")
parser.add_argument(
"--n-hidden", default=64, type=int,
help="Number of hidden units")
parser.add_argument(
"--n-traj", default=1, type=int,
help="Number of trajectory to collect from each task")
# Meta-learning
parser.add_argument(
"--meta-batch-size", default=25, type=int,
help="Number of tasks to sample for meta parameter update")
parser.add_argument(
"--fast-num-update", default=5, type=int,
help="Number of updates for adaptation")
parser.add_argument(
"--meta-lr", default=0.03, type=float,
help="Meta learning rate")
parser.add_argument(
"--fast-lr", default=10.0, type=float,
help="Adaptation learning rate")
parser.add_argument(
"--first-order", action="store_true",
help="Adaptation learning rate")
# Env
parser.add_argument(
"--env-name", default="", type=str,
help="OpenAI gym environment name")
parser.add_argument(
"--ep-max-timesteps", default=10, type=int,
help="Episode is terminated when max timestep is reached.")
parser.add_argument(
"--n-agent", default=1, type=int,
help="Number of agents in the environment")
# Misc
parser.add_argument(
"--seed", default=0, type=int,
help="Sets Gym, PyTorch and Numpy seeds")
parser.add_argument(
"--prefix", default="", type=str,
help="Prefix for tb_writer and logging")
args = parser.parse_args()
# Set log name
args.log_name = \
"env::%s_seed::%s_learner_type::%s_meta_batch_size::%s_meta_lr::%s_fast_num_update::%s_" \
"fast_lr::%s_prefix::%s_log" % (
args.env_name, str(args.seed), args.learner_type, args.meta_batch_size, args.meta_lr,
args.fast_num_update, args.fast_lr, args.prefix)
main(args=args)
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.nn.modules.loss.MSELoss",
"os.makedirs",
"torch.manual_seed",
"inner_loop.InnerLoop",
"misc.utils.set_log",
"os.path.exists",
"torch.cuda.manual_seed",
"omniglot_net.OmniglotNet",
"torch.cuda.manual_seed_all",
"misc.replay_buffer.ReplayBuffer",
"random.seed",
"torch.cuda.is_available",
"misc.batch_sampler.BatchSampler",
"torch.device",
"misc.utils.visualize"
] |
[((5202, 5215), 'misc.utils.set_log', 'set_log', (['args'], {}), '(args)\n', (5209, 5215), False, 'from misc.utils import set_log, visualize\n'), ((5241, 5263), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (5252, 5263), False, 'import random\n'), ((5268, 5293), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5282, 5293), True, 'import numpy as np\n'), ((5298, 5326), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (5315, 5326), False, 'import torch\n'), ((5587, 5626), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (5610, 5626), False, 'import argparse\n'), ((440, 465), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (463, 465), False, 'import torch\n'), ((706, 715), 'torch.nn.modules.loss.MSELoss', 'MSELoss', ([], {}), '()\n', (713, 715), False, 'from torch.nn.modules.loss import MSELoss\n'), ((942, 960), 'misc.batch_sampler.BatchSampler', 'BatchSampler', (['args'], {}), '(args)\n', (954, 960), False, 'from misc.batch_sampler import BatchSampler\n'), ((983, 997), 'misc.replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {}), '()\n', (995, 997), False, 'from misc.replay_buffer import ReplayBuffer\n'), ((3653, 3718), 'misc.utils.visualize', 'visualize', (['episode_i', 'episode_i_', 'predictions_', 'i_task', 'self.args'], {}), '(episode_i, episode_i_, predictions_, i_task, self.args)\n', (3662, 3718), False, 'from misc.utils import set_log, visualize\n'), ((4964, 4988), 'os.path.exists', 'os.path.exists', (['"""./logs"""'], {}), "('./logs')\n", (4978, 4988), False, 'import os\n'), ((4998, 5019), 'os.makedirs', 'os.makedirs', (['"""./logs"""'], {}), "('./logs')\n", (5009, 5019), False, 'import os\n'), ((5031, 5065), 'os.path.exists', 'os.path.exists', (['"""./pytorch_models"""'], {}), "('./pytorch_models')\n", (5045, 5065), False, 'import os\n'), ((5075, 5106), 'os.makedirs', 'os.makedirs', (['"""./pytorch_models"""'], {}), "('./pytorch_models')\n", (5086, 5106), False, 'import os\n'), ((5344, 5364), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5356, 5364), False, 'import torch\n'), ((5374, 5407), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (5396, 5407), False, 'import torch\n'), ((5416, 5453), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (5442, 5453), False, 'import torch\n'), ((745, 776), 'omniglot_net.OmniglotNet', 'OmniglotNet', (['self.loss_fn', 'args'], {}), '(self.loss_fn, args)\n', (756, 776), False, 'from omniglot_net import OmniglotNet\n'), ((813, 842), 'inner_loop.InnerLoop', 'InnerLoop', (['self.loss_fn', 'args'], {}), '(self.loss_fn, args)\n', (822, 842), False, 'from inner_loop import InnerLoop\n'), ((2339, 2375), 'omniglot_net.OmniglotNet', 'OmniglotNet', (['self.loss_fn', 'self.args'], {}), '(self.loss_fn, self.args)\n', (2350, 2375), False, 'from omniglot_net import OmniglotNet\n')]
|
import numpy as np
from pandas import (
DataFrame,
IndexSlice,
)
class Render:
params = [[12, 24, 36], [12, 120]]
param_names = ["cols", "rows"]
def setup(self, cols, rows):
self.df = DataFrame(
np.random.randn(rows, cols),
columns=[f"float_{i+1}" for i in range(cols)],
index=[f"row_{i+1}" for i in range(rows)],
)
def time_apply_render(self, cols, rows):
self._style_apply()
self.st._render_html(True, True)
def peakmem_apply_render(self, cols, rows):
self._style_apply()
self.st._render_html(True, True)
def time_classes_render(self, cols, rows):
self._style_classes()
self.st._render_html(True, True)
def peakmem_classes_render(self, cols, rows):
self._style_classes()
self.st._render_html(True, True)
def time_tooltips_render(self, cols, rows):
self._style_tooltips()
self.st._render_html(True, True)
def peakmem_tooltips_render(self, cols, rows):
self._style_tooltips()
self.st._render_html(True, True)
def time_format_render(self, cols, rows):
self._style_format()
self.st._render_html(True, True)
def peakmem_format_render(self, cols, rows):
self._style_format()
self.st._render_html(True, True)
def time_apply_format_hide_render(self, cols, rows):
self._style_apply_format_hide()
self.st._render_html(True, True)
def peakmem_apply_format_hide_render(self, cols, rows):
self._style_apply_format_hide()
self.st._render_html(True, True)
def _style_apply(self):
def _apply_func(s):
return [
"background-color: lightcyan" if s.name == "row_1" else "" for v in s
]
self.st = self.df.style.apply(_apply_func, axis=1)
def _style_classes(self):
classes = self.df.applymap(lambda v: ("cls-1" if v > 0 else ""))
classes.index, classes.columns = self.df.index, self.df.columns
self.st = self.df.style.set_td_classes(classes)
def _style_format(self):
ic = int(len(self.df.columns) / 4 * 3)
ir = int(len(self.df.index) / 4 * 3)
# apply a formatting function
# subset is flexible but hinders vectorised solutions
self.st = self.df.style.format(
"{:,.3f}", subset=IndexSlice["row_1":f"row_{ir}", "float_1":f"float_{ic}"]
)
def _style_apply_format_hide(self):
self.st = self.df.style.applymap(lambda v: "color: red;")
self.st.format("{:.3f}")
self.st.hide_index(self.st.index[1:])
self.st.hide_columns(self.st.columns[1:])
def _style_tooltips(self):
ttips = DataFrame("abc", index=self.df.index[::2], columns=self.df.columns[::2])
self.st = self.df.style.set_tooltips(ttips)
self.st.hide_index(self.st.index[12:])
self.st.hide_columns(self.st.columns[12:])
|
[
"pandas.DataFrame",
"numpy.random.randn"
] |
[((2743, 2815), 'pandas.DataFrame', 'DataFrame', (['"""abc"""'], {'index': 'self.df.index[::2]', 'columns': 'self.df.columns[::2]'}), "('abc', index=self.df.index[::2], columns=self.df.columns[::2])\n", (2752, 2815), False, 'from pandas import DataFrame, IndexSlice\n'), ((240, 267), 'numpy.random.randn', 'np.random.randn', (['rows', 'cols'], {}), '(rows, cols)\n', (255, 267), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from unittest.mock import patch
from unittest import TestCase
import genty
import numpy as np
from . import core
@genty.genty
class BenchmarkTests(TestCase):
@genty.genty_dataset( # type: ignore
bragg=("bragg", [2.93, 2.18, 2.35, 2.12, 31.53, 15.98, 226.69, 193.11]),
morpho=("morpho", [280.36, 52.96, 208.16, 72.69, 89.92, 60.37, 226.69, 193.11]),
chirped=("chirped", [280.36, 52.96, 104.08, 36.34, 31.53, 15.98, 226.69, 193.11]),
)
def test_photonics_transforms(self, pb: str, expected: List[float]) -> None:
np.random.seed(24)
with patch("shutil.which", return_value="here"):
func = core.Photonics(pb, 16) # should be 8... but it is actually not allowed. Nevermind here
x = np.random.normal(0, 1, size=8)
output = func.transform(x)
np.testing.assert_almost_equal(output, expected, decimal=2)
np.random.seed(24)
x2 = np.random.normal(0, 1, size=8)
np.testing.assert_almost_equal(x, x2, decimal=2, err_msg="x was modified in the process")
def test_tanh_crop() -> None:
output = core.tanh_crop([-1e9, 1e9, 0], -12, 16)
np.testing.assert_almost_equal(output, [-12, 16, 2])
def test_morpho_transform_constraints() -> None:
with patch("shutil.which", return_value="here"):
func = core.Photonics("morpho", 60)
x = np.random.normal(0, 5, size=60) # std 5 to play with boundaries
output = func.transform(x)
assert np.all(output >= 0)
q = len(x) // 4
assert np.all(output[:q] <= 300)
assert np.all(output[q: 3 * q] <= 600)
assert np.all(output[2 * q: 3 * q] >= 30)
assert np.all(output[3 * q:] <= 300)
def test_photonics() -> None:
with patch("shutil.which", return_value="here"):
photo = core.Photonics("bragg", 16)
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n12\n"):
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n12\n"):
output = photo(np.zeros(16))
np.testing.assert_equal(output, 12)
# check error
with patch("nevergrad.instrumentation.utils.CommandFunction.__call__", return_value="line1\n"):
np.testing.assert_raises(RuntimeError, photo, np.zeros(16).tolist())
np.testing.assert_raises(AssertionError, photo, np.zeros(12).tolist())
|
[
"numpy.random.seed",
"genty.genty_dataset",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"unittest.mock.patch",
"numpy.testing.assert_equal",
"numpy.random.normal",
"numpy.all"
] |
[((389, 660), 'genty.genty_dataset', 'genty.genty_dataset', ([], {'bragg': "('bragg', [2.93, 2.18, 2.35, 2.12, 31.53, 15.98, 226.69, 193.11])", 'morpho': "('morpho', [280.36, 52.96, 208.16, 72.69, 89.92, 60.37, 226.69, 193.11])", 'chirped': "('chirped', [280.36, 52.96, 104.08, 36.34, 31.53, 15.98, 226.69, 193.11])"}), "(bragg=('bragg', [2.93, 2.18, 2.35, 2.12, 31.53, 15.98, \n 226.69, 193.11]), morpho=('morpho', [280.36, 52.96, 208.16, 72.69, \n 89.92, 60.37, 226.69, 193.11]), chirped=('chirped', [280.36, 52.96, \n 104.08, 36.34, 31.53, 15.98, 226.69, 193.11]))\n", (408, 660), False, 'import genty\n'), ((1369, 1421), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output', '[-12, 16, 2]'], {}), '(output, [-12, 16, 2])\n', (1399, 1421), True, 'import numpy as np\n'), ((1578, 1609), 'numpy.random.normal', 'np.random.normal', (['(0)', '(5)'], {'size': '(60)'}), '(0, 5, size=60)\n', (1594, 1609), True, 'import numpy as np\n'), ((1685, 1704), 'numpy.all', 'np.all', (['(output >= 0)'], {}), '(output >= 0)\n', (1691, 1704), True, 'import numpy as np\n'), ((1736, 1761), 'numpy.all', 'np.all', (['(output[:q] <= 300)'], {}), '(output[:q] <= 300)\n', (1742, 1761), True, 'import numpy as np\n'), ((1773, 1803), 'numpy.all', 'np.all', (['(output[q:3 * q] <= 600)'], {}), '(output[q:3 * q] <= 600)\n', (1779, 1803), True, 'import numpy as np\n'), ((1816, 1849), 'numpy.all', 'np.all', (['(output[2 * q:3 * q] >= 30)'], {}), '(output[2 * q:3 * q] >= 30)\n', (1822, 1849), True, 'import numpy as np\n'), ((1862, 1891), 'numpy.all', 'np.all', (['(output[3 * q:] <= 300)'], {}), '(output[3 * q:] <= 300)\n', (1868, 1891), True, 'import numpy as np\n'), ((2278, 2313), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['output', '(12)'], {}), '(output, 12)\n', (2301, 2313), True, 'import numpy as np\n'), ((782, 800), 'numpy.random.seed', 'np.random.seed', (['(24)'], {}), '(24)\n', (796, 800), True, 'import numpy as np\n'), ((977, 1007), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(8)'}), '(0, 1, size=8)\n', (993, 1007), True, 'import numpy as np\n'), ((1051, 1110), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output', 'expected'], {'decimal': '(2)'}), '(output, expected, decimal=2)\n', (1081, 1110), True, 'import numpy as np\n'), ((1119, 1137), 'numpy.random.seed', 'np.random.seed', (['(24)'], {}), '(24)\n', (1133, 1137), True, 'import numpy as np\n'), ((1151, 1181), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(8)'}), '(0, 1, size=8)\n', (1167, 1181), True, 'import numpy as np\n'), ((1190, 1284), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x2'], {'decimal': '(2)', 'err_msg': '"""x was modified in the process"""'}), "(x, x2, decimal=2, err_msg=\n 'x was modified in the process')\n", (1220, 1284), True, 'import numpy as np\n'), ((1482, 1524), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (1487, 1524), False, 'from unittest.mock import patch\n'), ((1933, 1975), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (1938, 1975), False, 'from unittest.mock import patch\n'), ((2030, 2127), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n12\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n 
return_value='line1\\n12\\n')\n", (2035, 2127), False, 'from unittest.mock import patch\n'), ((2341, 2434), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n return_value='line1\\n')\n", (2346, 2434), False, 'from unittest.mock import patch\n'), ((814, 856), 'unittest.mock.patch', 'patch', (['"""shutil.which"""'], {'return_value': '"""here"""'}), "('shutil.which', return_value='here')\n", (819, 856), False, 'from unittest.mock import patch\n'), ((2138, 2235), 'unittest.mock.patch', 'patch', (['"""nevergrad.instrumentation.utils.CommandFunction.__call__"""'], {'return_value': '"""line1\n12\n"""'}), "('nevergrad.instrumentation.utils.CommandFunction.__call__',\n return_value='line1\\n12\\n')\n", (2143, 2235), False, 'from unittest.mock import patch\n'), ((2260, 2272), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2268, 2272), True, 'import numpy as np\n'), ((2561, 2573), 'numpy.zeros', 'np.zeros', (['(12)'], {}), '(12)\n', (2569, 2573), True, 'import numpy as np\n'), ((2486, 2498), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2494, 2498), True, 'import numpy as np\n')]
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : functional.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 03/03/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import math
from PIL import Image
import numpy as np
import torchvision.transforms.functional as TF
import jactorch.transforms.image.functional as jac_tf
from jacinle.utils.argument import get_2dshape
def normalize_coor(img, coor):
coor = coor.copy()
coor[:, 0] /= img.width
coor[:, 1] /= img.height
return img, coor
def denormalize_coor(img, coor):
coor = coor.copy()
coor[:, 0] *= img.width
coor[:, 1] *= img.height
return img, coor
def crop(img, coor, i, j, h, w):
coor = coor.copy()
coor[:, 0] = (coor[:, 0] - j / img.width) * (img.width / w)
coor[:, 1] = (coor[:, 1] - i / img.height) * (img.height / h)
return TF.crop(img, i, j, h, w), coor
def center_crop(img, coor, output_size):
output_size = get_2dshape(output_size)
w, h = img.size
th, tw = output_size
i = int(round((h - th) / 2.))
j = int(round((w - tw) / 2.))
return crop(img, coor, i, j, th, tw)
def pad(img, coor, padding, mode='constant', fill=0):
if isinstance(padding, int):
padding = (padding, padding, padding, padding)
elif len(padding) == 2:
padding = (padding[0], padding[1], padding[0], padding[1])
else:
assert len(padding) == 4
img_new = jac_tf.pad(img, padding, mode=mode, fill=fill)
coor = coor.copy()
coor[:, 0] = (coor[:, 0] + padding[0] / img.width) * (img.width / img_new.width)
coor[:, 1] = (coor[:, 1] + padding[1] / img.height) * (img.height/ img_new.height)
return img_new, coor
def hflip(img, coor):
coor = coor.copy()
coor[:, 0] = 1 - coor[:, 0]
return TF.hflip(img), coor
def vflip(img, coor):
coor = coor.copy()
coor[:, 1] = 1 - coor[:, 1]
return TF.vflip(img), coor
def resize(img, coor, size, interpolation=Image.BILINEAR):
# Assuming coordinates are 0/1-normalized.
return TF.resize(img, size, interpolation=interpolation), coor
def resized_crop(img, coor, i, j, h, w, size, interpolation=Image.BILINEAR):
img, coor = crop(img, coor, i, j, h, w)
img, coor = resize(img, coor, size, interpolation)
return img, coor
def refresh_valid(img, coor, force=False):
if coor.shape[1] == 2:
if force:
            coor = np.concatenate([coor, np.ones_like(coor[:, :1])], axis=1)  # keep shape (N, 1) so the axis=1 concat works
else:
return img, coor
assert coor.shape[1] == 3, 'Support only (x, y, valid) or (x, y) typed coordinates.'
out = []
for x, y, v in coor:
valid = (v == 1) and (x >= 0) and (x < img.width) and (y >= 0) and (y < img.height)
if valid:
out.append((x, y, v))
else:
out.append((0., 0., 0.))
return img, np.array(out, dtype='float32')
def rotate(img, coor, angle, resample, crop_, expand, center=None, translate=None):
assert translate is None
img_new = TF.rotate(img, angle, resample=resample, expand=expand, center=center)
matrix, extra_crop = get_rotation_matrix(img, angle, crop_, expand, center, translate)
_, coor = denormalize_coor(img, coor)
for i in range(coor.shape[0]):
coor[i, :2] = apply_affine_transform(*coor[i, :2], matrix)
_, coor = normalize_coor(img_new, coor)
if extra_crop is not None:
img_new, coor = crop(img_new, coor, *extra_crop)
return img_new, coor
def pad_multiple_of(img, coor, multiple, mode='constant', fill=0):
h, w = img.height, img.width
hh = h - h % multiple + multiple * int(h % multiple != 0)
ww = w - w % multiple + multiple * int(w % multiple != 0)
if h != hh or w != ww:
return pad(img, coor, (0, 0, ww - w, hh - h), mode=mode, fill=fill)
return img, coor
def get_rotation_matrix(image, angle, crop, expand, center, translate):
w, h = image.size
if translate is None:
translate = (0, 0)
if center is None:
center = (w / 2.0, h / 2.0)
angle = math.radians(angle % 360)
matrix = [
round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0
]
matrix[2], matrix[5] = apply_affine_transform(-center[0], -center[1], matrix)
matrix[2] += center[0] + translate[0]
matrix[5] += center[1] + translate[1]
# print('debug', angle, translate, center, matrix, apply_affine_transform(0.5, 0.5, matrix))
if crop or expand:
xx = []
yy = []
for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
x, y = apply_affine_transform(x, y, matrix)
xx.append(x)
yy.append(y)
xx.sort()
yy.sort()
extra_crop = None
if crop:
assert not expand, 'Cannot use both expand and crop.'
nw = int(math.ceil(xx[2]) - math.floor(xx[1]))
nh = int(math.ceil(yy[2]) - math.floor(yy[1]))
# CAUSION! extra_crop is of format (dy, dx, h, w)
extra_crop = ((h - nh) // 2, (w - nw) // 2, nh, nw)
if expand:
nw = int(math.ceil(xx[3]) - math.floor(xx[0]))
nh = int(math.ceil(yy[3]) - math.floor(yy[0]))
matrix[2] += (nw - w) / 2.
matrix[5] += (nh - h) / 2.
return matrix, extra_crop
def apply_affine_transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a*x + b*y + c, d*x + e*y + f
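# Worked example (added for clarity): a pure translation matrix
# (1, 0, 5, 0, 1, -2) maps (x, y) to (x + 5, y - 2), so
# apply_affine_transform(10, 10, [1, 0, 5, 0, 1, -2]) returns (15, 8).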
|
[
"numpy.ones_like",
"torchvision.transforms.functional.rotate",
"torchvision.transforms.functional.hflip",
"math.radians",
"math.ceil",
"jactorch.transforms.image.functional.pad",
"torchvision.transforms.functional.resize",
"math.floor",
"math.sin",
"torchvision.transforms.functional.crop",
"numpy.array",
"torchvision.transforms.functional.vflip",
"math.cos",
"jacinle.utils.argument.get_2dshape"
] |
[((978, 1002), 'jacinle.utils.argument.get_2dshape', 'get_2dshape', (['output_size'], {}), '(output_size)\n', (989, 1002), False, 'from jacinle.utils.argument import get_2dshape\n'), ((1454, 1500), 'jactorch.transforms.image.functional.pad', 'jac_tf.pad', (['img', 'padding'], {'mode': 'mode', 'fill': 'fill'}), '(img, padding, mode=mode, fill=fill)\n', (1464, 1500), True, 'import jactorch.transforms.image.functional as jac_tf\n'), ((3023, 3093), 'torchvision.transforms.functional.rotate', 'TF.rotate', (['img', 'angle'], {'resample': 'resample', 'expand': 'expand', 'center': 'center'}), '(img, angle, resample=resample, expand=expand, center=center)\n', (3032, 3093), True, 'import torchvision.transforms.functional as TF\n'), ((4059, 4084), 'math.radians', 'math.radians', (['(angle % 360)'], {}), '(angle % 360)\n', (4071, 4084), False, 'import math\n'), ((886, 910), 'torchvision.transforms.functional.crop', 'TF.crop', (['img', 'i', 'j', 'h', 'w'], {}), '(img, i, j, h, w)\n', (893, 910), True, 'import torchvision.transforms.functional as TF\n'), ((1812, 1825), 'torchvision.transforms.functional.hflip', 'TF.hflip', (['img'], {}), '(img)\n', (1820, 1825), True, 'import torchvision.transforms.functional as TF\n'), ((1922, 1935), 'torchvision.transforms.functional.vflip', 'TF.vflip', (['img'], {}), '(img)\n', (1930, 1935), True, 'import torchvision.transforms.functional as TF\n'), ((2061, 2110), 'torchvision.transforms.functional.resize', 'TF.resize', (['img', 'size'], {'interpolation': 'interpolation'}), '(img, size, interpolation=interpolation)\n', (2070, 2110), True, 'import torchvision.transforms.functional as TF\n'), ((2863, 2893), 'numpy.array', 'np.array', (['out'], {'dtype': '"""float32"""'}), "(out, dtype='float32')\n", (2871, 2893), True, 'import numpy as np\n'), ((4115, 4130), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4123, 4130), False, 'import math\n'), ((4143, 4158), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4151, 4158), False, 'import math\n'), ((4213, 4228), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (4221, 4228), False, 'import math\n'), ((4185, 4200), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (4193, 4200), False, 'import math\n'), ((4879, 4895), 'math.ceil', 'math.ceil', (['xx[2]'], {}), '(xx[2])\n', (4888, 4895), False, 'import math\n'), ((4898, 4915), 'math.floor', 'math.floor', (['xx[1]'], {}), '(xx[1])\n', (4908, 4915), False, 'import math\n'), ((4934, 4950), 'math.ceil', 'math.ceil', (['yy[2]'], {}), '(yy[2])\n', (4943, 4950), False, 'import math\n'), ((4953, 4970), 'math.floor', 'math.floor', (['yy[1]'], {}), '(yy[1])\n', (4963, 4970), False, 'import math\n'), ((5124, 5140), 'math.ceil', 'math.ceil', (['xx[3]'], {}), '(xx[3])\n', (5133, 5140), False, 'import math\n'), ((5143, 5160), 'math.floor', 'math.floor', (['xx[0]'], {}), '(xx[0])\n', (5153, 5160), False, 'import math\n'), ((5179, 5195), 'math.ceil', 'math.ceil', (['yy[3]'], {}), '(yy[3])\n', (5188, 5195), False, 'import math\n'), ((5198, 5215), 'math.floor', 'math.floor', (['yy[0]'], {}), '(yy[0])\n', (5208, 5215), False, 'import math\n'), ((2447, 2471), 'numpy.ones_like', 'np.ones_like', (['coor[:, 0]'], {}), '(coor[:, 0])\n', (2459, 2471), True, 'import numpy as np\n')]
|
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
def cross_val(estimator, X_train, y_train, nfolds):
    ''' Takes an instantiated model (estimator) and returns the square root of
        the average mean squared error (RMSE) from kfold cross-validation.
Parameters: estimator: model object
X_train: 2d numpy array
y_train: 1d numpy array
nfolds: the number of folds in the kfold cross-validation
        Returns:  mean_mse: square root of the mean of the per-fold MSE values
                  (printed in the log line as RMSLE)
There are many possible values for scoring parameter in cross_val_score.
http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter
kfold is easily parallelizable, so set n_jobs = -1 in cross_val_score
'''
mse = cross_val_score(estimator, X_train, y_train,
scoring='neg_mean_squared_error',
cv=nfolds, n_jobs=-1) * -1
# mse multiplied by -1 to make positive
mean_mse = np.sqrt(mse.mean())
name = estimator.__class__.__name__
print("{0:<25s} Train CV | RMSLE: {1:0.3f} ".format(name,
mean_mse))
return mean_mse
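# Hedged usage sketch (added; names and hyperparameters are illustrative, not
# from the original source):
#   rf = RandomForestRegressor(n_estimators=100, n_jobs=-1)
#   train_rmsle = cross_val(rf, X_train, y_train, nfolds=5)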
def stage_score_plot(estimator, X_train, y_train, X_test, y_test):
'''
Parameters: estimator: GradientBoostingRegressor or AdaBoostRegressor
X_train: 2d numpy array
y_train: 1d numpy array
X_test: 2d numpy array
y_test: 1d numpy array
    Returns: None.  Plots the boosting iteration number vs the root of the
             staged MSE (labelled RMSLE) for both the training and test sets.
'''
estimator.fit(X_train, y_train)
name = estimator.__class__.__name__.replace('Regressor', '')
learn_rate = estimator.learning_rate
# initialize
train_scores = np.zeros((estimator.n_estimators,), dtype=np.float64)
test_scores = np.zeros((estimator.n_estimators,), dtype=np.float64)
# Get train score from each boost
for i, y_train_pred in enumerate(estimator.staged_predict(X_train)):
train_scores[i] = mean_squared_error(y_train, y_train_pred)
# Get test score from each boost
for i, y_test_pred in enumerate(estimator.staged_predict(X_test)):
test_scores[i] = mean_squared_error(y_test, y_test_pred)
fig, ax = plt.subplots(figsize = (8,10))
plt.plot(np.sqrt(train_scores), alpha=.5, label="{0} Train - learning rate {1}".format(
name, learn_rate))
plt.plot(np.sqrt(test_scores), alpha=.5, label="{0} Test - learning rate {1}".format(
name, learn_rate), ls='--')
plt.title(name, fontsize=16, fontweight='bold')
plt.ylabel('RMSLE', fontsize=14)
plt.xlabel('Iterations', fontsize=14)
return
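# Hedged usage sketch (added; hyperparameters are illustrative, not from the
# original source):
#   gbr = GradientBoostingRegressor(learning_rate=0.1, n_estimators=200)
#   stage_score_plot(gbr, X_train, y_train, X_test, y_test)
#   rf_score_plot(RandomForestRegressor(), X_train, y_train, X_test, y_test)
#   plt.legend(); plt.show()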
def rf_score_plot(randforest, X_train, y_train, X_test, y_test):
'''
Parameters: randforest: RandomForestRegressor
X_train: 2d numpy array
y_train: 1d numpy array
X_test: 2d numpy array
y_test: 1d numpy array
    Returns: None.  Draws a horizontal line at the random forest's test RMSLE
             for comparison against the boosting curves.
'''
randforest.fit(X_train, y_train)
y_test_pred = randforest.predict(X_test)
test_score = np.sqrt(mean_squared_error(y_test, y_test_pred))
plt.axhline(test_score, alpha = 0.7, c = 'grey', lw=1, ls='-.', label =
'Random Forest Test')
def gridsearch_with_output(estimator, parameter_grid, X_train, y_train):
'''
Parameters: estimator: the type of model (e.g. RandomForestRegressor())
                parameter_grid: dictionary defining the gridsearch parameters
X_train: 2d numpy array
y_train: 1d numpy array
Returns: best parameters and model fit with those parameters
'''
model_gridsearch = GridSearchCV(estimator,
parameter_grid,
verbose=True,
n_jobs=-1,
scoring='neg_mean_squared_error')
model_gridsearch.fit(X_train, y_train)
best_params = model_gridsearch.best_params_
model_best = model_gridsearch.best_estimator_
print("\nResult of gridsearch:")
print("{0:<20s} | {1:<8s} | {2}".format("Parameter", "Optimal", "Gridsearch values"))
print("-" * 55)
for param, vals in parameter_grid.items():
print("{0:<20s} | {1:<8s} | {2}".format(str(param),
str(best_params[param]),
str(vals)))
return best_params, model_best
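# Hedged usage sketch (added; the grid values are illustrative, not from the
# original source):
#   param_grid = {'learning_rate': [0.05, 0.1], 'n_estimators': [100, 200]}
#   best_params, best_model = gridsearch_with_output(
#       GradientBoostingRegressor(), param_grid, X_train, y_train)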
def display_default_and_gsearch_model_results(model_default, model_gridsearch,
X_test, y_test):
'''
Parameters: model_default: fit model using initial parameters
model_gridsearch: fit model using parameters from gridsearch
X_test: 2d numpy array
y_test: 1d numpy array
Return: None, but prints out mse and r2 for the default and model with
gridsearched parameters
'''
name = model_default.__class__.__name__.replace('Regressor', '') # for printing
y_test_pred = model_gridsearch.predict(X_test)
mse = np.sqrt(mean_squared_error(y_test, y_test_pred))
print("Results for {0}".format(name))
print("Gridsearched model rmlse: {0:0.3f})".format(mse))
y_test_pred = model_default.predict(X_test)
mse = np.sqrt(mean_squared_error(y_test, y_test_pred))
print(" Default model rmsle: {0:0.3f}".format(mse))
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.axhline",
"sklearn.model_selection.GridSearchCV",
"sklearn.model_selection.cross_val_score",
"numpy.zeros",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"sklearn.metrics.mean_squared_error",
"numpy.sqrt"
] |
[((2361, 2414), 'numpy.zeros', 'np.zeros', (['(estimator.n_estimators,)'], {'dtype': 'np.float64'}), '((estimator.n_estimators,), dtype=np.float64)\n', (2369, 2414), True, 'import numpy as np\n'), ((2433, 2486), 'numpy.zeros', 'np.zeros', (['(estimator.n_estimators,)'], {'dtype': 'np.float64'}), '((estimator.n_estimators,), dtype=np.float64)\n', (2441, 2486), True, 'import numpy as np\n'), ((2858, 2887), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 10)'}), '(figsize=(8, 10))\n', (2870, 2887), True, 'import matplotlib.pyplot as plt\n'), ((3241, 3288), 'matplotlib.pyplot.title', 'plt.title', (['name'], {'fontsize': '(16)', 'fontweight': '"""bold"""'}), "(name, fontsize=16, fontweight='bold')\n", (3250, 3288), True, 'import matplotlib.pyplot as plt\n'), ((3293, 3325), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSLE"""'], {'fontsize': '(14)'}), "('RMSLE', fontsize=14)\n", (3303, 3325), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3367), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iterations"""'], {'fontsize': '(14)'}), "('Iterations', fontsize=14)\n", (3340, 3367), True, 'import matplotlib.pyplot as plt\n'), ((3924, 4016), 'matplotlib.pyplot.axhline', 'plt.axhline', (['test_score'], {'alpha': '(0.7)', 'c': '"""grey"""', 'lw': '(1)', 'ls': '"""-."""', 'label': '"""Random Forest Test"""'}), "(test_score, alpha=0.7, c='grey', lw=1, ls='-.', label=\n 'Random Forest Test')\n", (3935, 4016), True, 'import matplotlib.pyplot as plt\n'), ((4507, 4610), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['estimator', 'parameter_grid'], {'verbose': '(True)', 'n_jobs': '(-1)', 'scoring': '"""neg_mean_squared_error"""'}), "(estimator, parameter_grid, verbose=True, n_jobs=-1, scoring=\n 'neg_mean_squared_error')\n", (4519, 4610), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1289, 1394), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['estimator', 'X_train', 'y_train'], {'scoring': '"""neg_mean_squared_error"""', 'cv': 'nfolds', 'n_jobs': '(-1)'}), "(estimator, X_train, y_train, scoring=\n 'neg_mean_squared_error', cv=nfolds, n_jobs=-1)\n", (1304, 1394), False, 'from sklearn.model_selection import train_test_split, cross_val_score\n'), ((2624, 2665), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'y_train_pred'], {}), '(y_train, y_train_pred)\n', (2642, 2665), False, 'from sklearn.metrics import mean_squared_error\n'), ((2799, 2838), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (2817, 2838), False, 'from sklearn.metrics import mean_squared_error\n'), ((2902, 2923), 'numpy.sqrt', 'np.sqrt', (['train_scores'], {}), '(train_scores)\n', (2909, 2923), True, 'import numpy as np\n'), ((3077, 3097), 'numpy.sqrt', 'np.sqrt', (['test_scores'], {}), '(test_scores)\n', (3084, 3097), True, 'import numpy as np\n'), ((3879, 3918), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (3897, 3918), False, 'from sklearn.metrics import mean_squared_error\n'), ((5986, 6025), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (6004, 6025), False, 'from sklearn.metrics import mean_squared_error\n'), ((6197, 6236), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_test_pred'], {}), '(y_test, y_test_pred)\n', (6215, 6236), False, 'from sklearn.metrics import mean_squared_error\n')]
|
"""
Name: SampleEvent
brief: Samples events for particles provided in a phase space for MCDC-TNT
Author: <NAME> (OR State Univ - <EMAIL>) CEMeNT
Date: Dec 2nd 2021
"""
import numpy as np
import pykokkos as pk
@pk.workload
class SampleEvent:
def __init__(self, p_mesh_cell, p_alive, mesh_cap_xsec, mesh_scat_xsec, mesh_fis_xsec, scatter_event_index, capture_event_index, fission_event_index, num_part, nu_new_neutrons, rands, clever_out):
self.p_mesh_cell: pk.View1D[int] = p_mesh_cell
self.p_alive: pk.View1D[int] = p_alive
self.mesh_cap_xsec: pk.View1D[pk.double] = mesh_cap_xsec
self.mesh_scat_xsec: pk.View1D[pk.double] = mesh_scat_xsec
self.mesh_fis_xsec: pk.View1D[pk.double] = mesh_fis_xsec
self.scatter_event_index: pk.View1D[int] = scatter_event_index
self.capture_event_index: pk.View1D[int] = capture_event_index
self.fission_event_index: pk.View1D[int] = fission_event_index
self.num_part: int = num_part
        self.nu_new_neutrons: int = nu_new_neutrons
self.rands: pk.View1D[pk.double] = rands
self.fissions_to_add: int = 0
self.scat_count: int = 0
self.cap_count: int = 0
self.fis_count: int = 0
self.killed: int = 0
self.clever_out: pk.View1D[int] = clever_out
#print('made it through init!')
@pk.main
def run(self):
for i in range(self.num_part):
#normalize cross sections in each mesh cell
total_scat_xsec: pk.double = self.mesh_scat_xsec[self.p_mesh_cell[i]] + self.mesh_cap_xsec[self.p_mesh_cell[i]] + self.mesh_fis_xsec[self.p_mesh_cell[i]]
mesh_scat_xsec_temp: pk.double = self.mesh_scat_xsec[self.p_mesh_cell[i]] / total_scat_xsec
mesh_cap_xsec_temp: pk.double = self.mesh_cap_xsec[self.p_mesh_cell[i]] / total_scat_xsec
mesh_fis_xsec_temp: pk.double = self.mesh_fis_xsec[self.p_mesh_cell[i]] / total_scat_xsec
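            # After normalisation the three temporaries sum to 1, so a single
            # uniform random number selects scatter in [0, scat), capture in
            # [scat, scat + cap), and fission in [scat + cap, 1).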
#pk.printf('%d %d %d\n ',self.scat_count, self.cap_count, self.fis_count)
if self.p_alive[i] == 1:
event_rand:pk.double = self.rands[i]
#scatter?
if event_rand < mesh_scat_xsec_temp:
self.scatter_event_index[self.scat_count] = i
self.scat_count += 1
#pk.printf('had a scatter! %d\n', self.scat_count)
#capture?
elif mesh_scat_xsec_temp < event_rand and event_rand < mesh_scat_xsec_temp + mesh_cap_xsec_temp:
self.p_alive[i] = 0
self.killed += 1
self.capture_event_index[self.cap_count] = i
self.cap_count +=1
#pk.printf('had a capture! %d\n', self.cap_count)
#fission?
elif mesh_scat_xsec_temp + mesh_cap_xsec_temp < event_rand and event_rand < mesh_scat_xsec_temp + mesh_cap_xsec_temp + mesh_fis_xsec_temp:
self.p_alive[i] = 0
self.killed += 1
self.fissions_to_add += self.nu_new_neutrons
self.fission_event_index[self.fis_count] = i
self.fis_count += 1
#pk.printf('had a fission! %d\n', self.fis_count)
else:
pk.printf('Well shoot dang')
self.clever_out[0] = self.scat_count
self.clever_out[1] = self.cap_count
self.clever_out[2] = self.fis_count
def test_SampleEvent():
p_mesh_cell = np.array([0,1,0,5], dtype=np.int32)
p_alive = np.array([1,1,1,0], dtype=np.int32)
mesh_cap_xsec = 1/3*np.ones(2, dtype=float)
mesh_scat_xsec = 1/3*np.ones(2, dtype=float)
mesh_fis_xsec = 1/2*np.ones(2, dtype=float)
scatter_event_index = np.zeros(3, dtype=np.int32)
capture_event_index = np.zeros(3, dtype=np.int32)
fission_event_index = np.zeros(3, dtype=np.int32)
controled_rands = np.array([.2, .4, .8], dtype=float)
nu = 2
num_part = 3
p_mesh_cell = pk.from_numpy(p_mesh_cell)
p_alive = pk.from_numpy(p_alive)
mesh_cap_xsec = pk.from_numpy(mesh_cap_xsec)
mesh_scat_xsec = pk.from_numpy(mesh_scat_xsec)
mesh_fis_xsec = pk.from_numpy(mesh_fis_xsec)
scatter_event_index = pk.from_numpy(scatter_event_index)
capture_event_index = pk.from_numpy(capture_event_index)
fission_event_index = pk.from_numpy(fission_event_index)
controled_rands = pk.from_numpy(controled_rands)
clever_out = np.zeros(3, dtype=np.int32)
clever_out = pk.from_numpy(clever_out)
print("Running!")
pk.execute(pk.ExecutionSpace.OpenMP, SampleEvent(p_mesh_cell, p_alive, mesh_cap_xsec, mesh_scat_xsec, mesh_fis_xsec, scatter_event_index, capture_event_index, fission_event_index, num_part, nu, controled_rands, clever_out))
print('Made it through')
scat_count = clever_out[0]
cap_count = clever_out[1]
fis_count = clever_out[2]
print(scat_count)
assert (fis_count == 1)
assert (scat_count == 1)
assert (cap_count == 1)
assert (capture_event_index[0] == 1)
assert (fission_event_index[0] == 2)
assert (scatter_event_index[0] == 0)
if __name__ == '__main__':
test_SampleEvent()
|
[
"pykokkos.printf",
"numpy.zeros",
"numpy.ones",
"pykokkos.from_numpy",
"numpy.array"
] |
[((3753, 3791), 'numpy.array', 'np.array', (['[0, 1, 0, 5]'], {'dtype': 'np.int32'}), '([0, 1, 0, 5], dtype=np.int32)\n', (3761, 3791), True, 'import numpy as np\n'), ((3807, 3845), 'numpy.array', 'np.array', (['[1, 1, 1, 0]'], {'dtype': 'np.int32'}), '([1, 1, 1, 0], dtype=np.int32)\n', (3815, 3845), True, 'import numpy as np\n'), ((4048, 4075), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.int32'}), '(3, dtype=np.int32)\n', (4056, 4075), True, 'import numpy as np\n'), ((4106, 4133), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.int32'}), '(3, dtype=np.int32)\n', (4114, 4133), True, 'import numpy as np\n'), ((4164, 4191), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.int32'}), '(3, dtype=np.int32)\n', (4172, 4191), True, 'import numpy as np\n'), ((4246, 4284), 'numpy.array', 'np.array', (['[0.2, 0.4, 0.8]'], {'dtype': 'float'}), '([0.2, 0.4, 0.8], dtype=float)\n', (4254, 4284), True, 'import numpy as np\n'), ((4358, 4384), 'pykokkos.from_numpy', 'pk.from_numpy', (['p_mesh_cell'], {}), '(p_mesh_cell)\n', (4371, 4384), True, 'import pykokkos as pk\n'), ((4403, 4425), 'pykokkos.from_numpy', 'pk.from_numpy', (['p_alive'], {}), '(p_alive)\n', (4416, 4425), True, 'import pykokkos as pk\n'), ((4459, 4487), 'pykokkos.from_numpy', 'pk.from_numpy', (['mesh_cap_xsec'], {}), '(mesh_cap_xsec)\n', (4472, 4487), True, 'import pykokkos as pk\n'), ((4513, 4542), 'pykokkos.from_numpy', 'pk.from_numpy', (['mesh_scat_xsec'], {}), '(mesh_scat_xsec)\n', (4526, 4542), True, 'import pykokkos as pk\n'), ((4567, 4595), 'pykokkos.from_numpy', 'pk.from_numpy', (['mesh_fis_xsec'], {}), '(mesh_fis_xsec)\n', (4580, 4595), True, 'import pykokkos as pk\n'), ((4635, 4669), 'pykokkos.from_numpy', 'pk.from_numpy', (['scatter_event_index'], {}), '(scatter_event_index)\n', (4648, 4669), True, 'import pykokkos as pk\n'), ((4700, 4734), 'pykokkos.from_numpy', 'pk.from_numpy', (['capture_event_index'], {}), '(capture_event_index)\n', (4713, 4734), True, 'import pykokkos as pk\n'), ((4765, 4799), 'pykokkos.from_numpy', 'pk.from_numpy', (['fission_event_index'], {}), '(fission_event_index)\n', (4778, 4799), True, 'import pykokkos as pk\n'), ((4835, 4865), 'pykokkos.from_numpy', 'pk.from_numpy', (['controled_rands'], {}), '(controled_rands)\n', (4848, 4865), True, 'import pykokkos as pk\n'), ((4896, 4923), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.int32'}), '(3, dtype=np.int32)\n', (4904, 4923), True, 'import numpy as np\n'), ((4946, 4971), 'pykokkos.from_numpy', 'pk.from_numpy', (['clever_out'], {}), '(clever_out)\n', (4959, 4971), True, 'import pykokkos as pk\n'), ((3880, 3903), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'float'}), '(2, dtype=float)\n', (3887, 3903), True, 'import numpy as np\n'), ((3933, 3956), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'float'}), '(2, dtype=float)\n', (3940, 3956), True, 'import numpy as np\n'), ((3985, 4008), 'numpy.ones', 'np.ones', (['(2)'], {'dtype': 'float'}), '(2, dtype=float)\n', (3992, 4008), True, 'import numpy as np\n'), ((3509, 3537), 'pykokkos.printf', 'pk.printf', (['"""Well shoot dang"""'], {}), "('Well shoot dang')\n", (3518, 3537), True, 'import pykokkos as pk\n')]
|
import logging
import numpy as np
from .dataset import DataSet
from .markers import markers_to_events
def sliding_window_indices(window_size, window_step, sig_len):
'''Returns indices for a sliding window with shape [nwindows x window_size]'''
nwindows = int(np.floor((sig_len - window_size + window_step) /
float(window_step)))
print(nwindows)
starts = np.arange(nwindows).reshape(nwindows, 1) * window_step
return starts + np.arange(window_size)
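# Worked example (added for clarity): window_size=4, window_step=2, sig_len=10
# gives nwindows = floor((10 - 4 + 2) / 2) = 4, windows starting at samples
# 0, 2, 4 and 6, so the last row of indices is [6, 7, 8, 9].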
def sliding_window(signal, window_size, window_step, win_func=None):
'''Apply a sliding window to a 1D signal. Returns [#windows x window_size].'''
signal = np.asarray(signal)
if signal.ndim != 1:
raise ValueError('Sliding window works on 1D arrays only!')
if win_func is not None:
if win_func.size != window_size:
raise ValueError('window_size (%d) does not match win_func.size (%d)' % (
window_size, win_func.size))
indices = sliding_window_indices(window_size, window_step, signal.shape[0])
windows = signal.take(indices=indices)
if win_func is not None:
windows = windows * win_func # broadcasting matches from last dim
return windows
def stft(signal, nfft, stepsize):
'''Calculate the short-time Fourier transform (STFT).
Returns [windows x FFT coefficients]'''
wins = sliding_window(signal, nfft, stepsize, win_func=np.hanning(nfft))
return np.fft.rfft(wins, axis=1)
def spectrogram(signal, nfft, stepsize):
'''
Calculate a spectrogram using STFT.
Returns [windows x frequencies], in units related to power.
Equivalent to power spectral density.
'''
spec = stft(signal, nfft, stepsize)
# convert to power. The abs() is the magnitude of a complex number
spec = np.abs(spec) ** 2 / nfft
# compensate for missing negative frequencies
spec[:, 1:-1] *= 2
# correct for window
spec /= np.mean(np.abs(np.hanning(nfft)) ** 2)
# compensate for overlapping windows
nwins = spec.shape[0]
overlap = stepsize / float(nfft)
spec *= (1 + (nwins - 1) * overlap) / nwins
return spec
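# Hedged usage sketch (added; the signal is illustrative, not from the
# original source): power spectrum of a 10 Hz sine sampled at 256 Hz,
# using 1-second windows with 50% overlap.
#   t = np.arange(0, 4, 1 / 256.)
#   sig = np.sin(2 * np.pi * 10 * t)
#   spec = spectrogram(sig, nfft=256, stepsize=128)  # [windows x 129 freqs]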
def get_samplerate(d, axis=1):
'''
Derive the sample rate from the timestamps given in either ``feat_lab`` or
  ``d.ids``. The sample rate is taken to be the reciprocal of the median
  difference between consecutive time stamps.
Parameters
----------
d : :class:`psychic.DataSet`
The data to estimate the sample rate of. Must contain time stamps
in ``d.ids``
axis : int (default 1)
The axis along which time samples are stored. If the last axis is specified
here, time stamps are taken from the ``ids`` property, otherwise they are
taken from the corresponding index of ``feat_lab``.
Returns
-------
sample_rate : float
The estimated samplerate.
'''
assert axis < d.data.ndim, 'Invalid axis specified'
if axis == d.data.ndim - 1:
return np.round(1./np.median(np.diff(d.ids[0])))
else:
return np.round(1./np.median(np.diff([float(x) for x in d.feat_lab[axis]])))
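# Example of the underlying estimate (added for clarity): time stamps spaced
# 4 ms apart, e.g. ids = [0., 0.004, 0.008], give
# np.round(1. / np.median(np.diff(ids))) == 250.0, i.e. a 250 Hz sample rate.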
def find_segments(events, event_indices, start_mark, end_mark):
'''Helper to find matching start/end markers in an event array'''
events, event_indices = np.asarray(events), np.asarray(event_indices)
assert events.size == event_indices.size
mask = (events==start_mark) | (events==end_mark)
sevents, sevent_ids = events[mask], event_indices[mask]
stack, result = [], []
for si in range(sevent_ids.size):
if sevents[si] == start_mark:
stack.append(sevent_ids[si])
else:
assert stack != [], 'Missing start marker'
result.append((stack.pop(), sevent_ids[si]))
if not stack == []:
logging.getLogger('psychic.utils.find_segments').warning(
'Did not end start marker(s) at %s' % repr(stack))
return result
def cut_segments(d, marker_tuples, offsets=[0, 0]):
'''
Cut a dataset into segments using (start_marker, end_marker) tuples.
Parameters
----------
d : :class:`psychic.DataSet`
Continuous data to cut into segments.
marker_tuples : list of tuples
A list of (start_marker, end_marker) marker codes delimiting each
type of segment.
Returns
-------
data : list of :class:`psychic.DataSet`
A list with datasets.
'''
start_off, end_off = offsets
segments = []
e, ei, _ = markers_to_events(d.labels.flat)
for (sm, em) in marker_tuples:
segments.extend(find_segments(e, ei, sm, em))
segments.sort()
return [d[s + start_off:e + end_off] for (s, e) in segments]
def wolpaw_bitr(N, P):
assert 0 <= P <= 1
assert 2 <= N
result = np.log2(N)
if P > 0:
result += P * np.log2(P)
if P < 1:
result += (1 - P) * np.log2((1 - P)/(N - 1.))
return result
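# Numeric example (added for clarity): a 4-class problem decoded with 90%
# accuracy gives wolpaw_bitr(4, 0.9) = log2(4) + 0.9*log2(0.9)
# + 0.1*log2(0.1/3), roughly 1.37 bits per selection.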
def split_in_bins(d, order, n, legend=lambda i,b: 'slice %d' % i, ascending=True):
idx = np.argsort(order)
if not ascending:
idx = idx[::-1]
bin_size = int(len(order) / float(n))
bins = [idx[i*bin_size:(i+1)*bin_size] for i in range(n)]
labels = np.zeros((n, d.ninstances), dtype=np.bool)
for i,b in enumerate(bins):
labels[i, b] = True
cl_lab = [legend(i, bins[i]) for i in range(n)]
return (bins, DataSet(labels=labels, cl_lab=cl_lab, default=d))
|
[
"numpy.fft.rfft",
"numpy.abs",
"numpy.log2",
"numpy.asarray",
"numpy.zeros",
"numpy.argsort",
"numpy.diff",
"numpy.arange",
"numpy.hanning",
"logging.getLogger"
] |
[((627, 645), 'numpy.asarray', 'np.asarray', (['signal'], {}), '(signal)\n', (637, 645), True, 'import numpy as np\n'), ((1364, 1389), 'numpy.fft.rfft', 'np.fft.rfft', (['wins'], {'axis': '(1)'}), '(wins, axis=1)\n', (1375, 1389), True, 'import numpy as np\n'), ((4481, 4491), 'numpy.log2', 'np.log2', (['N'], {}), '(N)\n', (4488, 4491), True, 'import numpy as np\n'), ((4704, 4721), 'numpy.argsort', 'np.argsort', (['order'], {}), '(order)\n', (4714, 4721), True, 'import numpy as np\n'), ((4879, 4921), 'numpy.zeros', 'np.zeros', (['(n, d.ninstances)'], {'dtype': 'np.bool'}), '((n, d.ninstances), dtype=np.bool)\n', (4887, 4921), True, 'import numpy as np\n'), ((442, 464), 'numpy.arange', 'np.arange', (['window_size'], {}), '(window_size)\n', (451, 464), True, 'import numpy as np\n'), ((3108, 3126), 'numpy.asarray', 'np.asarray', (['events'], {}), '(events)\n', (3118, 3126), True, 'import numpy as np\n'), ((3128, 3153), 'numpy.asarray', 'np.asarray', (['event_indices'], {}), '(event_indices)\n', (3138, 3153), True, 'import numpy as np\n'), ((1337, 1353), 'numpy.hanning', 'np.hanning', (['nfft'], {}), '(nfft)\n', (1347, 1353), True, 'import numpy as np\n'), ((1702, 1714), 'numpy.abs', 'np.abs', (['spec'], {}), '(spec)\n', (1708, 1714), True, 'import numpy as np\n'), ((4523, 4533), 'numpy.log2', 'np.log2', (['P'], {}), '(P)\n', (4530, 4533), True, 'import numpy as np\n'), ((4570, 4598), 'numpy.log2', 'np.log2', (['((1 - P) / (N - 1.0))'], {}), '((1 - P) / (N - 1.0))\n', (4577, 4598), True, 'import numpy as np\n'), ((369, 388), 'numpy.arange', 'np.arange', (['nwindows'], {}), '(nwindows)\n', (378, 388), True, 'import numpy as np\n'), ((1847, 1863), 'numpy.hanning', 'np.hanning', (['nfft'], {}), '(nfft)\n', (1857, 1863), True, 'import numpy as np\n'), ((3572, 3620), 'logging.getLogger', 'logging.getLogger', (['"""psychic.utils.find_segments"""'], {}), "('psychic.utils.find_segments')\n", (3589, 3620), False, 'import logging\n'), ((2838, 2855), 'numpy.diff', 'np.diff', (['d.ids[0]'], {}), '(d.ids[0])\n', (2845, 2855), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
from GPfates import GPfates
etpm = pd.read_table('tapio_tcell_tpm.txt', index_col=0)
etpm = etpm[(etpm > 2).sum(1) > 2]
logexp = np.log10(etpm + 1)
tcells = pd.read_csv('tcells_rebuttal.csv', index_col=0)
m = GPfates.GPfates(tcells, logexp)
# m.dimensionality_reduction()
#
# m.store_dr()
#
# m.infer_pseudotime(priors=m.s.day_int, s_columns=['bgplvm_0', 'bgplvm_1'])
# m.infer_pseudotime(priors=m.s.day_int, s_columns=['bgplvm_2d_0', 'bgplvm_2d_1'])
# GPfates.plt.scatter(m.s.scaled_pseudotime, m.s.pseudotime); GPfates.plt.show()
# m.model_fates(X=['bgplvm_1'])
m.model_fates(X=['bgplvm_2d_1'])
# p = m.identify_bifurcation_point()
# print(p)
# m.calculate_bifurcation_statistics()
# m.fate_model.plot(); GPfates.plt.show()
m.make_fates_viz(['bgplvm_2d_0', 'bgplvm_2d_1'])
m.fates_viz.plot(); GPfates.plt.show()
|
[
"pandas.read_csv",
"GPfates.GPfates.GPfates",
"GPfates.GPfates.plt.show",
"pandas.read_table",
"numpy.log10"
] |
[((76, 125), 'pandas.read_table', 'pd.read_table', (['"""tapio_tcell_tpm.txt"""'], {'index_col': '(0)'}), "('tapio_tcell_tpm.txt', index_col=0)\n", (89, 125), True, 'import pandas as pd\n'), ((170, 188), 'numpy.log10', 'np.log10', (['(etpm + 1)'], {}), '(etpm + 1)\n', (178, 188), True, 'import numpy as np\n'), ((199, 246), 'pandas.read_csv', 'pd.read_csv', (['"""tcells_rebuttal.csv"""'], {'index_col': '(0)'}), "('tcells_rebuttal.csv', index_col=0)\n", (210, 246), True, 'import pandas as pd\n'), ((252, 283), 'GPfates.GPfates.GPfates', 'GPfates.GPfates', (['tcells', 'logexp'], {}), '(tcells, logexp)\n', (267, 283), False, 'from GPfates import GPfates\n'), ((849, 867), 'GPfates.GPfates.plt.show', 'GPfates.plt.show', ([], {}), '()\n', (865, 867), False, 'from GPfates import GPfates\n')]
|
# $Id$
#
# Copyright (C) 2003 <NAME> and Rational Discovery LLC
# All Rights Reserved
#
""" functionality to allow adjusting composite model contents
"""
from __future__ import print_function
import copy
import numpy
def BalanceComposite(model, set1, set2, weight, targetSize, names1=None, names2=None):
""" adjusts the contents of the composite model so as to maximize
  the weighted classification accuracy across the two data sets.
The resulting composite model, with _targetSize_ models, is returned.
**Notes**:
- if _names1_ and _names2_ are not provided, _set1_ and _set2_ should
      have the same ordering of columns and _model_ should already have had
      _SetInputOrder()_ called.
"""
#
# adjust the weights to be proportional to the size of the two data sets
# The normalization we do here assures that a perfect model contributes
# a score of S1+S2 to the final
#
S1 = len(set1)
S2 = len(set2)
weight1 = float(S1 + S2) * (1 - weight) / S1
weight2 = float(S1 + S2) * weight / S2
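  # Worked example (added for clarity): with S1=100, S2=50 and weight=0.5,
  # weight1 = 150*0.5/100 = 0.75 and weight2 = 150*0.5/50 = 1.5, so a model
  # that classifies every point correctly scores 100*0.75 + 50*1.5 = S1+S2.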
# print('\t:::', S1, S2, weight1, weight2)
# print('nModels:', len(model))
# start with a copy so that we get all the additional schnick-schnack
res = copy.copy(model)
res.modelList = []
res.errList = []
res.countList = []
res.quantizationRequirements = []
startSize = len(model)
scores = numpy.zeros(startSize, numpy.float)
actQuantBounds = model.GetActivityQuantBounds()
if names1 is not None:
model.SetInputOrder(names1)
for pt in set1:
pred, conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i] == ans:
scores[i] += weight1
if names2 is not None:
model.SetInputOrder(names2)
for pt in set2:
pred, conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i] == ans:
scores[i] += weight2
# normalize the scores
nPts = S1 + S2
scores /= nPts
# sort them:
bestOrder = list(numpy.argsort(scores))
bestOrder.reverse()
print('\tTAKE:', bestOrder[:targetSize])
# and now take the best set:
for i in range(targetSize):
idx = bestOrder[i]
mdl = model.modelList[idx]
res.modelList.append(mdl)
res.errList.append(1. - scores[idx])
res.countList.append(1)
# FIX: this should probably be more general:
res.quantizationRequirements.append(0)
return res
|
[
"numpy.zeros",
"numpy.argsort",
"copy.copy"
] |
[((1217, 1233), 'copy.copy', 'copy.copy', (['model'], {}), '(model)\n', (1226, 1233), False, 'import copy\n'), ((1368, 1403), 'numpy.zeros', 'numpy.zeros', (['startSize', 'numpy.float'], {}), '(startSize, numpy.float)\n', (1379, 1403), False, 'import numpy\n'), ((2215, 2236), 'numpy.argsort', 'numpy.argsort', (['scores'], {}), '(scores)\n', (2228, 2236), False, 'import numpy\n')]
|
'''
Reference: <NAME>, et al., "IRGAN: A Minimax Game for Unifying Generative and
Discriminative Information Retrieval Models." SIGIR 2017.
@author: <NAME>
'''
from neurec.model.AbstractRecommender import AbstractRecommender
import tensorflow as tf
import pickle
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from neurec.util import data_gen, reader
from neurec.evaluation import Evaluate
from neurec.util.properties import Properties
class GEN(object):
def __init__(self, itemNum, userNum, emb_dim, lamda, param=None, initdelta=0.05, learning_rate=0.05):
self.itemNum = itemNum
self.userNum = userNum
self.emb_dim = emb_dim
self.lamda = lamda # regularization parameters
self.param = param
self.initdelta = initdelta
self.learning_rate = learning_rate
self.g_params = []
with tf.variable_scope('generator'):
if self.param == None:
self.user_embeddings = tf.Variable(
tf.random_uniform([self.userNum, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,
dtype=tf.float32))
self.item_embeddings = tf.Variable(
tf.random_uniform([self.itemNum, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,
dtype=tf.float32))
self.item_bias = tf.Variable(tf.zeros([self.itemNum]))
else:
self.user_embeddings = tf.Variable(self.param[0])
self.item_embeddings = tf.Variable(self.param[1])
self.item_bias = tf.Variable(param[2])
self.g_params = [self.user_embeddings, self.item_embeddings, self.item_bias]
self.u = tf.placeholder(tf.int32)
self.i = tf.placeholder(tf.int32)
self.reward = tf.placeholder(tf.float32)
self.u_embedding = tf.nn.embedding_lookup(self.user_embeddings, self.u)
self.i_embedding = tf.nn.embedding_lookup(self.item_embeddings, self.i)
self.i_bias = tf.gather(self.item_bias, self.i)
self.all_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
self.i_prob = tf.gather(
tf.reshape(tf.nn.softmax(tf.reshape(self.all_logits, [1, -1])), [-1]),
self.i)
self.gan_loss = -tf.reduce_mean(tf.log(self.i_prob) * self.reward) + self.lamda * (
tf.nn.l2_loss(self.u_embedding) + tf.nn.l2_loss(self.i_embedding) + tf.nn.l2_loss(self.i_bias))
g_opt = tf.train.GradientDescentOptimizer(self.learning_rate)
self.gan_updates = g_opt.minimize(self.gan_loss, var_list=self.g_params)
# for test stage, self.u: [batch_size]
self.all_rating = tf.matmul(self.u_embedding, self.item_embeddings, transpose_a=False,
transpose_b=True) + self.item_bias
class DIS(object):
def __init__(self, itemNum, userNum, emb_dim, lamda, param=None, initdelta=0.05, learning_rate=0.05):
self.itemNum = itemNum
self.userNum = userNum
self.emb_dim = emb_dim
self.lamda = lamda # regularization parameters
self.param = param
self.initdelta = initdelta
self.learning_rate = learning_rate
self.d_params = []
with tf.variable_scope('discriminator'):
if self.param == None:
self.user_embeddings = tf.Variable(
tf.random_uniform([self.userNum, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,
dtype=tf.float32))
self.item_embeddings = tf.Variable(
tf.random_uniform([self.itemNum, self.emb_dim], minval=-self.initdelta, maxval=self.initdelta,
dtype=tf.float32))
self.item_bias = tf.Variable(tf.zeros([self.itemNum]))
else:
self.user_embeddings = tf.Variable(self.param[0])
self.item_embeddings = tf.Variable(self.param[1])
self.item_bias = tf.Variable(self.param[2])
self.d_params = [self.user_embeddings, self.item_embeddings, self.item_bias]
# placeholder definition
self.u = tf.placeholder(tf.int32)
self.i = tf.placeholder(tf.int32)
self.label = tf.placeholder(tf.float32)
self.u_embedding = tf.nn.embedding_lookup(self.user_embeddings, self.u)
self.i_embedding = tf.nn.embedding_lookup(self.item_embeddings, self.i)
self.i_bias = tf.gather(self.item_bias, self.i)
self.pre_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.i_embedding), 1) + self.i_bias
self.pre_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.label,
logits=self.pre_logits) + self.lamda * (
tf.nn.l2_loss(self.u_embedding) + tf.nn.l2_loss(self.i_embedding) + tf.nn.l2_loss(self.i_bias)
)
d_opt = tf.train.GradientDescentOptimizer(self.learning_rate)
self.d_updates = d_opt.minimize(self.pre_loss, var_list=self.d_params)
self.reward_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.i_embedding),
1) + self.i_bias
self.reward = 2 * (tf.sigmoid(self.reward_logits) - 0.5)
# for test stage, self.u: [batch_size]
self.all_rating = tf.matmul(self.u_embedding, self.item_embeddings, transpose_a=False,
transpose_b=True) + self.item_bias
self.all_logits = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
self.NLL = -tf.reduce_mean(tf.log(
tf.gather(tf.reshape(tf.nn.softmax(tf.reshape(self.all_logits, [1, -1])), [-1]), self.i))
)
# for dns sample
self.dns_rating = tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias
class IRGAN(AbstractRecommender):
properties = [
"factors_num",
"lr",
"g_reg",
"d_reg",
"epochs",
"g_epoch",
"d_epoch",
"batch_size",
"d_tau",
"topk",
"pretrain_file"
]
def __init__(self, sess, dataset):
        super(IRGAN, self).__init__()
        self.sess = sess        # referenced later by the training and prediction methods
        self.dataset = dataset  # referenced later by Evaluate.test_model
train_matrix = dataset.trainMatrix.tocsr()
self.num_users, self.num_items = train_matrix.shape
self.factors_num = self.conf["factors_num"]
self.lr = self.conf["lr"]
self.g_reg = self.conf["g_reg"]
self.d_reg = self.conf["d_reg"]
self.epochs = self.conf["epochs"]
self.g_epoch = self.conf["g_epoch"]
self.d_epoch = self.conf["d_epoch"]
self.batch_size = self.conf["batch_size"]
self.d_tau = self.conf["d_tau"]
self.topK = self.conf["topk"]
self.pretrain_file = self.conf["pretrain_file"]
self.loss_function = "None"
idx_value_dict = {}
for idx, value in enumerate(train_matrix):
if any(value.indices):
idx_value_dict[idx] = value.indices
self.user_pos_train = idx_value_dict
self.num_users, self.num_items = dataset.num_users, dataset.num_items
self.all_items = np.arange(self.num_items)
def build_graph(self):
file = reader.lines(self.pretrain_file)
pretrain_params = pickle.load(file, encoding="latin")
self.generator = GEN(self.num_items, self.num_users, self.factors_num, self.g_reg, param=pretrain_params,
learning_rate=self.lr)
self.discriminator = DIS(self.num_items, self.num_users, self.factors_num, self.d_reg, param=None,
learning_rate=self.lr)
def get_train_data(self):
users_list, items_list, labels_list = [], [], []
train_users = list(self.user_pos_train.keys())
with ThreadPoolExecutor() as executor:
data = executor.map(self.get_train_data_one_user, train_users)
data = list(data)
for users, items, labels in data:
users_list.extend(users)
items_list.extend(items)
labels_list.extend(labels)
return users_list, items_list, labels_list
def get_train_data_one_user(self, user):
user_list, items_list, label_list = [], [], []
pos = self.user_pos_train[user]
rating = self.sess.run(self.generator.all_rating, {self.generator.u: [user]})
rating = np.reshape(rating, [-1])
rating = np.array(rating) / self.d_tau # Temperature
exp_rating = np.exp(rating)
prob = exp_rating / np.sum(exp_rating)
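        # Sample one candidate item per observed positive from the generator's softmax
        # distribution; these act as the label-0 examples for the discriminator below.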
neg = np.random.choice(self.all_items, size=len(pos), p=prob)
for i, j in zip(pos, neg):
user_list.append(user)
items_list.append(i)
label_list.append(1.0)
user_list.append(user)
items_list.append(j)
label_list.append(0.0)
return (user_list, items_list, label_list)
def train_model(self):
for _ in range(self.epochs):
for _ in range(self.d_epoch):
users_list, items_list, labels_list = self.get_train_data()
self.training_discriminator(users_list, items_list, labels_list)
for _ in range(self.g_epoch):
self.training_generator()
Evaluate.test_model(self, self.dataset)
def training_discriminator(self, user, item, label):
num_training_instances = len(user)
for num_batch in np.arange(int(num_training_instances / self.batch_size)):
            bat_users, bat_items, bat_labels = \
                data_gen._get_pointwise_batch_data(user, item, label, num_batch, self.batch_size)
            feed = {self.discriminator.u: bat_users,
                    self.discriminator.i: bat_items,
                    self.discriminator.label: bat_labels}
self.sess.run(self.discriminator.d_updates, feed_dict=feed)
def training_generator(self):
for user, pos in self.user_pos_train.items():
sample_lambda = 0.2
rating = self.sess.run(self.generator.all_logits, {self.generator.u: user})
exp_rating = np.exp(rating)
prob = exp_rating / np.sum(exp_rating) # prob is generator distribution p_\theta
pn = (1 - sample_lambda) * prob
pn[pos] += sample_lambda * 1.0 / len(pos)
# Now, pn is the Pn in importance sampling, prob is generator distribution p_\theta
sample = np.random.choice(self.all_items, 2 * len(pos), p=pn)
###########################################################################
# Get reward and adapt it with importance sampling
###########################################################################
feed = {self.discriminator.u: user, self.discriminator.i: sample}
reward = self.sess.run(self.discriminator.reward, feed_dict=feed)
reward = reward * prob[sample] / pn[sample]
###########################################################################
# Update G
###########################################################################
feed = {self.generator.u: user, self.generator.i: sample, self.generator.reward: reward}
self.sess.run(self.generator.gan_updates, feed_dict=feed)
def predict(self, user_id, items):
user_embedding, item_embedding, item_bias = self.sess.run(self.generator.g_params)
u_embedding = user_embedding[user_id]
item_embedding = item_embedding[items]
item_bias = item_bias[items]
ratings = np.matmul(u_embedding, item_embedding.T) + item_bias
return ratings
|
[
"numpy.sum",
"tensorflow.reshape",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.multiply",
"tensorflow.matmul",
"pickle.load",
"numpy.arange",
"numpy.exp",
"tensorflow.Variable",
"neurec.util.data_gen._get_pointwise_batch_data",
"tensorflow.gather",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"numpy.reshape",
"neurec.evaluation.Evaluate.test_model",
"concurrent.futures.ThreadPoolExecutor",
"tensorflow.nn.embedding_lookup",
"tensorflow.log",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.random_uniform",
"neurec.util.reader.lines",
"tensorflow.zeros",
"numpy.array",
"numpy.matmul",
"tensorflow.nn.l2_loss",
"tensorflow.sigmoid"
] |
[((1778, 1802), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (1792, 1802), True, 'import tensorflow as tf\n'), ((1820, 1844), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (1834, 1844), True, 'import tensorflow as tf\n'), ((1867, 1893), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1881, 1893), True, 'import tensorflow as tf\n'), ((1922, 1974), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.user_embeddings', 'self.u'], {}), '(self.user_embeddings, self.u)\n', (1944, 1974), True, 'import tensorflow as tf\n'), ((2002, 2054), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.item_embeddings', 'self.i'], {}), '(self.item_embeddings, self.i)\n', (2024, 2054), True, 'import tensorflow as tf\n'), ((2077, 2110), 'tensorflow.gather', 'tf.gather', (['self.item_bias', 'self.i'], {}), '(self.item_bias, self.i)\n', (2086, 2110), True, 'import tensorflow as tf\n'), ((2579, 2632), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (2612, 2632), True, 'import tensorflow as tf\n'), ((4287, 4311), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (4301, 4311), True, 'import tensorflow as tf\n'), ((4329, 4353), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (4343, 4353), True, 'import tensorflow as tf\n'), ((4375, 4401), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4389, 4401), True, 'import tensorflow as tf\n'), ((4430, 4482), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.user_embeddings', 'self.u'], {}), '(self.user_embeddings, self.u)\n', (4452, 4482), True, 'import tensorflow as tf\n'), ((4510, 4562), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.item_embeddings', 'self.i'], {}), '(self.item_embeddings, self.i)\n', (4532, 4562), True, 'import tensorflow as tf\n'), ((4585, 4618), 'tensorflow.gather', 'tf.gather', (['self.item_bias', 'self.i'], {}), '(self.item_bias, self.i)\n', (4594, 4618), True, 'import tensorflow as tf\n'), ((5048, 5101), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (5081, 5101), True, 'import tensorflow as tf\n'), ((7348, 7373), 'numpy.arange', 'np.arange', (['self.num_items'], {}), '(self.num_items)\n', (7357, 7373), True, 'import numpy as np\n'), ((7417, 7449), 'neurec.util.reader.lines', 'reader.lines', (['self.pretrain_file'], {}), '(self.pretrain_file)\n', (7429, 7449), False, 'from neurec.util import data_gen, reader\n'), ((7476, 7511), 'pickle.load', 'pickle.load', (['file'], {'encoding': '"""latin"""'}), "(file, encoding='latin')\n", (7487, 7511), False, 'import pickle\n'), ((8584, 8608), 'numpy.reshape', 'np.reshape', (['rating', '[-1]'], {}), '(rating, [-1])\n', (8594, 8608), True, 'import numpy as np\n'), ((8692, 8706), 'numpy.exp', 'np.exp', (['rating'], {}), '(rating)\n', (8698, 8706), True, 'import numpy as np\n'), ((879, 909), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""generator"""'], {}), "('generator')\n", (896, 909), True, 'import tensorflow as tf\n'), ((2788, 2878), 'tensorflow.matmul', 'tf.matmul', (['self.u_embedding', 'self.item_embeddings'], {'transpose_a': '(False)', 'transpose_b': '(True)'}), '(self.u_embedding, self.item_embeddings, transpose_a=False,\n transpose_b=True)\n', (2797, 
2878), True, 'import tensorflow as tf\n'), ((3350, 3384), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""discriminator"""'], {}), "('discriminator')\n", (3367, 3384), True, 'import tensorflow as tf\n'), ((4750, 4837), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'self.label', 'logits': 'self.pre_logits'}), '(labels=self.label, logits=self.\n pre_logits)\n', (4789, 4837), True, 'import tensorflow as tf\n'), ((5473, 5563), 'tensorflow.matmul', 'tf.matmul', (['self.u_embedding', 'self.item_embeddings'], {'transpose_a': '(False)', 'transpose_b': '(True)'}), '(self.u_embedding, self.item_embeddings, transpose_a=False,\n transpose_b=True)\n', (5482, 5563), True, 'import tensorflow as tf\n'), ((7997, 8017), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (8015, 8017), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((8626, 8642), 'numpy.array', 'np.array', (['rating'], {}), '(rating)\n', (8634, 8642), True, 'import numpy as np\n'), ((8735, 8753), 'numpy.sum', 'np.sum', (['exp_rating'], {}), '(exp_rating)\n', (8741, 8753), True, 'import numpy as np\n'), ((9770, 9856), 'neurec.util.data_gen._get_pointwise_batch_data', 'data_gen._get_pointwise_batch_data', (['user', 'item', 'label', 'num_batch', 'self.batch_size'], {}), '(user, item, label, num_batch, self.\n batch_size)\n', (9804, 9856), False, 'from neurec.util import data_gen, reader\n'), ((10323, 10337), 'numpy.exp', 'np.exp', (['rating'], {}), '(rating)\n', (10329, 10337), True, 'import numpy as np\n'), ((11803, 11843), 'numpy.matmul', 'np.matmul', (['u_embedding', 'item_embedding.T'], {}), '(u_embedding, item_embedding.T)\n', (11812, 11843), True, 'import numpy as np\n'), ((1522, 1548), 'tensorflow.Variable', 'tf.Variable', (['self.param[0]'], {}), '(self.param[0])\n', (1533, 1548), True, 'import tensorflow as tf\n'), ((1588, 1614), 'tensorflow.Variable', 'tf.Variable', (['self.param[1]'], {}), '(self.param[1])\n', (1599, 1614), True, 'import tensorflow as tf\n'), ((1648, 1669), 'tensorflow.Variable', 'tf.Variable', (['param[2]'], {}), '(param[2])\n', (1659, 1669), True, 'import tensorflow as tf\n'), ((2152, 2203), 'tensorflow.multiply', 'tf.multiply', (['self.u_embedding', 'self.item_embeddings'], {}), '(self.u_embedding, self.item_embeddings)\n', (2163, 2203), True, 'import tensorflow as tf\n'), ((3997, 4023), 'tensorflow.Variable', 'tf.Variable', (['self.param[0]'], {}), '(self.param[0])\n', (4008, 4023), True, 'import tensorflow as tf\n'), ((4063, 4089), 'tensorflow.Variable', 'tf.Variable', (['self.param[1]'], {}), '(self.param[1])\n', (4074, 4089), True, 'import tensorflow as tf\n'), ((4123, 4149), 'tensorflow.Variable', 'tf.Variable', (['self.param[2]'], {}), '(self.param[2])\n', (4134, 4149), True, 'import tensorflow as tf\n'), ((4660, 4707), 'tensorflow.multiply', 'tf.multiply', (['self.u_embedding', 'self.i_embedding'], {}), '(self.u_embedding, self.i_embedding)\n', (4671, 4707), True, 'import tensorflow as tf\n'), ((5225, 5272), 'tensorflow.multiply', 'tf.multiply', (['self.u_embedding', 'self.i_embedding'], {}), '(self.u_embedding, self.i_embedding)\n', (5236, 5272), True, 'import tensorflow as tf\n'), ((5361, 5391), 'tensorflow.sigmoid', 'tf.sigmoid', (['self.reward_logits'], {}), '(self.reward_logits)\n', (5371, 5391), True, 'import tensorflow as tf\n'), ((5654, 5705), 'tensorflow.multiply', 'tf.multiply', (['self.u_embedding', 'self.item_embeddings'], {}), '(self.u_embedding, self.item_embeddings)\n', (5665, 
5705), True, 'import tensorflow as tf\n'), ((5947, 5998), 'tensorflow.multiply', 'tf.multiply', (['self.u_embedding', 'self.item_embeddings'], {}), '(self.u_embedding, self.item_embeddings)\n', (5958, 5998), True, 'import tensorflow as tf\n'), ((9481, 9520), 'neurec.evaluation.Evaluate.test_model', 'Evaluate.test_model', (['self', 'self.dataset'], {}), '(self, self.dataset)\n', (9500, 9520), False, 'from neurec.evaluation import Evaluate\n'), ((10370, 10388), 'numpy.sum', 'np.sum', (['exp_rating'], {}), '(exp_rating)\n', (10376, 10388), True, 'import numpy as np\n'), ((1018, 1134), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.userNum, self.emb_dim]'], {'minval': '(-self.initdelta)', 'maxval': 'self.initdelta', 'dtype': 'tf.float32'}), '([self.userNum, self.emb_dim], minval=-self.initdelta,\n maxval=self.initdelta, dtype=tf.float32)\n', (1035, 1134), True, 'import tensorflow as tf\n'), ((1242, 1358), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.itemNum, self.emb_dim]'], {'minval': '(-self.initdelta)', 'maxval': 'self.initdelta', 'dtype': 'tf.float32'}), '([self.itemNum, self.emb_dim], minval=-self.initdelta,\n maxval=self.initdelta, dtype=tf.float32)\n', (1259, 1358), True, 'import tensorflow as tf\n'), ((1439, 1463), 'tensorflow.zeros', 'tf.zeros', (['[self.itemNum]'], {}), '([self.itemNum])\n', (1447, 1463), True, 'import tensorflow as tf\n'), ((2295, 2331), 'tensorflow.reshape', 'tf.reshape', (['self.all_logits', '[1, -1]'], {}), '(self.all_logits, [1, -1])\n', (2305, 2331), True, 'import tensorflow as tf\n'), ((2534, 2560), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.i_bias'], {}), '(self.i_bias)\n', (2547, 2560), True, 'import tensorflow as tf\n'), ((3493, 3609), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.userNum, self.emb_dim]'], {'minval': '(-self.initdelta)', 'maxval': 'self.initdelta', 'dtype': 'tf.float32'}), '([self.userNum, self.emb_dim], minval=-self.initdelta,\n maxval=self.initdelta, dtype=tf.float32)\n', (3510, 3609), True, 'import tensorflow as tf\n'), ((3717, 3833), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.itemNum, self.emb_dim]'], {'minval': '(-self.initdelta)', 'maxval': 'self.initdelta', 'dtype': 'tf.float32'}), '([self.itemNum, self.emb_dim], minval=-self.initdelta,\n maxval=self.initdelta, dtype=tf.float32)\n', (3734, 3833), True, 'import tensorflow as tf\n'), ((3914, 3938), 'tensorflow.zeros', 'tf.zeros', (['[self.itemNum]'], {}), '([self.itemNum])\n', (3922, 3938), True, 'import tensorflow as tf\n'), ((4994, 5020), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.i_bias'], {}), '(self.i_bias)\n', (5007, 5020), True, 'import tensorflow as tf\n'), ((2402, 2421), 'tensorflow.log', 'tf.log', (['self.i_prob'], {}), '(self.i_prob)\n', (2408, 2421), True, 'import tensorflow as tf\n'), ((2466, 2497), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.u_embedding'], {}), '(self.u_embedding)\n', (2479, 2497), True, 'import tensorflow as tf\n'), ((2500, 2531), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.i_embedding'], {}), '(self.i_embedding)\n', (2513, 2531), True, 'import tensorflow as tf\n'), ((4926, 4957), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.u_embedding'], {}), '(self.u_embedding)\n', (4939, 4957), True, 'import tensorflow as tf\n'), ((4960, 4991), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.i_embedding'], {}), '(self.i_embedding)\n', (4973, 4991), True, 'import tensorflow as tf\n'), ((5817, 5853), 'tensorflow.reshape', 'tf.reshape', (['self.all_logits', '[1, -1]'], {}), 
'(self.all_logits, [1, -1])\n', (5827, 5853), True, 'import tensorflow as tf\n')]
|
import tensorflow as tf
from tensorflow import keras
from utils import data_utils, argmanager
from utils.loss import multinomial_nll
import numpy as np
import os
import json
import scipy
import sklearn.metrics
import scipy.stats
import scipy.spatial.distance  # provides scipy.spatial.distance.jensenshannon used below
from collections import OrderedDict
def softmax(x, temp=1):
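    # Shift the logits by their per-row mean before exponentiating (keeps exp() well
    # behaved); temp rescales the logits, sharpening or flattening the profile.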
norm_x = x - np.mean(x,axis=1, keepdims=True)
return np.exp(temp*norm_x)/np.sum(np.exp(temp*norm_x), axis=1, keepdims=True)
def get_jsd(preds, cts, min_tot_cts=10):
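    # Per-region Jensen-Shannon distance between predicted and observed profiles,
    # skipping regions whose total observed counts do not exceed min_tot_cts.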
return np.array([scipy.spatial.distance.jensenshannon(x,y) for x,y in zip(preds, cts) \
if y.sum()>min_tot_cts])
def main():
args = argmanager.fetch_metrics_args()
print(args)
# load model
with keras.utils.CustomObjectScope({'multinomial_nll':multinomial_nll, 'tf':tf}):
model = keras.models.load_model(args.model)
inputlen = int(model.input_shape[1])
outputlen = int(model.output_shape[0][1])
# load data
test_peaks_seqs, test_peaks_cts, \
test_nonpeaks_seqs, test_nonpeaks_cts = data_utils.load_test_data(
args.peaks, args.nonpeaks, args.genome, args.bigwig,
args.test_chr, inputlen, outputlen
)
# predict on peaks and nonpeaks
test_peaks_pred_logits, test_peaks_pred_logcts = \
model.predict(test_peaks_seqs,
batch_size=args.batch_size,
verbose=True)
test_nonpeaks_pred_logits, test_nonpeaks_pred_logcts = \
model.predict(test_nonpeaks_seqs,
batch_size=args.batch_size,
verbose=True)
metrics = OrderedDict()
# counts metrics
all_test_logcts = np.log(1 + np.vstack([test_peaks_cts, test_nonpeaks_cts]).sum(-1))
cur_pair = (all_test_logcts,
np.vstack([test_peaks_pred_logcts,
test_nonpeaks_pred_logcts]).ravel())
metrics['bpnet_cts_pearson_peaks_nonpeaks'] = scipy.stats.pearsonr(*cur_pair)[0]
metrics['bpnet_cts_spearman_peaks_nonpeaks'] = scipy.stats.spearmanr(*cur_pair)[0]
cur_pair = ([1]*len(test_peaks_pred_logcts) + [0]*len(test_nonpeaks_pred_logcts),
np.vstack([test_peaks_pred_logcts,
test_nonpeaks_pred_logcts]).ravel())
metrics['binary_auc'] = sklearn.metrics.roc_auc_score(*cur_pair)
peaks_test_logcts = np.log(1 + test_peaks_cts.sum(-1))
cur_pair = (peaks_test_logcts, test_peaks_pred_logcts.ravel())
metrics['bpnet_cts_pearson_peaks'] = scipy.stats.pearsonr(*cur_pair)[0]
metrics['bpnet_cts_spearman_peaks'] = scipy.stats.spearmanr(*cur_pair)[0]
# profile metrics (all within peaks)
cur_pair = (softmax(test_peaks_pred_logits), test_peaks_cts)
metrics['bpnet_profile_median_jsd_peaks'] = np.median(get_jsd(*cur_pair))
cur_pair = (softmax(test_peaks_pred_logits),
test_peaks_cts[:, np.random.permutation(test_peaks_cts.shape[1])])
metrics['bpnet_profile_median_jsd_peaks_randomized'] = np.median(get_jsd(*cur_pair))
with open(args.output_prefix + ".metrics.json", "w") as f:
json.dump(metrics, f, ensure_ascii=False, indent=4)
if __name__=="__main__":
main()
|
[
"json.dump",
"tensorflow.keras.utils.CustomObjectScope",
"tensorflow.keras.models.load_model",
"scipy.spatial.distance.jensenshannon",
"utils.argmanager.fetch_metrics_args",
"scipy.stats.spearmanr",
"scipy.stats.pearsonr",
"numpy.mean",
"numpy.exp",
"utils.data_utils.load_test_data",
"numpy.random.permutation",
"collections.OrderedDict",
"numpy.vstack"
] |
[((629, 660), 'utils.argmanager.fetch_metrics_args', 'argmanager.fetch_metrics_args', ([], {}), '()\n', (658, 660), False, 'from utils import data_utils, argmanager\n'), ((1020, 1139), 'utils.data_utils.load_test_data', 'data_utils.load_test_data', (['args.peaks', 'args.nonpeaks', 'args.genome', 'args.bigwig', 'args.test_chr', 'inputlen', 'outputlen'], {}), '(args.peaks, args.nonpeaks, args.genome, args.\n bigwig, args.test_chr, inputlen, outputlen)\n', (1045, 1139), False, 'from utils import data_utils, argmanager\n'), ((1667, 1680), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1678, 1680), False, 'from collections import OrderedDict\n'), ((308, 341), 'numpy.mean', 'np.mean', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (315, 341), True, 'import numpy as np\n'), ((352, 373), 'numpy.exp', 'np.exp', (['(temp * norm_x)'], {}), '(temp * norm_x)\n', (358, 373), True, 'import numpy as np\n'), ((704, 781), 'tensorflow.keras.utils.CustomObjectScope', 'keras.utils.CustomObjectScope', (["{'multinomial_nll': multinomial_nll, 'tf': tf}"], {}), "({'multinomial_nll': multinomial_nll, 'tf': tf})\n", (733, 781), False, 'from tensorflow import keras\n'), ((797, 832), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['args.model'], {}), '(args.model)\n', (820, 832), False, 'from tensorflow import keras\n'), ((1990, 2021), 'scipy.stats.pearsonr', 'scipy.stats.pearsonr', (['*cur_pair'], {}), '(*cur_pair)\n', (2010, 2021), False, 'import scipy\n'), ((2076, 2108), 'scipy.stats.spearmanr', 'scipy.stats.spearmanr', (['*cur_pair'], {}), '(*cur_pair)\n', (2097, 2108), False, 'import scipy\n'), ((2581, 2612), 'scipy.stats.pearsonr', 'scipy.stats.pearsonr', (['*cur_pair'], {}), '(*cur_pair)\n', (2601, 2612), False, 'import scipy\n'), ((2658, 2690), 'scipy.stats.spearmanr', 'scipy.stats.spearmanr', (['*cur_pair'], {}), '(*cur_pair)\n', (2679, 2690), False, 'import scipy\n'), ((3175, 3226), 'json.dump', 'json.dump', (['metrics', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(metrics, f, ensure_ascii=False, indent=4)\n', (3184, 3226), False, 'import json\n'), ((379, 400), 'numpy.exp', 'np.exp', (['(temp * norm_x)'], {}), '(temp * norm_x)\n', (385, 400), True, 'import numpy as np\n'), ((487, 529), 'scipy.spatial.distance.jensenshannon', 'scipy.spatial.distance.jensenshannon', (['x', 'y'], {}), '(x, y)\n', (523, 529), False, 'import scipy\n'), ((1841, 1903), 'numpy.vstack', 'np.vstack', (['[test_peaks_pred_logcts, test_nonpeaks_pred_logcts]'], {}), '([test_peaks_pred_logcts, test_nonpeaks_pred_logcts])\n', (1850, 1903), True, 'import numpy as np\n'), ((2217, 2279), 'numpy.vstack', 'np.vstack', (['[test_peaks_pred_logcts, test_nonpeaks_pred_logcts]'], {}), '([test_peaks_pred_logcts, test_nonpeaks_pred_logcts])\n', (2226, 2279), True, 'import numpy as np\n'), ((2965, 3011), 'numpy.random.permutation', 'np.random.permutation', (['test_peaks_cts.shape[1]'], {}), '(test_peaks_cts.shape[1])\n', (2986, 3011), True, 'import numpy as np\n'), ((1736, 1782), 'numpy.vstack', 'np.vstack', (['[test_peaks_cts, test_nonpeaks_cts]'], {}), '([test_peaks_cts, test_nonpeaks_cts])\n', (1745, 1782), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""
"""
import numpy as np
from scipy.odr import Model
from scipy.optimize import leastsq
from scipy import ndimage
from scipy.ndimage import gaussian_gradient_magnitude
from scipy.ndimage import map_coordinates
from common import PIX_ERR
from features import line_profile
def contour(img, A0, R0, phi1=-np.pi/2, phi2=np.pi/2, dphi=np.pi/180, DR=0.2,
sigma=3):
#this is just a rough draft not intended to be working
y0, x0 = A0
phi = np.arange(phi1, phi2, dphi)
x1 = x0+R0*(1-DR)*np.cos(phi)
y1 = y0+R0*(1-DR)*np.sin(phi)
x2 = x0+R0*(1+DR)*np.cos(phi)
y2 = y0+R0*(1+DR)*np.sin(phi)
rim=[]
Nphi, = phi.shape
for i in range(Nphi):
A1 = np.asarray(((y1[i],x1[i]),(PIX_ERR, PIX_ERR)))
A2 = np.asarray(((y2[i],x2[i]),(PIX_ERR, PIX_ERR)))
        metrics, metrics_err, profile = line_profile(img, A1, A2)  # A1/A2 already hold the endpoints for this angle
rel_rim = find_rim(profile, sigma)*metrics
real_rim = A1 + rel_rim
rim.append(real_rim)
return rim
def find_rim(profile, sigma=3):
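    # Smooth the intensity profile, then take the position of the steepest
    # gradient as the rim location.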
grad = ndimage.gaussian_gradient_magnitude(
ndimage.gaussian_filter1d(profile,sigma) , sigma)
return np.argmax(grad)
def line_from_points(point1, point2):
"""
@param point1: array in numpy order = (y,x)
@param point2:
"""
k = (point2 - point1)[0] / (point2 - point1)[1]
b = point1[0] - k * point1[1]
return k, b
def line_perpendicular(k,b,x):
"""
@param k: y=kx+b
@param b: y=kx+b
@param x: where the perpendicular has to intersect the line
"""
# y = k*x+b
k_perp = -1./k
b_perp = (k - k_perp) * x + b
return k_perp, b_perp
def circle_fcn(B, x, y):
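    # Implicit circle model: B = (radius, x-centre, y-centre); the residual is
    # zero for points lying exactly on the circle.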
return B[0]**2 - (B[1]-x)**2 - (B[2]-y)**2
def _circle_fjacb(B,x,y):
fjacb = np.empty((x.shape[0],3))
fjacb[:,0] = 2*B[0]
fjacb[:,1] = -2*(B[1]-x)
fjacb[:,2] = -2*(B[2]-y)
return fjacb
def _circle_fjacd(B,x,y):
fjacd = np.empty((x.shape[0],2))
fjacd[:,0] = 2*(B[1]-x)
    fjacd[:,1] = 2*(B[2]-y)  # derivative w.r.t. y uses the y-centre B[2], not B[1]
return fjacd
def _circle_est(x,y):
return np.mean((x.ptp(), y.ptp()))/2.0, x.mean(), y.mean()
def _circle_meta():
return {'name':'Equation of a circle'}
circle_model = Model(circle_fcn, estimate=_circle_est,
fjacb=_circle_fjacb, fjacd=_circle_fjacd,
meta=_circle_meta, implicit=True)
def FitCircle(x,y):
'''
leastsq without errors
'''
return leastsq(circle_fcn, _circle_est(x,y), (x, y), Dfun=_circle_fjacb, full_output=1)
def section_profile(img, point1, point2):
'''define the brightness profile along the line defined by 2 points
coordinates of points with their errors are supplied as numpy arrays
in notation array((y,x),(dy,dx))!
might as well submit other options to map_coordinates function
it is assumed that pipette is more or less horizontal
so that axis intersects left and right image sides
'''
# define the line going though 2 points
y1,x1,dy1,dx1 = point1.flatten()
y2,x2,dy2,dx2 = point2.flatten()
k = (y2 - y1) / (x2 - x1)
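    # Propagate the pixel-position uncertainties of both points into the slope error.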
dk = np.sqrt(dy1*dy1 + dy2*dy2 + k*k*(dx1*dx1+dx2*dx2) )/np.fabs(x2-x1)
# number of points for profile
# it is assumed that pipette is more or less horizontal
# so that axis intersects left and right image sides
nPoints = int(max(np.fabs(y2-y1), np.fabs(x2-x1)))
#coordinates of points in the profile
x = np.linspace(x1, x2, nPoints)
y = np.linspace(y1, y2, nPoints)
#calculate profile metric - coefficient for lengths in profile vs pixels
if np.fabs(k) <=1:
metric = np.sqrt(1 + k*k)
metric_err = np.fabs(k)*dk/metric
else:
metric = np.sqrt(1 + 1/(k*k))
metric_err = dk/np.fabs(metric * k*k*k)
#output interpolated values at points of profile and profile metric
return metric, metric_err, map_coordinates(img, [y, x], output = float)
def CircleFunc(r, N=100):
phi = np.linspace(0,2*np.pi,N)
return r*np.cos(phi), r*np.sin(phi)
def VesicleEdge_phc(img, x0, y0, r0, N=100, phi1=0, phi2=2*np.pi, sigma=1):
Xedge = np.empty(N)
Yedge = np.empty(N)
for i, phi in enumerate(np.linspace(phi1, phi2, N)):
x = x0+r0*np.cos(phi)
y = y0+r0*np.sin(phi)
if x < 0:
x = 0
y = y0+(x-x0)*np.tan(phi)
elif x > img.shape[1]-1:
x = img.shape[1]-1
y = y0+(x-x0)*np.tan(phi)
if y < 0:
y = 0
x = x0+(y-y0)/np.tan(phi)
elif y > img.shape[0]-1:
            y = img.shape[0]-1  # clamp to the last row: y runs along image axis 0
x = x0+(y-y0)/np.tan(phi)
point1 = np.asarray(((y0,x0),(PIX_ERR, PIX_ERR)))
point2 = np.asarray(((y,x),(PIX_ERR, PIX_ERR)))
metric, metric_err, line = section_profile(img, point1, point2)
grad = gaussian_gradient_magnitude(line,sigma)
pos = np.argmax(grad)
Xedge[i] = x0+pos*np.cos(phi)*metric
Yedge[i] = y0+pos*np.sin(phi)*metric
return Xedge, Yedge
|
[
"scipy.ndimage.gaussian_filter1d",
"numpy.argmax",
"scipy.odr.Model",
"numpy.empty",
"numpy.asarray",
"features.line_profile",
"scipy.ndimage.gaussian_gradient_magnitude",
"numpy.fabs",
"numpy.arange",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.tan",
"scipy.ndimage.map_coordinates",
"numpy.sqrt"
] |
[((2221, 2341), 'scipy.odr.Model', 'Model', (['circle_fcn'], {'estimate': '_circle_est', 'fjacb': '_circle_fjacb', 'fjacd': '_circle_fjacd', 'meta': '_circle_meta', 'implicit': '(True)'}), '(circle_fcn, estimate=_circle_est, fjacb=_circle_fjacb, fjacd=\n _circle_fjacd, meta=_circle_meta, implicit=True)\n', (2226, 2341), False, 'from scipy.odr import Model\n'), ((479, 506), 'numpy.arange', 'np.arange', (['phi1', 'phi2', 'dphi'], {}), '(phi1, phi2, dphi)\n', (488, 506), True, 'import numpy as np\n'), ((1184, 1199), 'numpy.argmax', 'np.argmax', (['grad'], {}), '(grad)\n', (1193, 1199), True, 'import numpy as np\n'), ((1794, 1819), 'numpy.empty', 'np.empty', (['(x.shape[0], 3)'], {}), '((x.shape[0], 3))\n', (1802, 1819), True, 'import numpy as np\n'), ((1957, 1982), 'numpy.empty', 'np.empty', (['(x.shape[0], 2)'], {}), '((x.shape[0], 2))\n', (1965, 1982), True, 'import numpy as np\n'), ((3442, 3470), 'numpy.linspace', 'np.linspace', (['x1', 'x2', 'nPoints'], {}), '(x1, x2, nPoints)\n', (3453, 3470), True, 'import numpy as np\n'), ((3479, 3507), 'numpy.linspace', 'np.linspace', (['y1', 'y2', 'nPoints'], {}), '(y1, y2, nPoints)\n', (3490, 3507), True, 'import numpy as np\n'), ((3966, 3994), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'N'], {}), '(0, 2 * np.pi, N)\n', (3977, 3994), True, 'import numpy as np\n'), ((4124, 4135), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (4132, 4135), True, 'import numpy as np\n'), ((4148, 4159), 'numpy.empty', 'np.empty', (['N'], {}), '(N)\n', (4156, 4159), True, 'import numpy as np\n'), ((719, 767), 'numpy.asarray', 'np.asarray', (['((y1[i], x1[i]), (PIX_ERR, PIX_ERR))'], {}), '(((y1[i], x1[i]), (PIX_ERR, PIX_ERR)))\n', (729, 767), True, 'import numpy as np\n'), ((779, 827), 'numpy.asarray', 'np.asarray', (['((y2[i], x2[i]), (PIX_ERR, PIX_ERR))'], {}), '(((y2[i], x2[i]), (PIX_ERR, PIX_ERR)))\n', (789, 827), True, 'import numpy as np\n'), ((866, 897), 'features.line_profile', 'line_profile', (['img', 'A1[i]', 'A2[i]'], {}), '(img, A1[i], A2[i])\n', (878, 897), False, 'from features import line_profile\n'), ((1123, 1164), 'scipy.ndimage.gaussian_filter1d', 'ndimage.gaussian_filter1d', (['profile', 'sigma'], {}), '(profile, sigma)\n', (1148, 1164), False, 'from scipy import ndimage\n'), ((3116, 3180), 'numpy.sqrt', 'np.sqrt', (['(dy1 * dy1 + dy2 * dy2 + k * k * (dx1 * dx1 + dx2 * dx2))'], {}), '(dy1 * dy1 + dy2 * dy2 + k * k * (dx1 * dx1 + dx2 * dx2))\n', (3123, 3180), True, 'import numpy as np\n'), ((3168, 3184), 'numpy.fabs', 'np.fabs', (['(x2 - x1)'], {}), '(x2 - x1)\n', (3175, 3184), True, 'import numpy as np\n'), ((3593, 3603), 'numpy.fabs', 'np.fabs', (['k'], {}), '(k)\n', (3600, 3603), True, 'import numpy as np\n'), ((3626, 3644), 'numpy.sqrt', 'np.sqrt', (['(1 + k * k)'], {}), '(1 + k * k)\n', (3633, 3644), True, 'import numpy as np\n'), ((3712, 3736), 'numpy.sqrt', 'np.sqrt', (['(1 + 1 / (k * k))'], {}), '(1 + 1 / (k * k))\n', (3719, 3736), True, 'import numpy as np\n'), ((3884, 3926), 'scipy.ndimage.map_coordinates', 'map_coordinates', (['img', '[y, x]'], {'output': 'float'}), '(img, [y, x], output=float)\n', (3899, 3926), False, 'from scipy.ndimage import map_coordinates\n'), ((4188, 4214), 'numpy.linspace', 'np.linspace', (['phi1', 'phi2', 'N'], {}), '(phi1, phi2, N)\n', (4199, 4214), True, 'import numpy as np\n'), ((4655, 4697), 'numpy.asarray', 'np.asarray', (['((y0, x0), (PIX_ERR, PIX_ERR))'], {}), '(((y0, x0), (PIX_ERR, PIX_ERR)))\n', (4665, 4697), True, 'import numpy as np\n'), ((4713, 4753), 'numpy.asarray', 'np.asarray', (['((y, 
x), (PIX_ERR, PIX_ERR))'], {}), '(((y, x), (PIX_ERR, PIX_ERR)))\n', (4723, 4753), True, 'import numpy as np\n'), ((4839, 4879), 'scipy.ndimage.gaussian_gradient_magnitude', 'gaussian_gradient_magnitude', (['line', 'sigma'], {}), '(line, sigma)\n', (4866, 4879), False, 'from scipy.ndimage import gaussian_gradient_magnitude\n'), ((4893, 4908), 'numpy.argmax', 'np.argmax', (['grad'], {}), '(grad)\n', (4902, 4908), True, 'import numpy as np\n'), ((530, 541), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (536, 541), True, 'import numpy as np\n'), ((565, 576), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (571, 576), True, 'import numpy as np\n'), ((600, 611), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (606, 611), True, 'import numpy as np\n'), ((635, 646), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (641, 646), True, 'import numpy as np\n'), ((3358, 3374), 'numpy.fabs', 'np.fabs', (['(y2 - y1)'], {}), '(y2 - y1)\n', (3365, 3374), True, 'import numpy as np\n'), ((3374, 3390), 'numpy.fabs', 'np.fabs', (['(x2 - x1)'], {}), '(x2 - x1)\n', (3381, 3390), True, 'import numpy as np\n'), ((3757, 3784), 'numpy.fabs', 'np.fabs', (['(metric * k * k * k)'], {}), '(metric * k * k * k)\n', (3764, 3784), True, 'import numpy as np\n'), ((4004, 4015), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4010, 4015), True, 'import numpy as np\n'), ((4019, 4030), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4025, 4030), True, 'import numpy as np\n'), ((3664, 3674), 'numpy.fabs', 'np.fabs', (['k'], {}), '(k)\n', (3671, 3674), True, 'import numpy as np\n'), ((4235, 4246), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4241, 4246), True, 'import numpy as np\n'), ((4265, 4276), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4271, 4276), True, 'import numpy as np\n'), ((4339, 4350), 'numpy.tan', 'np.tan', (['phi'], {}), '(phi)\n', (4345, 4350), True, 'import numpy as np\n'), ((4515, 4526), 'numpy.tan', 'np.tan', (['phi'], {}), '(phi)\n', (4521, 4526), True, 'import numpy as np\n'), ((4935, 4946), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4941, 4946), True, 'import numpy as np\n'), ((4980, 4991), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (4986, 4991), True, 'import numpy as np\n'), ((4441, 4452), 'numpy.tan', 'np.tan', (['phi'], {}), '(phi)\n', (4447, 4452), True, 'import numpy as np\n'), ((4617, 4628), 'numpy.tan', 'np.tan', (['phi'], {}), '(phi)\n', (4623, 4628), True, 'import numpy as np\n')]
|
import ignore
import tensorflow as tf
import numpy as np
char_arr = [c for c in 'SEPabcdefghijklmnopqrstuvwxyz단어나무놀이소녀키스사랑']
num_dic = {n: i for i, n in enumerate(char_arr)}
dic_len = len(num_dic)
seq_data = [['word', '단어'], ['wood', '나무'],
['game', '놀이'], ['girl', '소녀'],
['kiss', '키스'], ['love', '사랑']]
def make_batch(seq_data):
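    # Build one-hot encoder inputs, one-hot decoder inputs (prefixed with the start
    # symbol 'S') and integer decoder targets (suffixed with the end symbol 'E').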
input_batch = []
output_batch = []
target_batch = []
for seq in seq_data:
input = [num_dic[n] for n in seq[0]]
output = [num_dic[n] for n in ('S' + seq[1])]
target = [num_dic[n] for n in (seq[1] + 'E')]
input_batch.append(np.eye(dic_len)[input])
output_batch.append(np.eye(dic_len)[output])
target_batch.append(target)
return input_batch, output_batch, target_batch
# Option settings
learning_rate = 0.01
n_hidden = 128
total_epoch = 100
n_class = n_input = dic_len
# Build the neural network model
enc_input = tf.placeholder(tf.float32, [None, None, n_input])
dec_input = tf.placeholder(tf.float32, [None, None, n_input])
targets = tf.placeholder(tf.int64, [None, None])
with tf.variable_scope('encode'):
enc_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
enc_cell = tf.nn.rnn_cell.DropoutWrapper(enc_cell, output_keep_prob=0.5)
outputs, enc_states = tf.nn.dynamic_rnn(enc_cell, enc_input, dtype=tf.float32)
with tf.variable_scope('decode'):
dec_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
dec_cell = tf.nn.rnn_cell.DropoutWrapper(dec_cell, output_keep_prob=0.5)
outputs, dec_states = tf.nn.dynamic_rnn(dec_cell, dec_input, initial_state=enc_states, dtype=tf.float32)
model = tf.layers.dense(outputs, n_class, activation=None)
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model, labels=targets))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Train the neural network model
sess = tf.Session()
sess.run(tf.global_variables_initializer())
input_batch, output_batch, target_batch = make_batch(seq_data)
for epoch in range(total_epoch):
_, loss = sess.run([optimizer, cost], feed_dict={enc_input: input_batch, dec_input: output_batch, targets: target_batch})
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6}'.format(loss))
print('Optimization finished!')
# Translation test
def translate(word):
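    # At prediction time the target word is unknown, so the decoder input is filled
    # with the padding symbol 'P'; the argmax output is then decoded up to 'E'.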
seq_data = [word, 'P' * len(word)]
input_batch, output_batch, target_batch = make_batch([seq_data])
prediction = tf.argmax(model, 2)
result = sess.run(prediction, feed_dict={enc_input: input_batch, dec_input: output_batch, targets: target_batch})
decoded = [char_arr[i] for i in result[0]]
end = decoded.index('E')
translated = ''.join(decoded[:end])
return translated
print('\n=== Translation test ===')
print('word ->', translate('word'))
print('wodr ->', translate('wodr'))
print('love ->', translate('love'))
print('loev ->', translate('loev'))
print('abcd ->', translate('abcd'))
|
[
"tensorflow.nn.rnn_cell.BasicRNNCell",
"tensorflow.nn.dynamic_rnn",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.layers.dense",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.placeholder",
"numpy.eye",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
] |
[((912, 961), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, n_input]'], {}), '(tf.float32, [None, None, n_input])\n', (926, 961), True, 'import tensorflow as tf\n'), ((974, 1023), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, n_input]'], {}), '(tf.float32, [None, None, n_input])\n', (988, 1023), True, 'import tensorflow as tf\n'), ((1034, 1072), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None, None]'], {}), '(tf.int64, [None, None])\n', (1048, 1072), True, 'import tensorflow as tf\n'), ((1606, 1656), 'tensorflow.layers.dense', 'tf.layers.dense', (['outputs', 'n_class'], {'activation': 'None'}), '(outputs, n_class, activation=None)\n', (1621, 1656), True, 'import tensorflow as tf\n'), ((1842, 1854), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1852, 1854), True, 'import tensorflow as tf\n'), ((1079, 1106), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encode"""'], {}), "('encode')\n", (1096, 1106), True, 'import tensorflow as tf\n'), ((1123, 1160), 'tensorflow.nn.rnn_cell.BasicRNNCell', 'tf.nn.rnn_cell.BasicRNNCell', (['n_hidden'], {}), '(n_hidden)\n', (1150, 1160), True, 'import tensorflow as tf\n'), ((1176, 1237), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['enc_cell'], {'output_keep_prob': '(0.5)'}), '(enc_cell, output_keep_prob=0.5)\n', (1205, 1237), True, 'import tensorflow as tf\n'), ((1265, 1321), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['enc_cell', 'enc_input'], {'dtype': 'tf.float32'}), '(enc_cell, enc_input, dtype=tf.float32)\n', (1282, 1321), True, 'import tensorflow as tf\n'), ((1328, 1355), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decode"""'], {}), "('decode')\n", (1345, 1355), True, 'import tensorflow as tf\n'), ((1372, 1409), 'tensorflow.nn.rnn_cell.BasicRNNCell', 'tf.nn.rnn_cell.BasicRNNCell', (['n_hidden'], {}), '(n_hidden)\n', (1399, 1409), True, 'import tensorflow as tf\n'), ((1425, 1486), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['dec_cell'], {'output_keep_prob': '(0.5)'}), '(dec_cell, output_keep_prob=0.5)\n', (1454, 1486), True, 'import tensorflow as tf\n'), ((1514, 1601), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['dec_cell', 'dec_input'], {'initial_state': 'enc_states', 'dtype': 'tf.float32'}), '(dec_cell, dec_input, initial_state=enc_states, dtype=tf.\n float32)\n', (1531, 1601), True, 'import tensorflow as tf\n'), ((1679, 1755), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'model', 'labels': 'targets'}), '(logits=model, labels=targets)\n', (1725, 1755), True, 'import tensorflow as tf\n'), ((1864, 1897), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1895, 1897), True, 'import tensorflow as tf\n'), ((2374, 2393), 'tensorflow.argmax', 'tf.argmax', (['model', '(2)'], {}), '(model, 2)\n', (2383, 2393), True, 'import tensorflow as tf\n'), ((1769, 1806), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (1791, 1806), True, 'import tensorflow as tf\n'), ((630, 645), 'numpy.eye', 'np.eye', (['dic_len'], {}), '(dic_len)\n', (636, 645), True, 'import numpy as np\n'), ((682, 697), 'numpy.eye', 'np.eye', (['dic_len'], {}), '(dic_len)\n', (688, 697), True, 'import numpy as np\n')]
|
import ipywidgets as widgets
import cartopy.crs as ccrs
import geoviews as gv
import holoviews as hv
import numpy as np
import panel as pn
import param
from shapely.geometry import Polygon as sPolygon, LineString as sLineString
from .interface import EDRInterface
from .lookup import CRS_LOOKUP
class EDRExplorer(param.Parameterized):
"""
A `Panel` dashboard from which you can explore the data presented by an EDR Server.
"""
# Metadata widgets.
coll_uri = widgets.Text(placeholder='Specify an EDR Server...', description='Server')
coll = widgets.Dropdown(options=[], description='Collections', disabled=True)
locations = widgets.Dropdown(options=[], description='Locations', disabled=True)
datasets = widgets.SelectMultiple(options=[], description="Datasets", disabled=True)
start_time = widgets.Dropdown(options=[], description='Start Date', disabled=True)
end_time = widgets.Dropdown(options=[], description='End Date', disabled=True)
start_z = widgets.Dropdown(options=[], description='Z Lower', disabled=True)
end_z = widgets.Dropdown(options=[], description='Z Upper', disabled=True)
# Error display widgets.
connect_error_box = widgets.HTML("", layout=widgets.Layout(display="none"))
data_error_box = widgets.HTML("", layout=widgets.Layout(display="none"))
# Plot control widgets.
pc_times = widgets.SelectionSlider(options=[""], description="Timestep", disabled=True)
pc_zs = widgets.SelectionSlider(options=[""], description="Z Level", disabled=True)
pc_params = widgets.Dropdown(options=[], description="Parameter", disabled=True)
use_colours = pn.widgets.Checkbox(name="Use supplied colours", disabled=True)
use_levels = pn.widgets.Checkbox(name="Use supplied levels", disabled=True)
# Parameters for triggering plot updates.
_data_key = param.String("")
_colours = param.Boolean(use_colours.value)
_levels = param.Boolean(use_levels.value)
cmap = param.String("viridis")
alpha = param.Magnitude(0.85)
# Buttons.
connect_button = widgets.Button(description="Connect")
submit_button = widgets.Button(description="Submit", disabled=True)
dataset_button = widgets.Button(
description="Get Dataset",
disabled=True,
layout=widgets.Layout(top="-0.5rem")
)
# Lists and boxes aggregating multiple widgets.
wlist = [coll, locations, datasets, start_time, end_time, start_z, end_z] # Metadata widgets.
pwlist = [pc_times, pc_zs, pc_params] # Plot widgets.
pchecklist = [use_colours, use_levels]
wbox = widgets.VBox(wlist)
pwbox = pn.Row(pn.Column(*pwlist[:2]), pwlist[-1], pn.Column(*pchecklist))
# Map projection code
web_mercator_epsg = "EPSG:3857"
def __init__(self, server_address=None):
"""
Set up a new `Panel` dashboard to use to explore the data presented by an
EDR Server. This constructs an instance of `.interface.EDRInterface` to submit
requests to the EDR Server on the dashboard's behalf and displays results from
these requests in the dashboard.
Optionally pass the hostname of an EDR server via `server_address`. If specified,
this value will pre-populate the `Server` field of the interface.
"""
self.server_address = server_address
if self.server_address is not None:
self.coll_uri.value = self.server_address
super().__init__()
# Class properties.
self._edr_interface = None
self._dataset = None
self._no_t = "No t values in collection"
self._no_z = "No z values in collection"
# Plot.
self.plot = gv.DynamicMap(self.make_plot)
# Button click bindings.
self.connect_button.on_click(self._load_collections)
self.submit_button.on_click(self._request_plot_data)
self.dataset_button.on_click(self._get_dataset)
# Watches on widgets.
self.coll.observe(self._populate_contents_callback, names='value')
self.start_time.observe(self._filter_end_time, names='value')
self.start_z.observe(self._filter_end_z, names='value')
self.pc_times.observe(self._plot_change, names='value')
self.pc_zs.observe(self._plot_change, names='value')
self.pc_params.observe(self._plot_change, names='value')
self.use_colours.param.watch(self._checkbox_change, "value", onlychanged=True)
self.use_levels.param.watch(self._checkbox_change, "value", onlychanged=True)
# Items for geometry-based queries.
self._area_poly = None
self._corridor_path = None
self._area_stream = None
self._corridor_stream = None
self._query_tools()
@property
def edr_interface(self):
"""The instance of `.interface.EDRInterface` used to handle requests to the EDR Server."""
return self._edr_interface
@edr_interface.setter
def edr_interface(self, value):
"""Set the instance of `.interface.EDRInterface` used to handle requests to the EDR Server."""
self._edr_interface = value
@property
def dataset(self):
"""
A well-known Python data object containing all the data represented by the current state
of select widgets on the dashboard.
"""
return self._dataset
@dataset.setter
def dataset(self, value):
self._dataset = value
@property
def layout(self):
"""
Construct a layout of `Panel` objects to produce the EDR explorer dashboard.
To view the dashboard:
explorer = EDRExplorer()
explorer.layout
The layout is composed of two main elements:
* a set of selector widgets in a column on the left that define the values passed
in queries to the EDR Server via the `.interface.EDRInterface` instance
* a plot on the right that displays graphical results from queries submitted to the
EDR Server via the `.interface.EDRInterface` instance
There are some extra elements too:
* the widgets column on the left contains three buttons:
* one to connect to the server at the URI specified in the `Server` text field widget,
* one to submit a query to the EDR Server via the `.interface.EDRInterface` instance
based on the values set in the selector widgets, and
* one to request and return to the user all the data referenced by the current state
of the dashboard's select widgets as a well-known Python data object (such as an Iris cube).
* the widgets column on the left also contains two fields for displaying error messages
when connecting to or retrieving data from the EDR Server. These are hidden by
default and are made visible when there is a relevant error message to display. Once
the error has been resolved the field will become hidden again.
        * the plot area on the right contains three plot control widgets (timestep, z level
          and parameter) for selecting which data from queries submitted to the EDR Server
          is shown on the plot.
        * the plot area on the right also contains two checkboxes to select whether or not to
          show data on the plot rendered using colours and levels supplied in the query response.
"""
connect_row = pn.Row(
pn.Column(self.coll_uri, self.connect_error_box),
self.connect_button
)
control_widgets = pn.Column(self.wbox, self.data_error_box)
buttons = pn.Column(self.submit_button, self.dataset_button)
control_row = pn.Row(control_widgets, buttons, align=("end", "start"))
control_col = pn.Column(connect_row, control_row)
tiles = gv.tile_sources.Wikipedia.opts(width=800, height=600)
plot = tiles * self.plot
plot_col = pn.Column(plot, self.pwbox)
return pn.Row(control_col, plot_col).servable()
def _populate_error_box(self, error_box_ref, errors):
error_box = getattr(self, error_box_ref)
good_layout = widgets.Layout(
display="none",
visibility="hidden",
border="none",
)
bad_layout = widgets.Layout(
border="2px solid #dc3545",
padding="0.05rem 0.5rem",
margin="0 0.25rem 0 5.625rem",
width="70%",
overflow="auto",
display="flex",
)
error_box.value = errors
error_box.layout = good_layout if errors == "" else bad_layout
def _load_collections(self, event):
"""
Callback when the `connect` button is clicked.
Set up the EDR interface instance and connect to the server's collections.
"""
self._clear_controls()
server_loc = self.coll_uri.value
self.edr_interface = EDRInterface(server_loc)
error_box = "connect_error_box"
if self.edr_interface.errors is None:
# Independent check to see if we can clear the error box.
self._populate_error_box(error_box, "")
if self.edr_interface.json is not None and self.edr_interface.errors is None:
# The only state in which the controls can be populated and enabled.
self.coll.options = [(ct, cid) for (cid, ct) in zip(self.edr_interface.collection_ids, self.edr_interface.collection_titles)]
self.coll.value = self.edr_interface.collection_ids[0]
self._enable_controls()
elif self.edr_interface.errors is not None:
# We have known errors to show.
self._populate_error_box(error_box, self.edr_interface.errors)
else:
# Something else has gone wrong, which we need to show.
self._populate_error_box(error_box, "UnspecifiedError")
def _enable_controls(self):
"""Enable query control widgets in the left column."""
for widget in self.wlist:
widget.disabled = False
self.submit_button.disabled = False
def _clear_controls(self):
"""Clear state of all control and error display widgets and disable them."""
for widget in self.wlist + self.pwlist:
widget.disabled = True
if isinstance(widget, widgets.SelectMultiple):
widget.options = ("",)
widget.value = ("",)
elif isinstance(widget, widgets.SelectionSlider):
widget.options = ("",)
widget.value = ""
else:
widget.options = []
widget.value = None
for box in self.pchecklist:
box.value = False
box.disabled = True
self.submit_button.disabled = True
self.dataset_button.disabled = True
self._populate_error_box("connect_error_box", "")
self._populate_error_box("data_error_box", "")
def _check_enable_checkboxes(self):
"""
        Check if we can enable the checkboxes that control whether the plot
        uses the colours and levels supplied in the data JSON. This is only
        possible if this information is present in the data JSON.
"""
box_disabled = self.edr_interface.data_handler.get_colours(self.pc_params.value) is None
for box in self.pchecklist:
box.disabled = box_disabled
def _checkbox_change(self, event):
"""
Bind a change in a checkbox to the relevant param object to trigger
a plot update.
"""
name = event.obj.name
if "colour" in name:
self._colours = event.new
elif "level" in name:
self._levels = event.new
def _enable_plot_controls(self):
"""Enable plot control widgets for updating the specific data shown on the plot."""
for widget in self.pwlist:
widget.disabled = False
self.dataset_button.disabled = False
self._check_enable_checkboxes()
def _populate_contents_callback(self, change):
"""
Populate the options and values attributes of all the left column query control
widgets when a collection provided by the EDR Server is specified.
"""
collection_id = change["new"]
if collection_id is not None:
# Parameters and locations.
self._populate_params(collection_id)
locs = self.edr_interface.get_locations(collection_id)
self.locations.options = locs
# Times.
if self.edr_interface.has_temporal_extent(collection_id):
times = self.edr_interface.get_temporal_extent(collection_id)
else:
times = [self._no_t]
self.start_time.options = times
self.end_time.options = times
# Vertical levels.
if self.edr_interface.has_vertical_extent(collection_id):
zs = self.edr_interface.get_vertical_extent(collection_id)
else:
zs = [self._no_z]
self.start_z.options = zs
self.end_z.options = zs
def _populate_params(self, collection_id):
"""
Populate the `Datasets` widget with a descriptive list (names and units) of
the parameters provided by the selected collection.
"""
params_dict = self.edr_interface.get_collection_parameters(collection_id)
options = []
for k, v in params_dict.items():
choice = f'{v["label"].replace("_", " ").title()} ({v["units"]})'
options.append((choice, k))
self.datasets.options = options
def _filter_end_time(self, change):
"""
Only show end datetimes in the `End Date` widget that are later than
the value selected in the `Start Date` widget.
"""
start_time_selected = change["new"]
if start_time_selected is not None:
# Avoid errors when clearing widget state.
times = self.start_time.options
sel_idx = times.index(start_time_selected)
self.end_time.options = times[sel_idx:]
def _filter_end_z(self, change):
"""
Only show end vertical values in the `End Z` widget that are greater than
the value selected in the `Start Z` widget.
"""
start_z_selected = change["new"]
if start_z_selected is not None:
# Avoid errors when clearing widget state.
zs = self.start_z.options
sel_idx = zs.index(start_z_selected)
self.end_z.options = zs[sel_idx:]
def _get_dataset(self, _):
"""
Callback when the `get dataset` button is clicked.
Request from the EDR Server all data represented by the current states of
the select widgets and provide this data as a well-known Python data
object (such as an Iris Cube).
"""
# XXX somewhere we should check if the server supports `Cube` queries,
# and preferentially use that if available.
from .dataset import make_dataset
collection_id = self.coll.value
params = self.edr_interface.get_collection_parameters(collection_id)
keys = self.datasets.value
names_dict = {k: v["label"] for k, v in params.items() if k in keys}
dataset = make_dataset(self.edr_interface.data_handler, names_dict)
self.dataset = dataset
def _geometry_stream_data(self, query_name):
"""
Return the data attribute of the holoviews stream referenced by `query_name`.
"""
ref = f"_{query_name}_stream"
geom_stream = getattr(self, ref)
return geom_stream.data
def _geometry_query_is_defined(self, query_name):
"""
Determine whether a geometry specified by `query_name` has been defined.
We determine this by checking if all the values in its x and y coords
are 0 - if they are, we assume it's in its default state and thus
undefined.
"""
data = self._geometry_stream_data(query_name)
return all(data["xs"][0]) and all(data["ys"][0])
def _hv_stream_to_wkt(self, query_name):
"""
Convert the data points in the geometry specified by `query_name` to
the appropriate Shapely geometry, and return the WKT string representation
of the geometry.
"""
constructor = sPolygon if query_name == "area" else sLineString
data = self._geometry_stream_data(query_name)
xpoints, ypoints = np.array(data["xs"][0]), np.array(data["ys"][0])
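        # Points drawn on the tile map are in Mercator map coordinates; convert them
        # to WGS84 lon/lat so the WKT string matches the query CRS (EPSG:4326).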
wgs84_points = ccrs.PlateCarree().transform_points(
ccrs.Mercator(), xpoints, ypoints
)
result = None
errors = None
try:
geom = constructor(wgs84_points)
except ValueError:
errors = f"Invalid {query_name!r} geometry provided"
else:
result = geom.wkt
return result, errors
def _request_plot_data(self, _):
"""
Callback when the `submit` button is clicked.
This makes a get data request to the EDR Server via the
`.interface.EDRInterface` instance.
"""
# Get selection widgets state for request.
coll_id = self.coll.value
param_names = self.datasets.value
locations = self.locations.value
start_date = self.start_time.value
end_date = self.end_time.value
start_z = self.start_z.value
end_z = self.end_z.value
# Define common query parameters.
query_params = {"crs": "EPSG:4326"}
if start_date != self._no_t:
query_params["datetime"] = "/".join([start_date, end_date])
if start_z != self._no_z:
query_params["z"] = [start_z, end_z]
# Set query type.
query_type = None
errors = None
query_types = ["area", "corridor"]
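        # Geometry queries drawn on the map take precedence over a plain locations
        # query; if both geometries are defined, the later type in the list wins.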
for qtype in query_types:
if self._geometry_query_is_defined(qtype):
print(f"Query type: {qtype}")
query_type = qtype
coords, errors = self._hv_stream_to_wkt(query_type)
if coords is not None:
query_params["coords"] = coords
if query_type is None:
query_type = "locations"
query_params["loc_id"] = locations
# Request dataset.
self.edr_interface.query(coll_id, query_type, param_names, **query_params)
# Collect coords and query errors, if present.
all_errors = []
if errors is not None:
all_errors.append(errors)
if self.edr_interface.errors is not None:
all_errors.append(self.edr_interface.errors)
if len(all_errors):
self.edr_interface.errors = "\n".join(all_errors)
error_box = "data_error_box"
if self.edr_interface.errors is None:
# Independent check to see if we can clear the error box.
self._populate_error_box(error_box, "")
if self.edr_interface.data_handler is not None and self.edr_interface.errors is None:
# Generate and enable the plot controls.
if self.edr_interface.has_temporal_extent(coll_id):
plot_control_times = list(self.edr_interface.data_handler.coords["t"])
else:
plot_control_times = [self._no_t]
self.pc_times.options = plot_control_times
self.pc_times.value = plot_control_times[0]
if self.edr_interface.has_vertical_extent(coll_id):
plot_control_zs = list(self.edr_interface.data_handler.coords["z"])
else:
plot_control_zs = [self._no_z]
self.pc_zs.options = plot_control_zs
self.pc_zs.value = plot_control_zs[0]
plot_control_params = list(param_names)
self.pc_params.options = list(filter(lambda o: o[1] in plot_control_params, self.datasets.options))
self.pc_params.value = plot_control_params[0]
self._enable_plot_controls()
elif self.edr_interface.errors is not None:
self._populate_error_box(error_box, self.edr_interface.errors)
else:
self._populate_error_box(error_box, "Uncaught error (data retrieval)")
def _plot_change(self, _):
"""
Helper function to capture changes from either plot control widget
and trigger an update of the plot.
"""
param = self.pc_params.value
t = self.pc_times.value
z = self.pc_zs.value
can_request_data = False
self._check_enable_checkboxes()
value_dict = {}
if t not in (None, "", self._no_t):
value_dict.update({"t": t})
can_request_data = True
if z not in (None, "", self._no_z):
value_dict.update({"z": z})
can_request_data = True
if param is not None and can_request_data:
self._data_key = self.edr_interface.data_handler.make_key(param, value_dict)
def _query_tools(self):
self._area_poly = hv.Polygons(
[[(0, 0), (0, 0)]]
).opts(
line_color="gray", line_width=1.5, line_alpha=0.75,
fill_color="gray", fill_alpha=0.3,
)
self._corridor_path = hv.Path(
[[(0, 0), (0, 0)]]
).opts(
color="gray", line_width=2, line_alpha=0.75,
)
self._area_stream = hv.streams.PolyDraw(
source=self._area_poly,
num_objects=1,
tooltip="Area Query Tool"
)
self._corridor_stream = hv.streams.PolyDraw(
source=self._corridor_path,
num_objects=1,
tooltip="Corridor Query Tool"
)
@param.depends('_data_key', '_colours', '_levels', 'cmap', 'alpha')
def make_plot(self):
"""Show data from a data request to the EDR Server on the plot."""
showable = gv.Image(
([-8, -1], [53, 58], [[0, 0], [0, 0]]), # Approximate UK extent.
crs=CRS_LOOKUP["WGS_1984"],
).opts(alpha=0.0)
if self._data_key != "":
dataset = self.edr_interface.data_handler[self._data_key]
opts = {"cmap": self.cmap, "alpha": self.alpha, "colorbar": True}
colours = self.edr_interface.data_handler.get_colours(self.pc_params.value)
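            # If the server supplied rendering hints, honour its value range and,
            # per the checkboxes, its colour map and/or discrete colour levels.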
if colours is not None:
opts.update({"clim": (colours["vmin"], colours["vmax"])})
if self.use_colours.value:
opts["cmap"] = colours["colours"]
if self.use_levels.value:
opts["color_levels"] = colours["values"]
error_box = "data_error_box"
if self.edr_interface.data_handler.errors is None:
# Independent check to see if we can clear the data error box.
self._populate_error_box(error_box, "")
if dataset is not None and self.edr_interface.data_handler.errors is None:
showable = dataset.to(gv.Image, ['longitude', 'latitude']).opts(**opts)
elif self.edr_interface.data_handler.errors is not None:
self._populate_error_box(
error_box,
self.edr_interface.data_handler.errors
)
else:
self._populate_error_box(
error_box,
"Unspecified error (plotting)"
)
return showable * self._area_poly * self._corridor_path
|
[
"param.Magnitude",
"cartopy.crs.Mercator",
"holoviews.Polygons",
"geoviews.Image",
"ipywidgets.Text",
"geoviews.tile_sources.Wikipedia.opts",
"panel.widgets.Checkbox",
"panel.Row",
"param.String",
"ipywidgets.SelectionSlider",
"ipywidgets.Button",
"geoviews.DynamicMap",
"param.Boolean",
"ipywidgets.Dropdown",
"ipywidgets.SelectMultiple",
"panel.Column",
"ipywidgets.Layout",
"holoviews.Path",
"ipywidgets.VBox",
"param.depends",
"numpy.array",
"cartopy.crs.PlateCarree",
"holoviews.streams.PolyDraw"
] |
[((483, 557), 'ipywidgets.Text', 'widgets.Text', ([], {'placeholder': '"""Specify an EDR Server..."""', 'description': '"""Server"""'}), "(placeholder='Specify an EDR Server...', description='Server')\n", (495, 557), True, 'import ipywidgets as widgets\n'), ((569, 639), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""Collections"""', 'disabled': '(True)'}), "(options=[], description='Collections', disabled=True)\n", (585, 639), True, 'import ipywidgets as widgets\n'), ((656, 724), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""Locations"""', 'disabled': '(True)'}), "(options=[], description='Locations', disabled=True)\n", (672, 724), True, 'import ipywidgets as widgets\n'), ((740, 813), 'ipywidgets.SelectMultiple', 'widgets.SelectMultiple', ([], {'options': '[]', 'description': '"""Datasets"""', 'disabled': '(True)'}), "(options=[], description='Datasets', disabled=True)\n", (762, 813), True, 'import ipywidgets as widgets\n'), ((831, 900), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""Start Date"""', 'disabled': '(True)'}), "(options=[], description='Start Date', disabled=True)\n", (847, 900), True, 'import ipywidgets as widgets\n'), ((916, 983), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""End Date"""', 'disabled': '(True)'}), "(options=[], description='End Date', disabled=True)\n", (932, 983), True, 'import ipywidgets as widgets\n'), ((998, 1064), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""Z Lower"""', 'disabled': '(True)'}), "(options=[], description='Z Lower', disabled=True)\n", (1014, 1064), True, 'import ipywidgets as widgets\n'), ((1077, 1143), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""Z Upper"""', 'disabled': '(True)'}), "(options=[], description='Z Upper', disabled=True)\n", (1093, 1143), True, 'import ipywidgets as widgets\n'), ((1375, 1451), 'ipywidgets.SelectionSlider', 'widgets.SelectionSlider', ([], {'options': "['']", 'description': '"""Timestep"""', 'disabled': '(True)'}), "(options=[''], description='Timestep', disabled=True)\n", (1398, 1451), True, 'import ipywidgets as widgets\n'), ((1464, 1539), 'ipywidgets.SelectionSlider', 'widgets.SelectionSlider', ([], {'options': "['']", 'description': '"""Z Level"""', 'disabled': '(True)'}), "(options=[''], description='Z Level', disabled=True)\n", (1487, 1539), True, 'import ipywidgets as widgets\n'), ((1556, 1624), 'ipywidgets.Dropdown', 'widgets.Dropdown', ([], {'options': '[]', 'description': '"""Parameter"""', 'disabled': '(True)'}), "(options=[], description='Parameter', disabled=True)\n", (1572, 1624), True, 'import ipywidgets as widgets\n'), ((1643, 1706), 'panel.widgets.Checkbox', 'pn.widgets.Checkbox', ([], {'name': '"""Use supplied colours"""', 'disabled': '(True)'}), "(name='Use supplied colours', disabled=True)\n", (1662, 1706), True, 'import panel as pn\n'), ((1724, 1786), 'panel.widgets.Checkbox', 'pn.widgets.Checkbox', ([], {'name': '"""Use supplied levels"""', 'disabled': '(True)'}), "(name='Use supplied levels', disabled=True)\n", (1743, 1786), True, 'import panel as pn\n'), ((1850, 1866), 'param.String', 'param.String', (['""""""'], {}), "('')\n", (1862, 1866), False, 'import param\n'), ((1882, 1914), 'param.Boolean', 'param.Boolean', (['use_colours.value'], {}), '(use_colours.value)\n', (1895, 1914), False, 'import param\n'), ((1929, 1960), 'param.Boolean', 'param.Boolean', 
(['use_levels.value'], {}), '(use_levels.value)\n', (1942, 1960), False, 'import param\n'), ((1972, 1995), 'param.String', 'param.String', (['"""viridis"""'], {}), "('viridis')\n", (1984, 1995), False, 'import param\n'), ((2008, 2029), 'param.Magnitude', 'param.Magnitude', (['(0.85)'], {}), '(0.85)\n', (2023, 2029), False, 'import param\n'), ((2067, 2104), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Connect"""'}), "(description='Connect')\n", (2081, 2104), True, 'import ipywidgets as widgets\n'), ((2125, 2176), 'ipywidgets.Button', 'widgets.Button', ([], {'description': '"""Submit"""', 'disabled': '(True)'}), "(description='Submit', disabled=True)\n", (2139, 2176), True, 'import ipywidgets as widgets\n'), ((2588, 2607), 'ipywidgets.VBox', 'widgets.VBox', (['wlist'], {}), '(wlist)\n', (2600, 2607), True, 'import ipywidgets as widgets\n'), ((21809, 21875), 'param.depends', 'param.depends', (['"""_data_key"""', '"""_colours"""', '"""_levels"""', '"""cmap"""', '"""alpha"""'], {}), "('_data_key', '_colours', '_levels', 'cmap', 'alpha')\n", (21822, 21875), False, 'import param\n'), ((2627, 2649), 'panel.Column', 'pn.Column', (['*pwlist[:2]'], {}), '(*pwlist[:2])\n', (2636, 2649), True, 'import panel as pn\n'), ((2663, 2685), 'panel.Column', 'pn.Column', (['*pchecklist'], {}), '(*pchecklist)\n', (2672, 2685), True, 'import panel as pn\n'), ((3682, 3711), 'geoviews.DynamicMap', 'gv.DynamicMap', (['self.make_plot'], {}), '(self.make_plot)\n', (3695, 3711), True, 'import geoviews as gv\n'), ((7541, 7582), 'panel.Column', 'pn.Column', (['self.wbox', 'self.data_error_box'], {}), '(self.wbox, self.data_error_box)\n', (7550, 7582), True, 'import panel as pn\n'), ((7601, 7651), 'panel.Column', 'pn.Column', (['self.submit_button', 'self.dataset_button'], {}), '(self.submit_button, self.dataset_button)\n', (7610, 7651), True, 'import panel as pn\n'), ((7674, 7730), 'panel.Row', 'pn.Row', (['control_widgets', 'buttons'], {'align': "('end', 'start')"}), "(control_widgets, buttons, align=('end', 'start'))\n", (7680, 7730), True, 'import panel as pn\n'), ((7753, 7788), 'panel.Column', 'pn.Column', (['connect_row', 'control_row'], {}), '(connect_row, control_row)\n', (7762, 7788), True, 'import panel as pn\n'), ((7806, 7859), 'geoviews.tile_sources.Wikipedia.opts', 'gv.tile_sources.Wikipedia.opts', ([], {'width': '(800)', 'height': '(600)'}), '(width=800, height=600)\n', (7836, 7859), True, 'import geoviews as gv\n'), ((7912, 7939), 'panel.Column', 'pn.Column', (['plot', 'self.pwbox'], {}), '(plot, self.pwbox)\n', (7921, 7939), True, 'import panel as pn\n'), ((8126, 8192), 'ipywidgets.Layout', 'widgets.Layout', ([], {'display': '"""none"""', 'visibility': '"""hidden"""', 'border': '"""none"""'}), "(display='none', visibility='hidden', border='none')\n", (8140, 8192), True, 'import ipywidgets as widgets\n'), ((8261, 8411), 'ipywidgets.Layout', 'widgets.Layout', ([], {'border': '"""2px solid #dc3545"""', 'padding': '"""0.05rem 0.5rem"""', 'margin': '"""0 0.25rem 0 5.625rem"""', 'width': '"""70%"""', 'overflow': '"""auto"""', 'display': '"""flex"""'}), "(border='2px solid #dc3545', padding='0.05rem 0.5rem', margin\n ='0 0.25rem 0 5.625rem', width='70%', overflow='auto', display='flex')\n", (8275, 8411), True, 'import ipywidgets as widgets\n'), ((21499, 21589), 'holoviews.streams.PolyDraw', 'hv.streams.PolyDraw', ([], {'source': 'self._area_poly', 'num_objects': '(1)', 'tooltip': '"""Area Query Tool"""'}), "(source=self._area_poly, num_objects=1, tooltip=\n 'Area Query Tool')\n", (21518, 21589), 
True, 'import holoviews as hv\n'), ((21663, 21761), 'holoviews.streams.PolyDraw', 'hv.streams.PolyDraw', ([], {'source': 'self._corridor_path', 'num_objects': '(1)', 'tooltip': '"""Corridor Query Tool"""'}), "(source=self._corridor_path, num_objects=1, tooltip=\n 'Corridor Query Tool')\n", (21682, 21761), True, 'import holoviews as hv\n'), ((1222, 1252), 'ipywidgets.Layout', 'widgets.Layout', ([], {'display': '"""none"""'}), "(display='none')\n", (1236, 1252), True, 'import ipywidgets as widgets\n'), ((1299, 1329), 'ipywidgets.Layout', 'widgets.Layout', ([], {'display': '"""none"""'}), "(display='none')\n", (1313, 1329), True, 'import ipywidgets as widgets\n'), ((2287, 2316), 'ipywidgets.Layout', 'widgets.Layout', ([], {'top': '"""-0.5rem"""'}), "(top='-0.5rem')\n", (2301, 2316), True, 'import ipywidgets as widgets\n'), ((7423, 7471), 'panel.Column', 'pn.Column', (['self.coll_uri', 'self.connect_error_box'], {}), '(self.coll_uri, self.connect_error_box)\n', (7432, 7471), True, 'import panel as pn\n'), ((16568, 16591), 'numpy.array', 'np.array', (["data['xs'][0]"], {}), "(data['xs'][0])\n", (16576, 16591), True, 'import numpy as np\n'), ((16593, 16616), 'numpy.array', 'np.array', (["data['ys'][0]"], {}), "(data['ys'][0])\n", (16601, 16616), True, 'import numpy as np\n'), ((16689, 16704), 'cartopy.crs.Mercator', 'ccrs.Mercator', ([], {}), '()\n', (16702, 16704), True, 'import cartopy.crs as ccrs\n'), ((7955, 7984), 'panel.Row', 'pn.Row', (['control_col', 'plot_col'], {}), '(control_col, plot_col)\n', (7961, 7984), True, 'import panel as pn\n'), ((16640, 16658), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (16656, 16658), True, 'import cartopy.crs as ccrs\n'), ((21137, 21168), 'holoviews.Polygons', 'hv.Polygons', (['[[(0, 0), (0, 0)]]'], {}), '([[(0, 0), (0, 0)]])\n', (21148, 21168), True, 'import holoviews as hv\n'), ((21348, 21375), 'holoviews.Path', 'hv.Path', (['[[(0, 0), (0, 0)]]'], {}), '([[(0, 0), (0, 0)]])\n', (21355, 21375), True, 'import holoviews as hv\n'), ((21995, 22071), 'geoviews.Image', 'gv.Image', (['([-8, -1], [53, 58], [[0, 0], [0, 0]])'], {'crs': "CRS_LOOKUP['WGS_1984']"}), "(([-8, -1], [53, 58], [[0, 0], [0, 0]]), crs=CRS_LOOKUP['WGS_1984'])\n", (22003, 22071), True, 'import geoviews as gv\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 27 10:35:53 2021
@author: Peace4Lv
"""
from pyecharts.components import Image
from pyecharts.options import ComponentTitleOpts
from os import path
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
plt.rcParams['font.sans-serif'] = ['KaiTi']
plt.rcParams['axes.unicode_minus'] = False
def DrawImage(imgUrl="../html/pic/horizontalLine.png", **kw):
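    # Build a pyecharts Image component for the timeline picture; fall back to
    # a remote placeholder image if the local file cannot be found.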
image = Image()
# check file exists
if not path.isfile(imgUrl):
imgUrl = r"https://gitee.com/RiskyJR/pic-bed/raw/master/comm-timeline-graphic-1024x380.png"
image.add(
src=imgUrl,
# image align center should modify outside
style_opts={
"style": "margin-top: 20px;text-align: center;width:1800px;height:900px;"},
)
image.set_global_opts(
title_opts=ComponentTitleOpts(title="Time Line")
)
image.render("../html/imageTest.html")
print("horizontal line image finished...\n")
return image
def UpdateTimeLineImage(startTick_x=['2021-08-09 09:00:00', '2021-08-09 09:45:00',
'2021-08-09 11:11:00', '2021-08-09 14:30:00',
'2021-08-09 15:18:00',
'2021-08-09 16:40:00', '2021-08-09 17:19:00'],
eventName_x=['开会', '发票', 'visual-code', '舆情分析',
'AOA-Paper', 'AOA-Paper', 'visual-code'],
eventLast_x=[30, 78, 33, 47, 69, 39, 15], *k, **kw):
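    # Render a horizontal daily timeline: one coloured bar per event, a marker
    # at the event midpoint, and a text label with name, start time and
    # duration in minutes.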
    colors = ['#E5562D', '#E0A459', '#CFBE65', '#A8CF65', '#6FD67D', '#68D5AE',
              '#6FD0DB', '#5294D0', '#595CD0', '#9E59D0', '#D05994']
# datetime-str→datetime→baseline→gap
    # Create the base bar starting at 05:00 and spanning 19 hours
startTick_t = [datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
for x in startTick_x]
zeroTick_t = datetime.strptime(datetime.strftime(
startTick_t[1], "%Y-%m-%d")+" 05:00:00", "%Y-%m-%d %H:%M:%S")
endTick_t = zeroTick_t+timedelta(hours=19)
eventName = eventName_x
eventLast = eventLast_x
levels = np.array([-5, 5, -3, 3, -1, 1])
fig, ax = plt.subplots(figsize=(36, 36*0.5625),
facecolor='#D6D7C5', dpi=500)
baseGapMin = (endTick_t-zeroTick_t).total_seconds()/60
ax.set(facecolor="#D6D7C5")
ax.broken_barh(
[(0, baseGapMin)], (-1/2, 1), alpha=.5,
facecolors='#ace9e8', edgecolors='white', lw=4, capstyle='round')
ax.set_ylim(-8, 8)
# set as page background image no need title
# ax.set_title('Daily Time Line', fontsize=60, color='white')
for ii, (iname, itick, ieventLast) in enumerate(zip(eventName, startTick_t, eventLast)):
barhColor = colors[ii % 4]
level = levels[ii % 6]
vert = 'top' if level < 0 else 'bottom'
# tickTemp = datetime.strptime(itick, "%Y-%m-%d %H:%M:%S")
curPointX = (itick-zeroTick_t).total_seconds()/60
curPointX_M = curPointX + ieventLast/2
ax.scatter(curPointX_M, 0, s=100, facecolor='w',
edgecolor=barhColor, zorder=9999)
# a line up to the text
ax.plot((curPointX_M, curPointX_M), (0, level), c='white', alpha=.5)
# text
itickStr = datetime.strftime(itick, "%m-%d %H:%M")
itext = iname+"\n"+itickStr+"|"+str(ieventLast)
textInstance = ax.text(
curPointX_M, level, itext,
horizontalalignment='center', verticalalignment=vert, fontsize=20,
fontfamily='Microsoft YaHei')
textInstance.set_bbox(
dict(boxstyle="round", alpha=0.5, color='#C3EAE9'))
# broken_bar
ax.broken_barh([(curPointX, ieventLast)], (-1/2, 1),
facecolors=barhColor, edgecolors='white', lw=4)
# Remove components for a cleaner look
plt.setp((ax.get_yticklabels() + ax.get_yticklines() +
list(ax.spines.values())), visible=False)
plt.setp((ax.get_xticklabels() + ax.get_xticklines() +
list(ax.spines.values())), visible=False)
plt.xlabel(startTick_t[int(len(startTick_t)/2)].strftime("%Y-%m-%d")+' Time Line',
loc='left', fontsize=30, fontfamily='Microsoft YaHei', color='white')
plt.ylabel('Update:'+datetime.now().strftime("%Y-%m-%d"),
loc='bottom', fontsize=30, fontfamily='Microsoft YaHei', color='white')
if True:
imageFile = r'../html/pic/timeline.jpg'
plt.savefig(imageFile,dpi=400, bbox_inches='tight')
print('image generated', imageFile)
return imageFile
else:
plt.show()
if __name__ == "__main__":
UpdateTimeLineImage()
# DrawImage()
|
[
"datetime.datetime.strftime",
"pyecharts.components.Image",
"matplotlib.pyplot.show",
"pyecharts.options.ComponentTitleOpts",
"datetime.datetime.now",
"datetime.datetime.strptime",
"os.path.isfile",
"numpy.array",
"datetime.timedelta",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((452, 459), 'pyecharts.components.Image', 'Image', ([], {}), '()\n', (457, 459), False, 'from pyecharts.components import Image\n'), ((2129, 2160), 'numpy.array', 'np.array', (['[-5, 5, -3, 3, -1, 1]'], {}), '([-5, 5, -3, 3, -1, 1])\n', (2137, 2160), True, 'import numpy as np\n'), ((2175, 2244), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(36, 36 * 0.5625)', 'facecolor': '"""#D6D7C5"""', 'dpi': '(500)'}), "(figsize=(36, 36 * 0.5625), facecolor='#D6D7C5', dpi=500)\n", (2187, 2244), True, 'import matplotlib.pyplot as plt\n'), ((495, 514), 'os.path.isfile', 'path.isfile', (['imgUrl'], {}), '(imgUrl)\n', (506, 514), False, 'from os import path\n'), ((1806, 1847), 'datetime.datetime.strptime', 'datetime.strptime', (['x', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(x, '%Y-%m-%d %H:%M:%S')\n", (1823, 1847), False, 'from datetime import datetime, timedelta\n'), ((2040, 2059), 'datetime.timedelta', 'timedelta', ([], {'hours': '(19)'}), '(hours=19)\n', (2049, 2059), False, 'from datetime import datetime, timedelta\n'), ((3274, 3313), 'datetime.datetime.strftime', 'datetime.strftime', (['itick', '"""%m-%d %H:%M"""'], {}), "(itick, '%m-%d %H:%M')\n", (3291, 3313), False, 'from datetime import datetime, timedelta\n'), ((4474, 4526), 'matplotlib.pyplot.savefig', 'plt.savefig', (['imageFile'], {'dpi': '(400)', 'bbox_inches': '"""tight"""'}), "(imageFile, dpi=400, bbox_inches='tight')\n", (4485, 4526), True, 'import matplotlib.pyplot as plt\n'), ((4613, 4623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4621, 4623), True, 'import matplotlib.pyplot as plt\n'), ((863, 900), 'pyecharts.options.ComponentTitleOpts', 'ComponentTitleOpts', ([], {'title': '"""Time Line"""'}), "(title='Time Line')\n", (881, 900), False, 'from pyecharts.options import ComponentTitleOpts\n'), ((1924, 1969), 'datetime.datetime.strftime', 'datetime.strftime', (['startTick_t[1]', '"""%Y-%m-%d"""'], {}), "(startTick_t[1], '%Y-%m-%d')\n", (1941, 1969), False, 'from datetime import datetime, timedelta\n'), ((4281, 4295), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4293, 4295), False, 'from datetime import datetime, timedelta\n')]
|
import numpy as np
import unittest
import ray
from ray.rllib.evaluation.postprocessing import adjust_nstep, discount_cumsum
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.test_utils import check
class TestPostprocessing(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_n_step_3(self):
"""Tests, whether n-step adjustments of trajectories work."""
# n-step = 3
gamma = 0.9
obs = [1, 2, 3, 4, 5, 6, 7]
actions = ["ac1", "ac2", "ac1", "ac1", "ac1", "ac2", "ac1"]
rewards = [10.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0]
dones = [0, 0, 0, 0, 0, 0, 1]
next_obs = [2, 3, 4, 5, 6, 7, 8]
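        # With n-step=3 and gamma=0.9 the adjusted reward at t is
        # r[t] + 0.9*r[t+1] + 0.81*r[t+2], e.g. r[0] -> 10 + 0 + 81 = 91 and
        # r[1] -> 0 + 90 + 81 = 171 (verified by the checks below).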
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.DONES: dones,
SampleBatch.NEXT_OBS: next_obs,
}
)
adjust_nstep(3, gamma, batch)
check(batch[SampleBatch.OBS], [1, 2, 3, 4, 5, 6, 7])
check(
batch[SampleBatch.ACTIONS],
["ac1", "ac2", "ac1", "ac1", "ac1", "ac2", "ac1"],
)
check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 8, 8, 8])
check(batch[SampleBatch.DONES], [0, 0, 0, 0, 1, 1, 1])
check(
batch[SampleBatch.REWARDS], [91.0, 171.0, 271.0, 271.0, 271.0, 190.0, 100.0]
)
def test_n_step_4(self):
"""Tests, whether n-step adjustments of trajectories work."""
# n-step = 4
gamma = 0.99
obs = np.arange(0, 7)
actions = np.random.randint(-1, 3, size=(7,))
check_actions = actions.copy()
rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
dones = [False, False, False, False, False, False, True]
next_obs = np.arange(1, 8)
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.DONES: dones,
SampleBatch.NEXT_OBS: next_obs,
}
)
adjust_nstep(4, gamma, batch)
check(batch[SampleBatch.OBS], [0, 1, 2, 3, 4, 5, 6])
check(batch[SampleBatch.ACTIONS], check_actions)
check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 7, 7, 7])
check(batch[SampleBatch.DONES], [False, False, False, True, True, True, True])
check(
batch[SampleBatch.REWARDS],
[
discount_cumsum(np.array(rewards[0:4]), gamma)[0],
discount_cumsum(np.array(rewards[1:5]), gamma)[0],
discount_cumsum(np.array(rewards[2:6]), gamma)[0],
discount_cumsum(np.array(rewards[3:7]), gamma)[0],
discount_cumsum(np.array(rewards[4:]), gamma)[0],
discount_cumsum(np.array(rewards[5:]), gamma)[0],
discount_cumsum(np.array(rewards[6:]), gamma)[0],
],
)
def test_n_step_malformed_dones(self):
# Test bad input (trajectory has dones in middle).
# Re-use same batch, but change dones.
gamma = 1.0
obs = np.arange(0, 7)
actions = np.random.randint(-1, 3, size=(7,))
rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
next_obs = np.arange(1, 8)
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.DONES: [False, False, True, False, False, False, True],
SampleBatch.NEXT_OBS: next_obs,
}
)
self.assertRaisesRegex(
AssertionError,
"Unexpected done in middle",
lambda: adjust_nstep(5, gamma, batch),
)
def test_n_step_very_short_trajectory(self):
"""Tests, whether n-step also works for very small trajectories."""
gamma = 1.0
obs = np.arange(0, 2)
actions = np.random.randint(-100, 300, size=(2,))
check_actions = actions.copy()
rewards = [10.0, 100.0]
next_obs = np.arange(1, 3)
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.DONES: [False, False],
SampleBatch.NEXT_OBS: next_obs,
}
)
adjust_nstep(3, gamma, batch)
check(batch[SampleBatch.OBS], [0, 1])
check(batch[SampleBatch.ACTIONS], check_actions)
check(batch[SampleBatch.DONES], [False, False])
check(batch[SampleBatch.REWARDS], [10.0 + gamma * 100.0, 100.0])
check(batch[SampleBatch.NEXT_OBS], [2, 2])
def test_n_step_from_same_obs_source_array(self):
"""Tests, whether n-step also works on a shared obs/new-obs array."""
gamma = 0.99
# The underlying observation data. Both obs and next_obs will
# be references into that same np.array.
underlying_obs = np.arange(0, 8)
obs = underlying_obs[:7]
next_obs = underlying_obs[1:]
actions = np.random.randint(-1, 3, size=(7,))
check_actions = actions.copy()
rewards = [10.0, 0.0, 100.0, 50.0, 60.0, 10.0, 100.0]
dones = [False, False, False, False, False, False, True]
batch = SampleBatch(
{
SampleBatch.OBS: obs,
SampleBatch.ACTIONS: actions,
SampleBatch.REWARDS: rewards,
SampleBatch.DONES: dones,
SampleBatch.NEXT_OBS: next_obs,
}
)
adjust_nstep(4, gamma, batch)
check(batch[SampleBatch.OBS], [0, 1, 2, 3, 4, 5, 6])
check(batch[SampleBatch.ACTIONS], check_actions)
check(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 7, 7, 7])
check(batch[SampleBatch.DONES], [False, False, False, True, True, True, True])
check(
batch[SampleBatch.REWARDS],
[
discount_cumsum(np.array(rewards[0:4]), gamma)[0],
discount_cumsum(np.array(rewards[1:5]), gamma)[0],
discount_cumsum(np.array(rewards[2:6]), gamma)[0],
discount_cumsum(np.array(rewards[3:7]), gamma)[0],
discount_cumsum(np.array(rewards[4:]), gamma)[0],
discount_cumsum(np.array(rewards[5:]), gamma)[0],
discount_cumsum(np.array(rewards[6:]), gamma)[0],
],
)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
[
"ray.init",
"ray.rllib.policy.sample_batch.SampleBatch",
"ray.rllib.utils.test_utils.check",
"pytest.main",
"numpy.random.randint",
"ray.shutdown",
"numpy.arange",
"ray.rllib.evaluation.postprocessing.adjust_nstep",
"numpy.array"
] |
[((329, 339), 'ray.init', 'ray.init', ([], {}), '()\n', (337, 339), False, 'import ray\n'), ((402, 416), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (414, 416), False, 'import ray\n'), ((822, 984), 'ray.rllib.policy.sample_batch.SampleBatch', 'SampleBatch', (['{SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions, SampleBatch.REWARDS:\n rewards, SampleBatch.DONES: dones, SampleBatch.NEXT_OBS: next_obs}'], {}), '({SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions,\n SampleBatch.REWARDS: rewards, SampleBatch.DONES: dones, SampleBatch.\n NEXT_OBS: next_obs})\n', (833, 984), False, 'from ray.rllib.policy.sample_batch import SampleBatch\n'), ((1101, 1130), 'ray.rllib.evaluation.postprocessing.adjust_nstep', 'adjust_nstep', (['(3)', 'gamma', 'batch'], {}), '(3, gamma, batch)\n', (1113, 1130), False, 'from ray.rllib.evaluation.postprocessing import adjust_nstep, discount_cumsum\n'), ((1139, 1191), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.OBS]', '[1, 2, 3, 4, 5, 6, 7]'], {}), '(batch[SampleBatch.OBS], [1, 2, 3, 4, 5, 6, 7])\n', (1144, 1191), False, 'from ray.rllib.utils.test_utils import check\n'), ((1200, 1288), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.ACTIONS]', "['ac1', 'ac2', 'ac1', 'ac1', 'ac1', 'ac2', 'ac1']"], {}), "(batch[SampleBatch.ACTIONS], ['ac1', 'ac2', 'ac1', 'ac1', 'ac1', 'ac2',\n 'ac1'])\n", (1205, 1288), False, 'from ray.rllib.utils.test_utils import check\n'), ((1328, 1385), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.NEXT_OBS]', '[4, 5, 6, 7, 8, 8, 8]'], {}), '(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 8, 8, 8])\n', (1333, 1385), False, 'from ray.rllib.utils.test_utils import check\n'), ((1394, 1448), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.DONES]', '[0, 0, 0, 0, 1, 1, 1]'], {}), '(batch[SampleBatch.DONES], [0, 0, 0, 0, 1, 1, 1])\n', (1399, 1448), False, 'from ray.rllib.utils.test_utils import check\n'), ((1457, 1544), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.REWARDS]', '[91.0, 171.0, 271.0, 271.0, 271.0, 190.0, 100.0]'], {}), '(batch[SampleBatch.REWARDS], [91.0, 171.0, 271.0, 271.0, 271.0, 190.0,\n 100.0])\n', (1462, 1544), False, 'from ray.rllib.utils.test_utils import check\n'), ((1719, 1734), 'numpy.arange', 'np.arange', (['(0)', '(7)'], {}), '(0, 7)\n', (1728, 1734), True, 'import numpy as np\n'), ((1753, 1788), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(3)'], {'size': '(7,)'}), '(-1, 3, size=(7,))\n', (1770, 1788), True, 'import numpy as np\n'), ((1974, 1989), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (1983, 1989), True, 'import numpy as np\n'), ((2006, 2168), 'ray.rllib.policy.sample_batch.SampleBatch', 'SampleBatch', (['{SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions, SampleBatch.REWARDS:\n rewards, SampleBatch.DONES: dones, SampleBatch.NEXT_OBS: next_obs}'], {}), '({SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions,\n SampleBatch.REWARDS: rewards, SampleBatch.DONES: dones, SampleBatch.\n NEXT_OBS: next_obs})\n', (2017, 2168), False, 'from ray.rllib.policy.sample_batch import SampleBatch\n'), ((2285, 2314), 'ray.rllib.evaluation.postprocessing.adjust_nstep', 'adjust_nstep', (['(4)', 'gamma', 'batch'], {}), '(4, gamma, batch)\n', (2297, 2314), False, 'from ray.rllib.evaluation.postprocessing import adjust_nstep, discount_cumsum\n'), ((2323, 2375), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.OBS]', '[0, 1, 2, 3, 4, 5, 6]'], {}), '(batch[SampleBatch.OBS], [0, 1, 2, 3, 4, 5, 6])\n', 
(2328, 2375), False, 'from ray.rllib.utils.test_utils import check\n'), ((2384, 2432), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.ACTIONS]', 'check_actions'], {}), '(batch[SampleBatch.ACTIONS], check_actions)\n', (2389, 2432), False, 'from ray.rllib.utils.test_utils import check\n'), ((2441, 2498), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.NEXT_OBS]', '[4, 5, 6, 7, 7, 7, 7]'], {}), '(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 7, 7, 7])\n', (2446, 2498), False, 'from ray.rllib.utils.test_utils import check\n'), ((2507, 2585), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.DONES]', '[False, False, False, True, True, True, True]'], {}), '(batch[SampleBatch.DONES], [False, False, False, True, True, True, True])\n', (2512, 2585), False, 'from ray.rllib.utils.test_utils import check\n'), ((3330, 3345), 'numpy.arange', 'np.arange', (['(0)', '(7)'], {}), '(0, 7)\n', (3339, 3345), True, 'import numpy as np\n'), ((3364, 3399), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(3)'], {'size': '(7,)'}), '(-1, 3, size=(7,))\n', (3381, 3399), True, 'import numpy as np\n'), ((3481, 3496), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (3490, 3496), True, 'import numpy as np\n'), ((3513, 3717), 'ray.rllib.policy.sample_batch.SampleBatch', 'SampleBatch', (['{SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions, SampleBatch.REWARDS:\n rewards, SampleBatch.DONES: [False, False, True, False, False, False, \n True], SampleBatch.NEXT_OBS: next_obs}'], {}), '({SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions,\n SampleBatch.REWARDS: rewards, SampleBatch.DONES: [False, False, True, \n False, False, False, True], SampleBatch.NEXT_OBS: next_obs})\n', (3524, 3717), False, 'from ray.rllib.policy.sample_batch import SampleBatch\n'), ((4148, 4163), 'numpy.arange', 'np.arange', (['(0)', '(2)'], {}), '(0, 2)\n', (4157, 4163), True, 'import numpy as np\n'), ((4182, 4221), 'numpy.random.randint', 'np.random.randint', (['(-100)', '(300)'], {'size': '(2,)'}), '(-100, 300, size=(2,))\n', (4199, 4221), True, 'import numpy as np\n'), ((4312, 4327), 'numpy.arange', 'np.arange', (['(1)', '(3)'], {}), '(1, 3)\n', (4321, 4327), True, 'import numpy as np\n'), ((4344, 4514), 'ray.rllib.policy.sample_batch.SampleBatch', 'SampleBatch', (['{SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions, SampleBatch.REWARDS:\n rewards, SampleBatch.DONES: [False, False], SampleBatch.NEXT_OBS: next_obs}'], {}), '({SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions,\n SampleBatch.REWARDS: rewards, SampleBatch.DONES: [False, False],\n SampleBatch.NEXT_OBS: next_obs})\n', (4355, 4514), False, 'from ray.rllib.policy.sample_batch import SampleBatch\n'), ((4632, 4661), 'ray.rllib.evaluation.postprocessing.adjust_nstep', 'adjust_nstep', (['(3)', 'gamma', 'batch'], {}), '(3, gamma, batch)\n', (4644, 4661), False, 'from ray.rllib.evaluation.postprocessing import adjust_nstep, discount_cumsum\n'), ((4670, 4707), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.OBS]', '[0, 1]'], {}), '(batch[SampleBatch.OBS], [0, 1])\n', (4675, 4707), False, 'from ray.rllib.utils.test_utils import check\n'), ((4716, 4764), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.ACTIONS]', 'check_actions'], {}), '(batch[SampleBatch.ACTIONS], check_actions)\n', (4721, 4764), False, 'from ray.rllib.utils.test_utils import check\n'), ((4773, 4820), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.DONES]', '[False, False]'], {}), 
'(batch[SampleBatch.DONES], [False, False])\n', (4778, 4820), False, 'from ray.rllib.utils.test_utils import check\n'), ((4829, 4893), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.REWARDS]', '[10.0 + gamma * 100.0, 100.0]'], {}), '(batch[SampleBatch.REWARDS], [10.0 + gamma * 100.0, 100.0])\n', (4834, 4893), False, 'from ray.rllib.utils.test_utils import check\n'), ((4902, 4944), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.NEXT_OBS]', '[2, 2]'], {}), '(batch[SampleBatch.NEXT_OBS], [2, 2])\n', (4907, 4944), False, 'from ray.rllib.utils.test_utils import check\n'), ((5243, 5258), 'numpy.arange', 'np.arange', (['(0)', '(8)'], {}), '(0, 8)\n', (5252, 5258), True, 'import numpy as np\n'), ((5349, 5384), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(3)'], {'size': '(7,)'}), '(-1, 3, size=(7,))\n', (5366, 5384), True, 'import numpy as np\n'), ((5568, 5730), 'ray.rllib.policy.sample_batch.SampleBatch', 'SampleBatch', (['{SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions, SampleBatch.REWARDS:\n rewards, SampleBatch.DONES: dones, SampleBatch.NEXT_OBS: next_obs}'], {}), '({SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions,\n SampleBatch.REWARDS: rewards, SampleBatch.DONES: dones, SampleBatch.\n NEXT_OBS: next_obs})\n', (5579, 5730), False, 'from ray.rllib.policy.sample_batch import SampleBatch\n'), ((5847, 5876), 'ray.rllib.evaluation.postprocessing.adjust_nstep', 'adjust_nstep', (['(4)', 'gamma', 'batch'], {}), '(4, gamma, batch)\n', (5859, 5876), False, 'from ray.rllib.evaluation.postprocessing import adjust_nstep, discount_cumsum\n'), ((5886, 5938), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.OBS]', '[0, 1, 2, 3, 4, 5, 6]'], {}), '(batch[SampleBatch.OBS], [0, 1, 2, 3, 4, 5, 6])\n', (5891, 5938), False, 'from ray.rllib.utils.test_utils import check\n'), ((5947, 5995), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.ACTIONS]', 'check_actions'], {}), '(batch[SampleBatch.ACTIONS], check_actions)\n', (5952, 5995), False, 'from ray.rllib.utils.test_utils import check\n'), ((6004, 6061), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.NEXT_OBS]', '[4, 5, 6, 7, 7, 7, 7]'], {}), '(batch[SampleBatch.NEXT_OBS], [4, 5, 6, 7, 7, 7, 7])\n', (6009, 6061), False, 'from ray.rllib.utils.test_utils import check\n'), ((6070, 6148), 'ray.rllib.utils.test_utils.check', 'check', (['batch[SampleBatch.DONES]', '[False, False, False, True, True, True, True]'], {}), '(batch[SampleBatch.DONES], [False, False, False, True, True, True, True])\n', (6075, 6148), False, 'from ray.rllib.utils.test_utils import check\n'), ((6785, 6814), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (6796, 6814), False, 'import pytest\n'), ((3947, 3976), 'ray.rllib.evaluation.postprocessing.adjust_nstep', 'adjust_nstep', (['(5)', 'gamma', 'batch'], {}), '(5, gamma, batch)\n', (3959, 3976), False, 'from ray.rllib.evaluation.postprocessing import adjust_nstep, discount_cumsum\n'), ((2687, 2709), 'numpy.array', 'np.array', (['rewards[0:4]'], {}), '(rewards[0:4])\n', (2695, 2709), True, 'import numpy as np\n'), ((2754, 2776), 'numpy.array', 'np.array', (['rewards[1:5]'], {}), '(rewards[1:5])\n', (2762, 2776), True, 'import numpy as np\n'), ((2821, 2843), 'numpy.array', 'np.array', (['rewards[2:6]'], {}), '(rewards[2:6])\n', (2829, 2843), True, 'import numpy as np\n'), ((2888, 2910), 'numpy.array', 'np.array', (['rewards[3:7]'], {}), '(rewards[3:7])\n', (2896, 2910), True, 'import numpy as np\n'), ((2955, 
2976), 'numpy.array', 'np.array', (['rewards[4:]'], {}), '(rewards[4:])\n', (2963, 2976), True, 'import numpy as np\n'), ((3021, 3042), 'numpy.array', 'np.array', (['rewards[5:]'], {}), '(rewards[5:])\n', (3029, 3042), True, 'import numpy as np\n'), ((3087, 3108), 'numpy.array', 'np.array', (['rewards[6:]'], {}), '(rewards[6:])\n', (3095, 3108), True, 'import numpy as np\n'), ((6250, 6272), 'numpy.array', 'np.array', (['rewards[0:4]'], {}), '(rewards[0:4])\n', (6258, 6272), True, 'import numpy as np\n'), ((6317, 6339), 'numpy.array', 'np.array', (['rewards[1:5]'], {}), '(rewards[1:5])\n', (6325, 6339), True, 'import numpy as np\n'), ((6384, 6406), 'numpy.array', 'np.array', (['rewards[2:6]'], {}), '(rewards[2:6])\n', (6392, 6406), True, 'import numpy as np\n'), ((6451, 6473), 'numpy.array', 'np.array', (['rewards[3:7]'], {}), '(rewards[3:7])\n', (6459, 6473), True, 'import numpy as np\n'), ((6518, 6539), 'numpy.array', 'np.array', (['rewards[4:]'], {}), '(rewards[4:])\n', (6526, 6539), True, 'import numpy as np\n'), ((6584, 6605), 'numpy.array', 'np.array', (['rewards[5:]'], {}), '(rewards[5:])\n', (6592, 6605), True, 'import numpy as np\n'), ((6650, 6671), 'numpy.array', 'np.array', (['rewards[6:]'], {}), '(rewards[6:])\n', (6658, 6671), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 14:27:05 2020
@author: ricardoguimaraes
"""
import numpy as np
import pandas as pd
import geopandas as gpd
from gdf_heatmap import gdf_heatmap
from array_to_tiff import array_to_tiff
if __name__ == '__main__':
from shapely.geometry import Point
import matplotlib.pyplot as plt
df = pd.DataFrame({'x': np.random.normal(-45, 8, size=(100)),
'y': np.random.normal(-4, 8, size=(100)),
'z': np.random.normal(-40, 4, size=(100))}
)
df['geometry'] = df.apply(lambda x: Point(x['x'], x['y']), axis=1)
gdf = gpd.GeoDataFrame(df)
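    # Grid the random point values into a heatmap (cell size 0.5, gaussian
    # smoothing) and export the resulting array as a GeoTIFF.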
Result = gdf_heatmap(gdf, df_column ='z',
dx=0.5, dy=0.5, verbose=True,
smooth=0.3,
function='gaussian')
array_to_tiff(Result['array'], Result['x'],
Result['y'],Result['dx'], Result['dy'],
to_file=r'C:\Users\lealp\Downloads\Temp\My_tiff')
    input('Press any key to close')
plt.close('all')
del Result
del gdf
|
[
"shapely.geometry.Point",
"matplotlib.pyplot.close",
"geopandas.GeoDataFrame",
"numpy.random.normal",
"gdf_heatmap.gdf_heatmap",
"array_to_tiff.array_to_tiff"
] |
[((665, 685), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['df'], {}), '(df)\n', (681, 685), True, 'import geopandas as gpd\n'), ((704, 802), 'gdf_heatmap.gdf_heatmap', 'gdf_heatmap', (['gdf'], {'df_column': '"""z"""', 'dx': '(0.5)', 'dy': '(0.5)', 'verbose': '(True)', 'smooth': '(0.3)', 'function': '"""gaussian"""'}), "(gdf, df_column='z', dx=0.5, dy=0.5, verbose=True, smooth=0.3,\n function='gaussian')\n", (715, 802), False, 'from gdf_heatmap import gdf_heatmap\n'), ((862, 1004), 'array_to_tiff.array_to_tiff', 'array_to_tiff', (["Result['array']", "Result['x']", "Result['y']", "Result['dx']", "Result['dy']"], {'to_file': '"""C:\\\\Users\\\\lealp\\\\Downloads\\\\Temp\\\\My_tiff"""'}), "(Result['array'], Result['x'], Result['y'], Result['dx'],\n Result['dy'], to_file='C:\\\\Users\\\\lealp\\\\Downloads\\\\Temp\\\\My_tiff')\n", (875, 1004), False, 'from array_to_tiff import array_to_tiff\n'), ((1068, 1084), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1077, 1084), True, 'import matplotlib.pyplot as plt\n'), ((380, 414), 'numpy.random.normal', 'np.random.normal', (['(-45)', '(8)'], {'size': '(100)'}), '(-45, 8, size=100)\n', (396, 414), True, 'import numpy as np\n'), ((447, 480), 'numpy.random.normal', 'np.random.normal', (['(-4)', '(8)'], {'size': '(100)'}), '(-4, 8, size=100)\n', (463, 480), True, 'import numpy as np\n'), ((513, 547), 'numpy.random.normal', 'np.random.normal', (['(-40)', '(4)'], {'size': '(100)'}), '(-40, 4, size=100)\n', (529, 547), True, 'import numpy as np\n'), ((619, 640), 'shapely.geometry.Point', 'Point', (["x['x']", "x['y']"], {}), "(x['x'], x['y'])\n", (624, 640), False, 'from shapely.geometry import Point\n')]
|
import numpy as np
from scipy.integrate import quad
import pandas as pd
# calculate the k-correction in erg s^-1 cm^-2:
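# Band-function photon spectrum: exponentially cut-off power law below the
# break energy (alpha-beita)*Epeak/(2+alpha), power law of index beita above it.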
def NE(E,Epeak,alpha,beita):
if (alpha-beita)*Epeak/(2+alpha)>=E:
NE=(E/100)**alpha*np.exp(-E*(2+alpha)/Epeak)
return NE
elif (alpha-beita)*Epeak/(2+alpha)<=E:
NE=(((alpha-beita)*Epeak/(100*(2+alpha)))**(alpha-beita)*np.exp(beita-alpha)*(E/100)**beita)
return NE
def k(Epeak,Z,alpha,beita,bandmin,bandmax):
a1=quad(lambda E:E*NE(E,Epeak,alpha,beita),1/(1+Z),10**4/(1+Z))
a2=quad(lambda E:E*NE(E,Epeak,alpha,beita),bandmin,bandmax)
k=a1[0]/a2[0]
return k
# calculate the k-correction in photons s^-1 cm^-2:
def nk(Epeak,Z,alpha,beita,bandmin,bandmax):
a1=quad(lambda E:E*NE(E,Epeak,alpha,beita),1/(1+Z),10**4/(1+Z))
a2=quad(lambda E:NE(E,Epeak,alpha,beita),bandmin,bandmax)
k=a1[0]/a2[0]
# return k
    return k*1.6*10**(-9)  # transform keV to erg
# calculate the luminosity distance
omegal=0.734
omegam=0.266
h=0.71
H0=1/(3.09*10**17)
H0yr=1/(9.78*10**9)
# H0=70*10**5
c=2.99792458*10**8
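# Flat LambdaCDM cosmology: Omega_Lambda=0.734, Omega_m=0.266, h=0.71.
# H0 is 100 km/s/Mpc written in s^-1 (H0yr in yr^-1), so the physical Hubble
# constant h*H0 appears in dl() below; c is in m/s.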
def dl(Z):
integrateportion=quad(lambda x:1/np.sqrt(omegam*(1+x)**3+omegal),0,Z)
dl=c*(1+Z)/(h*H0)*integrateportion[0]
# dl =c/H0*integrateportion[0]
return dl*10**2 # transform m to cm
#Calculate the opening angle
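# The jet half-opening angle follows from E_gamma = (1 - cos(theta)) * E_iso,
# with E_gamma inferred from the rest-frame peak energy via the adopted
# Ep-E_gamma correlation.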
def seita(z,ep,s,alpha,beita,bandmin,bandmax):
eiso=4*np.pi*dl(z)**2*s*k(ep,z,alpha,beita,bandmin,bandmax)/(1+z)
Egama=(ep*(1+z)/10**2.57)**(1/0.61)*3.8*10**50
seitaradian=np.arccos(1-Egama/eiso)
seita=seitaradian/(2*np.pi)*360
return seita
# calculate seita for photons s^-1 cm^-2
def pseita(z,ep,s,alpha,beita,bandmin,bandmax):
eiso=4*np.pi*dl(z)**2*s*nk(ep,z,alpha,beita,bandmin,bandmax)/(1+z)
Egama=(ep*(1+z)/10**2.57)**(1/0.61)*3.8*10**50
seitaradian=np.arccos(1-Egama/eiso)
seita=seitaradian/(2*np.pi)*360
return seita
#Calculate the Egamma
def egamma(z,ep):
Egama = (ep * (1 + z) / 10 ** 2.57) ** (1 / 0.61) * 3.8 * 10 ** 50
return Egama
#Calculate the Eiso
def eiso(z,ep,s,alpha,beita,bandmin,bandmax):
eiso=4*np.pi*dl(z)**2*s*k(ep,z,alpha,beita,bandmin,bandmax)/(1+z)
return eiso
#Define a new spectrum calculate method @2018.6.20 [the cases only contain 'alpha']
def alphaNE(E,Epeak,alpha):
NE=(E/100)**alpha*np.exp(-(2+alpha)*E/Epeak)
return NE
def alphaek(Epeak,alpha,Z,bandmin,bandmax):
a1=quad(lambda E:E*alphaNE(E,Epeak,alpha),1/(1+Z),10**4/(1+Z))
a2=quad(lambda E:E*alphaNE(E,Epeak,alpha),bandmin, bandmax)
k=a1[0]/a2[0]
return k
def alphapk(Epeak,alpha,Z,bandmin,bandmax):
a1=quad(lambda E:E*alphaNE(E,Epeak,alpha),1/(1+Z),10**4/(1+Z))
a2=quad(lambda E:alphaNE(E,Epeak,alpha),bandmin,bandmax)
k=a1[0]/a2[0]
return k*1.6*10**(-9)
def seitaerg6_20(z,ep,s,alpha,bandmin,bandmax):
eiso=4*np.pi*dl(z)**2*s*alphaek(ep,alpha,z,bandmin,bandmax)/(1+z)
Egama=(ep*(1+z)/10**2.57)**(1/0.61)*3.8*10**50
seitaradian=np.arccos(1-Egama/eiso)
seita=seitaradian/(2*np.pi)*360
# k = alphaek(ep,alpha,z,bandmin, bandmax)
return seita,z,Egama
def seitaphoton6_20(z,ep,s,alpha,bandmin,bandmax):
eiso=4*np.pi*dl(z)**2*s*alphapk(ep,alpha,z,bandmin,bandmax)/(1+z)
Egama=(ep*(1+z)/10**2.57)**(1/0.61)*3.8*10**50
seitaradian=np.arccos(1-Egama/eiso)
seita=seitaradian/(2*np.pi)*360
# k=alphapk(ep,alpha,z,bandmin,bandmax)*(1/1.6)*10**(9)
return seita,z,Egama
# refer to the 6.11 work:
def erg6_11():
df = pd.read_excel("/Users/dingding/Desktop/calculate/6.9/erg.xlsx")
ebandmin = df['bandmin']
ebandmax=df['bandmax']
egrbname=df['GRB']
ez=df['z']
eep=df['ep']
ealpha=df['alpha']
ebeta=df['beta']
efluence=df['fluence']
i=0
seita1=[]
eegamma=[]
for i in range(len(egrbname)):
seita1=np.append(seita1,seita(ez[i],eep[i],efluence[i],ealpha[i],ebeta[i],ebandmin[i],ebandmax[i]))
eegamma=np.append(eegamma,egamma(ez[i],eep[i]))
return seita1,ez,eegamma
def photon6_11():
dp = pd.read_excel("/Users/dingding/Desktop/calculate/6.9/photons.xlsx")
pbandmin = dp['bandmin']
pbandmax=dp['bandmax']
pgrbname=dp['GRB']
pz=dp['z']
pep=dp['ep']
palpha=dp['alpha']
pbeta=dp['beta']
pfluence=dp['fluence']
i=0
seita2=[]
pegamma=[]
for i in range(len(pgrbname)):
seita2=np.append(seita2,pseita(pz[i],pep[i],pfluence[i],palpha[i],pbeta[i],pbandmin[i],pbandmax[i]))
pegamma=np.append(pegamma,egamma(pz[i],pep[i]))
return seita2,pz,pegamma
#Calculate the Linear regression equation:
def linearregressionEQ(series1,series2):
up=[]
down=[]
xmean=np.mean(series1)
ymean=np.mean(series2)
for i in range(len(series1)):
up=np.append(up,series1[i]*series2[i]-len(series1)*xmean*ymean)
down=np.append(down,series1[i]**2-len(series1)*xmean**2)
u=np.sum(up)
d=np.sum(down)
b=u/d
a=ymean-b*xmean
return a,b
def linearnew(series1,series2):
up1=[]
up2=[]
up3=[]
up4=[]
down1=[]
down2=[]
for i in range(len(series1)):
up1=np.append(up1,series1[i]**2)
up2=np.append(up2,series2[i])
up3=np.append(up3,series1[i])
up4=np.append(up4,series1[i]*series2[i])
down1=np.append(down1,series1[i]**2)
down2=np.append(down2,series1[i])
up1=np.sum(up1)
up2=np.sum(up2)
up3=np.sum(up3)
up4=np.sum(up4)
down1=np.sum(down1)
down2=np.sum(down2)
up=up1*up2-up3*up4
down=down1*len(series1)-down2**2
a0=up/down
up=len(series1)*up4-up3*up2
down=len(series1)*down1-down2**2
a1=up/down
return a0,a1
# 8.31
# Define a model to describe the distribution of GRB with redshift z
# define the complete gamma function:
def comGammaFunc(v):
gamma=quad(lambda t:t**(v-1)*np.e**(-t),0,float("inf"))
return gamma[0]
#define the incomplete gamma function:
def incomGammaFunc(v,z):
sgamma=quad(lambda u:u**(v-1)*np.e**(-u),0,z)[0]
bgamma=quad(lambda u:u**(v-1)*np.e**(-u),z,float('inf'))[0]
return bgamma,sgamma
#and define the Seitafunction:
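# SeitaFunc is the regularized lower incomplete gamma function
# P(alpha+2, eps**beta * 10**(0.15*beta*z)).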
def SeitaFunc(eps,z,alpha,beta):
Seita1=incomGammaFunc(alpha+2,eps**beta*10**(0.15*beta*z))[1]
Seita2=comGammaFunc(alpha+2)
Seita=Seita1/Seita2
return Seita
# define the star formation rate segment function:
def RSFR(z):
zpeak=1
if z<=zpeak:
Rsfr=(1+z)**(3.44)
return Rsfr
elif z>=zpeak:
Rsfr=(1+zpeak)**(3.44)
return Rsfr
# define the grb rate function:
def RGRB(z,eps,alpha,beta,rho):
A=1/(33.30270146296203)
RGRB=A*rho*RSFR(z)*SeitaFunc(eps,z,alpha,beta)
return RGRB
#define a number calculate function without duration T
def N(z,eps,alpha,beta,rho,zmax):
    convertfactor=c*3600*24*365*10**2*3.26164*10**9
    # the integrand must be re-evaluated at every redshift; a constant lambda
    # would collapse the integral to n*(zmax-z)
    def integrand(x):
        dlgpc=dl(x)/convertfactor
        E=np.sqrt(omegam*(1+x)**3+omegal)
        return RGRB(x,eps,alpha,beta,rho)/(1+x)*4*np.pi*c*dlgpc**2/(H0yr*(1+x)**2*E)
    N=quad(integrand,z,zmax)
    return N[0]
import matplotlib.pyplot as plt
import matplotlib
import random
# 9.6
# Log-normal luminosity function; the normalization constant A_{L} is set numerically below:
def Luminosityfunction(L_gamma):
L_critical=10**(49.69) #unit is erg
sigma_L=0.4
A_L=1/(1.7235434382660358e+50)
luminosityfunc=A_L*np.exp(-(np.log10(L_gamma)-np.log10(
L_critical))**2/(2*sigma_L**2))/(np.sqrt(2*np.pi)*sigma_L)
return luminosityfunc
# Define the angle distribution as log-normal distribution:
def thetalogdistri(theta_jet):
theta_critical=10**(-1.27)
sigema_theta=0.6
A_theta=1/0.32112249370542306
Psi=A_theta*np.exp(-(np.log10(theta_jet)-np.log10(theta_critical))**2/
(2*sigema_theta**2))/(np.sqrt(2*np.pi)*sigema_theta)
return Psi-0.22039824379156006-0.688381515339374
#-0.22039824379156006
# def Ntheta(thetamin,thetamax):
# N=quad(lambda theta_jet:thetalogdistri(theta_jet),thetamin,thetamax)
# return N[0]
# Define peak flux P:
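# Peak photon flux seen by Swift/BAT (15-150 keV): the beaming-corrected
# luminosity is first converted to its isotropic equivalent, the peak energy
# comes from an Ep-L relation with a random scatter factor C in [0.1, 1], and
# a Band spectrum with alpha=-1.1, beta=-2.2 is assumed for the band conversion.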
def P(z,L_gamma,theta_jet):
L=L_gamma/(1-np.cos(theta_jet/180*np.pi))
C=random.uniform(0.1,1)
ep=200*(L/10**52)**0.5/C/(1+z)
P=L/(4*np.pi*dl(z)**2*nk(ep,z,-1.1,-2.2,15,150)) #15-150 kev of swift/BAT
return P
# BAT trigger probability:
def eta_t(P):
if P<0.45:
eta_t=P**2
        return eta_t/0.67  # normalize the P-detection probability (max 0.67 -> 1)
elif P>=0.45:
eta_t=0.67*(1.0-0.4/P)**0.52
        return eta_t/0.67  # normalize the P-detection probability (max 0.67 -> 1)
# weak dependence of probability on the observed peak flux:
def eta_z(P):
eta_z=0.26+0.032*np.e**(1.61*np.log10(P))
return eta_z
# the probability of alignment for a GRB with jet opening angle theta_{j}:
def eta_a(theta_jet):
# eta_a=1.4*(1-np.cos(theta_jet))/(4*np.pi) #where 1.4 sr is instrument solid angle
normal=1-np.cos(theta_jet)
return normal
# def Nluminus(z,theta_jet,Luminusmin,Luminusmax):
# N=quad(lambda L_gamma:eta_a(theta_jet)*eta_t(P(z,L_gamma,theta_jet)
# )*eta_z(P(z,L_gamma,theta_jet))*Luminosityfunction(L_gamma),
# Luminusmin,Luminusmax)
# return N[0]
def luminosity(z,s,t90):
l=4*np.pi*dl(z)**2*s*k(80,z,-1,-2.5,15,150)*(1+z)/t90
return l
def P_obseved(z,s,t90):
l=luminosity(z,s,t90)
p=l/(4*np.pi*dl(z)**2*nk(80,z,-1,-2.5,15,150))
return p
def pdflog(series,down,up,num):
step=(up-down)/num
pdf=[]
for i in range(num):
counter=0
for j in range(len(series)):
if 10**(down+i*step)<series[j]<10**(down+(i+1)*step):
counter=counter+1
pdf=np.append(pdf,counter)
pdf=pdf/np.sum(pdf)
return pdf
# #Define a operation to delete the 'nan' element:
# def deletenan(series1,series2):
# series=np.append(series1,series2)
# a=series[:len(series):3]
# b=series[1:len(series):3]
# c=series[2:len(series):3]
# a=np.nan_to_num(a)
# itemindex=np.argwhere(a==0)
# a=np.delete(a,itemindex,axis=0)
# b=np.delete(b,itemindex,axis=0)
# c=np.delete(c,itemindex,axis=0)
# return a,b,c
|
[
"numpy.sum",
"scipy.integrate.quad",
"random.uniform",
"pandas.read_excel",
"numpy.append",
"numpy.mean",
"numpy.exp",
"numpy.cos",
"numpy.log10",
"numpy.arccos",
"numpy.sqrt"
] |
[((1513, 1540), 'numpy.arccos', 'np.arccos', (['(1 - Egama / eiso)'], {}), '(1 - Egama / eiso)\n', (1522, 1540), True, 'import numpy as np\n'), ((1816, 1843), 'numpy.arccos', 'np.arccos', (['(1 - Egama / eiso)'], {}), '(1 - Egama / eiso)\n', (1825, 1843), True, 'import numpy as np\n'), ((2965, 2992), 'numpy.arccos', 'np.arccos', (['(1 - Egama / eiso)'], {}), '(1 - Egama / eiso)\n', (2974, 2992), True, 'import numpy as np\n'), ((3286, 3313), 'numpy.arccos', 'np.arccos', (['(1 - Egama / eiso)'], {}), '(1 - Egama / eiso)\n', (3295, 3313), True, 'import numpy as np\n'), ((3478, 3541), 'pandas.read_excel', 'pd.read_excel', (['"""/Users/dingding/Desktop/calculate/6.9/erg.xlsx"""'], {}), "('/Users/dingding/Desktop/calculate/6.9/erg.xlsx')\n", (3491, 3541), True, 'import pandas as pd\n'), ((4017, 4084), 'pandas.read_excel', 'pd.read_excel', (['"""/Users/dingding/Desktop/calculate/6.9/photons.xlsx"""'], {}), "('/Users/dingding/Desktop/calculate/6.9/photons.xlsx')\n", (4030, 4084), True, 'import pandas as pd\n'), ((4651, 4667), 'numpy.mean', 'np.mean', (['series1'], {}), '(series1)\n', (4658, 4667), True, 'import numpy as np\n'), ((4678, 4694), 'numpy.mean', 'np.mean', (['series2'], {}), '(series2)\n', (4685, 4694), True, 'import numpy as np\n'), ((4872, 4882), 'numpy.sum', 'np.sum', (['up'], {}), '(up)\n', (4878, 4882), True, 'import numpy as np\n'), ((4889, 4901), 'numpy.sum', 'np.sum', (['down'], {}), '(down)\n', (4895, 4901), True, 'import numpy as np\n'), ((5345, 5356), 'numpy.sum', 'np.sum', (['up1'], {}), '(up1)\n', (5351, 5356), True, 'import numpy as np\n'), ((5365, 5376), 'numpy.sum', 'np.sum', (['up2'], {}), '(up2)\n', (5371, 5376), True, 'import numpy as np\n'), ((5385, 5396), 'numpy.sum', 'np.sum', (['up3'], {}), '(up3)\n', (5391, 5396), True, 'import numpy as np\n'), ((5405, 5416), 'numpy.sum', 'np.sum', (['up4'], {}), '(up4)\n', (5411, 5416), True, 'import numpy as np\n'), ((5427, 5440), 'numpy.sum', 'np.sum', (['down1'], {}), '(down1)\n', (5433, 5440), True, 'import numpy as np\n'), ((5451, 5464), 'numpy.sum', 'np.sum', (['down2'], {}), '(down2)\n', (5457, 5464), True, 'import numpy as np\n'), ((6845, 6884), 'numpy.sqrt', 'np.sqrt', (['(omegam * (1 + z) ** 3 + omegal)'], {}), '(omegam * (1 + z) ** 3 + omegal)\n', (6852, 6884), True, 'import numpy as np\n'), ((6959, 6985), 'scipy.integrate.quad', 'quad', (['(lambda z: n)', 'z', 'zmax'], {}), '(lambda z: n, z, zmax)\n', (6963, 6985), False, 'from scipy.integrate import quad\n'), ((8051, 8073), 'random.uniform', 'random.uniform', (['(0.1)', '(1)'], {}), '(0.1, 1)\n', (8065, 8073), False, 'import random\n'), ((2314, 2346), 'numpy.exp', 'np.exp', (['(-(2 + alpha) * E / Epeak)'], {}), '(-(2 + alpha) * E / Epeak)\n', (2320, 2346), True, 'import numpy as np\n'), ((5096, 5127), 'numpy.append', 'np.append', (['up1', '(series1[i] ** 2)'], {}), '(up1, series1[i] ** 2)\n', (5105, 5127), True, 'import numpy as np\n'), ((5137, 5163), 'numpy.append', 'np.append', (['up2', 'series2[i]'], {}), '(up2, series2[i])\n', (5146, 5163), True, 'import numpy as np\n'), ((5175, 5201), 'numpy.append', 'np.append', (['up3', 'series1[i]'], {}), '(up3, series1[i])\n', (5184, 5201), True, 'import numpy as np\n'), ((5213, 5252), 'numpy.append', 'np.append', (['up4', '(series1[i] * series2[i])'], {}), '(up4, series1[i] * series2[i])\n', (5222, 5252), True, 'import numpy as np\n'), ((5264, 5297), 'numpy.append', 'np.append', (['down1', '(series1[i] ** 2)'], {}), '(down1, series1[i] ** 2)\n', (5273, 5297), True, 'import numpy as np\n'), ((5309, 5337), 'numpy.append', 
'np.append', (['down2', 'series1[i]'], {}), '(down2, series1[i])\n', (5318, 5337), True, 'import numpy as np\n'), ((5947, 5994), 'scipy.integrate.quad', 'quad', (['(lambda u: u ** (v - 1) * np.e ** -u)', '(0)', 'z'], {}), '(lambda u: u ** (v - 1) * np.e ** -u, 0, z)\n', (5951, 5994), False, 'from scipy.integrate import quad\n'), ((8833, 8850), 'numpy.cos', 'np.cos', (['theta_jet'], {}), '(theta_jet)\n', (8839, 8850), True, 'import numpy as np\n'), ((9602, 9625), 'numpy.append', 'np.append', (['pdf', 'counter'], {}), '(pdf, counter)\n', (9611, 9625), True, 'import numpy as np\n'), ((9637, 9648), 'numpy.sum', 'np.sum', (['pdf'], {}), '(pdf)\n', (9643, 9648), True, 'import numpy as np\n'), ((216, 248), 'numpy.exp', 'np.exp', (['(-E * (2 + alpha) / Epeak)'], {}), '(-E * (2 + alpha) / Epeak)\n', (222, 248), True, 'import numpy as np\n'), ((7376, 7394), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (7383, 7394), True, 'import numpy as np\n'), ((7717, 7735), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (7724, 7735), True, 'import numpy as np\n'), ((8016, 8047), 'numpy.cos', 'np.cos', (['(theta_jet / 180 * np.pi)'], {}), '(theta_jet / 180 * np.pi)\n', (8022, 8047), True, 'import numpy as np\n'), ((1138, 1177), 'numpy.sqrt', 'np.sqrt', (['(omegam * (1 + x) ** 3 + omegal)'], {}), '(omegam * (1 + x) ** 3 + omegal)\n', (1145, 1177), True, 'import numpy as np\n'), ((369, 390), 'numpy.exp', 'np.exp', (['(beita - alpha)'], {}), '(beita - alpha)\n', (375, 390), True, 'import numpy as np\n'), ((8600, 8611), 'numpy.log10', 'np.log10', (['P'], {}), '(P)\n', (8608, 8611), True, 'import numpy as np\n'), ((7307, 7324), 'numpy.log10', 'np.log10', (['L_gamma'], {}), '(L_gamma)\n', (7315, 7324), True, 'import numpy as np\n'), ((7325, 7345), 'numpy.log10', 'np.log10', (['L_critical'], {}), '(L_critical)\n', (7333, 7345), True, 'import numpy as np\n'), ((7630, 7649), 'numpy.log10', 'np.log10', (['theta_jet'], {}), '(theta_jet)\n', (7638, 7649), True, 'import numpy as np\n'), ((7650, 7674), 'numpy.log10', 'np.log10', (['theta_critical'], {}), '(theta_critical)\n', (7658, 7674), True, 'import numpy as np\n')]
|
"""
Fixed Maximum Cost (FMC) baseline
"""
import logging
from collections import defaultdict
from typing import Tuple, List
import time
import numpy as np
from pup.algorithms import privacy_helper
from pup.algorithms.uniform_prior import cal_prob_dists_num_users_for_grid
from pup.algorithms.util import get_linear_profit_fixed_cost
from pup.common.datatypes import CheckinDataset
from pup.common.enums import MethodType
from pup.common.grid import Grid
from pup.config import Config
from pup.experiment import exp_util
from pup.io import dataio
logger = logging.getLogger(__name__)
def exe_fixed_maximum_cost(data: CheckinDataset, grid: Grid) -> Tuple[List[List], float, np.ndarray, float]:
"""
Execute Fixed Maximum Cost method
Parameters
----------
data
check-in dataset
grid
the grid for experiment evaluation
Returns
-------
typing.List[typing.List]
the matrix of probability distributions of the number of users for each grid cell
total_cost: float
total cost spent on buying data
costs: numpy.ndarray
costs of each region
exe_time: float
execution time
"""
s_time = time.time()
logger.info('Starting FIXED MAXIMUM COST method')
# Load config
price_from_noise_rate = Config.price_from_noise_func_rate
std_from_noise_initial_value = Config.standard_deviation_from_noise_func_initial_value
std_from_noise_rate = Config.standard_deviation_from_noise_func_rate
final_probs_filter_type = Config.final_probs_filter_type
budget_per_region = get_fmc_budget()
# START FMC ---------------------
logger.info('Budget = {}'.format(budget_per_region))
noisy_data, remain_budget_per_region = buy_data_with_budget(
budget_per_region, data,
price_from_noise_rate, std_from_noise_initial_value, std_from_noise_rate)
logger.info('Prepare {} noisy data point with normal random variables'.format(len(noisy_data)))
num_regions = np.prod(grid.get_shape())
cost = budget_per_region - remain_budget_per_region
costs = np.zeros(grid.get_shape())
costs.fill(cost)
total_cost = cost * num_regions
logger.info('Total cost spent on buying data = {}'.format(total_cost))
# Run experiment on the entire grid. One can run on single region by using 1x1 grid
# Calculate the probability distributions of the number of each grid cell
dists_of_num_users = cal_prob_dists_num_users_for_grid(grid, noisy_data, final_probs_filter_type)
exe_time = time.time() - s_time
return dists_of_num_users, total_cost, costs, exe_time
# END FMC ---------------------
def get_fmc_budget() -> float:
""" Get budget for FMC
- First, get based on given percentage
- Second, get based on probing costs if percentage is not given
- Third, get based on a fixed budget if others are not available
Returns
-------
float
budget
"""
fmc_budget_from_cost_percentage = Config.fmc_budget_from_cost_percentage
if fmc_budget_from_cost_percentage <= 0:
# we will not get budget from percentage of the fixed cost
fmc_budget_from_probing = Config.fmc_budget_from_probing
if fmc_budget_from_probing:
# we get budget from costs of SIP
costs = dataio.read_costs(MethodType.PROBING)
budget = int(np.average(costs)) + 1
else:
# we used a fixed budget
budget = Config.budget # prepare budget
else:
# get budget from the percentage of the fixed cost
budget = get_linear_profit_fixed_cost() * fmc_budget_from_cost_percentage / 100.0
return budget
def buy_data_with_budget(budget: float, data: CheckinDataset,
price_from_noise_rate: float,
std_from_noise_initial_value: float,
std_from_noise_rate: float) -> Tuple[CheckinDataset, float]:
""" Buy data points with a given total budget.
Each data point would be given the same amount of budget.
For a particular data point, the budget may be more than enough to buy it without perturbation.
So there can be some budget left. This budget is not used for other data points.
Parameters
----------
budget
maximum budget
data
the dataset to buy data from
price_from_noise_rate
rate of price from noise exponential function
std_from_noise_initial_value
initial value of standard deviation from noise exponential function, i.e. when input values is approx 0
std_from_noise_rate
rate of standard deviation from noise exponential function
Returns
-------
noisy_data: CheckinDataset
noisy data bought
remain_budget: float
remain budget
"""
# calculate the price to pay for each data point
num_data_points = exp_util.cal_num_data_points(data)
price_per_data_point = budget / float(num_data_points)
logger.info('Price per data point = {}'.format(price_per_data_point))
# buy noisy data
remain_budget = 0
noisy_data = defaultdict(defaultdict)
for user, checkins in data.items():
for c_id, c in checkins.items():
noisy_c = privacy_helper.buy_data_at_price(
c, price_per_data_point, price_from_noise_rate, std_from_noise_initial_value, std_from_noise_rate)
noisy_data[user][c_id] = noisy_c
if c.combined_privacy_value < price_per_data_point:
remain_budget += price_per_data_point - c.combined_privacy_value
logger.info('Remain budget for region = {}'.format(remain_budget))
return noisy_data, remain_budget
|
[
"numpy.average",
"pup.io.dataio.read_costs",
"pup.algorithms.util.get_linear_profit_fixed_cost",
"pup.algorithms.privacy_helper.buy_data_at_price",
"pup.algorithms.uniform_prior.cal_prob_dists_num_users_for_grid",
"time.time",
"pup.experiment.exp_util.cal_num_data_points",
"collections.defaultdict",
"logging.getLogger"
] |
[((559, 586), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (576, 586), False, 'import logging\n'), ((1185, 1196), 'time.time', 'time.time', ([], {}), '()\n', (1194, 1196), False, 'import time\n'), ((2441, 2517), 'pup.algorithms.uniform_prior.cal_prob_dists_num_users_for_grid', 'cal_prob_dists_num_users_for_grid', (['grid', 'noisy_data', 'final_probs_filter_type'], {}), '(grid, noisy_data, final_probs_filter_type)\n', (2474, 2517), False, 'from pup.algorithms.uniform_prior import cal_prob_dists_num_users_for_grid\n'), ((4879, 4913), 'pup.experiment.exp_util.cal_num_data_points', 'exp_util.cal_num_data_points', (['data'], {}), '(data)\n', (4907, 4913), False, 'from pup.experiment import exp_util\n'), ((5108, 5132), 'collections.defaultdict', 'defaultdict', (['defaultdict'], {}), '(defaultdict)\n', (5119, 5132), False, 'from collections import defaultdict\n'), ((2534, 2545), 'time.time', 'time.time', ([], {}), '()\n', (2543, 2545), False, 'import time\n'), ((3307, 3344), 'pup.io.dataio.read_costs', 'dataio.read_costs', (['MethodType.PROBING'], {}), '(MethodType.PROBING)\n', (3324, 3344), False, 'from pup.io import dataio\n'), ((5236, 5371), 'pup.algorithms.privacy_helper.buy_data_at_price', 'privacy_helper.buy_data_at_price', (['c', 'price_per_data_point', 'price_from_noise_rate', 'std_from_noise_initial_value', 'std_from_noise_rate'], {}), '(c, price_per_data_point,\n price_from_noise_rate, std_from_noise_initial_value, std_from_noise_rate)\n', (5268, 5371), False, 'from pup.algorithms import privacy_helper\n'), ((3583, 3613), 'pup.algorithms.util.get_linear_profit_fixed_cost', 'get_linear_profit_fixed_cost', ([], {}), '()\n', (3611, 3613), False, 'from pup.algorithms.util import get_linear_profit_fixed_cost\n'), ((3370, 3387), 'numpy.average', 'np.average', (['costs'], {}), '(costs)\n', (3380, 3387), True, 'import numpy as np\n')]
|
from Genome.NN.Layer import Layer
import numpy as np
import pickle
class Brain:
def __init__(self, brain_structure):
self.brain_structure = brain_structure
self.layers = []
self.id = 0
# First layer added here
ids = []
genes = []
for i in range(brain_structure[0]):
ids.append(self.id)
self.id += 1
genes.append([np.random.rand(brain_structure[1]), np.random.rand(brain_structure[1]), np.random.rand(brain_structure[1])])
layer = Layer(ids)
layer.set_genes(genes)
self.layers.append(layer)
for i in range(1, len(brain_structure)):
if i == (len(brain_structure) - 1):
self.add_last_layer(brain_structure[-1])
else:
self.add_random_layer(brain_structure[i], brain_structure[i + 1])
def add_random_layer(self, node_count, next_node_count):
ids = []
genes = []
for i in range(node_count):
ids.append(self.id)
self.id += 1
genes.append([np.random.rand(next_node_count), np.random.rand(next_node_count), np.random.rand(next_node_count)])
layer = Layer(ids)
layer.set_genes(genes)
self.layers[-1].add_layer_connections(layer)
self.layers.append(layer)
def add_last_layer(self, node_count):
ids = []
for i in range(node_count):
ids.append(self.id)
self.id += 1
layer = Layer(ids)
self.layers[-1].add_layer_connections(layer)
self.layers.append(layer)
def set_data(self, data):
self.layers[0].set_layer_input(data)
def feed_forward(self):
for l in range(len(self.layers)):
if l != 0:
self.layers[l].normalize()
self.layers[l].feed_forward()
def make_bebe(self, partner, mutation_rate):
bebe = Brain(self.brain_structure)
for i in range(len(self.layers)):
bebe.layers[i] = self.layers[i].make_bebe(partner.layers[i], bebe.layers[i], mutation_rate)
return bebe
def get_answer(self):
return self.layers[-1].get_layer_input()
def save_model(self, file):
with open(file, 'wb') as config_dictionary_file:
pickle.dump(self, config_dictionary_file)
@staticmethod
def load_model(file):
with open(file, 'rb') as config_dictionary_file:
brain = pickle.load(config_dictionary_file)
return brain
def print_genes(self):
print("The genes od the brain")
for layer in self.layers:
print(layer.get_genes())
#
# brain = Brain(16, 32)
# brain.add_random_layer(32, 32)
# brain.add_random_layer(32, 48)
# brain.add_last_layer(2)
# brain.save_model("../Models/first_baby")
# brain = Brain.load_model("../Models/first_baby")
# print(len(brain.layers))
# brain.print_genes()
#
# brain.set_data(list(range(0, 16)))
# brain.feed_forward()
# print(brain.get_answer())
|
[
"numpy.random.rand",
"pickle.dump",
"pickle.load",
"Genome.NN.Layer.Layer"
] |
[((537, 547), 'Genome.NN.Layer.Layer', 'Layer', (['ids'], {}), '(ids)\n', (542, 547), False, 'from Genome.NN.Layer import Layer\n'), ((1202, 1212), 'Genome.NN.Layer.Layer', 'Layer', (['ids'], {}), '(ids)\n', (1207, 1212), False, 'from Genome.NN.Layer import Layer\n'), ((1500, 1510), 'Genome.NN.Layer.Layer', 'Layer', (['ids'], {}), '(ids)\n', (1505, 1510), False, 'from Genome.NN.Layer import Layer\n'), ((2290, 2331), 'pickle.dump', 'pickle.dump', (['self', 'config_dictionary_file'], {}), '(self, config_dictionary_file)\n', (2301, 2331), False, 'import pickle\n'), ((2454, 2489), 'pickle.load', 'pickle.load', (['config_dictionary_file'], {}), '(config_dictionary_file)\n', (2465, 2489), False, 'import pickle\n'), ((412, 446), 'numpy.random.rand', 'np.random.rand', (['brain_structure[1]'], {}), '(brain_structure[1])\n', (426, 446), True, 'import numpy as np\n'), ((448, 482), 'numpy.random.rand', 'np.random.rand', (['brain_structure[1]'], {}), '(brain_structure[1])\n', (462, 482), True, 'import numpy as np\n'), ((484, 518), 'numpy.random.rand', 'np.random.rand', (['brain_structure[1]'], {}), '(brain_structure[1])\n', (498, 518), True, 'import numpy as np\n'), ((1086, 1117), 'numpy.random.rand', 'np.random.rand', (['next_node_count'], {}), '(next_node_count)\n', (1100, 1117), True, 'import numpy as np\n'), ((1119, 1150), 'numpy.random.rand', 'np.random.rand', (['next_node_count'], {}), '(next_node_count)\n', (1133, 1150), True, 'import numpy as np\n'), ((1152, 1183), 'numpy.random.rand', 'np.random.rand', (['next_node_count'], {}), '(next_node_count)\n', (1166, 1183), True, 'import numpy as np\n')]
|
# !/usr/bin/python3
# coding: utf-8
# Copyright 2015-2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import cv2
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from wand.image import Image as WandImage
from scipy.ndimage import interpolation as inter
from receipt_parser_core import Receipt
from receipt_parser_core.config import read_config
BASE_PATH = os.getcwd()
INPUT_FOLDER = os.path.join(BASE_PATH, "data/img")
TMP_FOLDER = os.path.join(BASE_PATH, "data/tmp")
OUTPUT_FOLDER = os.path.join(BASE_PATH, "data/txt")
ORANGE = '\033[33m'
RESET = '\033[0m'
def prepare_folders():
"""
:return: void
Creates necessary folders
"""
for folder in [
INPUT_FOLDER, TMP_FOLDER, OUTPUT_FOLDER
]:
if not os.path.exists(folder):
os.makedirs(folder)
def find_images(folder):
"""
:param folder: str
Path to folder to search
:return: generator of str
List of images in folder
"""
for file in os.listdir(folder):
full_path = os.path.join(folder, file)
if os.path.isfile(full_path):
try:
_ = Image.open(full_path) # if constructor succeeds
yield file
except:
pass
def rotate_image(input_file, output_file, angle=90):
"""
:param input_file: str
Path to image to rotate
:param output_file: str
Path to output image
:param angle: float
Angle to rotate
:return: void
Rotates image and saves result
"""
with WandImage(filename=input_file) as img:
width, height = img.size
if width < height:
angle = 0
print(ORANGE + '\t~: ' + RESET + 'Rotate image by: ' + str(angle) + "°" + RESET)
with img.clone() as rotated:
rotated.rotate(angle)
rotated.save(filename=output_file)
def deskew_image(image, delta=1, limit=5):
def determine_score(arr, angle):
data = inter.rotate(arr, angle, reshape=False, order=0)
histogram = np.sum(data, axis=1)
score = np.sum((histogram[1:] - histogram[:-1]) ** 2)
return histogram, score
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
scores = []
angles = np.arange(-limit, limit + delta, delta)
for angle in angles:
histogram, score = determine_score(thresh, angle)
scores.append(score)
best_angle = angles[scores.index(max(scores))]
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, best_angle, 1.0)
print(ORANGE + '\t~: ' + RESET + 'Deskew image by: ' + str(best_angle) + ' angle' + RESET)
rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, \
borderMode=cv2.BORDER_REPLICATE)
return rotated
def run_tesseract(input_file, output_file, language="deu"):
"""
:param input_file: str
Path to image to OCR
:param output_file: str
Path to output file
:return: void
Runs tesseract on image and saves result
"""
print(ORANGE + '\t~: ' + RESET + 'Parse image using pytesseract' + RESET)
print(ORANGE + '\t~: ' + RESET + 'Parse image at: ' + input_file + RESET)
print(ORANGE + '\t~: ' + RESET + 'Write result to: ' + output_file + RESET)
with io.BytesIO() as transfer:
with WandImage(filename=input_file) as img:
img.save(transfer)
with Image.open(transfer) as img:
image_data = pytesseract.image_to_string(img, lang=language, timeout=60, config="--psm 6")
    with open(output_file, "w", encoding='utf-8') as out:
        out.write(image_data)
def rescale_image(img):
print(ORANGE + '\t~: ' + RESET + 'Rescale image' + RESET)
img = cv2.resize(img, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)
return img
def grayscale_image(img):
print(ORANGE + '\t~: ' + RESET + 'Grayscale image' + RESET)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
def remove_noise(img):
kernel = np.ones((1, 1), np.uint8)
img = cv2.dilate(img, kernel, iterations=1)
img = cv2.erode(img, kernel, iterations=1)
print(ORANGE + '\t~: ' + RESET + 'Applying gaussianBlur and medianBlur' + RESET)
img = cv2.threshold(cv2.GaussianBlur(img, (5, 5), 0), 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
img = cv2.threshold(cv2.bilateralFilter(img, 5, 75, 75), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
img = cv2.adaptiveThreshold(cv2.bilateralFilter(img, 9, 75, 75), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 31, 2)
return img
def remove_shadows(img):
rgb_planes = cv2.split(img)
result_planes = []
result_norm_planes = []
for plane in rgb_planes:
dilated_img = cv2.dilate(plane, np.ones((7,7), np.uint8))
bg_img = cv2.medianBlur(dilated_img, 21)
diff_img = 255 - cv2.absdiff(plane, bg_img)
        norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
result_planes.append(diff_img)
result_norm_planes.append(norm_img)
result = cv2.merge(result_planes)
return result
def detect_orientation(image):
coords = np.column_stack(np.where(image > 0))
angle = cv2.minAreaRect(coords)[-1]
print(ORANGE + '\t~: ' + RESET + 'Get rotation angle:' + str(angle) + RESET)
return image
def enhance_image(img, tmp_path, high_contrast=True, gaussian_blur=True, rotate=True):
img = rescale_image(img)
if rotate:
cv2.imwrite(tmp_path, img)
rotate_image(tmp_path, tmp_path)
img = cv2.imread(tmp_path)
img = deskew_image(img)
img = remove_shadows(img)
if high_contrast:
img = grayscale_image(img)
if gaussian_blur:
img = remove_noise(img)
return img
def process_receipt(config, filename, rotate=True, grayscale=True, gaussian_blur=True):
input_path = INPUT_FOLDER + "/" + filename
output_path = OUTPUT_FOLDER + "/" + filename.split(".")[0] + ".txt"
print(ORANGE + '~: ' + RESET + 'Process image: ' + ORANGE + input_path + RESET)
prepare_folders()
    img = cv2.imread(input_path)
    if img is None:
        # cv2.imread does not raise on a missing file; it returns None instead
        return Receipt(config=config, raw="")
tmp_path = os.path.join(
TMP_FOLDER, filename
)
    img = enhance_image(img, tmp_path, grayscale, gaussian_blur, rotate)
print(ORANGE + '~: ' + RESET + 'Temporary store image at: ' + ORANGE + tmp_path + RESET)
cv2.imwrite(tmp_path, img)
run_tesseract(tmp_path, output_path, config.language)
print(ORANGE + '~: ' + RESET + 'Store parsed text at: ' + ORANGE + output_path + RESET)
raw = open(output_path, 'r').readlines()
return Receipt(config=config, raw=raw)
def main():
prepare_folders()
dir_path = os.getcwd()
config = read_config(config=dir_path + "/config.yml")
images = list(find_images(INPUT_FOLDER))
print(ORANGE + '~: ' + RESET + 'Found: ' + ORANGE + str(len(images)),
RESET + ' images in: ' + ORANGE + INPUT_FOLDER + RESET)
i = 1
for image in images:
input_path = os.path.join(
INPUT_FOLDER,
image
)
tmp_path = os.path.join(
TMP_FOLDER,
image
)
out_path = os.path.join(
OUTPUT_FOLDER,
image + ".txt"
)
if i != 1: print()
print(ORANGE + '~: ' + RESET + 'Process image (' + ORANGE + str(i) + '/' + str(
len(images)) + RESET + ') : ' + input_path + RESET)
img = cv2.imread(input_path)
img = enhance_image(img, tmp_path)
cv2.imwrite(tmp_path, img)
run_tesseract(tmp_path, out_path, config.language)
i = i + 1
if __name__ == '__main__':
main()
|
[
"cv2.GaussianBlur",
"numpy.sum",
"cv2.medianBlur",
"numpy.ones",
"cv2.bilateralFilter",
"cv2.warpAffine",
"os.path.isfile",
"numpy.arange",
"cv2.minAreaRect",
"cv2.normalize",
"cv2.erode",
"cv2.absdiff",
"wand.image.Image",
"cv2.getRotationMatrix2D",
"os.path.join",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"cv2.split",
"cv2.resize",
"io.BytesIO",
"receipt_parser_core.config.read_config",
"cv2.merge",
"os.listdir",
"pytesseract.pytesseract.image_to_string",
"receipt_parser_core.Receipt",
"os.makedirs",
"os.getcwd",
"cv2.threshold",
"scipy.ndimage.interpolation.rotate",
"PIL.Image.open",
"cv2.imread",
"numpy.where"
] |
[((906, 917), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (915, 917), False, 'import os\n'), ((933, 968), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""data/img"""'], {}), "(BASE_PATH, 'data/img')\n", (945, 968), False, 'import os\n'), ((982, 1017), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""data/tmp"""'], {}), "(BASE_PATH, 'data/tmp')\n", (994, 1017), False, 'import os\n'), ((1034, 1069), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""data/txt"""'], {}), "(BASE_PATH, 'data/txt')\n", (1046, 1069), False, 'import os\n'), ((1528, 1546), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1538, 1546), False, 'import os\n'), ((2709, 2748), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2721, 2748), False, 'import cv2\n'), ((2865, 2904), 'numpy.arange', 'np.arange', (['(-limit)', '(limit + delta)', 'delta'], {}), '(-limit, limit + delta, delta)\n', (2874, 2904), True, 'import numpy as np\n'), ((3137, 3185), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center', 'best_angle', '(1.0)'], {}), '(center, best_angle, 1.0)\n', (3160, 3185), False, 'import cv2\n'), ((3297, 3390), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {'flags': 'cv2.INTER_CUBIC', 'borderMode': 'cv2.BORDER_REPLICATE'}), '(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.\n BORDER_REPLICATE)\n', (3311, 3390), False, 'import cv2\n'), ((4396, 4464), 'cv2.resize', 'cv2.resize', (['img', 'None'], {'fx': '(1.2)', 'fy': '(1.2)', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)\n', (4406, 4464), False, 'import cv2\n'), ((4582, 4619), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4594, 4619), False, 'import cv2\n'), ((4673, 4698), 'numpy.ones', 'np.ones', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (4680, 4698), True, 'import numpy as np\n'), ((4709, 4746), 'cv2.dilate', 'cv2.dilate', (['img', 'kernel'], {'iterations': '(1)'}), '(img, kernel, iterations=1)\n', (4719, 4746), False, 'import cv2\n'), ((4757, 4793), 'cv2.erode', 'cv2.erode', (['img', 'kernel'], {'iterations': '(1)'}), '(img, kernel, iterations=1)\n', (4766, 4793), False, 'import cv2\n'), ((5322, 5336), 'cv2.split', 'cv2.split', (['img'], {}), '(img)\n', (5331, 5336), False, 'import cv2\n'), ((5795, 5819), 'cv2.merge', 'cv2.merge', (['result_planes'], {}), '(result_planes)\n', (5804, 5819), False, 'import cv2\n'), ((6951, 6985), 'os.path.join', 'os.path.join', (['TMP_FOLDER', 'filename'], {}), '(TMP_FOLDER, filename)\n', (6963, 6985), False, 'import os\n'), ((7163, 7189), 'cv2.imwrite', 'cv2.imwrite', (['tmp_path', 'img'], {}), '(tmp_path, img)\n', (7174, 7189), False, 'import cv2\n'), ((7398, 7429), 'receipt_parser_core.Receipt', 'Receipt', ([], {'config': 'config', 'raw': 'raw'}), '(config=config, raw=raw)\n', (7405, 7429), False, 'from receipt_parser_core import Receipt\n'), ((7482, 7493), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7491, 7493), False, 'import os\n'), ((7507, 7551), 'receipt_parser_core.config.read_config', 'read_config', ([], {'config': "(dir_path + '/config.yml')"}), "(config=dir_path + '/config.yml')\n", (7518, 7551), False, 'from receipt_parser_core.config import read_config\n'), ((1568, 1594), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (1580, 1594), False, 'import os\n'), ((1606, 1631), 'os.path.isfile', 'os.path.isfile', (['full_path'], {}), '(full_path)\n', (1620, 1631), False, 'import 
os\n'), ((2088, 2118), 'wand.image.Image', 'WandImage', ([], {'filename': 'input_file'}), '(filename=input_file)\n', (2097, 2118), True, 'from wand.image import Image as WandImage\n'), ((2513, 2561), 'scipy.ndimage.interpolation.rotate', 'inter.rotate', (['arr', 'angle'], {'reshape': '(False)', 'order': '(0)'}), '(arr, angle, reshape=False, order=0)\n', (2525, 2561), True, 'from scipy.ndimage import interpolation as inter\n'), ((2582, 2602), 'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (2588, 2602), True, 'import numpy as np\n'), ((2619, 2664), 'numpy.sum', 'np.sum', (['((histogram[1:] - histogram[:-1]) ** 2)'], {}), '((histogram[1:] - histogram[:-1]) ** 2)\n', (2625, 2664), True, 'import numpy as np\n'), ((2762, 2830), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (2775, 2830), False, 'import cv2\n'), ((3925, 3937), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3935, 3937), False, 'import io\n'), ((5130, 5165), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['img', '(9)', '(75)', '(75)'], {}), '(img, 9, 75, 75)\n', (5149, 5165), False, 'import cv2\n'), ((5501, 5532), 'cv2.medianBlur', 'cv2.medianBlur', (['dilated_img', '(21)'], {}), '(dilated_img, 21)\n', (5515, 5532), False, 'import cv2\n'), ((5604, 5702), 'cv2.normalize', 'cv2.normalize', (['diff_img', 'None'], {'alpha': '(0)', 'beta': '(255)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_8UC1'}), '(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_8UC1)\n', (5617, 5702), False, 'import cv2\n'), ((5901, 5920), 'numpy.where', 'np.where', (['(image > 0)'], {}), '(image > 0)\n', (5909, 5920), True, 'import numpy as np\n'), ((5934, 5957), 'cv2.minAreaRect', 'cv2.minAreaRect', (['coords'], {}), '(coords)\n', (5949, 5957), False, 'import cv2\n'), ((6204, 6230), 'cv2.imwrite', 'cv2.imwrite', (['tmp_path', 'img'], {}), '(tmp_path, img)\n', (6215, 6230), False, 'import cv2\n'), ((6286, 6306), 'cv2.imread', 'cv2.imread', (['tmp_path'], {}), '(tmp_path)\n', (6296, 6306), False, 'import cv2\n'), ((6836, 6858), 'cv2.imread', 'cv2.imread', (['input_path'], {}), '(input_path)\n', (6846, 6858), False, 'import cv2\n'), ((7795, 7828), 'os.path.join', 'os.path.join', (['INPUT_FOLDER', 'image'], {}), '(INPUT_FOLDER, image)\n', (7807, 7828), False, 'import os\n'), ((7882, 7913), 'os.path.join', 'os.path.join', (['TMP_FOLDER', 'image'], {}), '(TMP_FOLDER, image)\n', (7894, 7913), False, 'import os\n'), ((7968, 8011), 'os.path.join', 'os.path.join', (['OUTPUT_FOLDER', "(image + '.txt')"], {}), "(OUTPUT_FOLDER, image + '.txt')\n", (7980, 8011), False, 'import os\n'), ((8241, 8263), 'cv2.imread', 'cv2.imread', (['input_path'], {}), '(input_path)\n', (8251, 8263), False, 'import cv2\n'), ((8315, 8341), 'cv2.imwrite', 'cv2.imwrite', (['tmp_path', 'img'], {}), '(tmp_path, img)\n', (8326, 8341), False, 'import cv2\n'), ((1293, 1315), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1307, 1315), False, 'import os\n'), ((1329, 1348), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1340, 1348), False, 'import os\n'), ((3964, 3994), 'wand.image.Image', 'WandImage', ([], {'filename': 'input_file'}), '(filename=input_file)\n', (3973, 3994), True, 'from wand.image import Image as WandImage\n'), ((4048, 4068), 'PIL.Image.open', 'Image.open', (['transfer'], {}), '(transfer)\n', (4058, 4068), False, 'from PIL import Image\n'), ((4102, 4179), 
'pytesseract.pytesseract.image_to_string', 'pytesseract.image_to_string', (['img'], {'lang': 'language', 'timeout': '(60)', 'config': '"""--psm 6"""'}), "(img, lang=language, timeout=60, config='--psm 6')\n", (4129, 4179), False, 'from pytesseract import pytesseract\n'), ((4905, 4937), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (4921, 4937), False, 'import cv2\n'), ((5013, 5048), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['img', '(5)', '(75)', '(75)'], {}), '(img, 5, 75, 75)\n', (5032, 5048), False, 'import cv2\n'), ((5458, 5483), 'numpy.ones', 'np.ones', (['(7, 7)', 'np.uint8'], {}), '((7, 7), np.uint8)\n', (5465, 5483), True, 'import numpy as np\n'), ((5558, 5584), 'cv2.absdiff', 'cv2.absdiff', (['plane', 'bg_img'], {}), '(plane, bg_img)\n', (5569, 5584), False, 'import cv2\n'), ((6904, 6934), 'receipt_parser_core.Receipt', 'Receipt', ([], {'config': 'config', 'raw': '""""""'}), "(config=config, raw='')\n", (6911, 6934), False, 'from receipt_parser_core import Receipt\n'), ((1670, 1691), 'PIL.Image.open', 'Image.open', (['full_path'], {}), '(full_path)\n', (1680, 1691), False, 'from PIL import Image\n')]
|
import re
import keras.backend as keras_backend
from keras.layers import DepthwiseConv2D
import numpy as np
from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property
from blusky.wavelets.i_wavelet_2d import IWavelet2D
class ApplyFatherWavlet2D(HasStrictTraits):
"""
Provides a "convolution" method that will apply a father wavelet to
the endpoints of a cascade. Be sure to first apply layers to remove
any of the padding.
Assuming the input to the cascade is a power of 2 in shape, the result
will be a set of scattering coefficients at all orders of the transform
sampled regularly throughout the image. You can imagine that every
set of coefficients will be computed at the center of a tile, the shape
of which is determined by the "J" parameter. The degree to which these
tiles over lap is controlled by the "overlap_log_2". For interpretation,
consider values of "J" to give a tile of shape (2**(J+2), 2**(J+2)),
over which the texture of the image can be considered stationary.
The tiles can overlap by a factor of "M", however if you use the
default decimation, you must ensure that you have oversampled enough
to properly represent the stride at all scales of the transform.
    With default decimation, oversamples=1, overlap_log_2 can be up to
J - 1. For each unit of overlap, you need to pay the cost of an
additional unit of oversampling.
"""
#: (J) This is the "J" scale parameter of the father wavelet used in the
# transform.
J = Int(2)
    #: (M) This defines the overlap of the tiles, so overlap_log_2 = 0
# would be no overlap, overlap_log_2 = 1 would be 50% overlap,
# overlap_log_2 = 2 would be 75% etc.
overlap_log_2 = Int(0)
#: Size of the image input to the Cascade_2d. This needs to be padded to a
# power of "2" to ensure that the coefficients are consistent.
img_size = Tuple
#: The sample rate of the input data
sample_rate = Float
#: Wavelet to use in convolution
wavelet = Instance(IWavelet2D)
#: Equivalent tile size derived from the log scale J
# J = round(log2(min(tile_size))) - 2
_tile_size = Property(Int, depends_on="J")
def _get__tile_size(self):
size = 2 ** (self.J + 2)
if size > self.img_size[0] or size > self.img_size[1]:
mn = min(self.img_size)
msg = "For image {} by {}, max J is {}".format(
self.img_size[0], self.img_size[1], np.log2(mn) - 2
)
raise RuntimeError(msg)
return (2 ** (self.J + 2), 2 ** (self.J + 2))
def _convolve(self, input_layer, trainable=False):
"""
The concept here is to first derive the applied decimation
from the shape of the input layer, then pad the layer and
        apply a convolution with the father wavelet. The padding
        and stride of the convolution are designed to return a set of coefficients
        for a collection of regular (optionally overlapping) tiles.
        This will be the case provided the shape of the original input to the
        transform is a power of 2.
Parameters
----------
input_layer - Keras Layer
A layer to apply the father wavelet to. The applied wavelet
            is derived from the shape of the layer and knowledge of the
input image shape.
trainable - Bool (optional)
Toggle setting the convolution to be trainable. Either way it
is initialized with a gabor wavelet.
Returns
-------
conv - Keras Layer
A Keras layer applying the convolution to the input
"""
# create a convenient name
name = re.sub("[_/].*", "", input_layer.name)
name += "phi"
_, nh, nw, _ = input_layer.shape
nh = nh
nw = nw
# amount of decimation to here.
factor_1 = self.img_size[0] // nh
factor_2 = self.img_size[1] // nw
# how much to decimate the wavelet to required bandwidth
wavelet_stride = min(factor_1, factor_2)
# need to guarantee this, ideally crop the wavelet to a
# power of "2"
wav = self.wavelet.kernel(
0.0, shape=(2 ** (self.J + 2) - 1, 2 ** (self.J + 2) - 1)
)
#
wav = wav[::wavelet_stride, ::wavelet_stride]
# needs to be real
if np.iscomplexobj(wav):
wav = wav.real
        # define a little helper to initialize the weights.
def init_weights(shape, **kwargs):
dtype = np.float32
weights = np.zeros(shape, dtype=dtype)
for ichan in range(shape[2]):
weights[:, :, ichan, 0] = wav.astype(dtype)
return keras_backend.variable(value=weights, dtype=dtype)
# use the father wavelet scale here instead of the default:
conv_stride = (
max(
2 ** (-self.overlap_log_2) * self._tile_size[0] // factor_1, 1
),
max(
2 ** (-self.overlap_log_2) * self._tile_size[1] // factor_2, 1
),
)
conv_stride = (int(conv_stride[0]), int(conv_stride[0]))
conv = DepthwiseConv2D(
name=name,
kernel_size=wav.shape,
depth_multiplier=1,
data_format="channels_last",
padding="valid",
strides=conv_stride,
trainable=trainable,
depthwise_initializer=init_weights,
)
return conv(input_layer)
def convolve(self, end_points):
"""
Apply father wavelet convolution.
Parameters
----------
end_points - List(Keras Layers)
Typically this would be the multiple end-points of the 2-D Cascade.
Returns
-------
scattering_transform - List(Keras Layers)
The father wavelet applied to each end-point. The stride and
padding of the convolution produces a consistent set of
coefficients at each scale, provided the shape of the original
image is a power of 2. For example, img.shape = (128, 256).
"""
scattering_transform = [self._convolve(i) for i in end_points]
return scattering_transform
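# --- Illustrative sketch (not part of the original module): how J and overlap_log_2 map to the
# tile size and convolution stride used above, ignoring decimation (factor = 1). Values are
# made up for demonstration.
if __name__ == "__main__":
    for demo_J, demo_overlap_log_2 in [(2, 0), (3, 1)]:
        demo_tile = 2 ** (demo_J + 2)  # tile edge in samples, e.g. 16 or 32
        demo_stride = max(2 ** (-demo_overlap_log_2) * demo_tile // 1, 1)  # 16 (no overlap) and 16 (50% overlap of a 32 tile)
        print(demo_J, demo_overlap_log_2, demo_tile, demo_stride)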
|
[
"traits.api.Instance",
"numpy.iscomplexobj",
"traits.api.Property",
"keras.layers.DepthwiseConv2D",
"keras.backend.variable",
"traits.api.Int",
"numpy.log2",
"numpy.zeros",
"re.sub"
] |
[((1557, 1563), 'traits.api.Int', 'Int', (['(2)'], {}), '(2)\n', (1560, 1563), False, 'from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property\n'), ((1770, 1776), 'traits.api.Int', 'Int', (['(0)'], {}), '(0)\n', (1773, 1776), False, 'from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property\n'), ((2063, 2083), 'traits.api.Instance', 'Instance', (['IWavelet2D'], {}), '(IWavelet2D)\n', (2071, 2083), False, 'from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property\n'), ((2202, 2231), 'traits.api.Property', 'Property', (['Int'], {'depends_on': '"""J"""'}), "(Int, depends_on='J')\n", (2210, 2231), False, 'from traits.api import Float, HasStrictTraits, Instance, Int, Tuple, Property\n'), ((3753, 3791), 're.sub', 're.sub', (['"""[_/].*"""', '""""""', 'input_layer.name'], {}), "('[_/].*', '', input_layer.name)\n", (3759, 3791), False, 'import re\n'), ((4435, 4455), 'numpy.iscomplexobj', 'np.iscomplexobj', (['wav'], {}), '(wav)\n', (4450, 4455), True, 'import numpy as np\n'), ((5258, 5459), 'keras.layers.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'name': 'name', 'kernel_size': 'wav.shape', 'depth_multiplier': '(1)', 'data_format': '"""channels_last"""', 'padding': '"""valid"""', 'strides': 'conv_stride', 'trainable': 'trainable', 'depthwise_initializer': 'init_weights'}), "(name=name, kernel_size=wav.shape, depth_multiplier=1,\n data_format='channels_last', padding='valid', strides=conv_stride,\n trainable=trainable, depthwise_initializer=init_weights)\n", (5273, 5459), False, 'from keras.layers import DepthwiseConv2D\n'), ((4641, 4669), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (4649, 4669), True, 'import numpy as np\n'), ((4793, 4843), 'keras.backend.variable', 'keras_backend.variable', ([], {'value': 'weights', 'dtype': 'dtype'}), '(value=weights, dtype=dtype)\n', (4815, 4843), True, 'import keras.backend as keras_backend\n'), ((2508, 2519), 'numpy.log2', 'np.log2', (['mn'], {}), '(mn)\n', (2515, 2519), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import modules
import pytest
import numpy as np
# Import from package
from pyswarms.discrete import BinaryPSO
@pytest.mark.parametrize(
"options",
[
{"c2": 0.7, "w": 0.5, "k": 2, "p": 2},
{"c1": 0.5, "w": 0.5, "k": 2, "p": 2},
{"c1": 0.5, "c2": 0.7, "k": 2, "p": 2},
{"c1": 0.5, "c2": 0.7, "w": 0.5, "p": 2},
{"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2},
],
)
def test_keyword_exception(options):
"""Tests if exceptions are thrown when keywords are missing"""
with pytest.raises(KeyError):
BinaryPSO(5, 2, options)
@pytest.mark.parametrize(
"options",
[
{"c1": 0.5, "c2": 0.7, "w": 0.5, "k": -1, "p": 2},
{"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 6, "p": 2},
{"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 5},
],
)
def test_invalid_k_or_p_values(options):
"""Tests if exception is thrown when passing
an invalid value for k or p"""
with pytest.raises(ValueError):
BinaryPSO(5, 2, options)
@pytest.mark.parametrize("velocity_clamp", [[1, 3], np.array([1, 3])])
def test_vclamp_type_exception(velocity_clamp, options):
"""Tests if exception is raised when velocity_clamp type is not a
tuple"""
with pytest.raises(TypeError):
BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
@pytest.mark.parametrize("velocity_clamp", [(1, 1, 1), (2, 3, 1)])
def test_vclamp_shape_exception(velocity_clamp, options):
"""Tests if exception is raised when velocity_clamp's size is not equal
to 2"""
with pytest.raises(IndexError):
BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
@pytest.mark.parametrize("velocity_clamp", [(3, 2), (10, 8)])
def test_vclamp_maxmin_exception(velocity_clamp, options):
"""Tests if the max velocity_clamp is less than min velocity_clamp and
vice-versa"""
with pytest.raises(ValueError):
BinaryPSO(5, 2, velocity_clamp=velocity_clamp, options=options)
def test_reset_default_values(binary_reset):
"""Tests if best cost and best pos are set properly when the reset()
method is called"""
assert binary_reset.swarm.best_cost == np.inf
assert set(binary_reset.swarm.best_pos) == set(np.array([]))
@pytest.mark.parametrize(
"history, expected_shape",
[
("cost_history", (1000,)),
("mean_pbest_history", (1000,)),
("mean_neighbor_history", (1000,)),
("pos_history", (1000, 10, 2)),
("velocity_history", (1000, 10, 2)),
],
)
def test_training_history_shape(binary_history, history, expected_shape):
"""Test if training histories are of expected shape"""
pso = vars(binary_history)
assert np.array(pso[history]).shape == expected_shape
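# --- Hypothetical fixture sketch: the real `options`, `binary_reset` and `binary_history`
# fixtures used above live in the test suite's conftest.py, which is not shown here. The fixture
# below only illustrates the shape of the options dict these tests expect; it is not authoritative.
@pytest.fixture
def _example_options():
    return {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2}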
|
[
"pytest.mark.parametrize",
"pytest.raises",
"numpy.array",
"pyswarms.discrete.BinaryPSO"
] |
[((163, 414), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""options"""', "[{'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2}, {'c1': 0.5, 'w': 0.5, 'k': 2, 'p': \n 2}, {'c1': 0.5, 'c2': 0.7, 'k': 2, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w':\n 0.5, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2}]"], {}), "('options', [{'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 2},\n {'c1': 0.5, 'w': 0.5, 'k': 2, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'k': 2,\n 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'p': 2}, {'c1': 0.5, 'c2': \n 0.7, 'w': 0.5, 'k': 2}])\n", (186, 414), False, 'import pytest\n'), ((634, 831), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""options"""', "[{'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': -1, 'p': 2}, {'c1': 0.5, 'c2': 0.7,\n 'w': 0.5, 'k': 6, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 5}\n ]"], {}), "('options', [{'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': -\n 1, 'p': 2}, {'c1': 0.5, 'c2': 0.7, 'w': 0.5, 'k': 6, 'p': 2}, {'c1': \n 0.5, 'c2': 0.7, 'w': 0.5, 'k': 2, 'p': 5}])\n", (657, 831), False, 'import pytest\n'), ((1381, 1446), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""velocity_clamp"""', '[(1, 1, 1), (2, 3, 1)]'], {}), "('velocity_clamp', [(1, 1, 1), (2, 3, 1)])\n", (1404, 1446), False, 'import pytest\n'), ((1704, 1764), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""velocity_clamp"""', '[(3, 2), (10, 8)]'], {}), "('velocity_clamp', [(3, 2), (10, 8)])\n", (1727, 1764), False, 'import pytest\n'), ((2287, 2513), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""history, expected_shape"""', "[('cost_history', (1000,)), ('mean_pbest_history', (1000,)), (\n 'mean_neighbor_history', (1000,)), ('pos_history', (1000, 10, 2)), (\n 'velocity_history', (1000, 10, 2))]"], {}), "('history, expected_shape', [('cost_history', (1000,\n )), ('mean_pbest_history', (1000,)), ('mean_neighbor_history', (1000,)),\n ('pos_history', (1000, 10, 2)), ('velocity_history', (1000, 10, 2))])\n", (2310, 2513), False, 'import pytest\n'), ((573, 596), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (586, 596), False, 'import pytest\n'), ((606, 630), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)', 'options'], {}), '(5, 2, options)\n', (615, 630), False, 'from pyswarms.discrete import BinaryPSO\n'), ((998, 1023), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1011, 1023), False, 'import pytest\n'), ((1033, 1057), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)', 'options'], {}), '(5, 2, options)\n', (1042, 1057), False, 'from pyswarms.discrete import BinaryPSO\n'), ((1280, 1304), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1293, 1304), False, 'import pytest\n'), ((1314, 1377), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)'], {'velocity_clamp': 'velocity_clamp', 'options': 'options'}), '(5, 2, velocity_clamp=velocity_clamp, options=options)\n', (1323, 1377), False, 'from pyswarms.discrete import BinaryPSO\n'), ((1112, 1128), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (1120, 1128), True, 'import numpy as np\n'), ((1602, 1627), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (1615, 1627), False, 'import pytest\n'), ((1637, 1700), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)'], {'velocity_clamp': 'velocity_clamp', 'options': 'options'}), '(5, 2, velocity_clamp=velocity_clamp, options=options)\n', (1646, 1700), False, 'from pyswarms.discrete import BinaryPSO\n'), ((1926, 1951), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {}), '(ValueError)\n', (1939, 1951), False, 'import pytest\n'), ((1961, 2024), 'pyswarms.discrete.BinaryPSO', 'BinaryPSO', (['(5)', '(2)'], {'velocity_clamp': 'velocity_clamp', 'options': 'options'}), '(5, 2, velocity_clamp=velocity_clamp, options=options)\n', (1970, 2024), False, 'from pyswarms.discrete import BinaryPSO\n'), ((2270, 2282), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2278, 2282), True, 'import numpy as np\n'), ((2738, 2760), 'numpy.array', 'np.array', (['pso[history]'], {}), '(pso[history])\n', (2746, 2760), True, 'import numpy as np\n')]
|
import MulensModel as mm
import Functions as mc
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib
from scipy.stats import truncnorm, loguniform, uniform
import corner  # provides corner.corner, used in centre_offsets_pointilism below
#plt.style.use('ggplot')
print(plt.style.available)
#print(plt.rcParams["font.family"].available)
#print(matplotlib.get_cachedir())
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
#rc('font',**{'family':'serif','serif':['Times New Roman']})
#rc('text', usetex=True)
#plt.rcParams["font.family"] = "serif"
#print(plt.rcParams.keys())
#plt.rcParams['font.size'] = 12
s_pi = mc.logUniDist(0.2, 5)
q_pi = mc.logUniDist(10e-6, 1)
alpha_pi = mc.uniDist(0, 360)
u0_pi = mc.uniDist(0, 2)
t0_pi = mc.uniDist(0, 72)
tE_pi = mc.truncatedLogNormDist(1, 100, 10**1.15, 10**0.45)
rho_pi = mc.logUniDist(10**-4, 10**-2)
distr = tE_pi
y=[]
x=np.linspace(1, 100, 1000)
mu=0
for i in x:
mu+=np.exp(distr.log_PDF(i))*i
y.append(np.exp(distr.log_PDF(i)))
print(mu/len(x))
#print(y)
plt.rcParams["font.family"] = "serif"
plt.rcParams['font.size'] = 12
plt.style.use('seaborn-bright')
plt.rcParams["legend.edgecolor"] = '0'
plt.rcParams["legend.framealpha"] = 1
plt.rcParams["legend.title_fontsize"] = 10
plt.rcParams["legend.fontsize"] = 9
plt.rcParams["grid.linestyle"] = 'dashed'
plt.rcParams["grid.alpha"] = 0.25
plt.plot(x, y, label='Probability\nDensity')
plt.xlabel(r'Parameter [$\chi$]')
plt.ylabel(r'Probability Density [$\rho$]')
plt.title('Probability Density Function')
plt.legend(title='Entries')#, framealpha=1.0, edgecolor='0.0') #
#plt.axis('scaled')
plt.tight_layout()
plt.grid()
plt.savefig('Plots/pdf-test.png')
def centre_offsets_pointilism(supset_model, subset_model, symbols, name = '', dpi = 100):
supset_offsets = (supset_model.sampled.states_array(scaled = True) - supset_model.centre.scaled[:, np.newaxis])
subset_offsets = (subset_model.sampled.states_array(scaled = True) - subset_model.centre.scaled[:, np.newaxis])
n_dim = subset_model.D
    style()  # assumed: a plot-styling helper defined elsewhere in the codebase
# construct shape with corner
figure = corner.corner(subset_offsets.T)
# font/visibility
plt.rcParams['font.size'] = 8
plt.rcParams['axes.titlesize'] = 14
plt.rcParams['axes.labelsize'] = 14
# extract the axes
axes = np.array(figure.axes).reshape((n_dim, n_dim))
# Loop over the diagonal to remove from plot
for i in range(n_dim):
ax = axes[i, i]
ax.cla()
ax.patch.set_alpha(0.0)
ax.axis('off')
ax.axes.get_xaxis().set_ticklabels([])
ax.axes.get_yaxis().set_ticklabels([])
# loop over lower triangle
for yi in range(n_dim):
for xi in range(yi):
ax = axes[yi, xi]
ax.cla()
# overlay points
ax.scatter(subset_offsets[xi, :], subset_offsets[yi, :], c = np.linspace(0.0, 1.0, subset_model.sampled.n), cmap = 'winter', alpha = 0.15, marker = ".", s = 20, linewidth = 0.0)
ax.scatter(supset_offsets[xi, :], supset_offsets[yi, :], c = np.linspace(0.0, 1.0, supset_model.sampled.n), cmap = 'spring', alpha = 0.15, marker = ".", s = 20, linewidth = 0.0)
if yi == n_dim - 1: # last row
ax.set_xlabel(symbols[xi])
ax.tick_params(axis = 'x', labelrotation = 45)
else:
ax.axes.get_xaxis().set_ticklabels([])
if xi == 0: # first column
ax.set_ylabel(symbols[yi])
ax.tick_params(axis = 'y', labelrotation = 45)
else:
ax.axes.get_yaxis().set_ticklabels([])
figure.savefig('results/' + name + '-centreed-pointilism.png', bbox_inches = "tight", dpi = dpi, transparent=True)
figure.clf()
return
|
[
"matplotlib.pyplot.title",
"Functions.truncatedLogNormDist",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"Functions.uniDist",
"matplotlib.pyplot.style.use",
"numpy.array",
"numpy.linspace",
"Functions.logUniDist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] |
[((613, 634), 'Functions.logUniDist', 'mc.logUniDist', (['(0.2)', '(5)'], {}), '(0.2, 5)\n', (626, 634), True, 'import Functions as mc\n'), ((642, 665), 'Functions.logUniDist', 'mc.logUniDist', (['(1e-05)', '(1)'], {}), '(1e-05, 1)\n', (655, 665), True, 'import Functions as mc\n'), ((677, 695), 'Functions.uniDist', 'mc.uniDist', (['(0)', '(360)'], {}), '(0, 360)\n', (687, 695), True, 'import Functions as mc\n'), ((704, 720), 'Functions.uniDist', 'mc.uniDist', (['(0)', '(2)'], {}), '(0, 2)\n', (714, 720), True, 'import Functions as mc\n'), ((729, 746), 'Functions.uniDist', 'mc.uniDist', (['(0)', '(72)'], {}), '(0, 72)\n', (739, 746), True, 'import Functions as mc\n'), ((755, 810), 'Functions.truncatedLogNormDist', 'mc.truncatedLogNormDist', (['(1)', '(100)', '(10 ** 1.15)', '(10 ** 0.45)'], {}), '(1, 100, 10 ** 1.15, 10 ** 0.45)\n', (778, 810), True, 'import Functions as mc\n'), ((817, 850), 'Functions.logUniDist', 'mc.logUniDist', (['(10 ** -4)', '(10 ** -2)'], {}), '(10 ** -4, 10 ** -2)\n', (830, 850), True, 'import Functions as mc\n'), ((870, 895), 'numpy.linspace', 'np.linspace', (['(1)', '(100)', '(1000)'], {}), '(1, 100, 1000)\n', (881, 895), True, 'import numpy as np\n'), ((1085, 1116), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-bright"""'], {}), "('seaborn-bright')\n", (1098, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1356, 1403), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""Probability\nDensity"""'}), '(x, y, label="""Probability\nDensity""")\n', (1364, 1403), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Parameter [$\\\\chi$]"""'], {}), "('Parameter [$\\\\chi$]')\n", (1411, 1434), True, 'import matplotlib.pyplot as plt\n'), ((1435, 1478), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability Density [$\\\\rho$]"""'], {}), "('Probability Density [$\\\\rho$]')\n", (1445, 1478), True, 'import matplotlib.pyplot as plt\n'), ((1479, 1520), 'matplotlib.pyplot.title', 'plt.title', (['"""Probability Density Function"""'], {}), "('Probability Density Function')\n", (1488, 1520), True, 'import matplotlib.pyplot as plt\n'), ((1521, 1548), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""Entries"""'}), "(title='Entries')\n", (1531, 1548), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1626), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1624, 1626), True, 'import matplotlib.pyplot as plt\n'), ((1627, 1637), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1635, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1671), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Plots/pdf-test.png"""'], {}), "('Plots/pdf-test.png')\n", (1649, 1671), True, 'import matplotlib.pyplot as plt\n'), ((2291, 2312), 'numpy.array', 'np.array', (['figure.axes'], {}), '(figure.axes)\n', (2299, 2312), True, 'import numpy as np\n'), ((2869, 2914), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'subset_model.sampled.n'], {}), '(0.0, 1.0, subset_model.sampled.n)\n', (2880, 2914), True, 'import numpy as np\n'), ((3059, 3104), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'supset_model.sampled.n'], {}), '(0.0, 1.0, supset_model.sampled.n)\n', (3070, 3104), True, 'import numpy as np\n')]
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from ase import Atoms
from ase.constraints import dict2constraint
import copy
import importlib
import numpy as np
from pyiron_atomistics.atomistics.job.interactive import GenericInteractive
from pyiron_atomistics.atomistics.structure.atoms import pyiron_to_ase, Atoms as PAtoms
try:
from ase.cell import Cell
except ImportError:
Cell = None
__author__ = "<NAME>"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
__date__ = "Sep 1, 2018"
def ase_structure_todict(structure):
atoms_dict = {
"symbols": structure.get_chemical_symbols(),
"positions": structure.get_positions(),
"pbc": structure.get_pbc(),
"celldisp": structure.get_celldisp(),
"constraint": [c.todict() for c in structure.constraints],
"info": copy.deepcopy(structure.info),
}
if Cell is not None:
atoms_dict["cell"] = structure.get_cell().todict()
else:
atoms_dict["cell"] = structure.get_cell()
if structure.has("tags"):
atoms_dict["tags"] = structure.get_tags()
if structure.has("masses"):
atoms_dict["masses"] = structure.get_masses()
if structure.has("momenta"):
atoms_dict["momenta"] = structure.get_momenta()
if structure.has("initial_magmoms"):
atoms_dict["magmoms"] = structure.get_initial_magnetic_moments()
if structure.has("initial_charges"):
atoms_dict["charges"] = structure.get_initial_charges()
if structure.calc is not None:
calculator_dict = structure.calc.todict()
calculator_dict["calculator_class"] = (
str(structure.calc.__class__).replace("'", " ").split()[1]
)
calculator_dict["label"] = structure.calc.label
atoms_dict["calculator"] = calculator_dict
return atoms_dict
def ase_calculator_fromdict(class_path, class_dict):
module_loaded = importlib.import_module(".".join(class_path.split(".")[:-1]))
module_class = getattr(module_loaded, class_path.split(".")[-1])
return module_class(**class_dict)
def ase_structure_fromdict(atoms_dict):
def cell_fromdict(celldict):
celldict.pop("pbc", None)
if Cell is not None:
return Cell(**celldict)
else:
return celldict
atoms_dict_copy = copy.deepcopy(atoms_dict)
if "calculator" in atoms_dict_copy.keys():
calculator_dict = atoms_dict_copy["calculator"]
calculator_class = calculator_dict["calculator_class"]
del calculator_dict["calculator_class"]
atoms_dict_copy["calculator"] = ase_calculator_fromdict(
calculator_class, calculator_dict
)
if "constraint" in atoms_dict_copy.keys():
atoms_dict_copy["constraint"] = [
dict2constraint(const_dict) for const_dict in atoms_dict_copy["constraint"]
]
atoms_dict_copy["cell"] = cell_fromdict(celldict=atoms_dict_copy["cell"])
atoms = Atoms(**atoms_dict_copy)
if atoms.calc is not None:
atoms.calc.read(atoms.calc.label)
return atoms
class AseJob(GenericInteractive):
def __init__(self, project, job_name):
super(AseJob, self).__init__(project, job_name)
self.__name__ = "AseJob"
self.__version__ = (
            None  # Reset the version number so that it is set automatically from the executable
)
@property
def structure(self):
return GenericInteractive.structure.fget(self)
@structure.setter
def structure(self, structure):
if isinstance(structure, PAtoms):
structure = pyiron_to_ase(structure)
GenericInteractive.structure.fset(self, structure)
def to_hdf(self, hdf=None, group_name=None):
super(AseJob, self).to_hdf(hdf=hdf, group_name=group_name)
with self.project_hdf5.open("input") as hdf_input:
hdf_input["structure"] = ase_structure_todict(self._structure)
def from_hdf(self, hdf=None, group_name=None):
super(AseJob, self).from_hdf(hdf=hdf, group_name=group_name)
with self.project_hdf5.open("input") as hdf_input:
self.structure = ase_structure_fromdict(hdf_input["structure"])
def run_static(self):
pre_run_mode = self.server.run_mode
self.server.run_mode.interactive = True
self.run_if_interactive()
self.interactive_close()
self.server.run_mode = pre_run_mode
def run_if_interactive(self):
if self.structure.calc is None:
self.set_calculator()
super(AseJob, self).run_if_interactive()
self.interactive_collect()
def set_calculator(self):
raise NotImplementedError(
"The _set_calculator function is not implemented for this code."
)
def interactive_structure_setter(self, structure):
self.structure.calc.calculate(structure)
def interactive_positions_setter(self, positions):
self.structure.positions = positions
def interactive_initialize_interface(self):
self.status.running = True
self._structure.calc.set_label(self.working_directory + "/")
self._interactive_library = True
def interactive_close(self):
if self.interactive_is_activated():
super(AseJob, self).interactive_close()
with self.project_hdf5.open("output") as h5:
if "interactive" in h5.list_groups():
for key in h5["interactive"].list_nodes():
h5["generic/" + key] = h5["interactive/" + key]
def interactive_forces_getter(self):
return self.structure.get_forces()
def interactive_pressures_getter(self):
return -self.structure.get_stress(voigt=False)
def interactive_energy_pot_getter(self):
return self.structure.get_potential_energy()
def interactive_energy_tot_getter(self):
return self.structure.get_potential_energy()
def interactive_indices_getter(self):
element_lst = sorted(list(set(self.structure.get_chemical_symbols())))
return np.array(
[element_lst.index(el) for el in self.structure.get_chemical_symbols()]
)
def interactive_positions_getter(self):
return self.structure.positions.copy()
def interactive_steps_getter(self):
return len(self.interactive_cache[list(self.interactive_cache.keys())[0]])
def interactive_time_getter(self):
return self.interactive_steps_getter()
def interactive_volume_getter(self):
return self.structure.get_volume()
def interactive_cells_getter(self):
return self.structure.cell.copy()
def write_input(self):
pass
def collect_output(self):
pass
def run_if_scheduler(self):
self._create_working_directory()
super(AseJob, self).run_if_scheduler()
def interactive_index_organizer(self):
index_merge_lst = self._interactive_species_lst.tolist() + list(
np.unique(self._structure_current.get_chemical_symbols())
)
el_lst = sorted(set(index_merge_lst), key=index_merge_lst.index)
current_structure_index = [
el_lst.index(el) for el in self._structure_current.get_chemical_symbols()
]
previous_structure_index = [
el_lst.index(el) for el in self._structure_previous.get_chemical_symbols()
]
if not np.array_equal(
np.array(current_structure_index),
np.array(previous_structure_index),
):
self._logger.debug("Generic library: indices changed!")
self.interactive_indices_setter(self._structure_current.indices)
def _get_structure(self, frame=-1, wrap_atoms=True):
if (
self.server.run_mode.interactive
or self.server.run_mode.interactive_non_modal
):
# Warning: We only copy symbols, positions and cell information - no tags.
if self.output.indices is not None and len(self.output.indices) != 0:
indices = self.output.indices[frame]
else:
return None
if len(self._interactive_species_lst) == 0:
el_lst = list(np.unique(self._structure_current.get_chemical_symbols()))
else:
el_lst = self._interactive_species_lst.tolist()
if indices is not None:
if wrap_atoms:
positions = self.output.positions[frame]
else:
if len(self.output.unwrapped_positions) > max([frame, 0]):
positions = self.output.unwrapped_positions[frame]
else:
positions = (
self.output.positions[frame]
+ self.output.total_displacements[frame]
)
atoms = Atoms(
symbols=np.array([el_lst[el] for el in indices]),
positions=positions,
cell=self.output.cells[frame],
pbc=self.structure.pbc,
)
                # Update indices to match the indices in the cache.
atoms.indices = indices
return atoms
else:
return None
else:
if (
self.get("output/generic/cells") is not None
and len(self.get("output/generic/cells")) != 0
):
return super()._get_structure(frame=frame, wrap_atoms=wrap_atoms)
else:
return None
class AseAdapter(object):
def __init__(self, ham, fast_mode=False):
self._ham = ham
self._fast_mode = fast_mode
if self._ham.server.run_mode.interactive and fast_mode:
self.interactive_cache = {
"velocities": [],
"energy_kin": [],
"momenta": [],
"positions": [],
"energy_tot": [],
"energy_pot": [],
}
self._ham.run()
self._ham.interactive_cache = {}
elif self._ham.server.run_mode.interactive:
self.interactive_cache = {"velocities": [], "energy_kin": [], "momenta": []}
self.constraints = []
try:
self.arrays = {
"positions": self._ham.structure.positions.copy(),
"numbers": self._ham.structure.numbers,
}
except AttributeError:
self.arrays = {
"positions": self._ham.structure.positions.copy(),
"numbers": self._ham.structure.get_atomic_numbers(),
}
@property
def communicator(self):
return None
def get_masses(self):
return np.array(self._ham.structure.get_masses())
def get_positions(self):
return self.arrays["positions"]
def set_positions(self, positions):
self.arrays["positions"] = positions
def get_forces(self, md=True):
if self._fast_mode:
self._ham.interactive_positions_setter(self.arrays["positions"])
self.interactive_cache["positions"].append(self.arrays["positions"])
self._ham.interactive_execute()
self.interactive_cache["energy_pot"].append(
self._ham.interactive_energy_pot_getter()
)
return np.array(self._ham.interactive_forces_getter())
else:
self._ham.structure.positions = self.arrays["positions"]
if self._ham.server.run_mode.interactive:
self._ham.run()
else:
self._ham.run(delete_existing_job=True)
return self._ham.output.forces[-1]
def interactive_close(self):
self._ham.interactive_store_in_cache(
"velocities", self.interactive_cache["velocities"]
)
self._ham.interactive_store_in_cache(
"energy_kin", self.interactive_cache["energy_kin"]
)
if self._fast_mode:
self._ham.interactive_store_in_cache(
"positions", self.interactive_cache["positions"]
)
self._ham.interactive_store_in_cache(
"energy_pot", self.interactive_cache["energy_pot"][::2]
)
self._ham.interactive_store_in_cache(
"energy_tot",
(
np.array(self.interactive_cache["energy_pot"][::2])
+ np.array(self.interactive_cache["energy_kin"])
).tolist(),
)
else:
self._ham.interactive_store_in_cache(
"energy_tot",
(
np.array(self._ham.output.energy_pot)[::2]
+ np.array(self.interactive_cache["energy_kin"])
).tolist(),
)
self._ham.interactive_close()
def get_number_of_atoms(self):
return self._ham.structure.get_number_of_atoms()
# ASE functions
def get_kinetic_energy(self):
"""Get the kinetic energy."""
momenta = self.arrays.get("momenta")
if momenta is None:
return 0.0
return 0.5 * np.vdot(momenta, self.get_velocities())
def set_momenta(self, momenta, apply_constraint=True):
"""Set momenta."""
if apply_constraint and len(self.constraints) > 0 and momenta is not None:
momenta = np.array(momenta) # modify a copy
for constraint in self.constraints:
if hasattr(constraint, "adjust_momenta"):
constraint.adjust_momenta(self, momenta)
self.set_array("momenta", momenta, float, (3,))
self.interactive_cache["velocities"].append(self.get_velocities())
self.interactive_cache["energy_kin"].append(self.get_kinetic_energy())
def set_velocities(self, velocities):
"""Set the momenta by specifying the velocities."""
self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)
def get_momenta(self):
"""Get array of momenta."""
if "momenta" in self.arrays:
return self.arrays["momenta"].copy()
else:
return np.zeros((len(self), 3))
def set_array(self, name, a, dtype=None, shape=None):
"""Update array.
If *shape* is not *None*, the shape of *a* will be checked.
If *a* is *None*, then the array is deleted."""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError(
"Array has wrong shape %s != %s." % (a.shape, b.shape)
)
b[:] = a
def get_angular_momentum(self):
"""Get total angular momentum with respect to the center of mass."""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
return np.cross(positions, self.get_momenta()).sum(0)
def new_array(self, name, a, dtype=None, shape=None):
"""Add new array.
If *shape* is not *None*, the shape of *a* will be checked."""
if dtype is not None:
a = np.array(a, dtype, order="C")
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
if not a.flags["C_CONTIGUOUS"]:
a = np.ascontiguousarray(a)
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError("Array has wrong length: %d != %d." % (len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError(
"Array has wrong shape %s != %s." % (a.shape, (a.shape[0:1] + shape))
)
self.arrays[name] = a
def has(self, name):
"""Check for existence of array.
name must be one of: 'tags', 'momenta', 'masses', 'initial_magmoms',
'initial_charges'."""
# XXX extend has to calculator properties
return name in self.arrays
def get_center_of_mass(self, scaled=False):
"""Get the center of mass.
If scaled=True the center of mass in scaled coordinates
is returned."""
m = self.get_masses()
com = np.dot(m, self.arrays["positions"]) / m.sum()
if scaled:
if self._fast_mode:
return np.linalg.solve(self._ham.structure.cells[-1].T, com)
else:
return np.linalg.solve(self._ham.output.cells[-1].T, com)
else:
return com
def get_velocities(self):
"""Get array of velocities."""
momenta = self.arrays.get("momenta")
if momenta is None:
return None
m = self.get_masses()
# m = self.arrays.get('masses')
# if m is None:
# m = atomic_masses[self.arrays['numbers']]
return momenta / m.reshape(-1, 1)
def __len__(self):
return len(self._ham.structure)
|
[
"ase.constraints.dict2constraint",
"copy.deepcopy",
"ase.cell.Cell",
"numpy.asarray",
"pyiron_atomistics.atomistics.job.interactive.GenericInteractive.structure.fset",
"numpy.array",
"pyiron_atomistics.atomistics.structure.atoms.pyiron_to_ase",
"pyiron_atomistics.atomistics.job.interactive.GenericInteractive.structure.fget",
"numpy.linalg.solve",
"numpy.dot",
"numpy.ascontiguousarray",
"ase.Atoms"
] |
[((2640, 2665), 'copy.deepcopy', 'copy.deepcopy', (['atoms_dict'], {}), '(atoms_dict)\n', (2653, 2665), False, 'import copy\n'), ((3278, 3302), 'ase.Atoms', 'Atoms', ([], {}), '(**atoms_dict_copy)\n', (3283, 3302), False, 'from ase import Atoms\n'), ((1159, 1188), 'copy.deepcopy', 'copy.deepcopy', (['structure.info'], {}), '(structure.info)\n', (1172, 1188), False, 'import copy\n'), ((3739, 3778), 'pyiron_atomistics.atomistics.job.interactive.GenericInteractive.structure.fget', 'GenericInteractive.structure.fget', (['self'], {}), '(self)\n', (3772, 3778), False, 'from pyiron_atomistics.atomistics.job.interactive import GenericInteractive\n'), ((3937, 3987), 'pyiron_atomistics.atomistics.job.interactive.GenericInteractive.structure.fset', 'GenericInteractive.structure.fset', (['self', 'structure'], {}), '(self, structure)\n', (3970, 3987), False, 'from pyiron_atomistics.atomistics.job.interactive import GenericInteractive\n'), ((2558, 2574), 'ase.cell.Cell', 'Cell', ([], {}), '(**celldict)\n', (2562, 2574), False, 'from ase.cell import Cell\n'), ((3102, 3129), 'ase.constraints.dict2constraint', 'dict2constraint', (['const_dict'], {}), '(const_dict)\n', (3117, 3129), False, 'from ase.constraints import dict2constraint\n'), ((3904, 3928), 'pyiron_atomistics.atomistics.structure.atoms.pyiron_to_ase', 'pyiron_to_ase', (['structure'], {}), '(structure)\n', (3917, 3928), False, 'from pyiron_atomistics.atomistics.structure.atoms import pyiron_to_ase, Atoms as PAtoms\n'), ((13747, 13764), 'numpy.array', 'np.array', (['momenta'], {}), '(momenta)\n', (13755, 13764), True, 'import numpy as np\n'), ((15749, 15778), 'numpy.array', 'np.array', (['a', 'dtype'], {'order': '"""C"""'}), "(a, dtype, order='C')\n", (15757, 15778), True, 'import numpy as np\n'), ((16956, 16991), 'numpy.dot', 'np.dot', (['m', "self.arrays['positions']"], {}), "(m, self.arrays['positions'])\n", (16962, 16991), True, 'import numpy as np\n'), ((7727, 7760), 'numpy.array', 'np.array', (['current_structure_index'], {}), '(current_structure_index)\n', (7735, 7760), True, 'import numpy as np\n'), ((7774, 7808), 'numpy.array', 'np.array', (['previous_structure_index'], {}), '(previous_structure_index)\n', (7782, 7808), True, 'import numpy as np\n'), ((15008, 15021), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (15018, 15021), True, 'import numpy as np\n'), ((15947, 15970), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['a'], {}), '(a)\n', (15967, 15970), True, 'import numpy as np\n'), ((17076, 17129), 'numpy.linalg.solve', 'np.linalg.solve', (['self._ham.structure.cells[-1].T', 'com'], {}), '(self._ham.structure.cells[-1].T, com)\n', (17091, 17129), True, 'import numpy as np\n'), ((17171, 17221), 'numpy.linalg.solve', 'np.linalg.solve', (['self._ham.output.cells[-1].T', 'com'], {}), '(self._ham.output.cells[-1].T, com)\n', (17186, 17221), True, 'import numpy as np\n'), ((9225, 9265), 'numpy.array', 'np.array', (['[el_lst[el] for el in indices]'], {}), '([el_lst[el] for el in indices])\n', (9233, 9265), True, 'import numpy as np\n'), ((12725, 12776), 'numpy.array', 'np.array', (["self.interactive_cache['energy_pot'][::2]"], {}), "(self.interactive_cache['energy_pot'][::2])\n", (12733, 12776), True, 'import numpy as np\n'), ((12799, 12845), 'numpy.array', 'np.array', (["self.interactive_cache['energy_kin']"], {}), "(self.interactive_cache['energy_kin'])\n", (12807, 12845), True, 'import numpy as np\n'), ((13085, 13131), 'numpy.array', 'np.array', (["self.interactive_cache['energy_kin']"], {}), 
"(self.interactive_cache['energy_kin'])\n", (13093, 13131), True, 'import numpy as np\n'), ((13020, 13057), 'numpy.array', 'np.array', (['self._ham.output.energy_pot'], {}), '(self._ham.output.energy_pot)\n', (13028, 13057), True, 'import numpy as np\n')]
|
import numpy as np
from logistic_regression import logistic_kernel_regression, compute_label
from kernel_creation import convert_spectral_kernel_quad, convert_spectral_kernel_quint, convert_spectral_kernel_trig
from kernel_creation import convert_acid_kernel, convert_acid_quad, convert_mismatch_lev, convert_lect_trig, get_mismatch_dict
from kernel_creation import get_correspondances, convert_mismatch_dico, get_full_corres, convert_encode
from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian
from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted
from SVM import SVM, svm_compute_label
list_letters = ["A", "C", "G", "T"]
list_trig = [a + b + c for a in list_letters for b in list_letters for c in list_letters]
list_quad = [a + b + c + d for a in list_letters for b in list_letters for c in list_letters for d in list_letters]
list_quint = [a + b + c + d + e for a in list_letters for b in list_letters for c in list_letters for d in list_letters for e in list_letters]
list_six = [a + b + c + d + e + f for a in list_letters for b in list_letters for c in list_letters for d in list_letters for e in list_letters for f in list_letters]
dico_acid = {'Alanine': [ 'GCU', 'GCC', 'GCA', 'GCG'], 'Arginine': ['CGU', 'CGC', 'CGA', 'CGG' , 'AGA', 'AGG'],
'Asparagine': ['AAU', 'AAC'], 'Acide aspartique': ['GAU', 'GAC'],
'Cysteine': ['UGU', 'UGC'], 'Glutamine': ['CAA', 'CAG'], 'Acide glutamique':['GAA', 'GAG'],
'Glycine':['GGU', 'GGC', 'GGA', 'GGG'], 'Histidine': ['CAU', 'CAC'], 'Isoleucine': ['AUU', 'AUC', 'AUA'],
'Leucine': ['UUA', 'UUG' , 'CUU', 'CUC', 'CUA', 'CUG'], 'Lysine': ['AAA', 'AAG'],
'Methionine': ['AUG'], 'Phenylalanine':['UUU', 'UUC'], 'Proline' :['CCU', 'CCC', 'CCA', 'CCG'],
'Pyrrolysine': ['UAG'], 'Selenocysteine':['UGA'], 'Serine':['UCU', 'UCC', 'UCA', 'UCG' , 'AGU', 'AGC'],
'Threonine':['ACU', 'ACC', 'ACA', 'ACG'], 'Tryptophane':['UGG'], 'Tyrosine':['UAU', 'UAC'],
'Valine':['GUU', 'GUC', 'GUA', 'GUG'], 'Initiation': ['AUG'], 'Terminaison': ['UAG', 'UAA', 'UGA']}
def is_pos_def(x):
return np.all(np.linalg.eigvals(x) > 0)
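# Illustrative note (not in the original script): a Gram matrix that is only positive
# semi-definite, or slightly indefinite due to round-off, can be nudged into positive
# definiteness with a small ridge before this check, mirroring the
# add_param * np.identity(...) regularisation used further below, e.g.
#   K_add = K + 1e-10 * np.identity(K.shape[0])
#   assert is_pos_def(K_add)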
## Parameters
lamb_log = 0.0000001
lamb_svm = 0.00001
sigma = 0.8
add_param = 10.**(-10)
list_seq_id = list_six
mis_lev = False
if mis_lev:
dict_mismatch = get_mismatch_dict(list_seq_id)
mis_dic = False
size_seq = 6
nb_mis = 0
beg = 0
if mis_dic:
dict_corres = get_correspondances(list_seq_id, nb_mis, list_letters)
list_mis_corres = dict_corres.keys()
print(list_mis_corres)
mis_dic_full = False
if mis_dic_full:
dict_corres = get_full_corres(list_seq_id, nb_mis, list_letters)
list_mis_corres = dict_corres.keys()
##
list_labels_log = []
list_labels_svm = []
for name in [ "0", "1","2"]:
print ("beginning loading of the data")
# Training data
sequences = read_csv_file_data("data/Xtr"+ name+ ".csv")
#list_converted = convert_spectral_kernel_trig(sequences, list_seq_id)
#list_converted = convert_spectral_kernel_quad(sequences, list_quad)
list_converted = convert_spectral_kernel_quint(sequences, list_quint)
#list_converted = convert_spectral_kernel_quint(sequences, list_quint)
#list_converted = convert_acid_kernel(sequences, dico_acid)
    #list_converted = convert_acid_quad(sequences, dico_acid, list_quad)
#list_converted = convert_mismatch_lev(sequences, list_seq_id, dict_mismatch, size_seq, nb_mis)
#list_converted = convert_lect_trig(sequences, list_seq_id, beg)
#list_converted = convert_lect_acid(sequences, dico_acid, beg)
#list_converted = convert_mismatch_dico(sequences, dict_corres,list_mis_corres, list_seq_id)
#list_converted = convert_encode(sequences, list_letters)
training = np.asarray(list_converted, dtype = float)
# to avoid huge values and to save time for the logistic regression :
sm = np.sum(training, axis= 1)
training = training/sm[0]
mean = np.mean(training, axis= 0)
training = training - mean
#vst = np.std(training, axis= 0)
#training = training / vst
#save_data_converted("spectral_kernel/Xtr"+ name+ ".csv", training)
# label training data
label = read_csv_file_label("data/Ytr"+ name+ ".csv")
label= np.asarray(label).reshape((len(label), ))
# select what will be the test for training
size_test = int(training.shape[0]/10)
test_train = training[0:size_test]
label_test_train = label[0:size_test]
print( label_test_train.shape)
size_total = training.shape[0]
training = training[size_test:size_total]
label_train = label[size_test:size_total]
print (label_train.shape)
# Test data
sequences_test = read_csv_file_data("data/Xte"+ name+ ".csv")
#list_converted_test = convert_spectral_kernel_trig(sequences_test, list_seq_id)
#list_converted_test = convert_spectral_kernel_quad(sequences_test, list_quad)
list_converted_test = convert_spectral_kernel_quint(sequences_test, list_quint)
#list_converted_test = convert_acid_kernel(sequences_test, dico_acid)
#list_converted_test = convert_acid_quad(sequences_test, dico_acid, list_quad)
#list_converted_test = convert_mismatch_lev(sequences_test, list_seq_id, dict_mismatch, size_seq, nb_mis)
#list_converted_test = convert_lect_trig(sequences_test, list_seq_id, beg )
#list_converted_test = convert_lect_acid(sequences_test, dico_acid, beg)
#list_converted_test = convert_mismatch_dico(sequences_test, dict_corres,list_mis_corres, list_seq_id)
#list_converted_test = convert_encode(sequences, list_letters)
testing = np.asarray(list_converted_test, dtype = float)
# to avoid huge values and to save time for the logistic regression :
testing = testing/sm[0]
testing = testing - mean
#testing = testing/ vst
# param for each dataset:
"""if name=="0":
lamb_svm = 0.000008
add_param = 10. ** (-10)
if name=="1":
lamb_svm = 0.00001
add_param = 10.**(-10)
if name == "2":
lamb_svm = 0.000005
add_param=10.**(-9)"""
if name=="2":
add_param = 10**(-9)
print ("data loaded")
# Computing the kernel
print ("beginning computing K")
K = compute_K_matrix(training)
add = add_param*np.identity(K.shape[0])
K_add = K + add # to make it positive definite
#K = compute_K_gaussian(training, sigma)
#K_add = K
print(K)
print("K shape", K.shape)
print(is_pos_def(K_add))
K_test_train = compute_test_matrix(training, test_train)
print (K_test_train.shape)
print ("K computed")
"""#Training : kernel logistic regression
alpha = logistic_kernel_regression(K, label_train, lamb_log, 15, K_test_train, label_test_train)
# Testing : kernel logistic regression
Ktest = compute_test_matrix(training, testing)
labels_test = compute_label(Ktest, alpha)
list_labels_log = list_labels_log + labels_test"""
# Training : SVM
alpha = SVM(K_add, label_train, lamb_svm, K_test_train, label_test_train)
print(alpha)
# Testing : kernel logistic regression
Ktest = compute_test_matrix(training, testing)
labels_test = svm_compute_label(Ktest, alpha)
list_labels_svm = list_labels_svm + labels_test
save_label(0, list_labels_svm,"results/SVM-quint-centered-mixed.csv" )
|
[
"read_fn.save_label",
"kernel_creation.get_mismatch_dict",
"kernel_creation.compute_test_matrix",
"numpy.sum",
"numpy.linalg.eigvals",
"SVM.SVM",
"numpy.asarray",
"numpy.identity",
"read_fn.read_csv_file_label",
"kernel_creation.compute_K_matrix",
"SVM.svm_compute_label",
"numpy.mean",
"kernel_creation.convert_spectral_kernel_quint",
"kernel_creation.get_full_corres",
"kernel_creation.get_correspondances",
"read_fn.read_csv_file_data"
] |
[((7342, 7412), 'read_fn.save_label', 'save_label', (['(0)', 'list_labels_svm', '"""results/SVM-quint-centered-mixed.csv"""'], {}), "(0, list_labels_svm, 'results/SVM-quint-centered-mixed.csv')\n", (7352, 7412), False, 'from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted\n'), ((2416, 2446), 'kernel_creation.get_mismatch_dict', 'get_mismatch_dict', (['list_seq_id'], {}), '(list_seq_id)\n', (2433, 2446), False, 'from kernel_creation import convert_acid_kernel, convert_acid_quad, convert_mismatch_lev, convert_lect_trig, get_mismatch_dict\n'), ((2527, 2581), 'kernel_creation.get_correspondances', 'get_correspondances', (['list_seq_id', 'nb_mis', 'list_letters'], {}), '(list_seq_id, nb_mis, list_letters)\n', (2546, 2581), False, 'from kernel_creation import get_correspondances, convert_mismatch_dico, get_full_corres, convert_encode\n'), ((2706, 2756), 'kernel_creation.get_full_corres', 'get_full_corres', (['list_seq_id', 'nb_mis', 'list_letters'], {}), '(list_seq_id, nb_mis, list_letters)\n', (2721, 2756), False, 'from kernel_creation import get_correspondances, convert_mismatch_dico, get_full_corres, convert_encode\n'), ((2953, 2999), 'read_fn.read_csv_file_data', 'read_csv_file_data', (["('data/Xtr' + name + '.csv')"], {}), "('data/Xtr' + name + '.csv')\n", (2971, 2999), False, 'from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted\n'), ((3167, 3219), 'kernel_creation.convert_spectral_kernel_quint', 'convert_spectral_kernel_quint', (['sequences', 'list_quint'], {}), '(sequences, list_quint)\n', (3196, 3219), False, 'from kernel_creation import convert_spectral_kernel_quad, convert_spectral_kernel_quint, convert_spectral_kernel_trig\n'), ((3844, 3883), 'numpy.asarray', 'np.asarray', (['list_converted'], {'dtype': 'float'}), '(list_converted, dtype=float)\n', (3854, 3883), True, 'import numpy as np\n'), ((3970, 3994), 'numpy.sum', 'np.sum', (['training'], {'axis': '(1)'}), '(training, axis=1)\n', (3976, 3994), True, 'import numpy as np\n'), ((4038, 4063), 'numpy.mean', 'np.mean', (['training'], {'axis': '(0)'}), '(training, axis=0)\n', (4045, 4063), True, 'import numpy as np\n'), ((4277, 4324), 'read_fn.read_csv_file_label', 'read_csv_file_label', (["('data/Ytr' + name + '.csv')"], {}), "('data/Ytr' + name + '.csv')\n", (4296, 4324), False, 'from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted\n'), ((4778, 4824), 'read_fn.read_csv_file_data', 'read_csv_file_data', (["('data/Xte' + name + '.csv')"], {}), "('data/Xte' + name + '.csv')\n", (4796, 4824), False, 'from read_fn import read_csv_file_label, read_csv_file_data, save_label, save_data_converted\n'), ((5017, 5074), 'kernel_creation.convert_spectral_kernel_quint', 'convert_spectral_kernel_quint', (['sequences_test', 'list_quint'], {}), '(sequences_test, list_quint)\n', (5046, 5074), False, 'from kernel_creation import convert_spectral_kernel_quad, convert_spectral_kernel_quint, convert_spectral_kernel_trig\n'), ((5687, 5731), 'numpy.asarray', 'np.asarray', (['list_converted_test'], {'dtype': 'float'}), '(list_converted_test, dtype=float)\n', (5697, 5731), True, 'import numpy as np\n'), ((6311, 6337), 'kernel_creation.compute_K_matrix', 'compute_K_matrix', (['training'], {}), '(training)\n', (6327, 6337), False, 'from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian\n'), ((6584, 6625), 'kernel_creation.compute_test_matrix', 'compute_test_matrix', (['training', 'test_train'], 
{}), '(training, test_train)\n', (6603, 6625), False, 'from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian\n'), ((7061, 7126), 'SVM.SVM', 'SVM', (['K_add', 'label_train', 'lamb_svm', 'K_test_train', 'label_test_train'], {}), '(K_add, label_train, lamb_svm, K_test_train, label_test_train)\n', (7064, 7126), False, 'from SVM import SVM, svm_compute_label\n'), ((7199, 7237), 'kernel_creation.compute_test_matrix', 'compute_test_matrix', (['training', 'testing'], {}), '(training, testing)\n', (7218, 7237), False, 'from kernel_creation import compute_test_matrix, compute_K_matrix, convert_lect_acid, compute_K_gaussian\n'), ((7256, 7287), 'SVM.svm_compute_label', 'svm_compute_label', (['Ktest', 'alpha'], {}), '(Ktest, alpha)\n', (7273, 7287), False, 'from SVM import SVM, svm_compute_label\n'), ((6358, 6381), 'numpy.identity', 'np.identity', (['K.shape[0]'], {}), '(K.shape[0])\n', (6369, 6381), True, 'import numpy as np\n'), ((2229, 2249), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['x'], {}), '(x)\n', (2246, 2249), True, 'import numpy as np\n'), ((4334, 4351), 'numpy.asarray', 'np.asarray', (['label'], {}), '(label)\n', (4344, 4351), True, 'import numpy as np\n')]
|
# Script which helps to plot Figures 3A and 3B
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Include all GENES, those containing indels and SNVs (that's why this step of loading the "alleles" dataframe is repeated). This prevents bad grouping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
dff = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
mask = (dff['N_alleles'] != 0) & (dff['Phenotype_G6PD'] != 'G6PD_Normal')
dff_valid = dff[mask]
dff['N_phenotypes'] = 0
dff['Phenotype'] = dff['Phenotype'].apply(lambda x: ','.join([i for i in x.split(',') if i != 'G6PD_Normal']))
dff.loc[mask, 'N_phenotypes'] = dff_valid['Phenotype'].apply(lambda x: len(x.split(',')))
gf = dff.groupby('N_phenotypes')['sample'].count()
GF = {'Nr. phenotypes': list(gf.index), 'Count':100*(gf.values / gf.sum()), 'Group':['(N=5001)']*len(gf)}
GF = pd.DataFrame(GF)
tf = GF.iloc[0:4]
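# Collapse all samples with 4 or more phenotypes into a single '[4,7]' bin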
d = {'Nr. phenotypes':'[4,7]', 'Count':sum(GF['Count'].iloc[4:]), 'Group':'(N=5001)'}
tf = tf.append(d, ignore_index=True)
bottom = 0
f, ax1 = plt.subplots(figsize=(2,4))
f.set_size_inches(2.7, 4.0)
for i, j in zip(list(tf['Count'].values), list(tf['Nr. phenotypes'])):
ax1.bar('N=5001',i,label=j, bottom = bottom, edgecolor = 'black')
bottom = bottom + i
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles[::-1], labels[::-1], loc='center left',bbox_to_anchor=(1.0, 0.5), title='Nr. phenotypes', fontsize=14,title_fontsize=14) # title = TITLE,
plt.ylabel('%',fontsize=14)
plt.yticks(np.arange(0, 100,10 ))
plt.subplots_adjust(left=0.23, bottom=0.1, right=0.5, top=0.95, wspace=0.14, hspace=0.24)
plt.savefig('/path/to/Figures/Figure_3A_nrphenotypes.png',format = 'png', dpi = 500)
plt.show()
####################################### FIGURE 3B
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Include all GENES, those containing indels and SNVs (that's why this step of loading the "alleles" dataframe is repeated). This prevents bad grouping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
dff = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
#dff = dff.loc[dff['from'] == 'ESPAÑA']
mask = (dff['N_alleles'] != 0) & (dff['Phenotype_G6PD'] != 'G6PD_Normal')
dff_valid = dff[mask]
dff['N_phenotypes'] = 0
dff['Phenotype'] = dff['Phenotype'].apply(lambda x: ','.join([i for i in x.split(',') if i != 'G6PD_Normal']))
dff.loc[mask, 'N_phenotypes'] = dff_valid['Phenotype'].apply(lambda x: len(x.split(',')))
GENES.sort()
pct_phenot = list()
for gene in GENES:
pct_phenot.append(100*(dff.groupby('Phenotype_' + gene)['sample'].count().values.sum() / len(dff)))
f, ax1 = plt.subplots(figsize=(6,3.5))
plt.grid(axis='x')
plt.barh(GENES, [100]*len(GENES), align='center', height=.35, color='tab:grey',label='Actionable phenotype')
plt.barh(GENES, pct_phenot, align='center', height=.35, color='tab:red',label='Actionable phenotype',edgecolor = 'k')
plt.xlim([0,100])
plt.xlabel('% population with pharmacogenetic phenotype (n=5001)', fontsize=12)
plt.subplots_adjust(left=0.130, bottom=0.140, right=0.945, top=0.97, wspace=0.14, hspace=0.24)
#plt.savefig('/path/to/Figures/Fig3B.png',format = 'png', dpi = 500)
plt.savefig('Fig3B.png',format = 'png', dpi = 500)
plt.show()
'''### Figure 2A
cols = ['N_alleles','SNV_N_alleles','INDELS_N_alleles']
gf = df.groupby(cols[0])['sample'].count().reset_index()
gf = gf.rename(columns={'sample':cols[0] + '_all'})
dgf = dict(zip(list(df.groupby(cols[1])['sample'].count().index), list(df.groupby(cols[1])['sample'].count().values)))
plt.subplots_adjust(left=0.10, bottom=0.08, right=0.85, top=0.90, wspace=0.14, hspace=0.24)
plt.xticks(rotation=0)
plt.ylim(0,100)
plt.xlabel('')
plt.show()
plt.xticks(rotation=90)
plt.ylim(0,100)
plt.ylabel('%')
plt.show()'''
|
[
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.barh",
"numpy.arange",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.grid"
] |
[((309, 363), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/Alleles_20201228.csv"""'], {'sep': '"""\t"""'}), "('/path/to/Alleles_20201228.csv', sep='\\t')\n", (320, 363), True, 'import pandas as pd\n'), ((629, 686), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/phenotypes_20210107.csv"""'], {'sep': '"""\t"""'}), "('/path/to/phenotypes_20210107.csv', sep='\\t')\n", (640, 686), True, 'import pandas as pd\n'), ((1170, 1186), 'pandas.DataFrame', 'pd.DataFrame', (['GF'], {}), '(GF)\n', (1182, 1186), True, 'import pandas as pd\n'), ((1350, 1378), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2, 4)'}), '(figsize=(2, 4))\n', (1362, 1378), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1802), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""%"""'], {'fontsize': '(14)'}), "('%', fontsize=14)\n", (1784, 1802), True, 'import matplotlib.pyplot as plt\n'), ((1836, 1929), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.23)', 'bottom': '(0.1)', 'right': '(0.5)', 'top': '(0.95)', 'wspace': '(0.14)', 'hspace': '(0.24)'}), '(left=0.23, bottom=0.1, right=0.5, top=0.95, wspace=0.14,\n hspace=0.24)\n', (1855, 1929), True, 'import matplotlib.pyplot as plt\n'), ((1926, 2011), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/path/to/Figures/Figure_3A_nrphenotypes.png"""'], {'format': '"""png"""', 'dpi': '(500)'}), "('/path/to/Figures/Figure_3A_nrphenotypes.png', format='png',\n dpi=500)\n", (1937, 2011), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2019, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2338, 2392), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/Alleles_20201228.csv"""'], {'sep': '"""\t"""'}), "('/path/to/Alleles_20201228.csv', sep='\\t')\n", (2349, 2392), True, 'import pandas as pd\n'), ((2657, 2714), 'pandas.read_csv', 'pd.read_csv', (['"""/path/to/phenotypes_20210107.csv"""'], {'sep': '"""\t"""'}), "('/path/to/phenotypes_20210107.csv', sep='\\t')\n", (2668, 2714), True, 'import pandas as pd\n'), ((3242, 3272), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 3.5)'}), '(figsize=(6, 3.5))\n', (3254, 3272), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3290), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""x"""'}), "(axis='x')\n", (3280, 3290), True, 'import matplotlib.pyplot as plt\n'), ((3400, 3522), 'matplotlib.pyplot.barh', 'plt.barh', (['GENES', 'pct_phenot'], {'align': '"""center"""', 'height': '(0.35)', 'color': '"""tab:red"""', 'label': '"""Actionable phenotype"""', 'edgecolor': '"""k"""'}), "(GENES, pct_phenot, align='center', height=0.35, color='tab:red',\n label='Actionable phenotype', edgecolor='k')\n", (3408, 3522), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3536), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 100]'], {}), '([0, 100])\n', (3526, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3615), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""% population with pharmacogenetic phenotype (n=5001)"""'], {'fontsize': '(12)'}), "('% population with pharmacogenetic phenotype (n=5001)', fontsize=12)\n", (3546, 3615), True, 'import matplotlib.pyplot as plt\n'), ((3616, 3713), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.13)', 'bottom': '(0.14)', 'right': '(0.945)', 'top': '(0.97)', 'wspace': '(0.14)', 'hspace': '(0.24)'}), '(left=0.13, bottom=0.14, right=0.945, top=0.97, wspace=\n 0.14, hspace=0.24)\n', (3635, 3713), True, 'import matplotlib.pyplot as plt\n'), ((3780, 
3827), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Fig3B.png"""'], {'format': '"""png"""', 'dpi': '(500)'}), "('Fig3B.png', format='png', dpi=500)\n", (3791, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3832, 3842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3840, 3842), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1834), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (1822, 1834), True, 'import numpy as np\n')]
|
import numpy as np
import os
from astropy.io import fits
from astropy.stats import sigma_clip, sigma_clipped_stats
from specklepy.logging import logger
from specklepy.reduction.subwindow import SubWindow
from specklepy.utils.time import default_time_stamp
class MasterDark(object):
extensions = {'variance': 'VAR', 'mask': 'MASK'}
def __init__(self, file_list, file_name='MasterDark.fits', file_path=None, out_dir=None, setup=None,
sub_window=None, new=True):
self.files = file_list
self.file_name = self.insert_setup_to_file_name(file_name=file_name, setup=setup)
self.file_path = file_path if file_path is not None else ''
self.out_dir = out_dir if out_dir is not None else ''
# Store sub-window
if isinstance(sub_window, str):
self.sub_window = sub_window
else:
self.sub_window = np.unique(sub_window)[0]
# Initialize maps
self.image = None
self.var = None
self.mask = None
@classmethod
def from_file(cls, file_path):
# Create object from path information
out_dir, file_name = os.path.split(file_path)
obj = cls(file_list=None, file_name=file_name, out_dir=out_dir, setup=None)
# Load data from file
obj.image = fits.getdata(obj.path)
try:
obj.var = fits.getdata(obj.path, obj.extensions.get('variance'))
except KeyError:
logger.debug(f"Loading MasterDark from file {obj.path!r} without {obj.extensions.get('variance')!r} "
f"extension")
try:
obj.mask = fits.getdata(obj.path, obj.extensions.get('mask')).astype(bool)
except KeyError:
logger.debug(f"Loading MasterDark from file {obj.path!r} without {obj.extensions.get('mask')!r} "
f"extension")
obj.sub_window = fits.getheader(obj.path)["HIERARCH SPECKLEPY REDUCTION SUBWIN"]
return obj
@property
def path(self):
return os.path.join(self.out_dir, self.file_name)
@staticmethod
def insert_setup_to_file_name(file_name, setup=None):
if setup is None:
return file_name
else:
base, ext = os.path.splitext(file_name)
return f"{base}_{setup}{ext}"
def combine(self, max_number_frames=None, rejection_threshold=10):
logger.info("Combining master dark frame...")
# if max_number_frames is not None:
# logger.debug(f"Using only the first {max_number_frames} frames of each cube")
means = []
vars = []
number_frames = []
# Iterate through files
for file in self.files:
logger.info(f"Reading DARK frames from file {file!r}...")
path = os.path.join(self.file_path, file)
with fits.open(path) as hdu_list:
data = hdu_list[0].data.squeeze()
if data.ndim == 2:
means.append(data)
vars.append(np.zeros(data.shape))
# self.combine_mask(np.zeros(data.shape, dtype=bool))
number_frames.append(1)
elif data.ndim == 3:
logger.info("Computing statistics of data cube...")
clipped_mean, _, clipped_std = sigma_clipped_stats(data=data, sigma=rejection_threshold, axis=0)
# mean = np.mean(data, axis=0)
# std = np.std(data, axis=0)
#
# # Identify outliers based on sigma-clipping
# mean_mask = sigma_clip(mean, sigma=rejection_threshold, masked=True).mask
# std_mask = sigma_clip(std, sigma=rejection_threshold, masked=True).mask
# mask = np.logical_or(mean_mask, std_mask)
# mask_indexes = np.array(np.where(mask)).transpose()
#
# # Re-compute the identified pixels
# logger.info(f"Re-measuring {len(mask_indexes)} outliers...")
# for mask_index in mask_indexes:
# # Extract t-series for the masked pixel
# arr = data[:, mask_index[0], mask_index[1]]
#
# # Compute sigma-clipped statistics for this pixel
# arr_mean, _, arr_std = sigma_clipped_stats(arr, sigma=rejection_threshold)
# mean[mask_index[0], mask_index[1]] = arr_mean
# std[mask_index[0], mask_index[1]] = arr_std
#
# mean = sigma_clip(mean, sigma=rejection_threshold, masked=True)
# std = sigma_clip(std, sigma=rejection_threshold, masked=True)
# Store results into lists
means.append(clipped_mean)
vars.append(np.square(clipped_std))
# self.combine_mask(np.logical_or(mean.mask, std.mask))
number_frames.append(data.shape[0])
else:
raise ValueError(f"Shape of data {data.shape} is not understood. Data must be either 2 or "
f"3-dimensional!")
# Cast list of arrays into 3-dim arrays
means = np.array(means)
vars = np.array(vars)
# Combine variances
if (vars == 0).all(): # catch case, where all frames have no variance
self.var = np.var(means, axis=0)
else:
self.var = np.average(vars, axis=0, weights=number_frames)
# Build mask based on variances
bpm = self.var == 0 # Bad pixel mask
if bpm.all(): # Catch case, where all frames have no variance
bpm = np.zeros(bpm.shape, dtype=bool)
gpm = ~bpm # Good pixel mask
# Build weights based on variance, and combine images
weights = np.multiply(np.reciprocal(self.var, where=gpm), np.expand_dims(number_frames, (1, 2)))
self.image = np.average(means, axis=0, weights=weights)
# Combine mask
self.mask = bpm
# def combine_var(self, new_var):
# if self.var is None:
# self.var = new_var
# else:
# self.var = np.add(self.var, new_var)
#
# def combine_mask(self, new_mask):
# if self.mask is None:
# self.mask = new_mask
# else:
# self.mask = np.logical_or(self.mask, new_mask)
def write(self, overwrite=True):
# Build primary HDU
header = fits.Header()
for index, file in enumerate(self.files):
header.set(f"HIERARCH SPECKLEPY SOURCE FILE{index:04} NAME", os.path.basename(file))
header.set("HIERARCH SPECKLEPY REDUCTION SUBWIN", self.sub_window)
primary = fits.PrimaryHDU(data=self.image, header=header)
# Build HDU list
hdu_list = fits.HDUList([primary])
# Build variance HDU
if self.var is not None:
var_hdu = fits.ImageHDU(data=self.var, name=self.extensions.get('variance'))
hdu_list.append(var_hdu)
# Build mask HDU
if self.mask is not None:
mask_hdu = fits.ImageHDU(data=self.mask.astype(np.int16), name=self.extensions.get('mask'))
hdu_list.append(mask_hdu)
# Write HDU list to file
logger.info(f"Writing master dark frame to file {self.path!r}")
hdu_list.writeto(self.path, overwrite=overwrite)
def subtract(self, file_path, extension=None, sub_window=None, sub_window_order='xy'):
"""Subtract the master dark from a file containing image data.
The master dark is subtracted from the image or each frame in a data cube. Then uncertainties are propagated.
Arguments:
file_path (str):
Path to the file, containing image data.
extension (str, optional):
Classifier for the image data extension.
sub_window (str, optional):
Sub-window string to initialize sub-windows from.
sub_window_order (str, optional):
Order of axis in the sub-window strings.
"""
logger.info(f"Subtracting master dark {self.file_name!r} from file at {file_path!r}")
# Construct sub-window
sub_window = SubWindow.from_str(sub_window, full=self.sub_window, order=sub_window_order)
# Construct good pixel mask
if self.mask is None:
gpm = np.ones(sub_window(self.image).shape, dtype=bool)
else:
gpm = sub_window(~self.mask)
# Load image data
data = fits.getdata(file_path, extension)
# Subtract
if data.ndim == 2:
data = np.subtract(data, sub_window(self.image), where=gpm)
elif data.ndim == 3:
for f, frame in enumerate(data):
data[f] = np.subtract(frame, sub_window(self.image), where=gpm)
# Propagate variances
try:
var = fits.getdata(file_path, self.extensions.get('variance'))
has_var_hdu = True
var = np.add(var, sub_window(self.var), where=gpm)
except KeyError:
has_var_hdu = False
var = sub_window(self.var)
# Propagate mask
try:
mask = fits.getdata(file_path, self.extensions.get('mask')).astype(bool)
has_mask_hdu = True
mask = np.logical_or(mask, sub_window(self.mask))
except KeyError:
has_mask_hdu = False
mask = sub_window(self.mask)
# Store data to cube
with fits.open(file_path, mode='update') as hdu_list:
# Update header
hdu_list[0].header.set('HIERARCH SPECKLEPY REDUCTION DARKCORR', default_time_stamp())
# Image data
hdu_list[0].data = data
# Variance data
if has_var_hdu:
hdu_list[self.extensions.get('variance')].data = var
else:
var_hdu = fits.ImageHDU(data=var, name=self.extensions.get('variance'))
hdu_list.append(var_hdu)
# Mask data
if has_mask_hdu:
hdu_list[self.extensions.get('mask')].data = mask.astype(np.int16)
else:
mask_hdu = fits.ImageHDU(data=mask.astype(np.int16), name=self.extensions.get('mask'))
hdu_list.append(mask_hdu)
# Write HDU list to file
logger.info(f"Updating dark subtraction in file {file_path!r}")
hdu_list.flush()
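def _example_master_dark_workflow():
    """Illustrative sketch only, not part of the original module.

    The file names, directories and the sub-window string below are placeholder
    assumptions; the exact sub-window format accepted by SubWindow.from_str is
    defined elsewhere in specklepy and is not shown here.
    """
    # Combine a list of raw dark frames into a master dark and write it to disk
    master = MasterDark(file_list=['dark_0001.fits', 'dark_0002.fits'],
                        file_name='MasterDark.fits', file_path='raw', out_dir='master',
                        sub_window='<detector sub-window string>')
    master.combine(rejection_threshold=10)
    master.write(overwrite=True)
    # Subtract the master dark (with variance and mask propagation) from a science frame
    master.subtract('science_0001.fits', extension=0)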
|
[
"astropy.stats.sigma_clipped_stats",
"astropy.io.fits.PrimaryHDU",
"numpy.reciprocal",
"astropy.io.fits.Header",
"astropy.io.fits.HDUList",
"os.path.join",
"numpy.unique",
"specklepy.reduction.subwindow.SubWindow.from_str",
"astropy.io.fits.getdata",
"numpy.var",
"specklepy.utils.time.default_time_stamp",
"numpy.average",
"os.path.basename",
"specklepy.logging.logger.info",
"numpy.square",
"astropy.io.fits.open",
"numpy.zeros",
"numpy.expand_dims",
"astropy.io.fits.getheader",
"numpy.array",
"os.path.splitext",
"os.path.split"
] |
[((1150, 1174), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (1163, 1174), False, 'import os\n'), ((1310, 1332), 'astropy.io.fits.getdata', 'fits.getdata', (['obj.path'], {}), '(obj.path)\n', (1322, 1332), False, 'from astropy.io import fits\n'), ((2034, 2076), 'os.path.join', 'os.path.join', (['self.out_dir', 'self.file_name'], {}), '(self.out_dir, self.file_name)\n', (2046, 2076), False, 'import os\n'), ((2397, 2442), 'specklepy.logging.logger.info', 'logger.info', (['"""Combining master dark frame..."""'], {}), "('Combining master dark frame...')\n", (2408, 2442), False, 'from specklepy.logging import logger\n'), ((5342, 5357), 'numpy.array', 'np.array', (['means'], {}), '(means)\n', (5350, 5357), True, 'import numpy as np\n'), ((5373, 5387), 'numpy.array', 'np.array', (['vars'], {}), '(vars)\n', (5381, 5387), True, 'import numpy as np\n'), ((6061, 6103), 'numpy.average', 'np.average', (['means'], {'axis': '(0)', 'weights': 'weights'}), '(means, axis=0, weights=weights)\n', (6071, 6103), True, 'import numpy as np\n'), ((6596, 6609), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (6607, 6609), False, 'from astropy.io import fits\n'), ((6850, 6897), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'self.image', 'header': 'header'}), '(data=self.image, header=header)\n', (6865, 6897), False, 'from astropy.io import fits\n'), ((6943, 6966), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[primary]'], {}), '([primary])\n', (6955, 6966), False, 'from astropy.io import fits\n'), ((7400, 7463), 'specklepy.logging.logger.info', 'logger.info', (['f"""Writing master dark frame to file {self.path!r}"""'], {}), "(f'Writing master dark frame to file {self.path!r}')\n", (7411, 7463), False, 'from specklepy.logging import logger\n'), ((8235, 8325), 'specklepy.logging.logger.info', 'logger.info', (['f"""Subtracting master dark {self.file_name!r} from file at {file_path!r}"""'], {}), "(\n f'Subtracting master dark {self.file_name!r} from file at {file_path!r}')\n", (8246, 8325), False, 'from specklepy.logging import logger\n'), ((8374, 8450), 'specklepy.reduction.subwindow.SubWindow.from_str', 'SubWindow.from_str', (['sub_window'], {'full': 'self.sub_window', 'order': 'sub_window_order'}), '(sub_window, full=self.sub_window, order=sub_window_order)\n', (8392, 8450), False, 'from specklepy.reduction.subwindow import SubWindow\n'), ((8683, 8717), 'astropy.io.fits.getdata', 'fits.getdata', (['file_path', 'extension'], {}), '(file_path, extension)\n', (8695, 8717), False, 'from astropy.io import fits\n'), ((1900, 1924), 'astropy.io.fits.getheader', 'fits.getheader', (['obj.path'], {}), '(obj.path)\n', (1914, 1924), False, 'from astropy.io import fits\n'), ((2247, 2274), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (2263, 2274), False, 'import os\n'), ((2720, 2777), 'specklepy.logging.logger.info', 'logger.info', (['f"""Reading DARK frames from file {file!r}..."""'], {}), "(f'Reading DARK frames from file {file!r}...')\n", (2731, 2777), False, 'from specklepy.logging import logger\n'), ((2797, 2831), 'os.path.join', 'os.path.join', (['self.file_path', 'file'], {}), '(self.file_path, file)\n', (2809, 2831), False, 'import os\n'), ((5519, 5540), 'numpy.var', 'np.var', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (5525, 5540), True, 'import numpy as np\n'), ((5578, 5625), 'numpy.average', 'np.average', (['vars'], {'axis': '(0)', 'weights': 'number_frames'}), '(vars, axis=0, weights=number_frames)\n', (5588, 5625), True, 
'import numpy as np\n'), ((5802, 5833), 'numpy.zeros', 'np.zeros', (['bpm.shape'], {'dtype': 'bool'}), '(bpm.shape, dtype=bool)\n', (5810, 5833), True, 'import numpy as np\n'), ((5965, 5999), 'numpy.reciprocal', 'np.reciprocal', (['self.var'], {'where': 'gpm'}), '(self.var, where=gpm)\n', (5978, 5999), True, 'import numpy as np\n'), ((6001, 6038), 'numpy.expand_dims', 'np.expand_dims', (['number_frames', '(1, 2)'], {}), '(number_frames, (1, 2))\n', (6015, 6038), True, 'import numpy as np\n'), ((9660, 9695), 'astropy.io.fits.open', 'fits.open', (['file_path'], {'mode': '"""update"""'}), "(file_path, mode='update')\n", (9669, 9695), False, 'from astropy.io import fits\n'), ((10520, 10583), 'specklepy.logging.logger.info', 'logger.info', (['f"""Updating dark subtraction in file {file_path!r}"""'], {}), "(f'Updating dark subtraction in file {file_path!r}')\n", (10531, 10583), False, 'from specklepy.logging import logger\n'), ((895, 916), 'numpy.unique', 'np.unique', (['sub_window'], {}), '(sub_window)\n', (904, 916), True, 'import numpy as np\n'), ((2849, 2864), 'astropy.io.fits.open', 'fits.open', (['path'], {}), '(path)\n', (2858, 2864), False, 'from astropy.io import fits\n'), ((6733, 6755), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (6749, 6755), False, 'import os\n'), ((9813, 9833), 'specklepy.utils.time.default_time_stamp', 'default_time_stamp', ([], {}), '()\n', (9831, 9833), False, 'from specklepy.utils.time import default_time_stamp\n'), ((3035, 3055), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (3043, 3055), True, 'import numpy as np\n'), ((3233, 3284), 'specklepy.logging.logger.info', 'logger.info', (['"""Computing statistics of data cube..."""'], {}), "('Computing statistics of data cube...')\n", (3244, 3284), False, 'from specklepy.logging import logger\n'), ((3336, 3401), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', ([], {'data': 'data', 'sigma': 'rejection_threshold', 'axis': '(0)'}), '(data=data, sigma=rejection_threshold, axis=0)\n', (3355, 3401), False, 'from astropy.stats import sigma_clip, sigma_clipped_stats\n'), ((4930, 4952), 'numpy.square', 'np.square', (['clipped_std'], {}), '(clipped_std)\n', (4939, 4952), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# coding: utf-8
# Contains common methods frequently used across....
# The example reference at the below matplotlib is helpful in choosing an
# appropriate colormap for the output plot
# https://matplotlib.org/examples/color/colormaps_reference.html
# import the necessary packages
import numpy as np
import matplotlib.pyplot as plt
def create_meshgrid(x, y, margin=1, step=0.02):
"""Create a numoy rectangular meshgrid out of an array of
x values and an array of y values
@ref https://stackoverflow.com/questions/36013063
/what-is-the-purpose-of-meshgrid-in-python-numpy
:x: array-like point x
:y: array-like point y
:margin: (int) boundary
:step: (float) stepping the values, default = 0.02
Examples
--------
x = np.array([0, 1, 2, 3, 4])
y = np.array([0, 1, 2, 3, 4])
xx,yy=np.meshgrid(x,y)
plt.plot(xx,yy, marker='.', color='k',linestyle='none')
"""
x_min, x_max = x.min() - margin, x.max() + margin
y_min, y_max = y.min() - margin, y.max() + margin
# define the mesh grid, with xx and yy holding the grid of
# points where the function will be evaluated
xx, yy = np.meshgrid(
np.arange(x_min, x_max, step), np.arange(y_min, y_max, step))
return xx, yy
def draw_decision_boundary(x,
y,
classifier,
margin=1,
step=0.02,
alpha=0.8,
cmap=plt.cm.coolwarm):
"""Draw decision boundary separating the collections
Parameters
----------
x: {array-like}, shape = [n_samples, n_features]
y: array-like, shape = [n_samples]
margin: margin for the min and max
    step: float
This is spacing between values. For any output out, this is the distance
between two adjacent values, out[i+1] - out[i]
alpha: float
color alpha value
cmap: color map
"""
# set-up the marker generator and color map for plotting
markers = ('s', 'o', 'x', '^', 'v')
# for data, first set-up a grid for plotting.
X0, X1 = x[:, 0], x[:, 1]
xx, yy = create_meshgrid(X0, X1, margin, step)
mesh = np.array([xx.ravel(), yy.ravel()])
print("np.array: {}", format(mesh))
# compute the classifiers output
Z = classifier.predict(mesh.T)
Z = Z.reshape(xx.shape)
# now plot the contour
plt.contourf(xx, yy, Z, alpha=alpha, cmap=cmap)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
for idx, cl in enumerate(np.unique(y)):
print("cl: ", cl)
plt.scatter(
x=x[y == cl, 0],
y=x[y == cl, 1],
alpha=alpha,
marker=markers[idx],
label=cl,
edgecolor='yellow')
def plot_classifier(X,
y,
classifier,
margin=1.0,
step_size=0.01,
alpha=0.8,
test_idx=None,
cmap=plt.cm.Paired):
"""Draw the datapoints and boundaries
Parameters
----------
x: {array-like}, shape = [n_samples, n_features]
y: array-like, shape = [n_samples]
margin: margin for the min and max
step_size: float
This is spacing between values. For any output out, this is the distance
between two adjacent values, out[i+1] - out[i]
alpha: float
blending value to decide transparency - 0 (transparent) and 1 (opaque)
test_idx: list
cmap: object
color map for the output colors of objects
"""
# set-up the marker generator for plotting
markers = ('s', 'o', 'x', '*', 'v')
# setup and define a range for plotting the data
X0, X1 = X[:, 0], X[:, 1]
xx, yy = create_meshgrid(X0, X1, margin=margin, step=step_size)
# compute the output of the classifier
mesh = np.c_[xx.ravel(), yy.ravel()]
mesh_output = classifier.predict(mesh)
# reshape the array
mesh_output = mesh_output.reshape(xx.shape)
# draw and fill contour lines
plt.contourf(xx, yy, mesh_output, alpha=0.4, cmap=cmap)
# now overlay the training coordinates over the plot
# set boundaries
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks((np.arange(int(min(X[:, 0]) - 1), int(max(X[:, 0]) + 1), 1.0)))
plt.yticks((np.arange(int(min(X[:, 1]) - 1), int(max(X[:, 1]) + 1), 1.0)))
# use a separate marker for each training label
for (i, cl) in enumerate(np.unique(y)):
plt.scatter(
x=X[y == cl, 0],
y=X[y == cl, 1],
alpha=alpha,
marker=markers[i],
label=cl,
edgecolors='purple')
# plotting and highlighting the test samples
if test_idx:
# x_test, y_test = X[test_idx, :], y[test_idx]
x_test = X[test_idx, :]
plt.scatter(
x_test[:, 0],
x_test[:, 1],
c='',
edgecolors='purple',
alpha=alpha,
linewidths=1,
marker='o',
s=100,
label='Test Data')
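def _example_plot_classifier():
    """Illustrative sketch only: visualize the decision regions of a fitted
    classifier with plot_classifier. The toy dataset and the scikit-learn
    estimator below are assumptions for demonstration, not part of this module."""
    from sklearn.datasets import make_blobs          # assumes scikit-learn is installed
    from sklearn.linear_model import LogisticRegression
    X, y = make_blobs(n_samples=200, centers=3, n_features=2, random_state=0)
    clf = LogisticRegression().fit(X, y)
    plot_classifier(X, y, clf, margin=1.0, step_size=0.02)
    plt.show()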
|
[
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.contourf",
"numpy.arange",
"numpy.unique"
] |
[((2481, 2528), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'alpha': 'alpha', 'cmap': 'cmap'}), '(xx, yy, Z, alpha=alpha, cmap=cmap)\n', (2493, 2528), True, 'import matplotlib.pyplot as plt\n'), ((4126, 4181), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'mesh_output'], {'alpha': '(0.4)', 'cmap': 'cmap'}), '(xx, yy, mesh_output, alpha=0.4, cmap=cmap)\n', (4138, 4181), True, 'import matplotlib.pyplot as plt\n'), ((1214, 1243), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'step'], {}), '(x_min, x_max, step)\n', (1223, 1243), True, 'import numpy as np\n'), ((1245, 1274), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'step'], {}), '(y_min, y_max, step)\n', (1254, 1274), True, 'import numpy as np\n'), ((2625, 2637), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2634, 2637), True, 'import numpy as np\n'), ((2674, 2788), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'x[y == cl, 0]', 'y': 'x[y == cl, 1]', 'alpha': 'alpha', 'marker': 'markers[idx]', 'label': 'cl', 'edgecolor': '"""yellow"""'}), "(x=x[y == cl, 0], y=x[y == cl, 1], alpha=alpha, marker=markers[\n idx], label=cl, edgecolor='yellow')\n", (2685, 2788), True, 'import matplotlib.pyplot as plt\n'), ((4567, 4579), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (4576, 4579), True, 'import numpy as np\n'), ((4590, 4703), 'matplotlib.pyplot.scatter', 'plt.scatter', ([], {'x': 'X[y == cl, 0]', 'y': 'X[y == cl, 1]', 'alpha': 'alpha', 'marker': 'markers[i]', 'label': 'cl', 'edgecolors': '"""purple"""'}), "(x=X[y == cl, 0], y=X[y == cl, 1], alpha=alpha, marker=markers[i\n ], label=cl, edgecolors='purple')\n", (4601, 4703), True, 'import matplotlib.pyplot as plt\n'), ((4934, 5070), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_test[:, 0]', 'x_test[:, 1]'], {'c': '""""""', 'edgecolors': '"""purple"""', 'alpha': 'alpha', 'linewidths': '(1)', 'marker': '"""o"""', 's': '(100)', 'label': '"""Test Data"""'}), "(x_test[:, 0], x_test[:, 1], c='', edgecolors='purple', alpha=\n alpha, linewidths=1, marker='o', s=100, label='Test Data')\n", (4945, 5070), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import seaborn as sns
def p_x_given_y(y, mus, sigmas):
    # x | y is Gaussian: mean = mu_x + (s_xy / s_yy) * (y - mu_y),
    # variance = s_xx - s_xy**2 / s_yy; np.random.normal expects the standard deviation
    mu = mus[0] + sigmas[1, 0] / sigmas[1, 1] * (y - mus[1])
    var = sigmas[0, 0] - sigmas[1, 0] / sigmas[1, 1] * sigmas[1, 0]
    return np.random.normal(mu, np.sqrt(var))
def p_y_given_x(x, mus, sigmas):
    # y | x is Gaussian: mean = mu_y + (s_xy / s_xx) * (x - mu_x),
    # variance = s_yy - s_xy**2 / s_xx
    mu = mus[1] + sigmas[0, 1] / sigmas[0, 0] * (x - mus[0])
    var = sigmas[1, 1] - sigmas[0, 1] / sigmas[0, 0] * sigmas[0, 1]
    return np.random.normal(mu, np.sqrt(var))
def gibbs_sampling(mus, sigmas, iter=10000):
samples = np.zeros((iter, 2))
y = np.random.rand() * 10
for i in range(iter):
x = p_x_given_y(y, mus, sigmas)
y = p_y_given_x(x, mus, sigmas)
samples[i, :] = [x, y]
return samples
if __name__ == "__main__":
mus = np.array([5, 5])
sigmas = np.array([[1, 0.9], [0.9, 1]])
samples = gibbs_sampling(mus, sigmas)
sns.jointplot(samples[:, 0], samples[:, 1])
|
[
"numpy.zeros",
"numpy.array",
"numpy.random.normal",
"seaborn.jointplot",
"numpy.random.rand"
] |
[((218, 245), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (234, 245), True, 'import numpy as np\n'), ((423, 450), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (439, 450), True, 'import numpy as np\n'), ((512, 531), 'numpy.zeros', 'np.zeros', (['(iter, 2)'], {}), '((iter, 2))\n', (520, 531), True, 'import numpy as np\n'), ((759, 775), 'numpy.array', 'np.array', (['[5, 5]'], {}), '([5, 5])\n', (767, 775), True, 'import numpy as np\n'), ((789, 819), 'numpy.array', 'np.array', (['[[1, 0.9], [0.9, 1]]'], {}), '([[1, 0.9], [0.9, 1]])\n', (797, 819), True, 'import numpy as np\n'), ((867, 910), 'seaborn.jointplot', 'sns.jointplot', (['samples[:, 0]', 'samples[:, 1]'], {}), '(samples[:, 0], samples[:, 1])\n', (880, 910), True, 'import seaborn as sns\n'), ((540, 556), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (554, 556), True, 'import numpy as np\n')]
|
# coding: utf-8
# Creates:
# * cachito_fe_vel_comp.pdf
# In[1]:
import os
import numpy as np
import yaml
from astropy.io import ascii as asc
from astropy.time import Time
import astropy.units as u
import astropy.constants as c
from astropy.modeling import models, fitting
from matplotlib import pyplot as plt
#get_ipython().run_line_magic('matplotlib', 'inline')
from utilities_az import supernova
# In[2]:
plt.style.use(['seaborn-paper', 'az-paper-onecol'])
# In[3]:
TEST_FILE_DIR = '../../data/line_info/testing/'
FIG_DIR = './'
DATA_DIR = '../../data/line_info'
# In[4]:
HA = 6563.0
SiII = 6355.0
FeII = 5169.0
IR_dates = Time(['2015-09-05','2015-10-05', '2015-10-10'])
# In[5]:
sn15oz = supernova.LightCurve2('asassn-15oz')
texpl = Time(sn15oz.jdexpl, format='jd')
# In[6]:
new_fit_cachito = asc.read(os.path.join(TEST_FILE_DIR, 'cachito.tab'))
# In[7]:
def calc_velocity(obs_wl, rest_wl):
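    # Non-relativistic Doppler relation: v = c * (lambda_obs / lambda_rest - 1)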
velocity = c.c*(obs_wl/rest_wl - 1)
return velocity
# In[8]:
phase_cachito = (Time(new_fit_cachito['date'])-texpl).value
velocity_cachito = -1*calc_velocity(new_fit_cachito['vel0'], HA).to(u.km/u.s).value
# In[9]:
#tbdata_feII = asc.read(os.path.join(DATA_DIR, 'FeII_multi.tab'))
#tbdata_feII.remove_columns(['vel1', 'vel_err_left_1', 'vel_err_right_1', 'vel_pew_1', 'vel_pew_err1'])
tbdata_feII = asc.read(os.path.join(DATA_DIR, 'FeII_5169.tab'))
tbdata_feII.rename_column('vel0', 'velocity')
tbdata_feII.rename_column('vel_err_left_0', 'vel_err_left')
tbdata_feII.rename_column('vel_err_right_0', 'vel_err_right')
tbdata_feII.rename_column('vel_pew_0', 'pew')
tbdata_feII.rename_column('vel_pew_err0', 'pew_err')
# In[10]:
phase_feII = (Time(tbdata_feII['date'])-texpl).value
velocity_feII = -1*calc_velocity(tbdata_feII['velocity'], FeII).to(u.km/u.s)
# In[15]:
fig = plt.figure()
fig.subplotpars.update(left=.17, bottom=0.23)
ax_Fe = fig.add_subplot(1,1,1)
ax_Fe.plot((Time(new_fit_cachito['date'])-texpl).value, -1*calc_velocity(new_fit_cachito['vel0'], SiII).to(u.km/u.s)/1000, '^', label='Cachito (as SiII 6533)')
ax_Fe.plot(phase_feII, velocity_feII/1000, 'o', label='FeII (5169)')
ax_Fe.set_xticks(np.arange(0, 90, 10))
ax_Fe.legend()
ax_Fe.set_ylim(5, 11)
ax_Fe.set_xlim(0, 40)
ax_Fe.set_xlabel('Phase (day)')
ax_Fe.set_ylabel('Velocity (1000 km/s)')
plt.savefig(os.path.join(FIG_DIR, 'cachito_fe_vel_comp.pdf'))
|
[
"astropy.time.Time",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.arange",
"utilities_az.supernova.LightCurve2",
"os.path.join"
] |
[((426, 477), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['seaborn-paper', 'az-paper-onecol']"], {}), "(['seaborn-paper', 'az-paper-onecol'])\n", (439, 477), True, 'from matplotlib import pyplot as plt\n'), ((660, 708), 'astropy.time.Time', 'Time', (["['2015-09-05', '2015-10-05', '2015-10-10']"], {}), "(['2015-09-05', '2015-10-05', '2015-10-10'])\n", (664, 708), False, 'from astropy.time import Time\n'), ((734, 770), 'utilities_az.supernova.LightCurve2', 'supernova.LightCurve2', (['"""asassn-15oz"""'], {}), "('asassn-15oz')\n", (755, 770), False, 'from utilities_az import supernova\n'), ((779, 811), 'astropy.time.Time', 'Time', (['sn15oz.jdexpl'], {'format': '"""jd"""'}), "(sn15oz.jdexpl, format='jd')\n", (783, 811), False, 'from astropy.time import Time\n'), ((1864, 1876), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1874, 1876), True, 'from matplotlib import pyplot as plt\n'), ((856, 898), 'os.path.join', 'os.path.join', (['TEST_FILE_DIR', '"""cachito.tab"""'], {}), "(TEST_FILE_DIR, 'cachito.tab')\n", (868, 898), False, 'import os\n'), ((1384, 1423), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""FeII_5169.tab"""'], {}), "(DATA_DIR, 'FeII_5169.tab')\n", (1396, 1423), False, 'import os\n'), ((2203, 2223), 'numpy.arange', 'np.arange', (['(0)', '(90)', '(10)'], {}), '(0, 90, 10)\n', (2212, 2223), True, 'import numpy as np\n'), ((2369, 2417), 'os.path.join', 'os.path.join', (['FIG_DIR', '"""cachito_fe_vel_comp.pdf"""'], {}), "(FIG_DIR, 'cachito_fe_vel_comp.pdf')\n", (2381, 2417), False, 'import os\n'), ((1047, 1076), 'astropy.time.Time', 'Time', (["new_fit_cachito['date']"], {}), "(new_fit_cachito['date'])\n", (1051, 1076), False, 'from astropy.time import Time\n'), ((1724, 1749), 'astropy.time.Time', 'Time', (["tbdata_feII['date']"], {}), "(tbdata_feII['date'])\n", (1728, 1749), False, 'from astropy.time import Time\n'), ((1967, 1996), 'astropy.time.Time', 'Time', (["new_fit_cachito['date']"], {}), "(new_fit_cachito['date'])\n", (1971, 1996), False, 'from astropy.time import Time\n')]
|
import numpy as np
from sklearn.metrics import roc_auc_score,jaccard_score
import cv2
from torch import nn
import torch.nn.functional as F
import math
from functools import wraps
import warnings
import weakref
from torch.optim.optimizer import Optimizer
class WeightedBCE(nn.Module):
def __init__(self, weights=[0.4, 0.6]):
super(WeightedBCE, self).__init__()
self.weights = weights
def forward(self, logit_pixel, truth_pixel):
# print("====",logit_pixel.size())
logit = logit_pixel.view(-1)
truth = truth_pixel.view(-1)
assert(logit.shape==truth.shape)
loss = F.binary_cross_entropy(logit, truth, reduction='none')
pos = (truth>0.5).float()
neg = (truth<0.5).float()
pos_weight = pos.sum().item() + 1e-12
neg_weight = neg.sum().item() + 1e-12
loss = (self.weights[0]*pos*loss/pos_weight + self.weights[1]*neg*loss/neg_weight).sum()
return loss
class WeightedDiceLoss(nn.Module):
def __init__(self, weights=[0.5, 0.5]): # W_pos=0.8, W_neg=0.2
super(WeightedDiceLoss, self).__init__()
self.weights = weights
def forward(self, logit, truth, smooth=1e-5):
batch_size = len(logit)
logit = logit.view(batch_size,-1)
truth = truth.view(batch_size,-1)
assert(logit.shape==truth.shape)
p = logit.view(batch_size,-1)
t = truth.view(batch_size,-1)
w = truth.detach()
w = w*(self.weights[1]-self.weights[0])+self.weights[0]
# p = w*(p*2-1) #convert to [0,1] --> [-1, 1]
# t = w*(t*2-1)
p = w*(p)
t = w*(t)
intersection = (p * t).sum(-1)
union = (p * p).sum(-1) + (t * t).sum(-1)
dice = 1 - (2*intersection + smooth) / (union +smooth)
# print "------",dice.data
loss = dice.mean()
return loss
class WeightedDiceBCE(nn.Module):
def __init__(self,dice_weight=1,BCE_weight=1):
super(WeightedDiceBCE, self).__init__()
self.BCE_loss = WeightedBCE(weights=[0.5, 0.5])
self.dice_loss = WeightedDiceLoss(weights=[0.5, 0.5])
self.BCE_weight = BCE_weight
self.dice_weight = dice_weight
def _show_dice(self, inputs, targets):
inputs[inputs>=0.5] = 1
inputs[inputs<0.5] = 0
# print("2",np.sum(tmp))
targets[targets>0] = 1
targets[targets<=0] = 0
hard_dice_coeff = 1.0 - self.dice_loss(inputs, targets)
return hard_dice_coeff
def forward(self, inputs, targets):
# inputs = inputs.contiguous().view(-1)
# targets = targets.contiguous().view(-1)
# print "dice_loss", self.dice_loss(inputs, targets)
# print "focal_loss", self.focal_loss(inputs, targets)
dice = self.dice_loss(inputs, targets)
BCE = self.BCE_loss(inputs, targets)
# print "dice",dice
# print "focal",focal
dice_BCE_loss = self.dice_weight * dice + self.BCE_weight * BCE
return dice_BCE_loss
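# Illustrative usage sketch (assumption: inputs are sigmoid probabilities in [0, 1] and
# targets are binary masks of the same shape; the tensor sizes are arbitrary).
def _example_weighted_dice_bce():
    import torch  # only torch.nn / torch.nn.functional are imported at module level
    criterion = WeightedDiceBCE(dice_weight=0.5, BCE_weight=0.5)
    probs = torch.sigmoid(torch.randn(4, 1, 64, 64))      # fake predictions
    masks = (torch.rand(4, 1, 64, 64) > 0.5).float()        # fake binary ground truth
    return criterion(probs, masks)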
def auc_on_batch(masks, pred):
'''Computes the mean Area Under ROC Curve over a batch during training'''
aucs = []
    for i in range(pred.shape[0]):
prediction = pred[i][0].cpu().detach().numpy()
# print("www",np.max(prediction), np.min(prediction))
mask = masks[i].cpu().detach().numpy()
# print("rrr",np.max(mask), np.min(mask))
aucs.append(roc_auc_score(mask.reshape(-1), prediction.reshape(-1)))
return np.mean(aucs)
def iou_on_batch(masks, pred):
    '''Computes the mean IoU (Jaccard index) over a batch during training'''
ious = []
for i in range(pred.shape[0]):
pred_tmp = pred[i][0].cpu().detach().numpy()
# print("www",np.max(prediction), np.min(prediction))
mask_tmp = masks[i].cpu().detach().numpy()
pred_tmp[pred_tmp>=0.5] = 1
pred_tmp[pred_tmp<0.5] = 0
# print("2",np.sum(tmp))
mask_tmp[mask_tmp>0] = 1
mask_tmp[mask_tmp<=0] = 0
# print("rrr",np.max(mask), np.min(mask))
ious.append(jaccard_score(mask_tmp.reshape(-1), pred_tmp.reshape(-1)))
return np.mean(ious)
def dice_coef(y_true, y_pred):
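    # Soft Dice coefficient: (2 * |A.B| + smooth) / (|A| + |B| + smooth); smooth avoids 0/0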
smooth = 1e-5
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
intersection = np.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_on_batch(masks, pred):
    '''Computes the mean Dice coefficient over a batch during training'''
dices = []
for i in range(pred.shape[0]):
pred_tmp = pred[i][0].cpu().detach().numpy()
# print("www",np.max(prediction), np.min(prediction))
mask_tmp = masks[i].cpu().detach().numpy()
pred_tmp[pred_tmp>=0.5] = 1
pred_tmp[pred_tmp<0.5] = 0
# print("2",np.sum(tmp))
mask_tmp[mask_tmp>0] = 1
mask_tmp[mask_tmp<=0] = 0
# print("rrr",np.max(mask), np.min(mask))
dices.append(dice_coef(mask_tmp, pred_tmp))
return np.mean(dices)
def save_on_batch(images1, masks, pred, names, vis_path):
    '''Saves thresholded predictions and ground-truth masks for a batch as images'''
for i in range(pred.shape[0]):
pred_tmp = pred[i][0].cpu().detach().numpy()
mask_tmp = masks[i].cpu().detach().numpy()
pred_tmp[pred_tmp>=0.5] = 255
pred_tmp[pred_tmp<0.5] = 0
mask_tmp[mask_tmp>0] = 255
mask_tmp[mask_tmp<=0] = 0
cv2.imwrite(vis_path+ names[i][:-4]+"_pred.jpg", pred_tmp)
cv2.imwrite(vis_path+names[i][:-4]+"_gt.jpg", mask_tmp)
class _LRScheduler(object):
def __init__(self, optimizer, last_epoch=-1):
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
# Initialize epoch and base learning rates
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `lr_scheduler.step()` is called after
# `optimizer.step()`
def with_counter(method):
if getattr(method, '_with_counter', False):
# `optimizer.step()` has already been replaced, return.
return method
# Keep a weak reference to the optimizer instance to prevent
# cyclic references.
instance_ref = weakref.ref(method.__self__)
# Get the unbound method for the same purpose.
func = method.__func__
cls = instance_ref().__class__
del method
@wraps(func)
def wrapper(*args, **kwargs):
instance = instance_ref()
instance._step_count += 1
wrapped = func.__get__(instance, cls)
return wrapped(*args, **kwargs)
# Note that the returned function here is no longer a bound method,
# so attributes like `__func__` and `__self__` no longer exist.
wrapper._with_counter = True
return wrapper
self.optimizer.step = with_counter(self.optimizer.step)
self.optimizer._step_count = 0
self._step_count = 0
self.step()
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_last_lr(self):
""" Return last computed learning rate by current scheduler.
"""
return self._last_lr
def get_lr(self):
# Compute learning rate using chainable form of the scheduler
raise NotImplementedError
def step(self, epoch=None):
# Raise a warning if old pattern is detected
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.optimizer.step, "_with_counter"):
warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
"initialization. Please, make sure to call `optimizer.step()` before "
"`lr_scheduler.step()`. See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
# Just check if there were two first lr_scheduler.step() calls before optimizer.step()
elif self.optimizer._step_count < 1:
warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
"In PyTorch 1.1.0 and later, you should call them in the opposite order: "
"`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
"will result in PyTorch skipping the first value of the learning rate schedule. "
"See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
self._step_count += 1
class _enable_get_lr_call:
def __init__(self, o):
self.o = o
def __enter__(self):
self.o._get_lr_called_within_step = True
return self
def __exit__(self, type, value, traceback):
self.o._get_lr_called_within_step = False
return self
with _enable_get_lr_call(self):
if epoch is None:
self.last_epoch += 1
values = self.get_lr()
else:
self.last_epoch = epoch
if hasattr(self, "_get_closed_form_lr"):
values = self._get_closed_form_lr()
else:
values = self.get_lr()
for param_group, lr in zip(self.optimizer.param_groups, values):
param_group['lr'] = lr
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
class CosineAnnealingWarmRestarts(_LRScheduler):
r"""Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}`
is the number of epochs since the last restart and :math:`T_{i}` is the number
of epochs between two warm restarts in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
\cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right)
When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`.
When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_0 (int): Number of iterations for the first restart.
T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1.
eta_min (float, optional): Minimum learning rate. Default: 0.
last_epoch (int, optional): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1):
if T_0 <= 0 or not isinstance(T_0, int):
raise ValueError("Expected positive integer T_0, but got {}".format(T_0))
if T_mult < 1 or not isinstance(T_mult, int):
raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult))
self.T_0 = T_0
self.T_i = T_0
self.T_mult = T_mult
self.eta_min = eta_min
super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch)
self.T_cur = self.last_epoch
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", DeprecationWarning)
return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2
for base_lr in self.base_lrs]
def step(self, epoch=None):
"""Step could be called after every batch update
Example:
>>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
>>> iters = len(dataloader)
>>> for epoch in range(20):
>>> for i, sample in enumerate(dataloader):
>>> inputs, labels = sample['inputs'], sample['labels']
>>> scheduler.step(epoch + i / iters)
>>> optimizer.zero_grad()
>>> outputs = net(inputs)
>>> loss = criterion(outputs, labels)
>>> loss.backward()
>>> optimizer.step()
This function can be called in an interleaved way.
Example:
>>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult)
>>> for epoch in range(20):
>>> scheduler.step()
>>> scheduler.step(26)
>>> scheduler.step() # scheduler.step(27), instead of scheduler(20)
"""
if epoch is None and self.last_epoch < 0:
epoch = 0
if epoch is None:
epoch = self.last_epoch + 1
self.T_cur = self.T_cur + 1
if self.T_cur >= self.T_i:
self.T_cur = self.T_cur - self.T_i
self.T_i = self.T_i * self.T_mult
else:
if epoch < 0:
raise ValueError("Expected non-negative epoch, but got {}".format(epoch))
if epoch >= self.T_0:
if self.T_mult == 1:
self.T_cur = epoch % self.T_0
else:
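                    # The n-th restart happens after T_0*(T_mult**n - 1)/(T_mult - 1)
                    # epochs (a geometric series), so the number of completed cycles
                    # is n = floor(log_{T_mult}(epoch*(T_mult - 1)/T_0 + 1)).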
n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult))
self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1)
self.T_i = self.T_0 * self.T_mult ** (n)
else:
self.T_i = self.T_0
self.T_cur = epoch
self.last_epoch = math.floor(epoch)
class _enable_get_lr_call:
def __init__(self, o):
self.o = o
def __enter__(self):
self.o._get_lr_called_within_step = True
return self
def __exit__(self, type, value, traceback):
self.o._get_lr_called_within_step = False
return self
with _enable_get_lr_call(self):
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
|
[
"torch.nn.functional.binary_cross_entropy",
"numpy.sum",
"cv2.imwrite",
"math.floor",
"numpy.mean",
"math.cos",
"functools.wraps",
"warnings.warn",
"math.log",
"weakref.ref"
] |
[((3473, 3486), 'numpy.mean', 'np.mean', (['aucs'], {}), '(aucs)\n', (3480, 3486), True, 'import numpy as np\n'), ((4124, 4137), 'numpy.mean', 'np.mean', (['ious'], {}), '(ious)\n', (4131, 4137), True, 'import numpy as np\n'), ((4271, 4298), 'numpy.sum', 'np.sum', (['(y_true_f * y_pred_f)'], {}), '(y_true_f * y_pred_f)\n', (4277, 4298), True, 'import numpy as np\n'), ((5000, 5014), 'numpy.mean', 'np.mean', (['dices'], {}), '(dices)\n', (5007, 5014), True, 'import numpy as np\n'), ((628, 682), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['logit', 'truth'], {'reduction': '"""none"""'}), "(logit, truth, reduction='none')\n", (650, 682), True, 'import torch.nn.functional as F\n'), ((5442, 5503), 'cv2.imwrite', 'cv2.imwrite', (["(vis_path + names[i][:-4] + '_pred.jpg')", 'pred_tmp'], {}), "(vis_path + names[i][:-4] + '_pred.jpg', pred_tmp)\n", (5453, 5503), False, 'import cv2\n'), ((5509, 5568), 'cv2.imwrite', 'cv2.imwrite', (["(vis_path + names[i][:-4] + '_gt.jpg')", 'mask_tmp'], {}), "(vis_path + names[i][:-4] + '_gt.jpg', mask_tmp)\n", (5520, 5568), False, 'import cv2\n'), ((15174, 15191), 'math.floor', 'math.floor', (['epoch'], {}), '(epoch)\n', (15184, 15191), False, 'import math\n'), ((6976, 7004), 'weakref.ref', 'weakref.ref', (['method.__self__'], {}), '(method.__self__)\n', (6987, 7004), False, 'import weakref\n'), ((7179, 7190), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (7184, 7190), False, 'from functools import wraps\n'), ((12852, 12983), 'warnings.warn', 'warnings.warn', (['"""To get the last learning rate computed by the scheduler, please use `get_last_lr()`."""', 'DeprecationWarning'], {}), "(\n 'To get the last learning rate computed by the scheduler, please use `get_last_lr()`.'\n , DeprecationWarning)\n", (12865, 12983), False, 'import warnings\n'), ((4342, 4358), 'numpy.sum', 'np.sum', (['y_true_f'], {}), '(y_true_f)\n', (4348, 4358), True, 'import numpy as np\n'), ((4361, 4377), 'numpy.sum', 'np.sum', (['y_pred_f'], {}), '(y_pred_f)\n', (4367, 4377), True, 'import numpy as np\n'), ((8904, 9205), 'warnings.warn', 'warnings.warn', (['"""Seems like `optimizer.step()` has been overridden after learning rate scheduler initialization. Please, make sure to call `optimizer.step()` before `lr_scheduler.step()`. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate"""', 'UserWarning'], {}), "(\n 'Seems like `optimizer.step()` has been overridden after learning rate scheduler initialization. Please, make sure to call `optimizer.step()` before `lr_scheduler.step()`. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate'\n , UserWarning)\n", (8917, 9205), False, 'import warnings\n'), ((9460, 9876), 'warnings.warn', 'warnings.warn', (['"""Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate"""', 'UserWarning'], {}), "(\n 'Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. 
See more details at https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate'\n , UserWarning)\n", (9473, 9876), False, 'import warnings\n'), ((14836, 14899), 'math.log', 'math.log', (['(epoch / self.T_0 * (self.T_mult - 1) + 1)', 'self.T_mult'], {}), '(epoch / self.T_0 * (self.T_mult - 1) + 1, self.T_mult)\n', (14844, 14899), False, 'import math\n'), ((13067, 13108), 'math.cos', 'math.cos', (['(math.pi * self.T_cur / self.T_i)'], {}), '(math.pi * self.T_cur / self.T_i)\n', (13075, 13108), False, 'import math\n')]
|
import unittest
import numpy as np
from scipy.optimize import root
from scipy.interpolate import interp1d
from scipy.stats import entropy, poisson
import warnings
from epipack.numeric_epi_models import (
DynamicBirthRate,
ConstantBirthRate,
DynamicLinearRate,
ConstantLinearRate,
DynamicQuadraticRate,
ConstantQuadraticRate,
EpiModel,
SISModel,
SIModel,
SIRModel,
SEIRModel,
SIRSModel,
)
from epipack.integrators import time_leap_ivp, time_leap_newton
from epipack.stochastic_epi_models import StochasticEpiModel
class EpiTest(unittest.TestCase):
def test_compartments(self):
epi = EpiModel(list("SEIR"))
assert(all([ i == epi.get_compartment_id(C) for i, C in enumerate("SEIR") ]))
assert(epi.get_compartment_id("E") == 1)
assert(epi.get_compartment(1) == "E")
def test_linear_rates(self):
epi = EpiModel(list("SEIR"))
epi.add_transition_processes([
("E", 1.0, "I"),
("I", 1.0, "R"),
])
linear_rates = [ ConstantLinearRate(1.0,1), ConstantLinearRate(1.0,2) ]
linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
for e0, e1 in zip(linear_events, epi.linear_event_updates):
assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
epi = EpiModel(list("SEIR"))
_r0 = lambda t, y: 2+np.cos(t)
_r1 = lambda t, y: 2+np.sin(t)
epi.add_transition_processes([
("E", _r0, "I"),
("I", _r1, "R"),
])
linear_rates = [ DynamicLinearRate(_r0,1), DynamicLinearRate(_r1,2) ]
linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
for e0, e1 in zip(linear_events, epi.linear_event_updates):
assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
def test_adding_linear_rates(self):
epi = EpiModel(list("SEIR"))
epi.set_processes([
("E", 1.0, "I"),
])
epi.add_transition_processes([
("I", 1.0, "R"),
])
linear_rates = [ ConstantLinearRate(1.0,1), ConstantLinearRate(1.0,2) ]
linear_events = [ np.array([0,-1,+1,0]), np.array([0,0,-1,+1.]) ]
for r0, r1 in zip(linear_rates, epi.linear_rate_functions):
assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
for e0, e1 in zip(linear_events, epi.linear_event_updates):
assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
def test_quadratic_processes(self):
epi = EpiModel(list("SEIAR"))
quadratic_rates = [ ConstantQuadraticRate(1.0,2,0)]
quadratic_events = [ np.array([-1,+1,0,0,0.])]
epi.add_transmission_processes([
("S", "I", 1.0, "I", "E"),
])
for r0, r1 in zip(quadratic_rates, epi.quadratic_rate_functions):
assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
for e0, e1 in zip(quadratic_events, epi.quadratic_event_updates):
assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
def test_adding_quadratic_processes(self):
epi = EpiModel(list("SEIAR"))
quadratic_rates = [ ConstantQuadraticRate(1.0,2,0), ConstantQuadraticRate(1.0,3,0) ]
quadratic_events = [ np.array([-1,+1,0,0,0.]), np.array([-1,+1,0,0,0.]) ]
epi.set_processes([
("S", "I", 1.0, "I", "E"),
])
epi.add_transmission_processes([
("S", "A", 1.0, "A", "E"),
])
for r0, r1 in zip(quadratic_rates, epi.quadratic_rate_functions):
assert(r0(0,[0.1,0.2,0.3,0.4,0.5]) == r1(0, [0.1,0.2,0.3,0.4,0.5]))
for e0, e1 in zip(quadratic_events, epi.quadratic_event_updates):
assert(all([_e0==_e1 for _e0, _e1 in zip(e0, e1)]))
def test_SIS_with_simulation_restart_and_euler(self):
N = 100
epi = SISModel(infection_rate=2,recovery_rate=1,initial_population_size=N)
epi.set_initial_conditions({'S':0.99*N,'I':0.01*N})
tt = np.linspace(0,100,2)
result = epi.integrate(tt,['S'])
assert(np.isclose(result['S'][-1],N/2))
tt = np.linspace(0,100,1000)
result = epi.integrate_and_return_by_index(tt,['S'],integrator='euler')
assert(np.isclose(result[0,-1],N/2))
def test_repeated_simulation(self):
N = 100
epi = SISModel(infection_rate=2,recovery_rate=1,initial_population_size=N)
epi.set_initial_conditions({'S':0.99*N,'I':0.01*N})
tt = np.linspace(0,100,100)
old_t = tt[0]
for it, t in enumerate(tt[1:]):
result = epi.integrate_and_return_by_index([old_t,t],integrator='euler',adopt_final_state=True)
old_t = t
assert(np.isclose(result[0,-1],N/2))
def test_birth_death(self):
epi = EpiModel(list("SIR"))
R0 = 2
rho = 1
mu = 0.2
eta = R0 * rho
with self.assertWarns(UserWarning):
epi.set_processes([
("S", "I", eta, "I", "I"),
("I", rho, "R"),
(None, mu, "S"),
("S", mu, None),
("R", mu, None),
("I", mu, None),
])
epi.set_initial_conditions({'S': 0.8, 'I':0.2 })
t = [0,1000]
res = epi.integrate(t)
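        # The integration should settle at the endemic equilibrium of the SIR model
        # with demography: S* = (mu+rho)/eta and I* = mu*(eta-mu-rho)/(eta*(mu+rho)).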
assert(np.isclose(res['S'][-1],(mu+rho)/eta))
assert(np.isclose(res['I'][-1],mu/eta*(eta-mu-rho)/(mu+rho)))
def test_dynamic_birth(self):
A = "A"
epi = EpiModel([A])
epi.set_initial_conditions({A:1})
with self.assertWarns(UserWarning):
epi.set_processes([
(None, lambda t, y: 2*t, A),
])
res = epi.integrate([0,5])
assert(np.isclose(res[A][-1],5**2+1))
def test_correcting_for_declining_pop_size(self):
A, B = list("AB")
epi = EpiModel([A, B],10,correct_for_dynamical_population_size=True)
epi.add_transition_processes([
#(None, 0.1, A),
])
epi.add_fusion_processes([
(A, B, 1, B),
])
epi.set_initial_conditions({B:4, A:6})
tt = np.linspace(0,30)
result = epi.integrate(tt)
#from matplotlib import pyplot as pl
#pl.plot(tt, result[A], label=A)
#pl.plot(tt, result[B], label=B)
epi.correct_for_dynamical_population_size = False
result = epi.integrate(tt)
#pl.plot(tt, result[A], label=A)
#pl.plot(tt, result[B], label=B)
#pl.legend()
#pl.show()
def test_fusion_and_adding_rates(self):
A, B, C = list("ABC")
epi = EpiModel(list("ABC"))
# this should not raise a warning that rates do not sum to zero
# as it will be actively suppressed
epi.add_fusion_processes([
(A, B, 1, C),
])
with self.assertWarns(UserWarning):
# this should raise a warning that rates do not sum to zero
epi.add_quadratic_events([
((A, B), 1, [(C, -1),(A, +1)]),
])
# now rates should sum to zero
epi.add_quadratic_events([
((A, B), 1, [(B, +1)]),
])
with self.assertWarns(UserWarning):
# this should raise a warning that rates do not sum to zero
epi.add_linear_events([
((A,), 1, [(B,-1)])
])
def test_initial_condition_warnings(self):
A, B, C = list("ABC")
epi = EpiModel(list("ABC"))
with self.assertWarns(UserWarning):
# this should raise a warning that rates do not sum to zero
epi.set_initial_conditions({A:0.1,B:0.2})
with self.assertWarns(UserWarning):
# this should raise a warning that initial conditions were set twice
epi.set_initial_conditions([(A,0.1),(A,0.2)])
def test_custom_models(self):
S, I, R = list("SIR")
eta = 1
epi = SIModel(eta)
epi.set_initial_conditions({"S":0.99, "I":0.01})
epi.integrate([0,1000],adopt_final_state=True)
assert(np.isclose(epi.y0[0],0))
eta = 2
rho = 1
epi = SIRModel(eta,rho)
S0 = 0.99
epi.set_initial_conditions({S:S0, I:1-S0})
R0 = eta/rho
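        # Final-size relation for the SIR model: the outbreak size R_inf solves
        # 1 - R_inf - S0*exp(-R0*R_inf) = 0, found numerically with `root` below.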
Rinf = lambda x: 1-x-S0*np.exp(-x*R0)
res = epi.integrate([0,100])
SIR_theory = root(Rinf,0.5).x[0]
assert(np.isclose(res[R][-1],SIR_theory))
omega = 1
epi = SEIRModel(eta,rho,omega)
epi.set_initial_conditions({S:S0, I:1-S0})
res = epi.integrate([0,100])
assert(np.isclose(res[R][-1],SIR_theory))
#======================
epi = SISModel(eta, rho, initial_population_size=100)
epi.set_initial_conditions({S: 99, I:1 })
tt = np.linspace(0,1000,2)
result = epi.integrate(tt)
assert(np.isclose(result[S][-1],50))
epi = SIRSModel(eta, rho, omega)
epi.set_initial_conditions({S: 0.99, I:0.01 })
tt = np.linspace(0,1000,2)
result = epi.integrate(tt)
assert(np.isclose(result[R][-1],(1-rho/eta)/(1+omega/rho)))
def test_inference_of_temporal_dependence(self,plot=False):
data = np.array([
(1.0, 2.00),
(10000.0, 2.00),
(10001.0, -2.00),
])
times, rates = data[:,0], data[:,1]
f = interp1d(times, rates, kind='linear')
def infection_rate(t,y):
return f(t)
S, I = list("SI")
N = 100
rec = 1
model = EpiModel([S,I], N)
# first, initialize the time to t0 = 1, so
# column sum tests do not fail
model.set_initial_conditions({S:99,I:1},initial_time=1)
# Here, the function will fail to evaluate time dependence
# but will warn the user that there were errors in time
# evaluation.
self.assertWarns(UserWarning,model.set_processes,
[
(S, I, infection_rate, I, I),
(I, infection_rate, S),
],
)
assert(not model.rates_have_explicit_time_dependence)
assert(model.rates_have_functional_dependence)
# this should warn the user that rates are functionally dependent
        # but that no temporal dependence could be inferred, so in case
# they know that there's a time dependence, they have to state
# that explicitly
self.assertWarns(UserWarning,model.simulate,tmax=2)
model.set_initial_conditions({S:99,I:1},initial_time=1)
# here, the time dependence is given explicitly and so
# the warning will not be shown
model.simulate(tmax=2,rates_have_explicit_time_dependence=True)
def test_temporal_gillespie(self,plot=False):
scl = 40
def R0(t,y=None):
return 4+np.cos(t*scl)
S, I = list("SI")
N = 100
rec = 1
model = EpiModel([S,I], N)
model.set_processes([
(S, I, R0, I, I),
(I, rec, S),
])
I0 = 1
S0 = N - I0
model.set_initial_conditions({
S: S0,
I: I0,
})
taus = []
N_sample = 10000
for sample in range(N_sample):
tau, _ = model.get_time_leap_and_proposed_compartment_changes(0)
taus.append(tau)
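        # Waiting-time density of the first event for the time-varying total rate
        # lambda(t) = R0(t)*S0*I0/N + I0*rec: I(t) is the integral of R0 over [0, t],
        # I2(t) the cumulative rate, and pdf(t) = lambda(t)*exp(-I2(t)) is the
        # inhomogeneous-Poisson first-event density the sampled taus are checked against.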
I = lambda t: (4*t + 1/scl*np.sin(t*scl))
I2 = lambda t: I(t)*S0*I0/N+I0*rec*t
pdf = lambda t: (R0(t)*S0*I0/N + I0*rec) * np.exp(-I2(t))
measured, bins = np.histogram(taus,bins=100,density=True)
theory = [ np.exp(-I2(bins[i-1]))-np.exp(-I2(bins[i])) for i in range(1,len(bins)) if measured[i-1] > 0]
experi = [ measured[i-1] for i in range(1,len(bins)) if measured[i-1] > 0]
# make sure the kullback-leibler divergence is below some threshold
if plot: # pragma: no cover
import matplotlib.pyplot as pl
pl.figure()
pl.hist(taus,bins=100,density=True)
tt = np.linspace(0,1,100)
pl.plot(tt, pdf(tt))
pl.yscale('log')
pl.figure()
pl.hist(taus,bins=100,density=True)
tt = np.linspace(0,1,100)
pl.plot(tt, pdf(tt))
pl.show()
assert(entropy(theory, experi) < 0.01)
def test_temporal_gillespie_repeated_simulation(self,plot=False):
scl = 40
def R0(t,y=None):
return 4+np.cos(t*scl)
S, I = list("SI")
N = 100
rec = 1
model = EpiModel([S,I], N)
model.set_processes([
(S, I, R0, I, I),
(I, rec, S),
])
I0 = 1
S0 = N - I0
model.set_initial_conditions({
S: S0,
I: I0,
})
taus = []
N_sample = 10000
if plot:
from tqdm import tqdm
else:
tqdm = lambda x: x
tt = np.linspace(0,1,100)
for sample in tqdm(range(N_sample)):
tau = None
model.set_initial_conditions({
S: S0,
I: I0,
})
for _t in tt[1:]:
time, result = model.simulate(_t,adopt_final_state=True)
#print(time, result['I'])
if result['I'][-1] != I0:
tau = time[1]
break
#print()
if tau is not None:
taus.append(tau)
I = lambda t: (4*t + 1/scl*np.sin(t*scl))
I2 = lambda t: I(t)*S0*I0/N+I0*rec*t
pdf = lambda t: (R0(t)*S0*I0/N + I0*rec) * np.exp(-I2(t))
measured, bins = np.histogram(taus,bins=100,density=True)
theory = [ np.exp(-I2(bins[i-1]))-np.exp(-I2(bins[i])) for i in range(1,len(bins)) if measured[i-1] > 0]
experi = [ measured[i-1] for i in range(1,len(bins)) if measured[i-1] > 0]
# make sure the kullback-leibler divergence is below some threshold
if plot:
import matplotlib.pyplot as pl
pl.figure()
pl.hist(taus,bins=100,density=True)
tt = np.linspace(0,1,100)
pl.plot(tt, pdf(tt))
pl.yscale('log')
pl.figure()
pl.hist(taus,bins=100,density=True)
tt = np.linspace(0,1,100)
pl.plot(tt, pdf(tt))
pl.show()
assert(entropy(theory, experi) < 0.01)
def test_stochastic_well_mixed(self):
S, E, I, R = list("SEIR")
N = 75000
tmax = 100
model = EpiModel([S,E,I,R],N)
model.set_processes([
( S, I, 2, E, I ),
( I, 1, R),
( E, 1, I),
])
model.set_initial_conditions({S: N-100, I: 100})
tt = np.linspace(0,tmax,10000)
result_int = model.integrate(tt)
t, result_sim = model.simulate(tmax,sampling_dt=1,return_compartments=[S, R])
model = StochasticEpiModel([S,E,I,R],N)
model.set_link_transmission_processes([
( I, S, 2, I, E ),
])
model.set_node_transition_processes([
( I, 1, R),
( E, 1, I),
])
model.set_random_initial_conditions({S: N-100, I: 100})
t, result_sim2 = model.simulate(tmax,sampling_dt=1,return_compartments=[S, R])
for c, res in result_sim2.items():
#print(c, np.abs(1-res[-1]/result_int[c][-1]))
#print(c, np.abs(1-res[-1]/result_sim[c][-1]))
assert(np.abs(1-res[-1]/result_int[c][-1]) < 0.05)
assert(np.abs(1-res[-1]/result_sim[c][-1]) < 0.05)
def test_stochastic_fission(self):
A, B, C = list("ABC")
N = 10
epi = EpiModel([A,B,C],N,correct_for_dynamical_population_size=True)
epi.add_fusion_processes([
(A, B, 1.0, C),
])
epi.set_initial_conditions({ A: 5, B: 5})
t, res = epi.simulate(1e9)
assert(res[C][-1] == 5)
def test_birth_stochastics(self):
A, B, C = list("ABC")
epi = EpiModel([A,B,C],10,correct_for_dynamical_population_size=True)
epi.set_initial_conditions({A:5, B:5})
epi.set_processes([
(None, 1, A),
(A, 1, B),
(B, 1, None),
],allow_nonzero_column_sums=True)
_, res = epi.simulate(200,sampling_dt=0.05)
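        # At stationarity each compartment behaves like an M/M/inf queue with unit
        # arrival and per-capita exit rates, so the sampled occupation numbers should
        # be Poisson distributed; the histogram below is compared to that pmf via
        # the Kullback-Leibler divergence.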
vals = np.concatenate([res[A][_>10], res[B][_>10]])
rv = poisson(vals.mean())
measured, bins = np.histogram(vals,bins=np.arange(10)-0.5,density=True)
theory = [ rv.pmf(i) for i in range(0,len(bins)-1) if measured[i] > 0]
experi = [ measured[i] for i in range(0,len(bins)-1) if measured[i] > 0]
# make sure the kullback-leibler divergence is below some threshold
#for a, b in zip(theory, experi):
# print(a,b)
assert(entropy(theory, experi) < 1e-2)
assert(np.median(res[A]) == 1)
def test_sampling_callback(self):
epi = SIModel(infection_rate=5.0,initial_population_size=100)
epi.set_initial_conditions({"S":90,"I":10})
self.assertRaises(ValueError,epi.simulate,1,sampling_callback=lambda x: x)
i = 0
samples = []
def sampled():
samples.append(epi.y0[0])
t, res = epi.simulate(10,sampling_dt=0.1,sampling_callback=sampled)
assert(all([a==b for a, b in zip(res['S'], samples)]))
def test_integral_solvers(self):
def get_event_rates(t, y):
return y * (0.05 + 0.03 * np.array([ np.cos(t), np.sin(t), np.cos(t)**2, np.sin(t)**2 ]))
rand = 0.834053
t0 = 1.0
y0 = np.array([0.1,0.2,0.3,0.4])
t_nwt = time_leap_newton(t0, y0, get_event_rates, rand)
t_ivp = time_leap_ivp(t0, y0, get_event_rates, rand)
expected = 30.76
numeric = np.array([t_nwt, t_ivp])
assert(np.all( np.abs(numeric-expected)/numeric < 1e-3) )
def test_integrate_until(self):
N = 100
epi = SIModel(infection_rate=5.0,initial_population_size=N)
epi.set_initial_conditions({"S":90,"I":10})
thresh = 0.5
iS = epi.get_compartment_id("S")
stop_condition = lambda t, y: thresh*N - y[iS]
t, res = epi.integrate_until(0,stop_condition,return_compartments=['S'])
assert(np.isclose(thresh*N,res['S'][-1]))
if __name__ == "__main__":
import sys
T = EpiTest()
T.test_fusion_and_adding_rates()
T.test_inference_of_temporal_dependence()
#T.test_integrate_until()
#T.test_integral_solvers()
#T.test_temporal_gillespie_repeated_simulation()
#T.test_sampling_callback()
#T.test_birth_stochastics()
#T.test_stochastic_fission()
#T.test_correcting_for_declining_pop_size()
#T.test_dynamic_birth()
#T.test_stochastic_well_mixed()
#T.test_temporal_gillespie()
#T.test_compartments()
#T.test_linear_rates()
#T.test_adding_linear_rates()
#T.test_quadratic_processes()
#T.test_adding_quadratic_processes()
#T.test_SIS_with_simulation_restart_and_euler()
#T.test_repeated_simulation()
#T.test_custom_models()
#T.test_birth_death()
#T.test_initial_condition_warnings()
|
[
"matplotlib.pyplot.yscale",
"numpy.abs",
"numpy.isclose",
"numpy.histogram",
"matplotlib.pyplot.figure",
"epipack.numeric_epi_models.ConstantLinearRate",
"numpy.sin",
"numpy.exp",
"numpy.arange",
"scipy.interpolate.interp1d",
"epipack.numeric_epi_models.ConstantQuadraticRate",
"numpy.linspace",
"epipack.numeric_epi_models.DynamicLinearRate",
"epipack.numeric_epi_models.SISModel",
"epipack.numeric_epi_models.SIRSModel",
"epipack.stochastic_epi_models.StochasticEpiModel",
"matplotlib.pyplot.show",
"numpy.median",
"epipack.integrators.time_leap_newton",
"numpy.cos",
"scipy.optimize.root",
"numpy.concatenate",
"epipack.numeric_epi_models.EpiModel",
"epipack.numeric_epi_models.SIModel",
"matplotlib.pyplot.hist",
"scipy.stats.entropy",
"epipack.numeric_epi_models.SEIRModel",
"numpy.array",
"epipack.integrators.time_leap_ivp",
"epipack.numeric_epi_models.SIRModel"
] |
[((4359, 4429), 'epipack.numeric_epi_models.SISModel', 'SISModel', ([], {'infection_rate': '(2)', 'recovery_rate': '(1)', 'initial_population_size': 'N'}), '(infection_rate=2, recovery_rate=1, initial_population_size=N)\n', (4367, 4429), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((4501, 4523), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(2)'], {}), '(0, 100, 2)\n', (4512, 4523), True, 'import numpy as np\n'), ((4578, 4612), 'numpy.isclose', 'np.isclose', (["result['S'][-1]", '(N / 2)'], {}), "(result['S'][-1], N / 2)\n", (4588, 4612), True, 'import numpy as np\n'), ((4625, 4650), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (4636, 4650), True, 'import numpy as np\n'), ((4744, 4776), 'numpy.isclose', 'np.isclose', (['result[0, -1]', '(N / 2)'], {}), '(result[0, -1], N / 2)\n', (4754, 4776), True, 'import numpy as np\n'), ((4846, 4916), 'epipack.numeric_epi_models.SISModel', 'SISModel', ([], {'infection_rate': '(2)', 'recovery_rate': '(1)', 'initial_population_size': 'N'}), '(infection_rate=2, recovery_rate=1, initial_population_size=N)\n', (4854, 4916), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((4988, 5012), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (4999, 5012), True, 'import numpy as np\n'), ((5220, 5252), 'numpy.isclose', 'np.isclose', (['result[0, -1]', '(N / 2)'], {}), '(result[0, -1], N / 2)\n', (5230, 5252), True, 'import numpy as np\n'), ((5845, 5887), 'numpy.isclose', 'np.isclose', (["res['S'][-1]", '((mu + rho) / eta)'], {}), "(res['S'][-1], (mu + rho) / eta)\n", (5855, 5887), True, 'import numpy as np\n'), ((5899, 5965), 'numpy.isclose', 'np.isclose', (["res['I'][-1]", '(mu / eta * (eta - mu - rho) / (mu + rho))'], {}), "(res['I'][-1], mu / eta * (eta - mu - rho) / (mu + rho))\n", (5909, 5965), True, 'import numpy as np\n'), ((6020, 6033), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[A]'], {}), '([A])\n', (6028, 6033), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((6270, 6304), 'numpy.isclose', 'np.isclose', (['res[A][-1]', '(5 ** 2 + 1)'], {}), '(res[A][-1], 5 ** 2 + 1)\n', (6280, 6304), True, 'import numpy as np\n'), ((6397, 6461), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[A, B]', '(10)'], {'correct_for_dynamical_population_size': '(True)'}), '([A, B], 10, correct_for_dynamical_population_size=True)\n', (6405, 6461), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((6687, 6705), 'numpy.linspace', 'np.linspace', (['(0)', '(30)'], {}), '(0, 30)\n', (6698, 6705), True, 'import numpy as np\n'), ((8545, 8557), 'epipack.numeric_epi_models.SIModel', 'SIModel', (['eta'], {}), '(eta)\n', (8552, 8557), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, 
DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((8685, 8709), 'numpy.isclose', 'np.isclose', (['epi.y0[0]', '(0)'], {}), '(epi.y0[0], 0)\n', (8695, 8709), True, 'import numpy as np\n'), ((8758, 8776), 'epipack.numeric_epi_models.SIRModel', 'SIRModel', (['eta', 'rho'], {}), '(eta, rho)\n', (8766, 8776), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((9006, 9040), 'numpy.isclose', 'np.isclose', (['res[R][-1]', 'SIR_theory'], {}), '(res[R][-1], SIR_theory)\n', (9016, 9040), True, 'import numpy as np\n'), ((9075, 9101), 'epipack.numeric_epi_models.SEIRModel', 'SEIRModel', (['eta', 'rho', 'omega'], {}), '(eta, rho, omega)\n', (9084, 9101), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((9203, 9237), 'numpy.isclose', 'np.isclose', (['res[R][-1]', 'SIR_theory'], {}), '(res[R][-1], SIR_theory)\n', (9213, 9237), True, 'import numpy as np\n'), ((9286, 9333), 'epipack.numeric_epi_models.SISModel', 'SISModel', (['eta', 'rho'], {'initial_population_size': '(100)'}), '(eta, rho, initial_population_size=100)\n', (9294, 9333), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((9399, 9422), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(2)'], {}), '(0, 1000, 2)\n', (9410, 9422), True, 'import numpy as np\n'), ((9471, 9500), 'numpy.isclose', 'np.isclose', (['result[S][-1]', '(50)'], {}), '(result[S][-1], 50)\n', (9481, 9500), True, 'import numpy as np\n'), ((9516, 9542), 'epipack.numeric_epi_models.SIRSModel', 'SIRSModel', (['eta', 'rho', 'omega'], {}), '(eta, rho, omega)\n', (9525, 9542), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((9613, 9636), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(2)'], {}), '(0, 1000, 2)\n', (9624, 9636), True, 'import numpy as np\n'), ((9685, 9747), 'numpy.isclose', 'np.isclose', (['result[R][-1]', '((1 - rho / eta) / (1 + omega / rho))'], {}), '(result[R][-1], (1 - rho / eta) / (1 + omega / rho))\n', (9695, 9747), True, 'import numpy as np\n'), ((9819, 9874), 'numpy.array', 'np.array', (['[(1.0, 2.0), (10000.0, 2.0), (10001.0, -2.0)]'], {}), '([(1.0, 2.0), (10000.0, 2.0), (10001.0, -2.0)])\n', (9827, 9874), True, 'import numpy as np\n'), ((9982, 10019), 'scipy.interpolate.interp1d', 'interp1d', (['times', 'rates'], {'kind': '"""linear"""'}), "(times, rates, kind='linear')\n", (9990, 10019), False, 'from scipy.interpolate import interp1d\n'), ((10153, 10172), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[S, I]', 'N'], {}), '([S, I], N)\n', (10161, 10172), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((11611, 11630), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[S, 
I]', 'N'], {}), '([S, I], N)\n', (11619, 11630), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((12249, 12291), 'numpy.histogram', 'np.histogram', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (12261, 12291), True, 'import numpy as np\n'), ((13250, 13269), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[S, I]', 'N'], {}), '([S, I], N)\n', (13258, 13269), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((13665, 13687), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (13676, 13687), True, 'import numpy as np\n'), ((14391, 14433), 'numpy.histogram', 'np.histogram', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (14403, 14433), True, 'import numpy as np\n'), ((15280, 15305), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[S, E, I, R]', 'N'], {}), '([S, E, I, R], N)\n', (15288, 15305), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((15509, 15536), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax', '(10000)'], {}), '(0, tmax, 10000)\n', (15520, 15536), True, 'import numpy as np\n'), ((15680, 15715), 'epipack.stochastic_epi_models.StochasticEpiModel', 'StochasticEpiModel', (['[S, E, I, R]', 'N'], {}), '([S, E, I, R], N)\n', (15698, 15715), False, 'from epipack.stochastic_epi_models import StochasticEpiModel\n'), ((16469, 16535), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[A, B, C]', 'N'], {'correct_for_dynamical_population_size': '(True)'}), '([A, B, C], N, correct_for_dynamical_population_size=True)\n', (16477, 16535), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((16818, 16885), 'epipack.numeric_epi_models.EpiModel', 'EpiModel', (['[A, B, C]', '(10)'], {'correct_for_dynamical_population_size': '(True)'}), '([A, B, C], 10, correct_for_dynamical_population_size=True)\n', (16826, 16885), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((17160, 17208), 'numpy.concatenate', 'np.concatenate', (['[res[A][_ > 10], res[B][_ > 10]]'], {}), '([res[A][_ > 10], res[B][_ > 10]])\n', (17174, 17208), True, 'import numpy as np\n'), ((17760, 17816), 'epipack.numeric_epi_models.SIModel', 'SIModel', ([], {'infection_rate': '(5.0)', 'initial_population_size': '(100)'}), '(infection_rate=5.0, initial_population_size=100)\n', (17767, 17816), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((18421, 18451), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.4]'], {}), '([0.1, 0.2, 0.3, 
0.4])\n', (18429, 18451), True, 'import numpy as np\n'), ((18465, 18512), 'epipack.integrators.time_leap_newton', 'time_leap_newton', (['t0', 'y0', 'get_event_rates', 'rand'], {}), '(t0, y0, get_event_rates, rand)\n', (18481, 18512), False, 'from epipack.integrators import time_leap_ivp, time_leap_newton\n'), ((18529, 18573), 'epipack.integrators.time_leap_ivp', 'time_leap_ivp', (['t0', 'y0', 'get_event_rates', 'rand'], {}), '(t0, y0, get_event_rates, rand)\n', (18542, 18573), False, 'from epipack.integrators import time_leap_ivp, time_leap_newton\n'), ((18617, 18641), 'numpy.array', 'np.array', (['[t_nwt, t_ivp]'], {}), '([t_nwt, t_ivp])\n', (18625, 18641), True, 'import numpy as np\n'), ((18776, 18830), 'epipack.numeric_epi_models.SIModel', 'SIModel', ([], {'infection_rate': '(5.0)', 'initial_population_size': 'N'}), '(infection_rate=5.0, initial_population_size=N)\n', (18783, 18830), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((19098, 19134), 'numpy.isclose', 'np.isclose', (['(thresh * N)', "res['S'][-1]"], {}), "(thresh * N, res['S'][-1])\n", (19108, 19134), True, 'import numpy as np\n'), ((1173, 1199), 'epipack.numeric_epi_models.ConstantLinearRate', 'ConstantLinearRate', (['(1.0)', '(1)'], {}), '(1.0, 1)\n', (1191, 1199), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((1200, 1226), 'epipack.numeric_epi_models.ConstantLinearRate', 'ConstantLinearRate', (['(1.0)', '(2)'], {}), '(1.0, 2)\n', (1218, 1226), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((1254, 1278), 'numpy.array', 'np.array', (['[0, -1, +1, 0]'], {}), '([0, -1, +1, 0])\n', (1262, 1278), True, 'import numpy as np\n'), ((1277, 1303), 'numpy.array', 'np.array', (['[0, 0, -1, +1.0]'], {}), '([0, 0, -1, +1.0])\n', (1285, 1303), True, 'import numpy as np\n'), ((1845, 1870), 'epipack.numeric_epi_models.DynamicLinearRate', 'DynamicLinearRate', (['_r0', '(1)'], {}), '(_r0, 1)\n', (1862, 1870), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((1871, 1896), 'epipack.numeric_epi_models.DynamicLinearRate', 'DynamicLinearRate', (['_r1', '(2)'], {}), '(_r1, 2)\n', (1888, 1896), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((1924, 1948), 'numpy.array', 'np.array', (['[0, -1, +1, 0]'], {}), '([0, -1, +1, 0])\n', (1932, 1948), True, 'import numpy as np\n'), ((1947, 1973), 'numpy.array', 'np.array', (['[0, 0, -1, +1.0]'], {}), '([0, 0, -1, +1.0])\n', (1955, 1973), True, 'import numpy as np\n'), ((2531, 2557), 'epipack.numeric_epi_models.ConstantLinearRate', 'ConstantLinearRate', (['(1.0)', '(1)'], {}), '(1.0, 1)\n', (2549, 2557), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, 
DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((2558, 2584), 'epipack.numeric_epi_models.ConstantLinearRate', 'ConstantLinearRate', (['(1.0)', '(2)'], {}), '(1.0, 2)\n', (2576, 2584), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((2612, 2636), 'numpy.array', 'np.array', (['[0, -1, +1, 0]'], {}), '([0, -1, +1, 0])\n', (2620, 2636), True, 'import numpy as np\n'), ((2635, 2661), 'numpy.array', 'np.array', (['[0, 0, -1, +1.0]'], {}), '([0, 0, -1, +1.0])\n', (2643, 2661), True, 'import numpy as np\n'), ((3048, 3080), 'epipack.numeric_epi_models.ConstantQuadraticRate', 'ConstantQuadraticRate', (['(1.0)', '(2)', '(0)'], {}), '(1.0, 2, 0)\n', (3069, 3080), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((3109, 3138), 'numpy.array', 'np.array', (['[-1, +1, 0, 0, 0.0]'], {}), '([-1, +1, 0, 0, 0.0])\n', (3117, 3138), True, 'import numpy as np\n'), ((3643, 3675), 'epipack.numeric_epi_models.ConstantQuadraticRate', 'ConstantQuadraticRate', (['(1.0)', '(2)', '(0)'], {}), '(1.0, 2, 0)\n', (3664, 3675), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((3675, 3707), 'epipack.numeric_epi_models.ConstantQuadraticRate', 'ConstantQuadraticRate', (['(1.0)', '(3)', '(0)'], {}), '(1.0, 3, 0)\n', (3696, 3707), False, 'from epipack.numeric_epi_models import DynamicBirthRate, ConstantBirthRate, DynamicLinearRate, ConstantLinearRate, DynamicQuadraticRate, ConstantQuadraticRate, EpiModel, SISModel, SIModel, SIRModel, SEIRModel, SIRSModel\n'), ((3737, 3766), 'numpy.array', 'np.array', (['[-1, +1, 0, 0, 0.0]'], {}), '([-1, +1, 0, 0, 0.0])\n', (3745, 3766), True, 'import numpy as np\n'), ((3763, 3792), 'numpy.array', 'np.array', (['[-1, +1, 0, 0, 0.0]'], {}), '([-1, +1, 0, 0, 0.0])\n', (3771, 3792), True, 'import numpy as np\n'), ((12653, 12664), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (12662, 12664), True, 'import matplotlib.pyplot as pl\n'), ((12677, 12714), 'matplotlib.pyplot.hist', 'pl.hist', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (12684, 12714), True, 'import matplotlib.pyplot as pl\n'), ((12730, 12752), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (12741, 12752), True, 'import numpy as np\n'), ((12796, 12812), 'matplotlib.pyplot.yscale', 'pl.yscale', (['"""log"""'], {}), "('log')\n", (12805, 12812), True, 'import matplotlib.pyplot as pl\n'), ((12825, 12836), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (12834, 12836), True, 'import matplotlib.pyplot as pl\n'), ((12849, 12886), 'matplotlib.pyplot.hist', 'pl.hist', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (12856, 12886), True, 'import matplotlib.pyplot as pl\n'), ((12902, 12924), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (12913, 12924), True, 'import numpy as np\n'), ((12968, 12977), 'matplotlib.pyplot.show', 
'pl.show', ([], {}), '()\n', (12975, 12977), True, 'import matplotlib.pyplot as pl\n'), ((12993, 13016), 'scipy.stats.entropy', 'entropy', (['theory', 'experi'], {}), '(theory, experi)\n', (13000, 13016), False, 'from scipy.stats import entropy, poisson\n'), ((14776, 14787), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (14785, 14787), True, 'import matplotlib.pyplot as pl\n'), ((14800, 14837), 'matplotlib.pyplot.hist', 'pl.hist', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (14807, 14837), True, 'import matplotlib.pyplot as pl\n'), ((14853, 14875), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (14864, 14875), True, 'import numpy as np\n'), ((14919, 14935), 'matplotlib.pyplot.yscale', 'pl.yscale', (['"""log"""'], {}), "('log')\n", (14928, 14935), True, 'import matplotlib.pyplot as pl\n'), ((14948, 14959), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (14957, 14959), True, 'import matplotlib.pyplot as pl\n'), ((14972, 15009), 'matplotlib.pyplot.hist', 'pl.hist', (['taus'], {'bins': '(100)', 'density': '(True)'}), '(taus, bins=100, density=True)\n', (14979, 15009), True, 'import matplotlib.pyplot as pl\n'), ((15025, 15047), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (15036, 15047), True, 'import numpy as np\n'), ((15091, 15100), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (15098, 15100), True, 'import matplotlib.pyplot as pl\n'), ((15116, 15139), 'scipy.stats.entropy', 'entropy', (['theory', 'experi'], {}), '(theory, experi)\n', (15123, 15139), False, 'from scipy.stats import entropy, poisson\n'), ((17636, 17659), 'scipy.stats.entropy', 'entropy', (['theory', 'experi'], {}), '(theory, experi)\n', (17643, 17659), False, 'from scipy.stats import entropy, poisson\n'), ((17683, 17700), 'numpy.median', 'np.median', (['res[A]'], {}), '(res[A])\n', (17692, 17700), True, 'import numpy as np\n'), ((1649, 1658), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (1655, 1658), True, 'import numpy as np\n'), ((1688, 1697), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1694, 1697), True, 'import numpy as np\n'), ((8971, 8986), 'scipy.optimize.root', 'root', (['Rinf', '(0.5)'], {}), '(Rinf, 0.5)\n', (8975, 8986), False, 'from scipy.optimize import root\n'), ((11522, 11537), 'numpy.cos', 'np.cos', (['(t * scl)'], {}), '(t * scl)\n', (11528, 11537), True, 'import numpy as np\n'), ((13161, 13176), 'numpy.cos', 'np.cos', (['(t * scl)'], {}), '(t * scl)\n', (13167, 13176), True, 'import numpy as np\n'), ((16260, 16299), 'numpy.abs', 'np.abs', (['(1 - res[-1] / result_int[c][-1])'], {}), '(1 - res[-1] / result_int[c][-1])\n', (16266, 16299), True, 'import numpy as np\n'), ((16323, 16362), 'numpy.abs', 'np.abs', (['(1 - res[-1] / result_sim[c][-1])'], {}), '(1 - res[-1] / result_sim[c][-1])\n', (16329, 16362), True, 'import numpy as np\n'), ((8898, 8913), 'numpy.exp', 'np.exp', (['(-x * R0)'], {}), '(-x * R0)\n', (8904, 8913), True, 'import numpy as np\n'), ((12098, 12113), 'numpy.sin', 'np.sin', (['(t * scl)'], {}), '(t * scl)\n', (12104, 12113), True, 'import numpy as np\n'), ((14240, 14255), 'numpy.sin', 'np.sin', (['(t * scl)'], {}), '(t * scl)\n', (14246, 14255), True, 'import numpy as np\n'), ((17287, 17300), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (17296, 17300), True, 'import numpy as np\n'), ((18665, 18691), 'numpy.abs', 'np.abs', (['(numeric - expected)'], {}), '(numeric - expected)\n', (18671, 18691), True, 'import numpy as np\n'), ((18313, 
18322), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (18319, 18322), True, 'import numpy as np\n'), ((18324, 18333), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (18330, 18333), True, 'import numpy as np\n'), ((18335, 18344), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (18341, 18344), True, 'import numpy as np\n'), ((18349, 18358), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (18355, 18358), True, 'import numpy as np\n')]
|
import random
import ctypes
import sys
import wgpu.backends.rs # noqa
import numpy as np
from pytest import skip
from testutils import run_tests, get_default_device
from testutils import can_use_wgpu_lib, is_ci
from renderutils import render_to_texture, render_to_screen # noqa
if not can_use_wgpu_lib:
skip("Skipping tests that need the wgpu lib", allow_module_level=True)
elif is_ci and sys.platform == "win32":
skip("These tests fail on dx12 for some reason", allow_module_level=True)
# %% 1D
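# Each test below follows the same pattern: fill a source texture with small
# random integers, run a compute shader that loads every texel from r_tex1,
# applies a simple per-channel transform and stores the result into r_tex2,
# then delegate upload, dispatch and validation to the _compute_texture helper
# (the "Compute and validate" call at the end of every test).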
def test_compute_tex_1d_rgba8uint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_1d<u32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_1d<rgba8uint,write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i: i32 = i32(index.x);
let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, vec4<u32>(color2));
}
"""
# Generate data
nx, ny, nz, nc = 64, 1, 1, 4
data1 = (ctypes.c_uint8 * nc * nx)()
for x in range(nx):
for c in range(nc):
data1[x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.rgba8uint,
wgpu.TextureDimension.d1,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_1d_rgba16sint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_1d<i32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_1d<rgba16sint,write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i: i32 = i32(index.x);
let color1 : vec4<i32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 128, 1, 1, 4
data1 = (ctypes.c_int16 * nc * nx)()
for x in range(nx):
for c in range(nc):
data1[x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.rgba16sint,
wgpu.TextureDimension.d1,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_1d_r32sint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_1d<i32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_1d<r32sint, write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i: i32 = i32(index.x);
let color1 : vec4<i32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<i32>(color1.x + i, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 256, 1, 1, 1
data1 = (ctypes.c_int32 * nc * nx)()
for x in range(nx):
for c in range(nc):
data1[x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.r32sint,
wgpu.TextureDimension.d1,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_1d_r32float():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_1d<f32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_1d<r32float,write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i: i32 = i32(index.x);
let color1 : vec4<f32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<f32>(color1.x + f32(i), color1.y + 1.0, color1.z * 2.0, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 256, 1, 1, 1
data1 = (ctypes.c_float * nc * nx)()
for x in range(nx):
for c in range(nc):
data1[x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.r32float,
wgpu.TextureDimension.d1,
(nx, ny, nz, nc),
data1,
)
# %% 2D
def test_compute_tex_2d_rgba8uint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_2d<u32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_2d<rgba8uint,write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i = vec2<i32>(index.xy);
let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, vec4<u32>(color2));
}
"""
# Generate data
nx, ny, nz, nc = 64, 8, 1, 4
data1 = (ctypes.c_uint8 * nc * nx * ny)()
for y in range(ny):
for x in range(nx):
for c in range(nc):
data1[y][x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.rgba8uint,
wgpu.TextureDimension.d2,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_2d_rgba16sint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_2d<i32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_2d<rgba16sint, write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i = vec2<i32>(index.xy);
let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 128, 8, 1, 4
data1 = (ctypes.c_int16 * nc * nx * ny)()
for y in range(ny):
for x in range(nx):
for c in range(nc):
data1[y][x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.rgba16sint,
wgpu.TextureDimension.d2,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_2d_r32sint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_2d<i32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_2d<r32sint, write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i = vec2<i32>(index.xy);
let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 256, 8, 1, 1
data1 = (ctypes.c_int32 * nc * nx * ny)()
for y in range(ny):
for x in range(nx):
for c in range(nc):
data1[y][x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.r32sint,
wgpu.TextureDimension.d2,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_2d_r32float():
compute_shader = """
[[group(0), binding(0)]]
        var r_tex1: texture_2d<f32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_2d<r32float, write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i = vec2<i32>(index.xy);
let color1: vec4<f32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<f32>(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 256, 8, 1, 1
data1 = (ctypes.c_float * nc * nx * ny)()
for y in range(ny):
for x in range(nx):
for c in range(nc):
data1[y][x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.r32float,
wgpu.TextureDimension.d2,
(nx, ny, nz, nc),
data1,
)
# %% 3D
def test_compute_tex_3d_rgba8uint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_3d<u32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_3d<rgba8uint,write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i = vec3<i32>(index);
let color1 = vec4<i32>(textureLoad(r_tex1, i, 0));
let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, vec4<u32>(color2));
}
"""
# Generate data
nx, ny, nz, nc = 64, 8, 6, 4
data1 = (ctypes.c_uint8 * nc * nx * ny * nz)()
for z in range(nz):
for y in range(ny):
for x in range(nx):
for c in range(nc):
data1[z][y][x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.rgba8uint,
wgpu.TextureDimension.d3,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_3d_rgba16sint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_3d<i32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_3d<rgba16sint,write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i = vec3<i32>(index);
let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 128, 8, 6, 4
data1 = (ctypes.c_int16 * nc * nx * ny * nz)()
for z in range(nz):
for y in range(ny):
for x in range(nx):
for c in range(nc):
data1[z][y][x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.rgba16sint,
wgpu.TextureDimension.d3,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_3d_r32sint():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_3d<i32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_3d<r32sint,write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i = vec3<i32>(index);
let color1: vec4<i32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<i32>(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 256, 8, 6, 1
data1 = (ctypes.c_int32 * nc * nx * ny * nz)()
for z in range(nz):
for y in range(ny):
for x in range(nx):
for c in range(nc):
data1[z][y][x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.r32sint,
wgpu.TextureDimension.d3,
(nx, ny, nz, nc),
data1,
)
def test_compute_tex_3d_r32float():
compute_shader = """
[[group(0), binding(0)]]
var r_tex1: texture_3d<f32>;
[[group(0), binding(1)]]
var r_tex2: texture_storage_3d<r32float,write>;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] index: vec3<u32>) {
let i = vec3<i32>(index);
let color1: vec4<f32> = textureLoad(r_tex1, i, 0);
let color2 = vec4<f32>(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a);
textureStore(r_tex2, i, color2);
}
"""
# Generate data
nx, ny, nz, nc = 64, 8, 6, 1
data1 = (ctypes.c_float * nc * nx * ny * nz)()
for z in range(nz):
for y in range(ny):
for x in range(nx):
for c in range(nc):
data1[z][y][x][c] = random.randint(0, 20)
# Compute and validate
_compute_texture(
compute_shader,
wgpu.TextureFormat.r32float,
wgpu.TextureDimension.d3,
(nx, ny, nz, nc),
data1,
)
# %%
def _compute_texture(compute_shader, texture_format, texture_dim, texture_size, data1):
"""
Apply a computation on a texture and validate the result. The shader should:
* Add the x-coordinate to the red channel.
* Add 1 to the green channel.
* Multiply the blue channel by 2.
* The alpha channel must remain equal.
"""
nx, ny, nz, nc = texture_size
nbytes = ctypes.sizeof(data1)
bpp = nbytes // (nx * ny * nz) # bytes per pixel
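    # e.g. bpp is 4 for the rgba8uint / r32sint / r32float cases above and 8 for rgba16sint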
device = get_default_device()
cshader = device.create_shader_module(code=compute_shader)
# Create textures and views
texture1 = device.create_texture(
size=(nx, ny, nz),
dimension=texture_dim,
format=texture_format,
usage=wgpu.TextureUsage.TEXTURE_BINDING | wgpu.TextureUsage.COPY_DST,
)
texture2 = device.create_texture(
size=(nx, ny, nz),
dimension=texture_dim,
format=texture_format,
usage=wgpu.TextureUsage.STORAGE_BINDING | wgpu.TextureUsage.COPY_SRC,
)
texture_view1 = texture1.create_view()
texture_view2 = texture2.create_view()
# Create buffer that we need to upload the data
buffer_usage = wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST
buffer = device.create_buffer_with_data(data=data1, usage=buffer_usage)
assert buffer.usage == buffer_usage
texture_sample_type = "float"
if "uint" in texture_format:
texture_sample_type = "uint"
elif "sint" in texture_format:
texture_sample_type = "sint"
# Define bindings
# One can see here why we need 2 textures: one is readonly, one writeonly
bindings = [
{"binding": 0, "resource": texture_view1},
{"binding": 1, "resource": texture_view2},
]
binding_layouts = [
{
"binding": 0,
"visibility": wgpu.ShaderStage.COMPUTE,
"texture": {
"sample_type": texture_sample_type,
"view_dimension": texture_dim,
},
},
{
"binding": 1,
"visibility": wgpu.ShaderStage.COMPUTE,
"storage_texture": {
"access": wgpu.StorageTextureAccess.write_only,
"format": texture_format,
"view_dimension": texture_dim,
},
},
]
bind_group_layout = device.create_bind_group_layout(entries=binding_layouts)
pipeline_layout = device.create_pipeline_layout(
bind_group_layouts=[bind_group_layout]
)
bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings)
# Create a pipeline and run it
compute_pipeline = device.create_compute_pipeline(
layout=pipeline_layout,
compute={"module": cshader, "entry_point": "main"},
)
assert compute_pipeline.get_bind_group_layout(0) is bind_group_layout
command_encoder = device.create_command_encoder()
command_encoder.copy_buffer_to_texture(
{
"buffer": buffer,
"offset": 0,
"bytes_per_row": bpp * nx,
"rows_per_image": ny,
},
{"texture": texture1, "mip_level": 0, "origin": (0, 0, 0)},
(nx, ny, nz),
)
compute_pass = command_encoder.begin_compute_pass()
compute_pass.push_debug_group("foo")
compute_pass.insert_debug_marker("setting pipeline")
compute_pass.set_pipeline(compute_pipeline)
compute_pass.insert_debug_marker("setting bind group")
compute_pass.set_bind_group(
0, bind_group, [], 0, 999999
) # last 2 elements not used
compute_pass.insert_debug_marker("dispatch!")
compute_pass.dispatch(nx, ny, nz)
compute_pass.pop_debug_group()
compute_pass.end_pass()
command_encoder.copy_texture_to_buffer(
{"texture": texture2, "mip_level": 0, "origin": (0, 0, 0)},
{
"buffer": buffer,
"offset": 0,
"bytes_per_row": bpp * nx,
"rows_per_image": ny,
},
(nx, ny, nz),
)
device.queue.submit([command_encoder.finish()])
# Read the current data of the output buffer
data2 = data1.__class__.from_buffer(device.queue.read_buffer(buffer))
# Numpy arrays are easier to work with
a1 = np.ctypeslib.as_array(data1).reshape(nz, ny, nx, nc)
a2 = np.ctypeslib.as_array(data2).reshape(nz, ny, nx, nc)
# Validate!
for x in range(nx):
assert np.all(a2[:, :, x, 0] == a1[:, :, x, 0] + x)
if nc >= 2:
assert np.all(a2[:, :, :, 1] == a1[:, :, :, 1] + 1)
if nc >= 3:
assert np.all(a2[:, :, :, 2] == a1[:, :, :, 2] * 2)
if nc >= 4:
assert np.all(a2[:, :, :, 3] == a1[:, :, :, 3])
if __name__ == "__main__":
run_tests(globals())
|
[
"random.randint",
"ctypes.sizeof",
"testutils.get_default_device",
"pytest.skip",
"numpy.ctypeslib.as_array",
"numpy.all"
] |
[((313, 383), 'pytest.skip', 'skip', (['"""Skipping tests that need the wgpu lib"""'], {'allow_module_level': '(True)'}), "('Skipping tests that need the wgpu lib', allow_module_level=True)\n", (317, 383), False, 'from pytest import skip\n'), ((13362, 13382), 'ctypes.sizeof', 'ctypes.sizeof', (['data1'], {}), '(data1)\n', (13375, 13382), False, 'import ctypes\n'), ((13451, 13471), 'testutils.get_default_device', 'get_default_device', ([], {}), '()\n', (13469, 13471), False, 'from testutils import run_tests, get_default_device\n'), ((428, 501), 'pytest.skip', 'skip', (['"""These tests fail on dx12 for some reason"""'], {'allow_module_level': '(True)'}), "('These tests fail on dx12 for some reason', allow_module_level=True)\n", (432, 501), False, 'from pytest import skip\n'), ((17373, 17417), 'numpy.all', 'np.all', (['(a2[:, :, x, 0] == a1[:, :, x, 0] + x)'], {}), '(a2[:, :, x, 0] == a1[:, :, x, 0] + x)\n', (17379, 17417), True, 'import numpy as np\n'), ((17449, 17493), 'numpy.all', 'np.all', (['(a2[:, :, :, 1] == a1[:, :, :, 1] + 1)'], {}), '(a2[:, :, :, 1] == a1[:, :, :, 1] + 1)\n', (17455, 17493), True, 'import numpy as np\n'), ((17525, 17569), 'numpy.all', 'np.all', (['(a2[:, :, :, 2] == a1[:, :, :, 2] * 2)'], {}), '(a2[:, :, :, 2] == a1[:, :, :, 2] * 2)\n', (17531, 17569), True, 'import numpy as np\n'), ((17601, 17641), 'numpy.all', 'np.all', (['(a2[:, :, :, 3] == a1[:, :, :, 3])'], {}), '(a2[:, :, :, 3] == a1[:, :, :, 3])\n', (17607, 17641), True, 'import numpy as np\n'), ((1291, 1312), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (1305, 1312), False, 'import random\n'), ((2279, 2300), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (2293, 2300), False, 'import random\n'), ((3263, 3284), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (3277, 3284), False, 'import random\n'), ((4254, 4275), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (4268, 4275), False, 'import random\n'), ((17202, 17230), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['data1'], {}), '(data1)\n', (17223, 17230), True, 'import numpy as np\n'), ((17264, 17292), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['data2'], {}), '(data2)\n', (17285, 17292), True, 'import numpy as np\n'), ((5306, 5327), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (5320, 5327), False, 'import random\n'), ((6342, 6363), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (6356, 6363), False, 'import random\n'), ((7372, 7393), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (7386, 7393), False, 'import random\n'), ((8410, 8431), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (8424, 8431), False, 'import random\n'), ((9507, 9528), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (9521, 9528), False, 'import random\n'), ((10587, 10608), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (10601, 10608), False, 'import random\n'), ((11662, 11683), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (11676, 11683), False, 'import random\n'), ((12744, 12765), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (12758, 12765), False, 'import random\n')]
|
# Note: model title and parameter table are inserted automatically
r"""
This model provides the scattering intensity, $I(q)$, for a lyotropic lamellar
phase where a random distribution in solution is assumed. The SLD of the head
region is taken to be different from the SLD of the tail region.
Definition
----------
The scattering intensity $I(q)$ is
.. math::
I(q) = 2\pi\frac{\text{scale}}{2(\delta_H + \delta_T)} P(q) \frac{1}{q^2}
The form factor $P(q)$ is
.. math::
P(q) = \frac{4}{q^2}
\left\lbrace
\Delta \rho_H
        \left[\sin[q(\delta_H + \delta_T)] - \sin(q\delta_T)\right]
+ \Delta\rho_T\sin(q\delta_T)
\right\rbrace^2
where $\delta_T$ is *length_tail*, $\delta_H$ is *length_head*,
$\Delta\rho_H$ is the head contrast (*sld_head* $-$ *sld_solvent*),
and $\Delta\rho_T$ is tail contrast (*sld* $-$ *sld_solvent*).
The total thickness of the lamellar sheet is
$\delta_H + \delta_T + \delta_T + \delta_H$. Note that in a non aqueous solvent
the chemical "head" group may be the "Tail region" and vice-versa.
The 2D scattering intensity is calculated in the same way as 1D, where
the $q$ vector is defined as
.. math:: q = \sqrt{q_x^2 + q_y^2}
References
----------
#. <NAME>, <NAME>, and <NAME>, *J. Phys. II France*, 3, (1993) 487-502
#. <NAME>, <NAME>, <NAME>, <NAME>,
*J. Phys. Chem. B*, 105, (2001) 11081-11088
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:** <NAME> and <NAME> **Date** April 17, 2014
"""
import numpy as np
from numpy import inf
name = "lamellar_hg"
title = "Random lamellar phase with Head and Tail Groups"
description = """\
[Random lamellar phase with Head and Tail Groups]
I(q)= 2*pi*P(q)/(2(H+T)*q^(2)), where
P(q)= see manual
layer thickness =(H+T+T+H) = 2(Head+Tail)
sld = Tail scattering length density
sld_head = Head scattering length density
sld_solvent = solvent scattering length density
background = incoherent background
scale = scale factor
"""
category = "shape:lamellae"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [["length_tail", "Ang", 15, [0, inf], "volume", "Tail thickness ( total = H+T+T+H)"],
["length_head", "Ang", 10, [0, inf], "volume", "Head thickness"],
["sld", "1e-6/Ang^2", 0.4, [-inf,inf], "sld", "Tail scattering length density"],
["sld_head", "1e-6/Ang^2", 3.0, [-inf,inf], "sld", "Head scattering length density"],
["sld_solvent", "1e-6/Ang^2", 6, [-inf,inf], "sld", "Solvent scattering length density"]]
# pylint: enable=bad-whitespace, line-too-long
# No volume normalization despite having a volume parameter
# This should perhaps be volume normalized?
form_volume = """
return 1.0;
"""
Iq = """
const double qsq = q*q;
const double drh = sld_head - sld_solvent;
const double drt = sld - sld_solvent; //correction 13FEB06 by L.Porcar
const double qT = q*length_tail;
double Pq, inten;
Pq = drh*(sin(q*(length_head+length_tail))-sin(qT)) + drt*sin(qT);
Pq *= Pq;
Pq *= 4.0/(qsq);
inten = 2.0e-4*M_PI*Pq/qsq;
// normalize by the bilayer thickness
inten /= 2.0*(length_head+length_tail);
return inten;
"""
def random():
"""Return a random parameter set for the model."""
thickness = 10**np.random.uniform(1, 4)
length_head = thickness * np.random.uniform(0, 1)
length_tail = thickness - length_head
pars = dict(
length_head=length_head,
length_tail=length_tail,
)
return pars
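# Sketch: a pure-Python transcription of the C kernel in Iq above, handy for spot-checking
# against the test value in `tests` below (the helper name is illustrative and is not part
# of the sasmodels model interface).
def _iq_python(q, length_tail=15.0, length_head=10.0,
               sld=0.4, sld_head=3.0, sld_solvent=6.0):
    drh = sld_head - sld_solvent
    drt = sld - sld_solvent
    qT = q * length_tail
    Pq = drh * (np.sin(q * (length_head + length_tail)) - np.sin(qT)) + drt * np.sin(qT)
    Pq = 4.0 * Pq * Pq / (q * q)
    inten = 2.0e-4 * np.pi * Pq / (q * q)
    # normalize by the bilayer thickness
    return inten / (2.0 * (length_head + length_tail))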
#
tests = [
[{'scale': 1.0, 'background': 0.0, 'length_tail': 15.0, 'length_head': 10.0,
'sld': 0.4, 'sld_head': 3.0, 'sld_solvent': 6.0},
[0.001], [653143.9209]],
]
# ADDED by: RKH ON: 18Mar2016 converted from sasview previously, now renaming everything & sorting the docs
|
[
"numpy.random.uniform"
] |
[((3537, 3560), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(4)'], {}), '(1, 4)\n', (3554, 3560), True, 'import numpy as np\n'), ((3591, 3614), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3608, 3614), True, 'import numpy as np\n')]
|
# %%
# line printer
def printer(info):
print('\n\n================================= {} =================================================\n\n'.format(info))
# %%
import itertools
# to get a counter starting from 0 to infinity
counter = itertools.count() # returns an iterator; count starts from 0 and you can use it with a for loop
# or with next()
# if you want a counter starting from a certain number
start = 10
counter = itertools.count(start=start, step=-5) # the step argument is optional and can be negative
# itertools.count() can be used with zip function like
counter = itertools.count(start=start, step=-5)
l = [100, 200, 300, 400]
zipped = zip(counter, l)
print(list(zipped))
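# note: with step=1 this pairing is exactly what enumerate(l, start=start) gives;
# itertools.count is the general form (arbitrary step, usable with zip over any iterables)
print(list(enumerate(l, start=start)))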
# %%
from itertools import zip_longest
# zip pairs two lists and stops at the end of the shorter one; with zip_longest, iteration continues until the longest
# iterable is exhausted (missing values are filled with None)
l1 = ['sanket', 'sanchita', 'rohan', 'devi', 'adarsh', 'vishnu', 'prashant', 'chirag']
l2 = [1, 2, 3, 4, 5, 6]
print(list(zip(l1, l2)))
print(list(zip_longest(l1, l2)))
# %%
from itertools import cycle
# cycle behaves like a circular list: it iterates over the given values over and over again
# it can be used with a list, tuple or string
counter = cycle([1, 2, 3, 4])
counter = cycle(('On', 'Off'))
counter = cycle('san') # it will repeat 's' 'a' and 'n'
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
print(next(counter))
# %%
from itertools import repeat
# repeat can be used to repeat a single value multiple times
counter = repeat(2, times=3) # repeat 2 three times -- times is optional; if not provided, the value repeats forever
print(next(counter))
print(next(counter))
print(next(counter))
# example
squares = map(pow, range(1, 10), repeat(2, times=3))
print(list(squares))
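# note: because repeat(2, times=3) yields only three items, map stops after three pairs and
# this prints [1, 4, 9]; use repeat(2) without times to square every value in range(1, 10)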
# %%
from itertools import starmap
'''
def starmap(function, iterable):
# starmap(pow, [(2,5), (3,2), (10,3)]) --> 32 9 1000
for args in iterable:
yield function(*args)
'''
def power(x, y):
return x ** y
# the above example in repeat can be used with starmap as
squares = starmap(power, [(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)])
print(list(squares))
# how *args work
# for k in [(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)]:
# print(*k)
# %%
from itertools import combinations, permutations
# used for getting all the possible combinations
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
result = combinations(letters, 3) # produce the combination where (a,b) and (b,a) is same so only one will be mentioned
print(list(result))
# if order matters the use permutations
result = permutations(letters, 3)
print(list(result))
# for example, if we want to create a 4-digit code from number where a combination can include the same digit multiple
# times
from itertools import product
# computes the cartesian product
# product(iterable, repeat=n) is permutation with replacement
# for solution see https://www.hackerrank.com/challenges/itertools-product/problem
result = product(number, repeat=3) # arrange numbers in groups of 3
print(list(result))
# for product we have to provide the repeat argument; a similar function that can be used is
# combinations_with_replacement
from itertools import combinations_with_replacement
result = combinations_with_replacement(number, 4) # arrange numbers in groups of 4 where each number can be used
# multiple times
print(list(result))
# for permutations with replacement use below link for solution
# https://stackoverflow.com/questions/46255220/how-to-produce-permutations-with-replacement-in-python
'''
import itertools
choices = [-1, 1]
n = 3
l = [choices] * n
list(itertools.product(*l))
'''
# %%
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
# if you want to iterate through all of letters, number and names, you would normally have to create a single list
# containing all of them and then iterate through that new list; chain solves this without building the list
from itertools import chain
iterator = chain(letters, number, names)
print(iterator) # object of iterator
# print(list(iterator)) # print the complete list
# %%
# islice is used to slice an iterable lazily, which is memory efficient
letters = ['a', 'b', 'c', 'd', 'e']
number = [0, 1, 2, 3]
names = ['sanket', 'raj']
import itertools
from itertools import islice
result = islice(letters, 1, 3)
print(list(result))
result = itertools.islice(range(10), 1, 5) # islice take (iterable, start, stop) or (iterable, stop)
# islice is useful when an iterator is too long to load into memory just to slice it, since loading and then slicing can
# be costly; for example, suppose we have a file
import os
with open('collections_module/islice_text', 'r') as file:
# file object is an iterator
header = islice(file, 3)
for line in header:
print(line, end='')
#
# %%
import numpy as np
np.random.seed(42) # seeding is done for the pseudo-random number generator
selectors = np.random.randint(1, 10000, 50)
'''
letters = np.array(['a', 'b', 'c', 'd', 'e'])
print(selectors)
print(letters[selectors])
'''
# filterfalse works the same as filter, except that it returns the elements for which the predicate is False
from itertools import filterfalse
result = filterfalse(lambda x: x > 5000, selectors) # return an iterator
print(list(result))
# %%
# dropwhile drops elements while the condition holds and yields everything from the first element that fails it
# takewhile is similar but the opposite: it yields elements only while the condition holds
import numpy as np
np.random.seed(42)
selectors = np.random.randint(1, 10000, 50)
print(selectors)
from itertools import dropwhile
result = dropwhile(lambda x: x > 500, selectors)
print(len(list(result)))
# %%
# accumulate -- performs cumulative operations over an iterable (running sums, products, ...)
import numpy as np
np.random.seed(42)
selectors = np.random.randint(1, 10, 10)
print(selectors)
from itertools import accumulate
import operator
result = accumulate(selectors, operator.mul)
print(list(result))
printer('string work')
# working with the string
selectors = np.random.choice(['a', 'b', 'c', 'f', 'g'], size=10)
print(selectors)
result = accumulate(selectors, lambda x, y: x + y)
print(list(result))
#%%
# groupby works much the same as pandas groupby
people = [
{
'name': '<NAME>',
'city': 'Gotham',
'state': 'NY'
},
{
'name': '<NAME>',
'city': 'Kings Landing',
'state': 'NY'
},
{
'name': '<NAME>',
'city': 'Boulder',
'state': 'CO'
},
{
'name': '<NAME>',
'city': 'Denver',
'state': 'CO'
},
{
'name': '<NAME>',
'city': 'Hinton',
'state': 'WV'
},
{
'name': '<NAME>',
'city': 'Rand',
'state': 'WV'
},
{
'name': '<NAME>',
'city': 'Asheville',
'state': 'NC'
},
{
'name': '<NAME>',
'city': 'Charlotte',
'state': 'NC'
},
{
'name': '<NAME>',
'city': 'Faketown',
'state': 'NC'
}
]
def get_state(person):
return person['state']
# for groupby to work as expected, the data has to be sorted by the key; otherwise a new group
# starts at every key change (see the sketch after the loop below)
from itertools import groupby
result = groupby(people, get_state)
for key, group in result:
# here group is an iterator
#
for g in group:
print(key,' ', g)
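# sketch: when the input is not already ordered by the grouping key, sort it first --
# otherwise groupby starts a new group every time the key value changes
for key, group in groupby(sorted(people, key=get_state), get_state):
    print(key, [person['name'] for person in group])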
#%%
# in order to create multiple copies of an iterator you can use tee
import numpy as np
np.random.seed(42) # seeding is done for the pseudo-random number generator
selectors = np.random.randint(1, 10000, 50)
from itertools import filterfalse
from itertools import tee
result = filterfalse(lambda x: x < 500, selectors)
copy1, copy2 = tee(result) # don't use the original result iterator
print(list(copy1))
print(list(copy2))
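# note: tee() buffers items internally so each copy can be advanced independently;
# advancing the original `result` after calling tee would make the copies silently miss items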
|
[
"numpy.random.seed",
"numpy.random.randint",
"itertools.cycle",
"itertools.permutations",
"itertools.zip_longest",
"numpy.random.choice",
"itertools.product",
"itertools.chain",
"itertools.filterfalse",
"itertools.accumulate",
"itertools.count",
"itertools.combinations",
"itertools.islice",
"itertools.tee",
"itertools.groupby",
"itertools.repeat",
"itertools.starmap",
"itertools.combinations_with_replacement",
"itertools.dropwhile"
] |
[((244, 261), 'itertools.count', 'itertools.count', ([], {}), '()\n', (259, 261), False, 'import itertools\n'), ((456, 493), 'itertools.count', 'itertools.count', ([], {'start': 'start', 'step': '(-5)'}), '(start=start, step=-5)\n', (471, 493), False, 'import itertools\n'), ((609, 646), 'itertools.count', 'itertools.count', ([], {'start': 'start', 'step': '(-5)'}), '(start=start, step=-5)\n', (624, 646), False, 'import itertools\n'), ((1257, 1276), 'itertools.cycle', 'cycle', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1262, 1276), False, 'from itertools import cycle\n'), ((1287, 1307), 'itertools.cycle', 'cycle', (["('On', 'Off')"], {}), "(('On', 'Off'))\n", (1292, 1307), False, 'from itertools import cycle\n'), ((1318, 1330), 'itertools.cycle', 'cycle', (['"""san"""'], {}), "('san')\n", (1323, 1330), False, 'from itertools import cycle\n'), ((1595, 1613), 'itertools.repeat', 'repeat', (['(2)'], {'times': '(3)'}), '(2, times=3)\n', (1601, 1613), False, 'from itertools import repeat\n'), ((2147, 2211), 'itertools.starmap', 'starmap', (['power', '[(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)]'], {}), '(power, [(0, 2), (1, 3), (2, 2), (3, 3), (4, 2), (9, 3)])\n', (2154, 2211), False, 'from itertools import starmap\n'), ((2528, 2552), 'itertools.combinations', 'combinations', (['letters', '(3)'], {}), '(letters, 3)\n', (2540, 2552), False, 'from itertools import combinations, permutations\n'), ((2710, 2734), 'itertools.permutations', 'permutations', (['letters', '(3)'], {}), '(letters, 3)\n', (2722, 2734), False, 'from itertools import combinations, permutations\n'), ((3103, 3128), 'itertools.product', 'product', (['number'], {'repeat': '(3)'}), '(number, repeat=3)\n', (3110, 3128), False, 'from itertools import product\n'), ((3376, 3416), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['number', '(4)'], {}), '(number, 4)\n', (3405, 3416), False, 'from itertools import combinations_with_replacement\n'), ((4117, 4146), 'itertools.chain', 'chain', (['letters', 'number', 'names'], {}), '(letters, number, names)\n', (4122, 4146), False, 'from itertools import chain\n'), ((4456, 4477), 'itertools.islice', 'islice', (['letters', '(1)', '(3)'], {}), '(letters, 1, 3)\n', (4462, 4477), False, 'from itertools import islice\n'), ((4979, 4997), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4993, 4997), True, 'import numpy as np\n'), ((5057, 5088), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000)', '(50)'], {}), '(1, 10000, 50)\n', (5074, 5088), True, 'import numpy as np\n'), ((5326, 5368), 'itertools.filterfalse', 'filterfalse', (['(lambda x: x > 5000)', 'selectors'], {}), '(lambda x: x > 5000, selectors)\n', (5337, 5368), False, 'from itertools import filterfalse\n'), ((5600, 5618), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5614, 5618), True, 'import numpy as np\n'), ((5631, 5662), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000)', '(50)'], {}), '(1, 10000, 50)\n', (5648, 5662), True, 'import numpy as np\n'), ((5723, 5762), 'itertools.dropwhile', 'dropwhile', (['(lambda x: x > 500)', 'selectors'], {}), '(lambda x: x > 500, selectors)\n', (5732, 5762), False, 'from itertools import dropwhile\n'), ((5911, 5929), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5925, 5929), True, 'import numpy as np\n'), ((5942, 5970), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (5959, 5970), True, 'import numpy as np\n'), ((6048, 6083), 
'itertools.accumulate', 'accumulate', (['selectors', 'operator.mul'], {}), '(selectors, operator.mul)\n', (6058, 6083), False, 'from itertools import accumulate\n'), ((6167, 6219), 'numpy.random.choice', 'np.random.choice', (["['a', 'b', 'c', 'f', 'g']"], {'size': '(10)'}), "(['a', 'b', 'c', 'f', 'g'], size=10)\n", (6183, 6219), True, 'import numpy as np\n'), ((6246, 6287), 'itertools.accumulate', 'accumulate', (['selectors', '(lambda x, y: x + y)'], {}), '(selectors, lambda x, y: x + y)\n', (6256, 6287), False, 'from itertools import accumulate\n'), ((7351, 7377), 'itertools.groupby', 'groupby', (['people', 'get_state'], {}), '(people, get_state)\n', (7358, 7377), False, 'from itertools import groupby\n'), ((7580, 7598), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (7594, 7598), True, 'import numpy as np\n'), ((7658, 7689), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000)', '(50)'], {}), '(1, 10000, 50)\n', (7675, 7689), True, 'import numpy as np\n'), ((7761, 7802), 'itertools.filterfalse', 'filterfalse', (['(lambda x: x < 500)', 'selectors'], {}), '(lambda x: x < 500, selectors)\n', (7772, 7802), False, 'from itertools import filterfalse\n'), ((7819, 7830), 'itertools.tee', 'tee', (['result'], {}), '(result)\n', (7822, 7830), False, 'from itertools import tee\n'), ((1808, 1826), 'itertools.repeat', 'repeat', (['(2)'], {'times': '(3)'}), '(2, times=3)\n', (1814, 1826), False, 'from itertools import repeat\n'), ((4881, 4896), 'itertools.islice', 'islice', (['file', '(3)'], {}), '(file, 3)\n', (4887, 4896), False, 'from itertools import islice\n'), ((1044, 1063), 'itertools.zip_longest', 'zip_longest', (['l1', 'l2'], {}), '(l1, l2)\n', (1055, 1063), False, 'from itertools import zip_longest\n')]
|
from functools import wraps
import numpy as np
import torch
from mani_skill_learn.utils.data import split_in_dict_array, concat_list_of_array
def disable_gradients(network):
for param in network.parameters():
param.requires_grad = False
def worker_init_fn(worker_id):
"""The function is designed for pytorch multi-process dataloader.
Note that we use the pytorch random generator to generate a base_seed. Please try to be consistent.
References:
https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed
"""
base_seed = torch.IntTensor(1).random_().item()
# print(worker_id, base_seed)
np.random.seed(base_seed + worker_id)
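# usage sketch (assumes a standard torch DataLoader with num_workers > 0, which is when
# per-worker seeding matters):
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, num_workers=4, worker_init_fn=worker_init_fn)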
def no_grad(f):
    @wraps(f)
def wrapper(*args, **kwargs):
with torch.no_grad():
return f(*args, **kwargs)
return wrapper
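# usage sketch (hypothetical function, not part of this module):
#   @no_grad
#   def evaluate(network, obs):
#       return network(obs)
# every call to evaluate() then runs inside torch.no_grad()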
def run_with_mini_batch(function, data, batch_size):
"""
    Run a pytorch function with mini-batches when the batch size of the data is very large.
:param function: the function
:param data: the input data which should be in dict array structure
:param batch_size: the batch_size of the mini-batch
:return: all the outputs.
"""
data_list = split_in_dict_array(data, batch_size, axis=0)
ans = []
for data_i in data_list:
ans_i = function(data_i)
ans.append(ans_i)
return concat_list_of_array(ans, axis=0)
|
[
"numpy.random.seed",
"torch.IntTensor",
"functools.wraps",
"torch.no_grad",
"mani_skill_learn.utils.data.concat_list_of_array",
"mani_skill_learn.utils.data.split_in_dict_array"
] |
[((658, 695), 'numpy.random.seed', 'np.random.seed', (['(base_seed + worker_id)'], {}), '(base_seed + worker_id)\n', (672, 695), True, 'import numpy as np\n'), ((718, 726), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (723, 726), False, 'from functools import wraps\n'), ((1214, 1259), 'mani_skill_learn.utils.data.split_in_dict_array', 'split_in_dict_array', (['data', 'batch_size'], {'axis': '(0)'}), '(data, batch_size, axis=0)\n', (1233, 1259), False, 'from mani_skill_learn.utils.data import split_in_dict_array, concat_list_of_array\n'), ((1372, 1405), 'mani_skill_learn.utils.data.concat_list_of_array', 'concat_list_of_array', (['ans'], {'axis': '(0)'}), '(ans, axis=0)\n', (1392, 1405), False, 'from mani_skill_learn.utils.data import split_in_dict_array, concat_list_of_array\n'), ((775, 790), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (788, 790), False, 'import torch\n'), ((584, 602), 'torch.IntTensor', 'torch.IntTensor', (['(1)'], {}), '(1)\n', (599, 602), False, 'import torch\n')]
|
"""
test melange.propagators
"""
from jax import random
from jax import vmap
import jax.numpy as jnp
from melange.propagators import *
from melange.tests.utils import checker_function, get_nondefault_potential_initializer
import tqdm
import numpy as np
from jax.config import config; config.update("jax_enable_x64", True)
def test_1D_ULA_propagator(key = random.PRNGKey(0), num_runs=1000):
"""
    take a batch of 1000 particles distributed according to N(0,2), run ULA dynamics for 100 steps with dt=0.01 on a potential whose invariant is N(0,2),
    and assert that the mean and variance are unchanged within a tolerance
"""
key, genkey = random.split(key)
potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
x_ula_starter = random.multivariate_normal(key = genkey, mean = mu, cov = cov, shape=[num_runs])
dt=1e-2
batch_ula_move = vmap(ULA_move, in_axes=(0, None, None, 0, None))
potential_parameter = jnp.array([0.])
for i in tqdm.trange(100):
key, ula_keygen = random.split(key, 2)
ula_keys = random.split(ula_keygen, num_runs)
x_ULA = batch_ula_move(x_ula_starter, potential, dt, ula_keys, potential_parameter)
x_ula_starter = x_ULA
ula_mean, ula_std = x_ula_starter.mean(), x_ula_starter.std()
assert checker_function(ula_mean,0.2)
assert checker_function(ula_std - jnp.sqrt(2), 0.2)
def test_1D_driven_propagator(key = random.PRNGKey(0), num_runs=1000):
"""
    take a batch of 1000 particles distributed according to N(0,2), run the driven Langevin algorithm for 100 steps with dt=0.01 on a potential whose invariant is N(0,2),
    and assert that the mean and variance are unchanged within a tolerance.
"""
key, genkey = random.split(key)
potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
x_driven_starter = random.multivariate_normal(key = genkey, mean = mu, cov = cov, shape=[num_runs])
dt=1e-2
#make dummy A and b functions
def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
def b(x, b_param): return jnp.zeros(x.shape[0])
batch_driver_move = vmap(driven_Langevin_move, in_axes=(0,None,None,None,None,None,None,None,0))
potential_parameter = jnp.array([0.])
for i in tqdm.trange(100):
key, drive_keygen = random.split(key, 2)
drive_keys = random.split(drive_keygen, num_runs)
x_drive = batch_driver_move(x_driven_starter,
potential,
dt,
A,
b,
potential_parameter,
jnp.array([0.]),
jnp.array([0.]),
drive_keys)
x_driven_starter = x_drive
driven_mean, driven_std = x_driven_starter.mean(), x_driven_starter.std()
assert checker_function(driven_mean,0.2)
assert checker_function(driven_std - jnp.sqrt(2), 0.2)
def test_1d_kernel_consistency(key = random.PRNGKey(0)):
"""
with a 'dummy' driven forward kernel, assert that the log forward probability
is equal to that of the ULA propagator in one dimension
"""
from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio
dt=0.1
forward_potential_parameters= jnp.array([0.])
backward_potential_parameters = jnp.array([0.])
#make dummy A and b functions
def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
def b(x, b_param): return jnp.zeros(x.shape[0])
potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
xs = random.multivariate_normal(key = key, mean = jnp.array([1.]), cov = jnp.array([[1.]]), shape=[2])
EM_propagator, EM_kernel = generate_Euler_Maruyama_propagators()
driven_propagator, driven_kernel = generate_driven_Langevin_propagators()
EM_logp_ratio = Euler_Maruyama_log_proposal_ratio(xs[0], xs[1], potential, forward_potential_parameters, dt, potential, backward_potential_parameters, dt)
driven_logp_ratio = driven_Langevin_log_proposal_ratio(xs[0],
xs[1],
potential,
potential,
dt,
dt,
A,
b,
forward_potential_parameters,
backward_potential_parameters,
A_parameter = forward_potential_parameters,
b_parameter = forward_potential_parameters)
assert np.isclose(EM_logp_ratio, driven_logp_ratio)
def test_forward_ULA_driven_samplers(key = random.PRNGKey(0)):
"""
given a randomization key, execute `forward_ULA_sampler` and `forward_driven_diffusion_sampler`
with a time-independent potential that has the same mean and variance as the distribution of (5000) initial
    samples. We only assert that the propagated samples obey the same statistics (within a tolerance).
"""
from melange.propagators import forward_ULA_sampler, forward_driven_diffusion_sampler
dt=0.1
potential_parameters= jnp.zeros((100,1))
A_parameters = potential_parameters
b_parameters = potential_parameters
#make dummy A and b functions
def A(x, a_param): return jnp.zeros((x.shape[0], x.shape[0]))
def b(x, b_param): return jnp.zeros(x.shape[0])
potential, (mu, cov), dG = get_nondefault_potential_initializer(1)
xs = random.multivariate_normal(key = key, mean = mu, cov = cov, shape=[5000])
og_mean, og_variance = xs.mean(), xs.var()
#print(og_mean, og_variance)
ULA_trajs = forward_ULA_sampler(xs, potential, jnp.array([dt]*len(potential_parameters)), key, potential_parameters)
#print(ULA_trajs[-1].mean(), ULA_trajs[-1].var())
driven_trajs = forward_driven_diffusion_sampler(xs, potential, dt, key, A, b, potential_parameters, A_parameters, b_parameters)
#print(driven_trajs[-1].mean(), driven_trajs[-1].var())
mean_tolerance = 0.2
assert checker_function(ULA_trajs[-1].mean(), mean_tolerance)
assert checker_function(driven_trajs[-1].mean(), mean_tolerance)
variance_tolerance = 0.2
assert checker_function(ULA_trajs[-1].var() - 2., variance_tolerance)
assert checker_function(driven_trajs[-1].var()-2., variance_tolerance)
|
[
"jax.config.config.update",
"jax.numpy.array",
"melange.propagators.generate_Euler_Maruyama_propagators",
"jax.vmap",
"tqdm.trange",
"melange.propagators.driven_Langevin_log_proposal_ratio",
"jax.random.PRNGKey",
"jax.random.multivariate_normal",
"melange.propagators.generate_driven_Langevin_propagators",
"melange.tests.utils.get_nondefault_potential_initializer",
"numpy.isclose",
"melange.propagators.forward_driven_diffusion_sampler",
"jax.numpy.zeros",
"melange.propagators.Euler_Maruyama_log_proposal_ratio",
"melange.tests.utils.checker_function",
"jax.numpy.sqrt",
"jax.random.split"
] |
[((284, 321), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (297, 321), False, 'from jax.config import config\n'), ((356, 373), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (370, 373), False, 'from jax import random\n'), ((658, 675), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (670, 675), False, 'from jax import random\n'), ((707, 746), 'melange.tests.utils.get_nondefault_potential_initializer', 'get_nondefault_potential_initializer', (['(1)'], {}), '(1)\n', (743, 746), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((767, 841), 'jax.random.multivariate_normal', 'random.multivariate_normal', ([], {'key': 'genkey', 'mean': 'mu', 'cov': 'cov', 'shape': '[num_runs]'}), '(key=genkey, mean=mu, cov=cov, shape=[num_runs])\n', (793, 841), False, 'from jax import random\n'), ((881, 929), 'jax.vmap', 'vmap', (['ULA_move'], {'in_axes': '(0, None, None, 0, None)'}), '(ULA_move, in_axes=(0, None, None, 0, None))\n', (885, 929), False, 'from jax import vmap\n'), ((956, 972), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (965, 972), True, 'import jax.numpy as jnp\n'), ((986, 1002), 'tqdm.trange', 'tqdm.trange', (['(100)'], {}), '(100)\n', (997, 1002), False, 'import tqdm\n'), ((1306, 1337), 'melange.tests.utils.checker_function', 'checker_function', (['ula_mean', '(0.2)'], {}), '(ula_mean, 0.2)\n', (1322, 1337), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((1430, 1447), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (1444, 1447), False, 'from jax import random\n'), ((1755, 1772), 'jax.random.split', 'random.split', (['key'], {}), '(key)\n', (1767, 1772), False, 'from jax import random\n'), ((1804, 1843), 'melange.tests.utils.get_nondefault_potential_initializer', 'get_nondefault_potential_initializer', (['(1)'], {}), '(1)\n', (1840, 1843), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((1867, 1941), 'jax.random.multivariate_normal', 'random.multivariate_normal', ([], {'key': 'genkey', 'mean': 'mu', 'cov': 'cov', 'shape': '[num_runs]'}), '(key=genkey, mean=mu, cov=cov, shape=[num_runs])\n', (1893, 1941), False, 'from jax import random\n'), ((2136, 2224), 'jax.vmap', 'vmap', (['driven_Langevin_move'], {'in_axes': '(0, None, None, None, None, None, None, None, 0)'}), '(driven_Langevin_move, in_axes=(0, None, None, None, None, None, None,\n None, 0))\n', (2140, 2224), False, 'from jax import vmap\n'), ((2239, 2255), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (2248, 2255), True, 'import jax.numpy as jnp\n'), ((2269, 2285), 'tqdm.trange', 'tqdm.trange', (['(100)'], {}), '(100)\n', (2280, 2285), False, 'import tqdm\n'), ((2934, 2968), 'melange.tests.utils.checker_function', 'checker_function', (['driven_mean', '(0.2)'], {}), '(driven_mean, 0.2)\n', (2950, 2968), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((3065, 3082), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (3079, 3082), False, 'from jax import random\n'), ((3470, 3486), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (3479, 3486), True, 'import jax.numpy as jnp\n'), ((3522, 3538), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (3531, 3538), True, 'import jax.numpy as jnp\n'), ((3723, 3762), 
'melange.tests.utils.get_nondefault_potential_initializer', 'get_nondefault_potential_initializer', (['(1)'], {}), '(1)\n', (3759, 3762), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((3902, 3939), 'melange.propagators.generate_Euler_Maruyama_propagators', 'generate_Euler_Maruyama_propagators', ([], {}), '()\n', (3937, 3939), False, 'from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio\n'), ((3979, 4017), 'melange.propagators.generate_driven_Langevin_propagators', 'generate_driven_Langevin_propagators', ([], {}), '()\n', (4015, 4017), False, 'from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio\n'), ((4039, 4185), 'melange.propagators.Euler_Maruyama_log_proposal_ratio', 'Euler_Maruyama_log_proposal_ratio', (['xs[0]', 'xs[1]', 'potential', 'forward_potential_parameters', 'dt', 'potential', 'backward_potential_parameters', 'dt'], {}), '(xs[0], xs[1], potential,\n forward_potential_parameters, dt, potential,\n backward_potential_parameters, dt)\n', (4072, 4185), False, 'from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio\n'), ((4202, 4444), 'melange.propagators.driven_Langevin_log_proposal_ratio', 'driven_Langevin_log_proposal_ratio', (['xs[0]', 'xs[1]', 'potential', 'potential', 'dt', 'dt', 'A', 'b', 'forward_potential_parameters', 'backward_potential_parameters'], {'A_parameter': 'forward_potential_parameters', 'b_parameter': 'forward_potential_parameters'}), '(xs[0], xs[1], potential, potential, dt,\n dt, A, b, forward_potential_parameters, backward_potential_parameters,\n A_parameter=forward_potential_parameters, b_parameter=\n forward_potential_parameters)\n', (4236, 4444), False, 'from melange.propagators import generate_Euler_Maruyama_propagators, generate_driven_Langevin_propagators, Euler_Maruyama_log_proposal_ratio, driven_Langevin_log_proposal_ratio\n'), ((4865, 4909), 'numpy.isclose', 'np.isclose', (['EM_logp_ratio', 'driven_logp_ratio'], {}), '(EM_logp_ratio, driven_logp_ratio)\n', (4875, 4909), True, 'import numpy as np\n'), ((4954, 4971), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (4968, 4971), False, 'from jax import random\n'), ((5440, 5459), 'jax.numpy.zeros', 'jnp.zeros', (['(100, 1)'], {}), '((100, 1))\n', (5449, 5459), True, 'import jax.numpy as jnp\n'), ((5712, 5751), 'melange.tests.utils.get_nondefault_potential_initializer', 'get_nondefault_potential_initializer', (['(1)'], {}), '(1)\n', (5748, 5751), False, 'from melange.tests.utils import checker_function, get_nondefault_potential_initializer\n'), ((5759, 5826), 'jax.random.multivariate_normal', 'random.multivariate_normal', ([], {'key': 'key', 'mean': 'mu', 'cov': 'cov', 'shape': '[5000]'}), '(key=key, mean=mu, cov=cov, shape=[5000])\n', (5785, 5826), False, 'from jax import random\n'), ((6099, 6215), 'melange.propagators.forward_driven_diffusion_sampler', 'forward_driven_diffusion_sampler', (['xs', 'potential', 'dt', 'key', 'A', 'b', 'potential_parameters', 'A_parameters', 'b_parameters'], {}), '(xs, potential, dt, key, A, b,\n potential_parameters, A_parameters, b_parameters)\n', (6131, 6215), False, 'from melange.propagators import forward_ULA_sampler, 
forward_driven_diffusion_sampler\n'), ((1030, 1050), 'jax.random.split', 'random.split', (['key', '(2)'], {}), '(key, 2)\n', (1042, 1050), False, 'from jax import random\n'), ((1070, 1104), 'jax.random.split', 'random.split', (['ula_keygen', 'num_runs'], {}), '(ula_keygen, num_runs)\n', (1082, 1104), False, 'from jax import random\n'), ((2024, 2059), 'jax.numpy.zeros', 'jnp.zeros', (['(x.shape[0], x.shape[0])'], {}), '((x.shape[0], x.shape[0]))\n', (2033, 2059), True, 'import jax.numpy as jnp\n'), ((2090, 2111), 'jax.numpy.zeros', 'jnp.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (2099, 2111), True, 'import jax.numpy as jnp\n'), ((2315, 2335), 'jax.random.split', 'random.split', (['key', '(2)'], {}), '(key, 2)\n', (2327, 2335), False, 'from jax import random\n'), ((2357, 2393), 'jax.random.split', 'random.split', (['drive_keygen', 'num_runs'], {}), '(drive_keygen, num_runs)\n', (2369, 2393), False, 'from jax import random\n'), ((3603, 3638), 'jax.numpy.zeros', 'jnp.zeros', (['(x.shape[0], x.shape[0])'], {}), '((x.shape[0], x.shape[0]))\n', (3612, 3638), True, 'import jax.numpy as jnp\n'), ((3669, 3690), 'jax.numpy.zeros', 'jnp.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (3678, 3690), True, 'import jax.numpy as jnp\n'), ((5596, 5631), 'jax.numpy.zeros', 'jnp.zeros', (['(x.shape[0], x.shape[0])'], {}), '((x.shape[0], x.shape[0]))\n', (5605, 5631), True, 'import jax.numpy as jnp\n'), ((5660, 5681), 'jax.numpy.zeros', 'jnp.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (5669, 5681), True, 'import jax.numpy as jnp\n'), ((1375, 1386), 'jax.numpy.sqrt', 'jnp.sqrt', (['(2)'], {}), '(2)\n', (1383, 1386), True, 'import jax.numpy as jnp\n'), ((2694, 2710), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (2703, 2710), True, 'import jax.numpy as jnp\n'), ((2745, 2761), 'jax.numpy.array', 'jnp.array', (['[0.0]'], {}), '([0.0])\n', (2754, 2761), True, 'import jax.numpy as jnp\n'), ((3009, 3020), 'jax.numpy.sqrt', 'jnp.sqrt', (['(2)'], {}), '(2)\n', (3017, 3020), True, 'import jax.numpy as jnp\n'), ((3817, 3833), 'jax.numpy.array', 'jnp.array', (['[1.0]'], {}), '([1.0])\n', (3826, 3833), True, 'import jax.numpy as jnp\n'), ((3840, 3858), 'jax.numpy.array', 'jnp.array', (['[[1.0]]'], {}), '([[1.0]])\n', (3849, 3858), True, 'import jax.numpy as jnp\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 13:50:54 2020
This is the loader used to load data based on occupancy maps
@author: cheng
"""
import numpy as np
import time
import os
from augmentation import rotation
from maps import Maps
from occupancy import circle_group_grid
def loaddata(dataset_list, args, datatype="train"):
# Store the data across datasets
# All the datasets are merged for training
if datatype=="train" or datatype=="test":
offsets = np.empty((0, args.obs_seq+args.pred_seq-1, 8))
traj_data = np.empty((0, args.obs_seq+args.pred_seq, 4))
occupancy = np.empty((0, args.obs_seq+args.pred_seq-1, args.enviro_pdim[0], args.enviro_pdim[1], 3))
if dataset_list[0] == "train_merged":
            # ToDo change this to make compatible with Linux
data = np.load("../processed_data/train/%s.npz"%(dataset_list[0]))
_offsets, _traj_data, _occupancy = data["offsets"], data["traj_data"], data["occupancy"]
print(dataset_list[0], "contains %.0f trajectories"%len(_offsets))
offsets = np.concatenate((offsets, _offsets), axis=0)
traj_data = np.concatenate((traj_data, _traj_data), axis=0)
occupancy = np.concatenate((occupancy, _occupancy), axis=0)
else:
for i, dataset in enumerate(dataset_list):
                # Only take the original data
                # ToDo: test whether augmentation boosts the performance
if dataset != "train_merged":
                    # ToDo change this to make compatible with Linux
data = np.load("../processed_data/train/%s.npz"%(dataset))
_offsets, _traj_data, _occupancy = data["offsets"], data["traj_data"], data["occupancy"]
print(dataset, "contains %.0f trajectories"%len(_offsets))
offsets = np.concatenate((offsets, _offsets), axis=0)
traj_data = np.concatenate((traj_data, _traj_data), axis=0)
occupancy = np.concatenate((occupancy, _occupancy), axis=0)
# NOTE: When load the challenge data, there is no need to merge them
# The submission requires each challenge data set (in total 20) to be separated
# Hence, each time only one challenge data set is called
elif datatype == "challenge":
offsets = np.empty((0, args.obs_seq-1, 8))
traj_data = np.empty((0, args.obs_seq, 4))
occupancy = np.empty((0, args.obs_seq-1, args.enviro_pdim[0], args.enviro_pdim[1], 3))
for dataset in dataset_list:
            # ToDo change this to make compatible with Linux
data = np.load("../processed_data/challenge/%s.npz"%(dataset))
_offsets, _traj_data, _occupancy = data["offsets"], data["traj_data"], data["occupancy"]
offsets = np.concatenate((offsets, _offsets), axis=0)
traj_data = np.concatenate((traj_data, _traj_data), axis=0)
occupancy = np.concatenate((occupancy, _occupancy), axis=0)
    elif datatype=="test":
        assert len(dataset_list)==1, "Only one untouched dataset is left for testing!"
    elif datatype=="challenge":
        assert len(dataset_list)==1, "predict one by one"
if datatype=="train":
        # ToDo change this to make compatible with Linux
if not os.path.exists("../processed_data/train/train_merged.npz"):
# Save the merged training data
            # ToDo change this to make compatible with Linux
np.savez("../processed_data/train/train_merged.npz",
offsets=offsets,
traj_data = traj_data,
occupancy = occupancy)
return offsets, traj_data, occupancy
def preprocess_data(seq_length, size, dirname, path=None, data=None, aug_num=1, save=True):
'''
Parameters
----------
seq_length : int
        This is the complete length of each trajectory offset and occupancy sequence.
        Note: offsets and occupancy are one step shorter than traj_data.
size : [height, width, channels]
The occupancy grid size and channels:
orientation, speed and position for the neighbors in the vicinity
dirname : string
"train" or "challenge"
path : string, optional
only for extract offsets, traj_data, and occupancy from the original data files
data : numpy, optional
it is the predicted complete trajectories after the first prediction,
it is used to calculate the occupancy in the predicted time.
aug_num : int, optional
the number for augmenting the data by rotation.
    save : boolean, optional
Only save the processed training data. The default is True.
Returns
-------
offsets : numpy array
        [frameId, userId, x, y, delta_x, delta_y, theta, velocity].
traj_data : numpy array
[frameId, userId, x, y]
Note: this is one-step longer
occupancy : numpy array
[height, width, channels].
'''
start = time.time()
    if data is None:
data = np.genfromtxt(path, delimiter='')
        # challenge datasets have NaN for the prediction time steps
data = data[~np.isnan(data).any(axis=1)]
dataname = path.split('\\')[-1].split('.')[0]
print("process data %s ..."%dataname)
for r in range(aug_num):
        # Augment the data by rotation if the augmentation number is more than one
if r > 0:
data[:, 2:4] = rotation(data[:, 2:4], r/aug_num)
# Get the environment maps
maps = Maps(data)
traj_map = maps.trajectory_map()
orient_map, speed_map = maps.motion_map(max_speed=10)
map_info = [traj_map, orient_map, speed_map]
enviro_maps = concat_maps(map_info)
print("enviro_maps shape", enviro_maps.shape)
offsets = np.reshape(maps.offsets, (-1,seq_length,8))
print("offsets shape", offsets.shape)
traj_data = np.reshape(maps.sorted_data, (-1, seq_length+1, 4))
print("traj_data shape", traj_data.shape)
occupancy = circle_group_grid(offsets, maps.sorted_data, size)
print("occupancy shape", occupancy.shape)
if save:
if r == 0:
# Save the original one
np.savez("../processed_data/%s/%s"%(dirname, dataname),
offsets=offsets,
traj_data = traj_data,
occupancy = occupancy)
end = time.time()
else:
# Save the rotated one(s)
np.savez("../processed_data/%s/%s_%.0f"%(dirname, dataname, r),
offsets=offsets,
traj_data = traj_data,
occupancy = occupancy)
end = time.time()
print("It takes ", round(end-start, 2), "seconds!\n")
else:
return offsets, traj_data, occupancy
def concat_maps(map_info):
# save the map information into different channels
enviro_maps = np.empty((map_info[0].shape[0], map_info[0].shape[1], len(map_info)))
for i, map in enumerate(map_info):
enviro_maps[:, :, i] = map
return enviro_maps
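# Usage sketch (hypothetical: assumes an `args` object exposing the fields read above,
# i.e. obs_seq, pred_seq and enviro_pdim):
# offsets, traj_data, occupancy = loaddata(["train_merged"], args, datatype="train")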
|
[
"numpy.load",
"numpy.concatenate",
"occupancy.circle_group_grid",
"numpy.empty",
"os.path.exists",
"numpy.genfromtxt",
"numpy.isnan",
"time.time",
"augmentation.rotation",
"numpy.reshape",
"maps.Maps",
"numpy.savez",
"numpy.all"
] |
[((5232, 5243), 'time.time', 'time.time', ([], {}), '()\n', (5241, 5243), False, 'import time\n'), ((483, 533), 'numpy.empty', 'np.empty', (['(0, args.obs_seq + args.pred_seq - 1, 8)'], {}), '((0, args.obs_seq + args.pred_seq - 1, 8))\n', (491, 533), True, 'import numpy as np\n'), ((550, 596), 'numpy.empty', 'np.empty', (['(0, args.obs_seq + args.pred_seq, 4)'], {}), '((0, args.obs_seq + args.pred_seq, 4))\n', (558, 596), True, 'import numpy as np\n'), ((615, 712), 'numpy.empty', 'np.empty', (['(0, args.obs_seq + args.pred_seq - 1, args.enviro_pdim[0], args.enviro_pdim\n [1], 3)'], {}), '((0, args.obs_seq + args.pred_seq - 1, args.enviro_pdim[0], args.\n enviro_pdim[1], 3))\n', (623, 712), True, 'import numpy as np\n'), ((5251, 5263), 'numpy.all', 'np.all', (['data'], {}), '(data)\n', (5257, 5263), True, 'import numpy as np\n'), ((5286, 5319), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""""""'}), "(path, delimiter='')\n", (5299, 5319), True, 'import numpy as np\n'), ((5837, 5847), 'maps.Maps', 'Maps', (['data'], {}), '(data)\n', (5841, 5847), False, 'from maps import Maps\n'), ((6140, 6185), 'numpy.reshape', 'np.reshape', (['maps.offsets', '(-1, seq_length, 8)'], {}), '(maps.offsets, (-1, seq_length, 8))\n', (6150, 6185), True, 'import numpy as np\n'), ((6250, 6303), 'numpy.reshape', 'np.reshape', (['maps.sorted_data', '(-1, seq_length + 1, 4)'], {}), '(maps.sorted_data, (-1, seq_length + 1, 4))\n', (6260, 6303), True, 'import numpy as np\n'), ((6373, 6423), 'occupancy.circle_group_grid', 'circle_group_grid', (['offsets', 'maps.sorted_data', 'size'], {}), '(offsets, maps.sorted_data, size)\n', (6390, 6423), False, 'from occupancy import circle_group_grid\n'), ((839, 898), 'numpy.load', 'np.load', (["('../processed_data/train/%s.npz' % dataset_list[0])"], {}), "('../processed_data/train/%s.npz' % dataset_list[0])\n", (846, 898), True, 'import numpy as np\n'), ((1118, 1161), 'numpy.concatenate', 'np.concatenate', (['(offsets, _offsets)'], {'axis': '(0)'}), '((offsets, _offsets), axis=0)\n', (1132, 1161), True, 'import numpy as np\n'), ((1186, 1233), 'numpy.concatenate', 'np.concatenate', (['(traj_data, _traj_data)'], {'axis': '(0)'}), '((traj_data, _traj_data), axis=0)\n', (1200, 1233), True, 'import numpy as np\n'), ((1258, 1305), 'numpy.concatenate', 'np.concatenate', (['(occupancy, _occupancy)'], {'axis': '(0)'}), '((occupancy, _occupancy), axis=0)\n', (1272, 1305), True, 'import numpy as np\n'), ((2485, 2519), 'numpy.empty', 'np.empty', (['(0, args.obs_seq - 1, 8)'], {}), '((0, args.obs_seq - 1, 8))\n', (2493, 2519), True, 'import numpy as np\n'), ((2538, 2568), 'numpy.empty', 'np.empty', (['(0, args.obs_seq, 4)'], {}), '((0, args.obs_seq, 4))\n', (2546, 2568), True, 'import numpy as np\n'), ((2589, 2665), 'numpy.empty', 'np.empty', (['(0, args.obs_seq - 1, args.enviro_pdim[0], args.enviro_pdim[1], 3)'], {}), '((0, args.obs_seq - 1, args.enviro_pdim[0], args.enviro_pdim[1], 3))\n', (2597, 2665), True, 'import numpy as np\n'), ((3530, 3588), 'os.path.exists', 'os.path.exists', (['"""../processed_data/train/train_merged.npz"""'], {}), "('../processed_data/train/train_merged.npz')\n", (3544, 3588), False, 'import os\n'), ((3707, 3822), 'numpy.savez', 'np.savez', (['"""../processed_data/train/train_merged.npz"""'], {'offsets': 'offsets', 'traj_data': 'traj_data', 'occupancy': 'occupancy'}), "('../processed_data/train/train_merged.npz', offsets=offsets,\n traj_data=traj_data, occupancy=occupancy)\n", (3715, 3822), True, 'import numpy as np\n'), ((5736, 5771), 
'augmentation.rotation', 'rotation', (['data[:, 2:4]', '(r / aug_num)'], {}), '(data[:, 2:4], r / aug_num)\n', (5744, 5771), False, 'from augmentation import rotation\n'), ((2789, 2844), 'numpy.load', 'np.load', (["('../processed_data/challenge/%s.npz' % dataset)"], {}), "('../processed_data/challenge/%s.npz' % dataset)\n", (2796, 2844), True, 'import numpy as np\n'), ((2968, 3011), 'numpy.concatenate', 'np.concatenate', (['(offsets, _offsets)'], {'axis': '(0)'}), '((offsets, _offsets), axis=0)\n', (2982, 3011), True, 'import numpy as np\n'), ((3036, 3083), 'numpy.concatenate', 'np.concatenate', (['(traj_data, _traj_data)'], {'axis': '(0)'}), '((traj_data, _traj_data), axis=0)\n', (3050, 3083), True, 'import numpy as np\n'), ((3108, 3155), 'numpy.concatenate', 'np.concatenate', (['(occupancy, _occupancy)'], {'axis': '(0)'}), '((occupancy, _occupancy), axis=0)\n', (3122, 3155), True, 'import numpy as np\n'), ((6585, 6705), 'numpy.savez', 'np.savez', (["('../processed_data/%s/%s' % (dirname, dataname))"], {'offsets': 'offsets', 'traj_data': 'traj_data', 'occupancy': 'occupancy'}), "('../processed_data/%s/%s' % (dirname, dataname), offsets=offsets,\n traj_data=traj_data, occupancy=occupancy)\n", (6593, 6705), True, 'import numpy as np\n'), ((6802, 6813), 'time.time', 'time.time', ([], {}), '()\n', (6811, 6813), False, 'import time\n'), ((6908, 7037), 'numpy.savez', 'np.savez', (["('../processed_data/%s/%s_%.0f' % (dirname, dataname, r))"], {'offsets': 'offsets', 'traj_data': 'traj_data', 'occupancy': 'occupancy'}), "('../processed_data/%s/%s_%.0f' % (dirname, dataname, r), offsets=\n offsets, traj_data=traj_data, occupancy=occupancy)\n", (6916, 7037), True, 'import numpy as np\n'), ((7133, 7144), 'time.time', 'time.time', ([], {}), '()\n', (7142, 7144), False, 'import time\n'), ((1661, 1712), 'numpy.load', 'np.load', (["('../processed_data/train/%s.npz' % dataset)"], {}), "('../processed_data/train/%s.npz' % dataset)\n", (1668, 1712), True, 'import numpy as np\n'), ((1956, 1999), 'numpy.concatenate', 'np.concatenate', (['(offsets, _offsets)'], {'axis': '(0)'}), '((offsets, _offsets), axis=0)\n', (1970, 1999), True, 'import numpy as np\n'), ((2032, 2079), 'numpy.concatenate', 'np.concatenate', (['(traj_data, _traj_data)'], {'axis': '(0)'}), '((traj_data, _traj_data), axis=0)\n', (2046, 2079), True, 'import numpy as np\n'), ((2112, 2159), 'numpy.concatenate', 'np.concatenate', (['(occupancy, _occupancy)'], {'axis': '(0)'}), '((occupancy, _occupancy), axis=0)\n', (2126, 2159), True, 'import numpy as np\n'), ((5412, 5426), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (5420, 5426), True, 'import numpy as np\n')]
|
# Copyright 2017 Google Inc. and Skytruth Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from collections import namedtuple
import logging
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.metrics as metrics
import utility
class ModelBase(object):
__metaclass__ = abc.ABCMeta
@property
def number_of_steps(self):
"""Number of training examples to use"""
return 500000
@property
def use_ranges_for_training(self):
"""Choose features overlapping with provided ranges during training"""
return False
@property
def batch_size(self):
return 64
@property
def max_window_duration_seconds(self):
""" Window max duration in seconds. A value of zero indicates that
we would instead like to choose a fixed-length window. """
return None
# We often allocate a much smaller buffer than would fit the specified time
    # sampled at 5-minute intervals, on the basis that the sample is almost
# always much more sparse.
@property
def window_max_points(self):
return None
@property
def min_viable_timeslice_length(self):
return 500
@property
def max_replication_factor(self):
return 100.0
def __init__(self, num_feature_dimensions, vessel_metadata):
self.num_feature_dimensions = num_feature_dimensions
if vessel_metadata:
self.vessel_metadata = vessel_metadata
self.fishing_ranges_map = vessel_metadata.fishing_ranges_map
else:
self.vessel_metadata = None
self.fishing_ranges_map = None
self.training_objectives = None
def build_training_file_list(self, base_feature_path, split):
boundary = 1 if (split == utility.TRAINING_SPLIT) else self.batch_size
random_state = np.random.RandomState()
training_mmsis = self.vessel_metadata.weighted_training_list(
random_state,
split,
self.max_replication_factor,
boundary=boundary)
return [
'%s/%s.tfrecord' % (base_feature_path, mmsi)
for mmsi in training_mmsis
]
@staticmethod
def read_metadata(all_available_mmsis,
metadata_file,
fishing_ranges,
fishing_upweight=1.0):
return utility.read_vessel_multiclass_metadata(
all_available_mmsis, metadata_file, fishing_ranges,
fishing_upweight)
@abc.abstractmethod
def build_training_net(self, features, timestamps, mmsis):
"""Build net suitable for training model
Args:
features : features to feed into net
timestamps: a list of timestamps, one for each feature point.
mmsis: a list of mmsis, one for each batch element.
Returns:
TrainNetInfo
"""
optimizer = trainers = None
return optimizer, trainers
@abc.abstractmethod
def build_inference_net(self, features, timestamps, mmsis):
"""Build net suitable for running inference on model
Args:
features : features to feed into net
timestamps: a list of timestamps, one for each feature point.
mmsis: a list of mmsis, one for each batch element.
Returns:
A list of objects derived from EvaluationBase providing
functionality to log evaluation statistics as well as to
return the results of inference as JSON.
"""
return []
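# Illustrative subclass outline (added sketch; the class name is hypothetical, not from this file):
# a concrete model derives from ModelBase and fills in the two abstract builders described above.
# class MyVesselModel(ModelBase):
#     def build_training_net(self, features, timestamps, mmsis):
#         ...  # assemble the network and return (optimizer, trainers)
#     def build_inference_net(self, features, timestamps, mmsis):
#         ...  # return a list of EvaluationBase-derived objectives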
|
[
"utility.read_vessel_multiclass_metadata",
"numpy.random.RandomState"
] |
[((2400, 2423), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2421, 2423), True, 'import numpy as np\n'), ((2931, 3044), 'utility.read_vessel_multiclass_metadata', 'utility.read_vessel_multiclass_metadata', (['all_available_mmsis', 'metadata_file', 'fishing_ranges', 'fishing_upweight'], {}), '(all_available_mmsis, metadata_file,\n fishing_ranges, fishing_upweight)\n', (2970, 3044), False, 'import utility\n')]
|
# cluster_features.py
#
# Based on snippets here:
# http://scikit-learn.org/dev/auto_examples/cluster/plot_cluster_iris.html#sphx-glr-auto-examples-cluster-plot-cluster-iris-py
from __future__ import print_function
import time
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
def get_payment_data(csv_filename):
df = pd.read_csv(csv_filename, header = 0)
# put the original column names in a python list
feature_names = list(df.columns.values)
# create a numpy array with the numeric values for input into scikit-learn
numpy_array = df.as_matrix()
Y = numpy_array[:,24]
X = numpy_array[:, [i for i in xrange(np.shape(numpy_array)[1]-1)]]
return (X, Y, feature_names)
if __name__ == "__main__":
(X, Y, feature_names) = get_payment_data("default_on_payment.csv")
print('Shape of the inputs: %s, shape of the labels: %s' % (str(X.shape), str(Y.shape)))
# split into a training and testing set
# Training instances: 22,500
# Test instances: 7500
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25, random_state=42)
print('Train set inputs: %s' % (str(X_train.shape)))
print('Test set inputs %s' % (str(X_test.shape)))
print('Train set labels: %s' % (str(Y_train.shape)))
print('Test set labels: %s' % (str(Y_test.shape)))
# ---------------------------------------------------------------------------
# Scaling
# ----------------------------------------------------------------------------
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# ---------------------------------------------------------------------------
# PCA Transformation of Features
# ----------------------------------------------------------------------------
pca = PCA(n_components=3)
X_train_new = pca.fit_transform(X_train, y=None)
fig = plt.figure(1)
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
#ax.scatter(X_train_new[:, 0], X_train_new[:, 1], X_train_new[:, 2],c=labels.astype(np.float))
ax.scatter(X_train_new[:, 0], X_train_new[:, 1], X_train_new[:, 2])
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('PC0')
ax.set_ylabel('PC1')
ax.set_zlabel('PC2')
plt.show()
# ---------------------------------------------------------------------------
# K-Means Clustering
# ----------------------------------------------------------------------------
num_clusters = 2
classifier_KMC = KMeans(n_clusters = num_clusters, n_jobs=-1, random_state=1)
start_time = time.time()
classifier_KMC.fit(X_train, y=None)
end_time = time.time()
labels1 = classifier_KMC.labels_
# Classify the train and test set vectors
train_labels = classifier_KMC.predict(X_train)
test_labels = classifier_KMC.predict(X_test)
# Returns 68.9% on training set
accuracy_KMC_train = accuracy_score(Y_train, train_labels)
accuracy_KMC_test = accuracy_score(Y_test, test_labels)
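    # Added caveat: k-means assigns arbitrary cluster ids, so cluster 0/1 may be swapped relative
    # to the default-on-payment labels; for two clusters a label-agnostic score would be
    # max(accuracy_KMC_test, 1 - accuracy_KMC_test).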
# Plotting
fig = plt.figure(1)
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
classifier_KMC.fit(X_train)
labels = classifier_KMC.labels_
#ax.scatter(X_train[:, 1], X_train[:, 2], X_train[:, 3], X_train[:, 3],c=labels.astype(np.float))
ax.scatter(X_train[:, 0], X_train[:, 1],c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('F0')
ax.set_ylabel('F1')
ax.set_zlabel('F2')
plt.show()
## Plot the ground truth
#fig = plt.figure(1)
#plt.clf()
#ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
#plt.cla()
# predictions_KMC = classifier_KMC.predict(X_test)
|
[
"sklearn.cross_validation.train_test_split",
"sklearn.preprocessing.StandardScaler",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"sklearn.cluster.KMeans",
"sklearn.metrics.accuracy_score",
"time.time",
"numpy.shape",
"matplotlib.pyplot.figure",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.cla"
] |
[((695, 730), 'pandas.read_csv', 'pd.read_csv', (['csv_filename'], {'header': '(0)'}), '(csv_filename, header=0)\n', (706, 730), True, 'import pandas as pd\n'), ((1418, 1473), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(X, Y, test_size=0.25, random_state=42)\n', (1434, 1473), False, 'from sklearn.cross_validation import train_test_split\n'), ((1894, 1910), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1908, 1910), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2231, 2250), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(3)'}), '(n_components=3)\n', (2234, 2250), False, 'from sklearn.decomposition import PCA\n'), ((2319, 2332), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2329, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2394), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'rect': '[0, 0, 0.95, 1]', 'elev': '(48)', 'azim': '(134)'}), '(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)\n', (2348, 2394), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2749, 2759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2757, 2759), True, 'import matplotlib.pyplot as plt\n'), ((2994, 3052), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_clusters', 'n_jobs': '(-1)', 'random_state': '(1)'}), '(n_clusters=num_clusters, n_jobs=-1, random_state=1)\n', (3000, 3052), False, 'from sklearn.cluster import KMeans\n'), ((3073, 3084), 'time.time', 'time.time', ([], {}), '()\n', (3082, 3084), False, 'import time\n'), ((3140, 3151), 'time.time', 'time.time', ([], {}), '()\n', (3149, 3151), False, 'import time\n'), ((3399, 3436), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_train', 'train_labels'], {}), '(Y_train, train_labels)\n', (3413, 3436), False, 'from sklearn.metrics import accuracy_score\n'), ((3461, 3496), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'test_labels'], {}), '(Y_test, test_labels)\n', (3475, 3496), False, 'from sklearn.metrics import accuracy_score\n'), ((3526, 3539), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3536, 3539), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3553), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3551, 3553), True, 'import matplotlib.pyplot as plt\n'), ((3563, 3615), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'rect': '[0, 0, 0.95, 1]', 'elev': '(48)', 'azim': '(134)'}), '(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)\n', (3569, 3615), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((3619, 3628), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3626, 3628), True, 'import matplotlib.pyplot as plt\n'), ((4051, 4061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4059, 4061), True, 'import matplotlib.pyplot as plt\n'), ((1013, 1034), 'numpy.shape', 'np.shape', (['numpy_array'], {}), '(numpy_array)\n', (1021, 1034), True, 'import numpy as np\n')]
|
import numpy as np
from numba import jit
import pyflann
from petsc4py import PETSc
from mpi4py import MPI
from speclus4py.types import DataObject, DataType, GraphType, OperatorType, OperatorContainer
@jit(nopython=True)
def get_global_index(x, y, ydim):
return y + x * ydim
@jit(nopython=True)
def get_global_index_volumetric(x, y, z, xdim, ydim):
return x + xdim * (y + z * ydim)
@jit(nopython=True)
def compute_gradient(v1, v2, sigma: float):
abs = np.abs(v1 - v2)
return np.exp(-abs * abs / (2. * sigma * sigma))
@jit(nopython=True)
def compute_gradient_norm(v1, v2, sigma: float):
norm = np.linalg.norm(v1 - v2)
return np.exp(-norm * norm / (2. * sigma * sigma))
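# Both helpers above implement a Gaussian similarity kernel: identical inputs yield 1.0 and the
# value decays with distance, e.g. for sigma = 0.5 and ||v1 - v2|| = 1 the similarity is
# exp(-2) ~= 0.135 (illustrative numbers, not taken from the original code).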
class OperatorAssembler(DataObject, OperatorContainer):
def __init__(self, comm=MPI.COMM_WORLD, verbose=False):
DataObject.__init__(self, comm, verbose)
OperatorContainer.__init__(self)
self.__graph_type = GraphType.DIRECTED
@property
def graph_type(self) -> GraphType:
return self.__graph_type
@graph_type.setter
def graph_type(self, t: GraphType):
self.__graph_type = t
def setSimilarityFunc(self, fn, params):
self.__similarity_measure_fn = fn
self.__similarity_measure_params = params
def reset(self):
OperatorContainer.reset(self)
def __construct_adjacency_matrix_general_data(self):
data = self.getData()[0]
# determine dimension of a problem
N = data.shape[0]
# building index (FLANN - Fast Library for Approximate Nearest Neighbors)
pyflann.set_distance_type('euclidean')
flann = pyflann.FLANN()
flann.build_index(data)
# create matrix object
self.mat_adj = PETSc.Mat()
self.mat_adj.create(self.comm)
self.mat_adj.setSizes([N, N])
self.mat_adj.setType(self.mat_type)
if self.graph_type == GraphType.DIRECTED:
self.__construct_adjacency_matrix_general_data_directed_graph(flann)
else:
self.__construct_adjacency_matrix_general_data_undirected_graph(flann)
# finalizing assembly of adjacency matrix
self.mat_adj.assemble()
del flann
def __construct_adjacency_matrix_general_data_directed_graph(self, flann):
self.mat_adj.setPreallocationNNZ(self.connectivity)
self.mat_adj.setFromOptions()
self.mat_adj.setUp()
# Get function for measuring similarity and its parameters
sim_func, sim_func_params = self.getSimilarityMeasure()
if sim_func is None:
sim_func = compute_gradient_norm
if sim_func_params == PETSc.DEFAULT:
sim_func_params = 0.5
data = self.getData()[0]
# building adjacency matrix of similarity graph
i_start, i_end = self.mat_adj.getOwnershipRange()
for I in range(i_start, i_end):
v1 = data[I]
# find nearest neighbours to sample v1
            # the query vertex itself is sometimes returned, so we look up n+1 nearest neighbours
result, dist = flann.nn_index(v1, self.connectivity + 1)
used_nn = 0
for J in range(0, self.connectivity + 1):
idx = result[0, J]
if idx != I and used_nn < self.connectivity:
v2 = data[result[0, J]]
g = sim_func(v1, v2, sim_func_params)
if g > 0.:
self.mat_adj[I, idx] = g
used_nn += 1
elif used_nn >= self.connectivity:
break
def __construct_adjacency_matrix_general_data_undirected_graph(self, flann):
self.mat_adj.setFromOptions()
self.mat_adj.setUp()
# Get function for measuring similarity and its parameters
sim_func, sim_func_params = self.getSimilarityMeasure()
if sim_func is None:
sim_func = compute_gradient_norm
if sim_func_params == PETSc.DEFAULT:
sim_func_params = 0.5
data = self.getData()[0]
# building adjacency matrix of similarity graph
i_start, i_end = self.mat_adj.getOwnershipRange()
for I in range(i_start, i_end):
v1 = data[I]
# find nearest neighbours to sample v1
            # the query vertex itself is sometimes returned, so we look up n+1 nearest neighbours
result, dist = flann.nn_index(v1, self.connectivity + 1)
for J in range(0, self.connectivity + 1):
idx = result[0, J]
if idx != I:
v2 = data[result[0, J]]
g = sim_func(v1, v2, sim_func_params)
if g > 0.:
self.mat_adj[I, idx] = g
self.mat_adj[idx, I] = g
def __construct_adjacency_matrix_vol_img(self):
if self.connectivity != 6 and self.connectivity != 18 and self.connectivity != 26:
raise Exception('Connectivity (con) must be set to 6, 18, or 26')
# Get function for measuring similarity and its parameters
sim_func, sim_func_params = self.getSimilarityMeasure()
if sim_func is None:
sim_func = compute_gradient
if sim_func_params == PETSc.DEFAULT:
sim_func_params = 0.5
data = self.getData()[0]
# determine dimension of a problem
dims = data.GetDimensions()
dim_x = dims[0] - 1
dim_y = dims[1] - 1
dim_z = dims[2] - 1
N = dim_x * dim_y * dim_z
# create matrix object
self.mat_adj = PETSc.Mat()
self.mat_adj.create(self.comm)
self.mat_adj.setSizes([N, N])
self.mat_adj.setType(self.mat_type)
self.mat_adj.setPreallocationNNZ(self.connectivity)
self.mat_adj.setFromOptions()
self.mat_adj.setUp()
# compute local derivatives on structured non-uniform grid that is determined using sigma and
# connectivity of derivatives (6, 18, or 26)
data_scalars = data.GetCellData().GetScalars()
i_start, i_end = self.mat_adj.getOwnershipRange()
for I in range(i_start, i_end):
# determine (x, y, z)-coordinates
z = I // (dim_x * dim_y)
i = I - z * dim_x * dim_y
y = i // dim_x
x = i - y * dim_x
p1 = get_global_index_volumetric(x, y, z, dim_x, dim_y)
v1 = data_scalars.GetTuple1(p1) / 255.
if z > 0:
if self.connectivity > 6 and y > 0:
if self.connectivity == 26 and x > 0:
p2 = get_global_index_volumetric(x - 1, y - 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y - 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity == 26 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y - 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x > 0:
p2 = get_global_index_volumetric(x - 1, y, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and y < dim_y - 1:
if self.connectivity == 26 and x > 0:
p2 = get_global_index_volumetric(x - 1, y + 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y + 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity == 26 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y + 1, z - 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if y > 0:
if self.connectivity > 6 and x > 0:
p2 = get_global_index_volumetric(x - 1, y - 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y - 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y - 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if x > 0:
p2 = get_global_index_volumetric(x - 1, y, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if y < dim_y - 1:
if self.connectivity > 6 and x > 0:
p2 = get_global_index_volumetric(x - 1, y + 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y + 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y + 1, z, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if z < dim_z - 1:
if self.connectivity > 6 and y > 0:
if self.connectivity == 26 and x > 0:
p2 = get_global_index_volumetric(x - 1, y - 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y - 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity == 26 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y - 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x > 0:
p2 = get_global_index_volumetric(x - 1, y, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity > 6 and y < dim_y - 1:
if self.connectivity == 26 and x > 0:
p2 = get_global_index_volumetric(x - 1, y + 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
p2 = get_global_index_volumetric(x, y + 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
if self.connectivity == 26 and x < dim_x - 1:
p2 = get_global_index_volumetric(x + 1, y + 1, z + 1, dim_x, dim_y)
v2 = data_scalars.GetTuple1(p2) / 255.
g = sim_func(v1, v2, sim_func_params)
self.mat_adj[p1, p2] = g
# finalizing assembly of adjacency matrix
self.mat_adj.assemble()
def __construct_adjacency_matrix_img(self):
if self.connectivity != 4 and self.connectivity != 8:
PETSc.Sys.Print('Connectivity (con) must be set to 4 or 8')
raise PETSc.Error(62)
rows = self.data.shape[0]
cols = self.data.shape[1]
N = rows * cols
# Get function for measuring similarity and its parameters
sim_func, sim_func_params = self.getSimilarityMeasure()
if sim_func is None:
if len(self.data.shape) == 3:
sim_func = compute_gradient_norm
else:
sim_func = compute_gradient
if sim_func_params == PETSc.DEFAULT:
sim_func_params = 0.5
data = self.getData()[0]
# create matrix object
self.mat_adj = PETSc.Mat()
self.mat_adj.create(self.comm)
self.mat_adj.setSizes([N, N])
self.mat_adj.setType(self.mat_type)
self.mat_adj.setPreallocationNNZ(self.connectivity)
self.mat_adj.setFromOptions()
self.mat_adj.setUp()
i_start, i_end = self.mat_adj.getOwnershipRange()
for I in range(i_start, i_end):
# determine (x, y) coordinates
x = I // cols
y = I - x * cols
p1 = I
v1 = self.data[x, y] / 255.
if x > 0:
if y > 0 and self.connectivity == 8:
p2 = get_global_index(x - 1, y - 1, cols)
v2 = data[x - 1, y - 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
p2 = get_global_index(x - 1, y, cols)
v2 = data[x - 1, y] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if y < cols - 1 and self.connectivity == 8:
p2 = get_global_index(x - 1, y + 1, cols)
v2 = data[x - 1, y + 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if y > 0:
p2 = get_global_index(x, y - 1, cols)
v2 = data[x, y - 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if y < cols - 1:
p2 = get_global_index(x, y + 1, cols)
v2 = data[x, y + 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if x < rows - 1:
if y > 0 and self.connectivity == 8:
p2 = get_global_index(x + 1, y - 1, cols)
v2 = data[x + 1, y - 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
p2 = get_global_index(x + 1, y, cols)
v2 = data[x + 1, y] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
if y < cols - 1 and self.connectivity == 8:
p2 = get_global_index(x + 1, y + 1, cols)
v2 = data[x + 1, y + 1] / 255.
self.mat_adj[p1, p2] = sim_func(v1, v2, sim_func_params)
# finalizing assembly of adjacency matrix
self.mat_adj.assemble()
def assembly(self):
self.reset()
data_type = self.getData()[1]
if self.fn_similarity_params is not None and self.verbose:
if type(self.fn_similarity_params) == float:
str_params = ', param=%.2f' % self.fn_similarity_params
else:
str_params = ', params=['
str_params += ''.join('{}, '.format(k) for k in self.fn_similarity_params)
str_params = str_params[:-2] + ']'
else:
str_params = ''
if data_type == DataType.IMG:
if self.connectivity == PETSc.DEFAULT:
self.connectivity = 4
if self.verbose:
s = 'Construct operator (%s, GRAPH_%s) for image: connectivity=%d'
v = (self.operator_type.name, GraphType.UNDIRECTED.name, self.connectivity)
PETSc.Sys.Print(s % v + str_params)
self.__construct_adjacency_matrix_img()
elif data_type == DataType.VOL_IMG:
if self.connectivity == PETSc.DEFAULT:
self.connectivity = 6
if self.verbose:
s = 'Construct operator (%s, GRAPH_%s) for volumetric image: connectivity=%d'
v = (self.operator_type.name, self.graph_type.name, self.connectivity)
PETSc.Sys.Print(s % v + str_params)
self.__construct_adjacency_matrix_vol_img()
else:
if self.connectivity == PETSc.DEFAULT:
self.connectivity = 3
if self.verbose:
s = 'Construct operator (%s, GRAPH_%s) for general data: connectivity=%d'
v = (self.operator_type.name, self.graph_type.name, self.connectivity)
PETSc.Sys.Print(s % v + str_params)
self.__construct_adjacency_matrix_general_data()
# if data_type == DataType.IMG:
# if self.connectivity == PETSc.DEFAULT:
# self.connectivity = 4
#
# if self.verbose:
# PETSc.Sys.Print(
# 'Construct operator (%s) for image: connectivity=%d, sigma=%2g'
# % (self.operator_type.name, self.connectivity, self.sigma)
# )
#
# self.__construct_adjacency_matrix_img()
# elif data_type == DataType.VOL_IMG: # volumetric image
# if self.connectivity == PETSc.DEFAULT:
# self.connectivity = 6
#
# if self.verbose:
# if self.fn_similarity_params is not None:
# s = 'Construct operator (%s, GRAPH_ %s) for volumetric image: connectivity=%d, '
# v = (self.operator_type.name, self.graph_type.name, self.connectivity)
# sv = s % v
# if type(self.fn_similarity_params) == float:
# sp = 'param=%.2f' % self.fn_similarity_params
# else:
# sp = 'params=('
# sp += ''.join('{}, '.format(k) for k in self.fn_similarity_params)
# sp = sp[:-2] + ')'
# sv += sp
# else:
# s = 'Construct operator (%s, GRAPH_%s) for volumetric image: connectivity=%d params=None'
# v = (self.operator_type.name, self.graph_type.name, self.connectivity)
# sv = s % v
# PETSc.Sys.Print(sv)
#
# exit(-1)
#
# self.__construct_adjacency_matrix_vol_img()
# else:
# if self.connectivity == PETSc.DEFAULT:
# self.connectivity = 6
#
# if self.verbose:
# PETSc.Sys.Print(
# 'Construct operator (%s) for general data: connectivity=%d, params=%2g'
# % (self.operator_type.name, self.connectivity, self.__similarity_measure_params)
# )
#
# self.__construct_adjacency_matrix_general_data()
N = self.mat_adj.getSize()[0]
# compute degree matrix D_i = deg(v_i)
self.vec_diag = self.mat_adj.createVecLeft()
self.mat_adj.getRowSum(self.vec_diag)
        if self.operator_type != OperatorType.MARKOV_1 and self.operator_type != OperatorType.MARKOV_2:
self.mat_op = PETSc.Mat().createAIJ((N, N), comm=self.comm)
self.mat_op.setPreallocationNNZ(self.connectivity + 1)
self.mat_op.setFromOptions()
self.mat_op.setUp()
self.mat_op.setDiagonal(self.vec_diag)
self.mat_op.assemble()
# L = D - A
self.mat_op.axpy(-1., self.mat_adj)
else: # P = D^-1 A (MARKOV_1) or Ng, Weiss (MARKOV_2)
self.mat_op = self.mat_adj.duplicate()
self.mat_op.setFromOptions()
self.mat_op.setType(self.mat_type)
self.mat_op.setUp()
self.mat_op.copy(self.mat_op)
if self.operator_type != OperatorType.LAPLACIAN_UNNORMALIZED:
tmp_vec = self.vec_diag.duplicate()
self.vec_diag.copy(tmp_vec)
if self.operator_type == OperatorType.LAPLACIAN_NORMALIZED or self.operator_type == OperatorType.MARKOV_2:
tmp_vec.sqrtabs()
tmp_vec.reciprocal()
self.mat_op.diagonalScale(tmp_vec, tmp_vec)
elif self.operator_type == OperatorType.MARKOV_1:
tmp_vec.reciprocal()
self.mat_op.diagonalScale(tmp_vec)
else: # L_rw
tmp_vec.reciprocal()
self.mat_op.diagonalScale(tmp_vec) # left diagonal scale
del tmp_vec
self.mat_op.assemble()
|
[
"petsc4py.PETSc.Mat",
"speclus4py.types.DataObject.__init__",
"speclus4py.types.OperatorContainer.__init__",
"numpy.abs",
"speclus4py.types.OperatorContainer.reset",
"petsc4py.PETSc.Sys.Print",
"numba.jit",
"numpy.exp",
"numpy.linalg.norm",
"pyflann.FLANN",
"pyflann.set_distance_type",
"petsc4py.PETSc.Error"
] |
[((205, 223), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (208, 223), False, 'from numba import jit\n'), ((285, 303), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (288, 303), False, 'from numba import jit\n'), ((398, 416), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (401, 416), False, 'from numba import jit\n'), ((543, 561), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (546, 561), False, 'from numba import jit\n'), ((471, 486), 'numpy.abs', 'np.abs', (['(v1 - v2)'], {}), '(v1 - v2)\n', (477, 486), True, 'import numpy as np\n'), ((498, 540), 'numpy.exp', 'np.exp', (['(-abs * abs / (2.0 * sigma * sigma))'], {}), '(-abs * abs / (2.0 * sigma * sigma))\n', (504, 540), True, 'import numpy as np\n'), ((622, 645), 'numpy.linalg.norm', 'np.linalg.norm', (['(v1 - v2)'], {}), '(v1 - v2)\n', (636, 645), True, 'import numpy as np\n'), ((657, 701), 'numpy.exp', 'np.exp', (['(-norm * norm / (2.0 * sigma * sigma))'], {}), '(-norm * norm / (2.0 * sigma * sigma))\n', (663, 701), True, 'import numpy as np\n'), ((827, 867), 'speclus4py.types.DataObject.__init__', 'DataObject.__init__', (['self', 'comm', 'verbose'], {}), '(self, comm, verbose)\n', (846, 867), False, 'from speclus4py.types import DataObject, DataType, GraphType, OperatorType, OperatorContainer\n'), ((876, 908), 'speclus4py.types.OperatorContainer.__init__', 'OperatorContainer.__init__', (['self'], {}), '(self)\n', (902, 908), False, 'from speclus4py.types import DataObject, DataType, GraphType, OperatorType, OperatorContainer\n'), ((1306, 1335), 'speclus4py.types.OperatorContainer.reset', 'OperatorContainer.reset', (['self'], {}), '(self)\n', (1329, 1335), False, 'from speclus4py.types import DataObject, DataType, GraphType, OperatorType, OperatorContainer\n'), ((1587, 1625), 'pyflann.set_distance_type', 'pyflann.set_distance_type', (['"""euclidean"""'], {}), "('euclidean')\n", (1612, 1625), False, 'import pyflann\n'), ((1642, 1657), 'pyflann.FLANN', 'pyflann.FLANN', ([], {}), '()\n', (1655, 1657), False, 'import pyflann\n'), ((1745, 1756), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (1754, 1756), False, 'from petsc4py import PETSc\n'), ((5595, 5606), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (5604, 5606), False, 'from petsc4py import PETSc\n'), ((15149, 15160), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (15158, 15160), False, 'from petsc4py import PETSc\n'), ((14480, 14539), 'petsc4py.PETSc.Sys.Print', 'PETSc.Sys.Print', (['"""Connectivity (con) must be set to 4 or 8"""'], {}), "('Connectivity (con) must be set to 4 or 8')\n", (14495, 14539), False, 'from petsc4py import PETSc\n'), ((14558, 14573), 'petsc4py.PETSc.Error', 'PETSc.Error', (['(62)'], {}), '(62)\n', (14569, 14573), False, 'from petsc4py import PETSc\n'), ((18402, 18437), 'petsc4py.PETSc.Sys.Print', 'PETSc.Sys.Print', (['(s % v + str_params)'], {}), '(s % v + str_params)\n', (18417, 18437), False, 'from petsc4py import PETSc\n'), ((18853, 18888), 'petsc4py.PETSc.Sys.Print', 'PETSc.Sys.Print', (['(s % v + str_params)'], {}), '(s % v + str_params)\n', (18868, 18888), False, 'from petsc4py import PETSc\n'), ((19275, 19310), 'petsc4py.PETSc.Sys.Print', 'PETSc.Sys.Print', (['(s % v + str_params)'], {}), '(s % v + str_params)\n', (19290, 19310), False, 'from petsc4py import PETSc\n'), ((21894, 21905), 'petsc4py.PETSc.Mat', 'PETSc.Mat', ([], {}), '()\n', (21903, 21905), False, 'from petsc4py import PETSc\n')]
|
from matplotlib.colors import Normalize
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pandas as pd
import numpy as np
from math import pi, log
from scipy.stats import rankdata
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("fp", type=str)
parser.add_argument(
"bounds", type=float, nargs=4, help="lowerbound x, upperbound x, lb y, ub y"
)
args = parser.parse_args()
filepath = args.fp
dims = args.bounds
# === setup problem space, either real or Karpathy toy problem for validation ===
# pspace = np.loadtxt("golf_course_zoom_s1024.txt")
pspace = np.loadtxt(filepath)
# uncomment this line if you want smooth toy-problem
# pspace = G
print(dims)
lbp, ubp, lbb, ubb = dims
# ******************** PLOTTING ****************************************
# ======== establish figs =================
fig = plt.figure()
ax = fig.gca()
# ============= plot problem space bg images ====
cmap = plt.cm.viridis
colors = Normalize(min(pspace.flatten()), max(pspace.flatten()))(pspace)
colors = cmap(colors)
plt.axis('equal')
plt.imshow(
colors,
vmin=min(pspace.flatten()),
vmax=max(pspace.flatten()),
        extent=[lbb, ubb, lbp, ubp],
aspect="auto",
interpolation="none",
origin="lower",
)
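    # Note (added): imshow's extent is [left, right, bottom, top], so the burnDv bounds span the
    # x-axis and the position bounds span the y-axis, matching the axis labels set below.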
ax.set_xlabel("burnDv")
ax.set_ylabel("position")
plt.colorbar()
plt.show()
|
[
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.loadtxt"
] |
[((330, 346), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (344, 346), False, 'from argparse import ArgumentParser\n'), ((736, 756), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {}), '(filepath)\n', (746, 756), True, 'import numpy as np\n'), ((1013, 1025), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1023, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1233, 1250), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1241, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1538, 1552), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1550, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1566, 1568), True, 'import matplotlib.pyplot as plt\n')]
|
"""Classes to run register functions at certain timepoints and run asynchronously"""
import threading
import time
from typing import Any, Callable, Iterable, NoReturn, Union
import numpy as np
import sc3nb
from sc3nb.osc.osc_communication import Bundler, OSCCommunication, OSCMessage
class Event:
"""Stores a timestamp, function and arguments for that function.
    Long-running functions can be wrapped in their own thread.
Parameters
----------
timestamp : float
Time event should be executed
function : Callable[..., None]
Function to be executed
args : Iterable[Any]
Arguments for function
spawn : bool, optional
if True, create new thread for function, by default False
"""
def __init__(
self,
timestamp: float,
function: Callable[..., None],
args: Iterable[Any],
spawn: bool = False,
) -> None:
if spawn:
thread = threading.Thread(target=function, args=args)
function = thread.start
args = ()
self.timestamp = timestamp
self.function = function
self.args = args
def execute(self) -> None:
"""Executes function"""
self.function(*self.args)
def __eq__(self, other):
return self.timestamp == other.timestamp
def __lt__(self, other):
return self.timestamp < other.timestamp
def __le__(self, other):
return self.timestamp <= other.timestamp
def __repr__(self):
return "%s: %s" % (self.timestamp, self.function.__name__)
class TimedQueue:
"""Accumulates events as timestamps and functions.
Executes given functions according to the timestamps
Parameters
----------
relative_time : bool, optional
If True, use relative time, by default False
thread_sleep_time : float, optional
Sleep time in seconds for worker thread, by default 0.001
drop_time_threshold : float, optional
Threshold for execution time of events in seconds.
If this is exceeded the event will be dropped, by default 0.5
"""
def __init__(
self,
relative_time: bool = False,
thread_sleep_time: float = 0.001,
drop_time_threshold: float = 0.5,
) -> None:
self.drop_time_thr = drop_time_threshold
self.start = time.time() if relative_time else 0
self.onset_idx = np.empty((0, 2))
self.event_list = []
self.close_event = threading.Event()
self.lock = threading.Lock()
self.thread = threading.Thread(
target=self.__worker, args=(thread_sleep_time, self.close_event)
) # , daemon=True)
self.thread.start()
def close(self) -> None:
"""Closes event processing without waiting for pending events"""
self.close_event.set()
self.thread.join()
def join(self) -> None:
"""Closes event processing after waiting for pending events"""
self.complete()
self.close_event.set()
self.thread.join()
def complete(self) -> None:
"""Blocks until all pending events have completed"""
while self.event_list:
time.sleep(0.01)
def put(
self,
timestamp: float,
function: Callable[..., None],
args: Iterable[Any] = (),
spawn: bool = False,
) -> None:
"""Adds event to queue
Parameters
----------
timestamp : float
Time (POSIX) when event should be executed
function : Callable[..., None]
Function to be executed
args : Iterable[Any], optional
Arguments to be passed to function, by default ()
spawn : bool, optional
if True, create new sub-thread for function, by default False
Raises
------
TypeError
raised if function is not callable
"""
if not callable(function):
raise TypeError("function argument cannot be called")
if not isinstance(args, tuple):
args = (args,)
new_event = Event(timestamp, function, args, spawn)
with self.lock:
self.event_list.append(new_event)
evlen = len(self.event_list)
if not self.onset_idx.any():
idx = 0
else:
idx = np.searchsorted(self.onset_idx[:, 0], timestamp)
self.onset_idx = np.insert(
self.onset_idx, idx, [timestamp, evlen - 1], axis=0
)
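            # Added note: onset_idx is kept sorted by timestamp via np.searchsorted above, so
            # peek()/pop() can always take the earliest pending event from its front.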
def get(self) -> Event:
"""Get latest event from queue and remove event
Returns
-------
Event
Latest event
"""
event = self.peek()
self.pop()
return event
def peek(self) -> Event:
"""Look up latest event from queue
Returns
-------
Event
Latest event
"""
with self.lock:
return self.event_list[int(self.onset_idx[0][1])]
def empty(self) -> bool:
"""Checks if queue is empty
Returns
-------
bool
            True if queue is empty
        """
        with self.lock:
            return not self.event_list
def pop(self) -> None:
"""Removes latest event from queue"""
with self.lock:
event_idx = int(self.onset_idx[0][1])
self.onset_idx = self.onset_idx[1:]
            # remove 1 from all indices after the popped event
self.onset_idx[:, 1][self.onset_idx[:, 1] > event_idx] -= 1
del self.event_list[event_idx]
def __worker(self, sleep_time: float, close_event: threading.Event) -> NoReturn:
"""Worker function to process events"""
while True:
if close_event.is_set():
break
if self.event_list:
event = self.peek()
if event.timestamp <= time.time() - self.start:
# execute only if not too old
if event.timestamp > time.time() - self.start - self.drop_time_thr:
event.execute()
self.pop()
# sleep_time = event_list[0].timestamp - (time.time() - self.start) - 0.001
time.sleep(sleep_time)
def __repr__(self):
return f"<TimedQueue {self.event_list.__repr__()}>"
def elapse(self, time_delta: float) -> None:
"""Add time delta to the current queue time.
Parameters
----------
time_delta : float
Additional time
"""
self.start += time_delta
class TimedQueueSC(TimedQueue):
"""Timed queue with OSC communication.
Parameters
----------
server : OSCCommunication, optional
        OSC server to handle the bundlers and messages, by default None
relative_time : bool, optional
If True, use relative time, by default False
thread_sleep_time : float, optional
Sleep time in seconds for worker thread, by default 0.001
"""
def __init__(
self,
server: OSCCommunication = None,
relative_time: bool = False,
thread_sleep_time: float = 0.001,
):
super().__init__(relative_time, thread_sleep_time)
self.server = server or sc3nb.SC.get_default().server
def put_bundler(self, onset: float, bundler: Bundler) -> None:
"""Add a Bundler to queue
Parameters
----------
onset : float
Sending timetag of the Bundler
bundler : Bundler
Bundler that will be sent
"""
self.put(onset, bundler.send)
def put_msg(
self, onset: float, msg: Union[OSCMessage, str], msg_params: Iterable[Any]
) -> None:
"""Add a message to queue
Parameters
----------
onset : float
Sending timetag of the message
msg : Union[OSCMessage, str]
OSCMessage or OSC address
msg_params : Iterable[Any]
If msg is str, this will be the parameters of the created OSCMessage
"""
if isinstance(msg, str):
self.put(onset, self.server.msg, args=(msg, msg_params))
else:
self.put(onset, self.server.send, args=(msg,))
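# Usage sketch (added illustration, not part of the original module): with relative_time=True the
# queue's clock starts at construction, so an event given timestamp 1.0 fires roughly one second
# after the queue is created.
#     queue = TimedQueue(relative_time=True)
#     queue.put(1.0, print, args=("fired",))
#     queue.join()  # wait for pending events, then stop the worker thread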
|
[
"threading.Thread",
"numpy.empty",
"numpy.searchsorted",
"sc3nb.SC.get_default",
"time.time",
"threading.Lock",
"time.sleep",
"numpy.insert",
"threading.Event"
] |
[((2418, 2434), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (2426, 2434), True, 'import numpy as np\n'), ((2491, 2508), 'threading.Event', 'threading.Event', ([], {}), '()\n', (2506, 2508), False, 'import threading\n'), ((2530, 2546), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2544, 2546), False, 'import threading\n'), ((2570, 2657), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__worker', 'args': '(thread_sleep_time, self.close_event)'}), '(target=self.__worker, args=(thread_sleep_time, self.\n close_event))\n', (2586, 2657), False, 'import threading\n'), ((959, 1003), 'threading.Thread', 'threading.Thread', ([], {'target': 'function', 'args': 'args'}), '(target=function, args=args)\n', (975, 1003), False, 'import threading\n'), ((2357, 2368), 'time.time', 'time.time', ([], {}), '()\n', (2366, 2368), False, 'import time\n'), ((3202, 3218), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (3212, 3218), False, 'import time\n'), ((4453, 4515), 'numpy.insert', 'np.insert', (['self.onset_idx', 'idx', '[timestamp, evlen - 1]'], {'axis': '(0)'}), '(self.onset_idx, idx, [timestamp, evlen - 1], axis=0)\n', (4462, 4515), True, 'import numpy as np\n'), ((6275, 6297), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (6285, 6297), False, 'import time\n'), ((4375, 4423), 'numpy.searchsorted', 'np.searchsorted', (['self.onset_idx[:, 0]', 'timestamp'], {}), '(self.onset_idx[:, 0], timestamp)\n', (4390, 4423), True, 'import numpy as np\n'), ((7299, 7321), 'sc3nb.SC.get_default', 'sc3nb.SC.get_default', ([], {}), '()\n', (7319, 7321), False, 'import sc3nb\n'), ((5936, 5947), 'time.time', 'time.time', ([], {}), '()\n', (5945, 5947), False, 'import time\n'), ((6053, 6064), 'time.time', 'time.time', ([], {}), '()\n', (6062, 6064), False, 'import time\n')]
|
"""
Unit and regression test for the kissim.encoding.features.sitealign.SiteAlignFeature class.
"""
from pathlib import Path
import pytest
import numpy as np
import pandas as pd
from opencadd.databases.klifs import setup_local
from kissim.io import PocketBioPython
from kissim.encoding.features import SiteAlignFeature
PATH_TEST_DATA = Path(__name__).parent / "kissim" / "tests" / "data"
LOCAL = setup_local(PATH_TEST_DATA / "KLIFS_download")
class TestsSiteAlignFeature:
"""
Test SiteAlignFeature class methods.
"""
@pytest.mark.parametrize(
"structure_klifs_id, klifs_session, feature_name",
[
(12347, LOCAL, "hba"),
(12347, LOCAL, "hbd"),
(12347, LOCAL, "size"),
(12347, LOCAL, "charge"),
(12347, LOCAL, "aliphatic"),
(12347, LOCAL, "aromatic"),
],
)
def test_from_pocket(self, structure_klifs_id, klifs_session, feature_name):
"""
Test if SiteAlignFeature can be set from a Pocket object.
        Test object attributes.
"""
pocket = PocketBioPython.from_structure_klifs_id(
structure_klifs_id, klifs_session=klifs_session
)
feature = SiteAlignFeature.from_pocket(pocket, feature_name)
assert isinstance(feature, SiteAlignFeature)
# Test class attributes
assert feature.name == structure_klifs_id
for residue_id, residue_ix, residue_name, category in zip(
feature._residue_ids, feature._residue_ixs, feature._residue_names, feature._categories
):
if residue_id is not None:
assert isinstance(residue_id, int)
assert isinstance(residue_ix, int)
assert isinstance(feature_name, str)
assert isinstance(category, float)
@pytest.mark.parametrize(
"structure_klifs_id, klifs_session, feature_name",
[(12347, LOCAL, "xxx")],
)
def test_from_pocket_raises(self, structure_klifs_id, klifs_session, feature_name):
"""
Test if SiteAlignFeature raises error when passed an invalid feature name.
"""
with pytest.raises(KeyError):
pocket = PocketBioPython.from_structure_klifs_id(
structure_klifs_id, klifs_session=klifs_session
)
SiteAlignFeature.from_pocket(pocket, feature_name)
@pytest.mark.parametrize(
"structure_klifs_id, klifs_session",
[(12347, LOCAL)],
)
def test_values(self, structure_klifs_id, klifs_session):
"""
Test class property: values.
"""
pocket = PocketBioPython.from_structure_klifs_id(
structure_klifs_id, klifs_session=klifs_session
)
# Use example feature type
feature = SiteAlignFeature.from_pocket(pocket, feature_name="hba")
assert isinstance(feature.values, list)
for value in feature.values:
assert isinstance(value, float)
@pytest.mark.parametrize(
"structure_klifs_id, klifs_session",
[(12347, LOCAL)],
)
def test_details(self, structure_klifs_id, klifs_session):
"""
Test class property: details.
"""
pocket = PocketBioPython.from_structure_klifs_id(
structure_klifs_id, klifs_session=klifs_session
)
# Use example feature type
feature = SiteAlignFeature.from_pocket(pocket, feature_name="hba")
assert isinstance(feature.details, pd.DataFrame)
assert feature.details.columns.to_list() == [
"residue.id",
"residue.name",
"sitealign.category",
]
@pytest.mark.parametrize(
"residue_name, feature_name, value",
[
("ALA", "size", 1.0), # Size
("ASN", "size", 2.0),
("ARG", "size", 3.0),
("PTR", "size", 3.0), # Converted non-standard
("MSE", "size", 2.0), # Converted non-standard
("XXX", "size", np.nan), # Non-convertable non-standard
("ALA", "hbd", 0.0),
("ASN", "hbd", 1.0),
("ARG", "hbd", 3.0),
("XXX", "hbd", np.nan),
("ALA", "hba", 0.0),
("ASN", "hba", 1.0),
("ASP", "hba", 2.0),
("XXX", "hba", np.nan),
("ALA", "charge", 0.0),
("ARG", "charge", 1.0),
("ASP", "charge", -1.0),
("XXX", "charge", np.nan),
("ALA", "aromatic", 0.0),
("HIS", "aromatic", 1.0),
("XXX", "aromatic", np.nan),
("ARG", "aliphatic", 0.0),
("ALA", "aliphatic", 1.0),
("XXX", "aliphatic", np.nan),
],
)
def test_residue_to_value(self, residue_name, feature_name, value):
"""
Test function for retrieval of residue's size and pharmacophoric features
        (i.e. number of hydrogen bond donors,
        hydrogen bond acceptors, charge features, aromatic features, or aliphatic features)
Parameters
----------
residue_name : str
Three-letter code for residue.
feature_name : str
Feature type name.
value : float or None
Feature value.
"""
feature = SiteAlignFeature()
# Call feature from residue function
value_calculated = feature._residue_to_value(residue_name, feature_name)
if value_calculated: # If not None
assert isinstance(value_calculated, float)
# Note: Cannot use == to compare np.nan values
if np.isnan(value):
assert np.isnan(value_calculated)
else:
assert value_calculated == value
@pytest.mark.parametrize(
"feature_name",
[("XXX"), (1)],
)
def test_raise_invalid_feature_name(self, feature_name):
"""
Test if KeyError is raised if user passes an incorrect SiteAlign feature string.
"""
feature = SiteAlignFeature()
with pytest.raises(KeyError):
feature._raise_invalid_feature_name(feature_name)
@pytest.mark.parametrize(
"residue_name, residue_name_converted",
[
("MSE", "MET"),
("ALA", None),
("XXX", None),
],
)
def test_convert_modified_residue(self, residue_name, residue_name_converted):
"""
Test if modified residues are converted into standard residues correctly.
If conversion is not possible, test if None is returned.
"""
feature = SiteAlignFeature()
assert feature._convert_modified_residue(residue_name) == residue_name_converted
|
[
"kissim.io.PocketBioPython.from_structure_klifs_id",
"opencadd.databases.klifs.setup_local",
"numpy.isnan",
"pytest.raises",
"pathlib.Path",
"kissim.encoding.features.SiteAlignFeature",
"kissim.encoding.features.SiteAlignFeature.from_pocket",
"pytest.mark.parametrize"
] |
[((400, 446), 'opencadd.databases.klifs.setup_local', 'setup_local', (["(PATH_TEST_DATA / 'KLIFS_download')"], {}), "(PATH_TEST_DATA / 'KLIFS_download')\n", (411, 446), False, 'from opencadd.databases.klifs import setup_local\n'), ((541, 782), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""structure_klifs_id, klifs_session, feature_name"""', "[(12347, LOCAL, 'hba'), (12347, LOCAL, 'hbd'), (12347, LOCAL, 'size'), (\n 12347, LOCAL, 'charge'), (12347, LOCAL, 'aliphatic'), (12347, LOCAL,\n 'aromatic')]"], {}), "('structure_klifs_id, klifs_session, feature_name',\n [(12347, LOCAL, 'hba'), (12347, LOCAL, 'hbd'), (12347, LOCAL, 'size'),\n (12347, LOCAL, 'charge'), (12347, LOCAL, 'aliphatic'), (12347, LOCAL,\n 'aromatic')])\n", (564, 782), False, 'import pytest\n'), ((1829, 1932), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""structure_klifs_id, klifs_session, feature_name"""', "[(12347, LOCAL, 'xxx')]"], {}), "('structure_klifs_id, klifs_session, feature_name',\n [(12347, LOCAL, 'xxx')])\n", (1852, 1932), False, 'import pytest\n'), ((2394, 2472), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""structure_klifs_id, klifs_session"""', '[(12347, LOCAL)]'], {}), "('structure_klifs_id, klifs_session', [(12347, LOCAL)])\n", (2417, 2472), False, 'import pytest\n'), ((2993, 3071), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""structure_klifs_id, klifs_session"""', '[(12347, LOCAL)]'], {}), "('structure_klifs_id, klifs_session', [(12347, LOCAL)])\n", (3016, 3071), False, 'import pytest\n'), ((3674, 4347), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""residue_name, feature_name, value"""', "[('ALA', 'size', 1.0), ('ASN', 'size', 2.0), ('ARG', 'size', 3.0), ('PTR',\n 'size', 3.0), ('MSE', 'size', 2.0), ('XXX', 'size', np.nan), ('ALA',\n 'hbd', 0.0), ('ASN', 'hbd', 1.0), ('ARG', 'hbd', 3.0), ('XXX', 'hbd',\n np.nan), ('ALA', 'hba', 0.0), ('ASN', 'hba', 1.0), ('ASP', 'hba', 2.0),\n ('XXX', 'hba', np.nan), ('ALA', 'charge', 0.0), ('ARG', 'charge', 1.0),\n ('ASP', 'charge', -1.0), ('XXX', 'charge', np.nan), ('ALA', 'aromatic',\n 0.0), ('HIS', 'aromatic', 1.0), ('XXX', 'aromatic', np.nan), ('ARG',\n 'aliphatic', 0.0), ('ALA', 'aliphatic', 1.0), ('XXX', 'aliphatic', np.nan)]"], {}), "('residue_name, feature_name, value', [('ALA',\n 'size', 1.0), ('ASN', 'size', 2.0), ('ARG', 'size', 3.0), ('PTR',\n 'size', 3.0), ('MSE', 'size', 2.0), ('XXX', 'size', np.nan), ('ALA',\n 'hbd', 0.0), ('ASN', 'hbd', 1.0), ('ARG', 'hbd', 3.0), ('XXX', 'hbd',\n np.nan), ('ALA', 'hba', 0.0), ('ASN', 'hba', 1.0), ('ASP', 'hba', 2.0),\n ('XXX', 'hba', np.nan), ('ALA', 'charge', 0.0), ('ARG', 'charge', 1.0),\n ('ASP', 'charge', -1.0), ('XXX', 'charge', np.nan), ('ALA', 'aromatic',\n 0.0), ('HIS', 'aromatic', 1.0), ('XXX', 'aromatic', np.nan), ('ARG',\n 'aliphatic', 0.0), ('ALA', 'aliphatic', 1.0), ('XXX', 'aliphatic', np.nan)]\n )\n", (3697, 4347), False, 'import pytest\n'), ((5722, 5773), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""feature_name"""', "['XXX', 1]"], {}), "('feature_name', ['XXX', 1])\n", (5745, 5773), False, 'import pytest\n'), ((6120, 6235), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""residue_name, residue_name_converted"""', "[('MSE', 'MET'), ('ALA', None), ('XXX', None)]"], {}), "('residue_name, residue_name_converted', [('MSE',\n 'MET'), ('ALA', None), ('XXX', None)])\n", (6143, 6235), False, 'import pytest\n'), ((1096, 1189), 'kissim.io.PocketBioPython.from_structure_klifs_id', 'PocketBioPython.from_structure_klifs_id', 
(['structure_klifs_id'], {'klifs_session': 'klifs_session'}), '(structure_klifs_id, klifs_session=\n klifs_session)\n', (1135, 1189), False, 'from kissim.io import PocketBioPython\n'), ((1225, 1275), 'kissim.encoding.features.SiteAlignFeature.from_pocket', 'SiteAlignFeature.from_pocket', (['pocket', 'feature_name'], {}), '(pocket, feature_name)\n', (1253, 1275), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((2636, 2729), 'kissim.io.PocketBioPython.from_structure_klifs_id', 'PocketBioPython.from_structure_klifs_id', (['structure_klifs_id'], {'klifs_session': 'klifs_session'}), '(structure_klifs_id, klifs_session=\n klifs_session)\n', (2675, 2729), False, 'from kissim.io import PocketBioPython\n'), ((2800, 2856), 'kissim.encoding.features.SiteAlignFeature.from_pocket', 'SiteAlignFeature.from_pocket', (['pocket'], {'feature_name': '"""hba"""'}), "(pocket, feature_name='hba')\n", (2828, 2856), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((3237, 3330), 'kissim.io.PocketBioPython.from_structure_klifs_id', 'PocketBioPython.from_structure_klifs_id', (['structure_klifs_id'], {'klifs_session': 'klifs_session'}), '(structure_klifs_id, klifs_session=\n klifs_session)\n', (3276, 3330), False, 'from kissim.io import PocketBioPython\n'), ((3401, 3457), 'kissim.encoding.features.SiteAlignFeature.from_pocket', 'SiteAlignFeature.from_pocket', (['pocket'], {'feature_name': '"""hba"""'}), "(pocket, feature_name='hba')\n", (3429, 3457), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((5283, 5301), 'kissim.encoding.features.SiteAlignFeature', 'SiteAlignFeature', ([], {}), '()\n', (5299, 5301), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((5594, 5609), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (5602, 5609), True, 'import numpy as np\n'), ((5994, 6012), 'kissim.encoding.features.SiteAlignFeature', 'SiteAlignFeature', ([], {}), '()\n', (6010, 6012), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((6575, 6593), 'kissim.encoding.features.SiteAlignFeature', 'SiteAlignFeature', ([], {}), '()\n', (6591, 6593), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((2160, 2183), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2173, 2183), False, 'import pytest\n'), ((2206, 2299), 'kissim.io.PocketBioPython.from_structure_klifs_id', 'PocketBioPython.from_structure_klifs_id', (['structure_klifs_id'], {'klifs_session': 'klifs_session'}), '(structure_klifs_id, klifs_session=\n klifs_session)\n', (2245, 2299), False, 'from kissim.io import PocketBioPython\n'), ((2337, 2387), 'kissim.encoding.features.SiteAlignFeature.from_pocket', 'SiteAlignFeature.from_pocket', (['pocket', 'feature_name'], {}), '(pocket, feature_name)\n', (2365, 2387), False, 'from kissim.encoding.features import SiteAlignFeature\n'), ((5630, 5656), 'numpy.isnan', 'np.isnan', (['value_calculated'], {}), '(value_calculated)\n', (5638, 5656), True, 'import numpy as np\n'), ((6027, 6050), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (6040, 6050), False, 'import pytest\n'), ((340, 354), 'pathlib.Path', 'Path', (['__name__'], {}), '(__name__)\n', (344, 354), False, 'from pathlib import Path\n')]
|
"""Test for the snakemake workflow distributed with region_set_profiler"""
import json
import subprocess
import os
import pandas as pd
import numpy as np
tmpdir = "/icgc/dkfzlsdf/analysis/hs_ontogeny/temp"
# TODO: gtfanno result has weird index
gtfanno_result: pd.DataFrame = pd.read_pickle(
"/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_primary-annotations.p"
)
# all_regions_annotated = pd.read_pickle('/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_all-annotations.p')
# all_regions_annotated.loc[all_regions_annotated.feat_class == 'intergenic', 'feature_rank'] = 'primary'
# gtfanno_result_temp = '/home/kraemers/temp/gtfanno-temp.p'
# primary_annotations.to_pickle(gtfanno_result_temp)
# gtfanno_result = primary_annotations
gene_annos = gtfanno_result.groupby(["Chromosome", "Start", "End", "gtfanno_uid"])[
"gene_name"
].aggregate(lambda ser: ser.str.cat(sep=","))
assert (
gene_annos.index.get_level_values("gtfanno_uid") == np.arange(gene_annos.shape[0])
).all()
gene_annos.index = gene_annos.index.droplevel(3)
clustered_gene_anno_fp = tmpdir + "/clustered-gene-annos.p"
gene_annos.to_pickle(clustered_gene_anno_fp)
# Code to merge DMRs which are closer than merging_distance bp
# This should be moved elsewhere
# merging could also be achieved with pyranges:
# 1. slop all intervals with merging_distance on both sides
# 2. Cluster all intervals
# 3. Use the clustered intervals to find groups of intervals within the clustered intervals and compute the group annotations
merging_distance = 500
gtfanno_result = gtfanno_result.query('feat_class == "Promoter"')
distance_to_next_region = (
gtfanno_result.Start.iloc[1:].values - gtfanno_result.End.iloc[0:-1].values
)
# we iterate over the regions
# whenever the distance to the next region is > merging_distance, we begin a new cluster of regions
# In vectorized form:
region_cluster_ids = np.concatenate(
[[1], 1 + np.cumsum(distance_to_next_region > merging_distance)], axis=0
)
# Compress to gene anno series for the merged DMRs
gene_annos = gtfanno_result.groupby(region_cluster_ids)["gene_name"].apply(
lambda ser: ser.str.cat(sep=",")
)
gene_annos.to_pickle(clustered_gene_anno_fp)
gtfanno_result["gene_name"].to_pickle(clustered_gene_anno_fp)
config = {
"tasks": {
"cluster_ids": {
"no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.25": (
"min-gap_0.25",
"/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/clustering/full-hierarchy/method-selection/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/cutree-all.p",
),
# 'no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.12': ('min-gap_0.12',
# '/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/clustering/full-hierarchy/method-selection/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/cutree-all.p')
},
"metadata_tables": {
"codex": "/icgc/dkfzlsdf/analysis/hs_ontogeny/databases/enrichment_databases/lola_chipseq_2018-04-12/mm10/codex/regions/codex_annotations.csv",
"msigdb_canonical_pathways": "/icgc/dkfzlsdf/analysis/hs_ontogeny/databases/region_set_profiler_databases/msigdb_gmts/canonical-pathways.gmt",
},
"gene_annotations": {"promoters_500-bp-clusters": clustered_gene_anno_fp},
},
"output_dir": "/icgc/dkfzlsdf/analysis/hs_ontogeny/temp/rsp-tests",
"tmpdir": tmpdir,
"chromosomes": [
"1",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
],
}
config_fp = os.path.expanduser("~/temp/rsp-config.json")
with open(config_fp, "w") as fout:
json.dump(config, fout)
subprocess.run(
f"""
snakemake \
--snakefile {os.path.expanduser('~/projects/region_set_profiler/src/region_set_profiler/region_set_profiler.smk')} \
--configfile {config_fp} \
--cores 24 \
--keep-going \
--forcerun /icgc/dkfzlsdf/analysis/hs_ontogeny/temp/rsp-tests/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.25/msigdb_canonical_pathways:promoters_500-bp-clusters/msigdb_canonical_pathways:promoters_500-bp-clusters.done
""",
shell=True,
executable="/bin/bash",
)
# --dryrun \
|
[
"json.dump",
"numpy.cumsum",
"numpy.arange",
"pandas.read_pickle",
"os.path.expanduser"
] |
[((279, 460), 'pandas.read_pickle', 'pd.read_pickle', (['"""/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_primary-annotations.p"""'], {}), "(\n '/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_primary-annotations.p'\n )\n", (293, 460), True, 'import pandas as pd\n'), ((4090, 4134), 'os.path.expanduser', 'os.path.expanduser', (['"""~/temp/rsp-config.json"""'], {}), "('~/temp/rsp-config.json')\n", (4108, 4134), False, 'import os\n'), ((4174, 4197), 'json.dump', 'json.dump', (['config', 'fout'], {}), '(config, fout)\n', (4183, 4197), False, 'import json\n'), ((1122, 1152), 'numpy.arange', 'np.arange', (['gene_annos.shape[0]'], {}), '(gene_annos.shape[0])\n', (1131, 1152), True, 'import numpy as np\n'), ((2074, 2127), 'numpy.cumsum', 'np.cumsum', (['(distance_to_next_region > merging_distance)'], {}), '(distance_to_next_region > merging_distance)\n', (2083, 2127), True, 'import numpy as np\n'), ((4256, 4366), 'os.path.expanduser', 'os.path.expanduser', (['"""~/projects/region_set_profiler/src/region_set_profiler/region_set_profiler.smk"""'], {}), "(\n '~/projects/region_set_profiler/src/region_set_profiler/region_set_profiler.smk'\n )\n", (4274, 4366), False, 'import os\n')]
|
import sys
sys.path.append("../../")
import unittest
import paddle
import numpy as np
from paddleslim import UnstructuredPruner
from paddle.vision.models import mobilenet_v1
class TestUnstructuredPruner(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestUnstructuredPruner, self).__init__(*args, **kwargs)
paddle.disable_static()
self._gen_model()
def _gen_model(self):
self.net = mobilenet_v1(num_classes=10, pretrained=False)
self.pruner = UnstructuredPruner(
self.net, mode='ratio', ratio=0.98, threshold=0.0)
def test_prune(self):
ori_density = UnstructuredPruner.total_sparse(self.net)
ori_threshold = self.pruner.threshold
self.pruner.step()
self.net(
paddle.to_tensor(
np.random.uniform(0, 1, [16, 3, 32, 32]), dtype='float32'))
cur_density = UnstructuredPruner.total_sparse(self.net)
cur_threshold = self.pruner.threshold
print("Original threshold: {}".format(ori_threshold))
print("Current threshold: {}".format(cur_threshold))
print("Original density: {}".format(ori_density))
print("Current density: {}".format(cur_density))
self.assertLessEqual(ori_threshold, cur_threshold)
self.assertLessEqual(cur_density, ori_density)
self.pruner.update_params()
self.assertEqual(cur_density, UnstructuredPruner.total_sparse(self.net))
def test_summarize_weights(self):
max_value = -float("inf")
threshold = self.pruner.summarize_weights(self.net, 1.0)
for name, sub_layer in self.net.named_sublayers():
if not self.pruner._should_prune_layer(sub_layer):
continue
for param in sub_layer.parameters(include_sublayers=False):
max_value = max(
max_value,
np.max(np.abs(np.array(param.value().get_tensor()))))
print("The returned threshold is {}.".format(threshold))
print("The max_value is {}.".format(max_value))
self.assertEqual(max_value, threshold)
if __name__ == "__main__":
unittest.main()
|
[
"sys.path.append",
"unittest.main",
"numpy.random.uniform",
"paddleslim.UnstructuredPruner",
"paddle.disable_static",
"paddleslim.UnstructuredPruner.total_sparse",
"paddle.vision.models.mobilenet_v1"
] |
[((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((2156, 2171), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2169, 2171), False, 'import unittest\n'), ((344, 367), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (365, 367), False, 'import paddle\n'), ((440, 486), 'paddle.vision.models.mobilenet_v1', 'mobilenet_v1', ([], {'num_classes': '(10)', 'pretrained': '(False)'}), '(num_classes=10, pretrained=False)\n', (452, 486), False, 'from paddle.vision.models import mobilenet_v1\n'), ((509, 578), 'paddleslim.UnstructuredPruner', 'UnstructuredPruner', (['self.net'], {'mode': '"""ratio"""', 'ratio': '(0.98)', 'threshold': '(0.0)'}), "(self.net, mode='ratio', ratio=0.98, threshold=0.0)\n", (527, 578), False, 'from paddleslim import UnstructuredPruner\n'), ((641, 682), 'paddleslim.UnstructuredPruner.total_sparse', 'UnstructuredPruner.total_sparse', (['self.net'], {}), '(self.net)\n', (672, 682), False, 'from paddleslim import UnstructuredPruner\n'), ((902, 943), 'paddleslim.UnstructuredPruner.total_sparse', 'UnstructuredPruner.total_sparse', (['self.net'], {}), '(self.net)\n', (933, 943), False, 'from paddleslim import UnstructuredPruner\n'), ((1417, 1458), 'paddleslim.UnstructuredPruner.total_sparse', 'UnstructuredPruner.total_sparse', (['self.net'], {}), '(self.net)\n', (1448, 1458), False, 'from paddleslim import UnstructuredPruner\n'), ((820, 860), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '[16, 3, 32, 32]'], {}), '(0, 1, [16, 3, 32, 32])\n', (837, 860), True, 'import numpy as np\n')]
|
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# #ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>, <NAME>, <NAME>, <NAME>
"""Functions to generate derived time features useful
in forecasting, such as growth, seasonality, holidays.
"""
import inspect
import math
import warnings
from datetime import datetime
import fbprophet.hdays as fbholidays
import holidays
import numpy as np
import pandas as pd
from scipy.special import expit
from greykite.common import constants as cst
def convert_date_to_continuous_time(dt):
"""Converts date to continuous time. Each year is one unit.
Parameters
----------
dt : datetime object
the date to convert
Returns
-------
conti_date : `float`
the date represented in years
"""
year_length = datetime(dt.year, 12, 31).timetuple().tm_yday
tt = dt.timetuple()
return (dt.year +
(tt.tm_yday - 1
+ dt.hour / 24
+ dt.minute / (24 * 60)
+ dt.second / (24 * 3600)) / float(year_length))
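# Hedged illustration (not part of the original module): for noon on July 2nd, 2018,
# the day of year is 183, so convert_date_to_continuous_time(datetime(2018, 7, 2, 12))
# returns 2018 + (182 + 0.5) / 365 = 2018.5, i.e. the middle of 2018.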
def get_default_origin_for_time_vars(df, time_col):
"""Sets default value for origin_for_time_vars
Parameters
----------
df : `pandas.DataFrame`
Training data. A data frame which includes the timestamp and value columns
time_col : `str`
The column name in `df` representing time for the time series data.
Returns
-------
dt_continuous_time : `float`
The time origin used to create continuous variables for time
"""
date = pd.to_datetime(df[time_col][0])
return convert_date_to_continuous_time(date)
def build_time_features_df(dt, conti_year_origin):
"""This function gets a datetime-like vector and creates new columns containing temporal
features useful for time series analysis and forecasting e.g. year, week of year, etc.
Parameters
----------
dt : array-like (1-dimensional)
A vector of datetime-like values
conti_year_origin : float
The origin used for creating continuous time.
Returns
-------
time_features_df : `pandas.DataFrame`
Dataframe with the following time features.
* "datetime": `datetime.datetime` object, a combination of date and a time
* "date": `datetime.date` object, date with the format (year, month, day)
* "year": integer, year of the date e.g. 2018
* "year_length": integer, number of days in the year e.g. 365 or 366
* "quarter": integer, quarter of the date, 1, 2, 3, 4
* "quarter_start": `pandas.DatetimeIndex`, date of beginning of the current quarter
* "quarter_length": integer, number of days in the quarter, 90/91 for Q1, 91 for Q2, 92 for Q3 and Q4
* "month": integer, month of the year, January=1, February=2, ..., December=12
* "month_length": integer, number of days in the month, 28/ 29/ 30/ 31
* "woy": integer, ISO 8601 week of the year where a week starts from Monday, 1, 2, ..., 53
* "doy": integer, ordinal day of the year, 1, 2, ..., year_length
* "doq": integer, ordinal day of the quarter, 1, 2, ..., quarter_length
* "dom": integer, ordinal day of the month, 1, 2, ..., month_length
* "dow": integer, day of the week, Monday=1, Tuesday=2, ..., Sunday=7
* "str_dow": string, day of the week as a string e.g. "1-Mon", "2-Tue", ..., "7-Sun"
* "str_doy": string, day of the year e.g. "2020-03-20" for March 20, 2020
* "hour": integer, discrete hours of the datetime, 0, 1, ..., 23
* "minute": integer, minutes of the datetime, 0, 1, ..., 59
* "second": integer, seconds of the datetime, 0, 1, ..., 3599
* "year_month": string, (year, month) e.g. "2020-03" for March 2020
* "year_woy": string, (year, week of year) e.g. "2020_42" for 42nd week of 2020
* "month_dom": string, (month, day of month) e.g. "02/20" for February 20th
* "year_woy_dow": string, (year, week of year, day of week) e.g. "2020_03_6" for Saturday of 3rd week in 2020
* "woy_dow": string, (week of year, day of week) e.g. "03_6" for Saturday of 3rd week
* "dow_hr": string, (day of week, hour) e.g. "4_09" for 9am on Thursday
* "dow_hr_min": string, (day of week, hour, minute) e.g. "4_09_10" for 9:10am on Thursday
* "tod": float, time of day, continuous, 0.0 to 24.0
* "tow": float, time of week, continuous, 0.0 to 7.0
* "tom": float, standardized time of month, continuous, 0.0 to 1.0
* "toq": float, time of quarter, continuous, 0.0 to 1.0
* "toy": float, standardized time of year, continuous, 0.0 to 1.0
* "conti_year": float, year in continuous time, eg 2018.5 means middle of the year 2018
* "is_weekend": boolean, weekend indicator, True for weekend, else False
* "dow_grouped": string, Monday-Thursday=1234-MTuWTh, Friday=5-Fri, Saturday=6-Sat, Sunday=7-Sun
* "ct1": float, linear growth based on conti_year_origin, -infinity to infinity
* "ct2": float, signed quadratic growth, -infinity to infinity
* "ct3": float, signed cubic growth, -infinity to infinity
* "ct_sqrt": float, signed square root growth, -infinity to infinity
* "ct_root3": float, signed cubic root growth, -infinity to infinity
"""
dt = pd.DatetimeIndex(dt)
if len(dt) == 0:
raise ValueError("Length of dt cannot be zero.")
# basic time features
date = dt.date
year = dt.year
year_length = (365.0 + dt.is_leap_year)
quarter = dt.quarter
month = dt.month
month_length = dt.days_in_month
# finds first day of quarter
quarter_start = pd.DatetimeIndex(
dt.year.map(str) + "-" + (3 * quarter - 2).map(int).map(str) + "-01")
next_quarter_start = dt + pd.tseries.offsets.QuarterBegin(startingMonth=1)
quarter_length = (next_quarter_start - quarter_start).days
# finds offset from first day of quarter (rounds down to nearest day)
doq = ((dt - quarter_start) / pd.to_timedelta("1D") + 1).astype(int)
# week of year, "woy", follows ISO 8601:
# - Week 01 is the week with the year's first Thursday in it.
# - A week begins with Monday and ends with Sunday.
# So the week number of the week that overlaps both years, is 1, 52, or 53,
# depending on whether it has more days in the previous year or new year.
# - e.g. Jan 1st, 2018 is Monday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 1, 2]
# - e.g. Jan 1st, 2019 is Tuesday. woy of first 8 days = [1, 1, 1, 1, 1, 1, 2, 2]
# - e.g. Jan 1st, 2020 is Wednesday. woy of first 8 days = [1, 1, 1, 1, 1, 2, 2, 2]
# - e.g. Jan 1st, 2015 is Thursday. woy of first 8 days = [1, 1, 1, 1, 2, 2, 2, 2]
# - e.g. Jan 1st, 2021 is Friday. woy of first 8 days = [53, 53, 53, 1, 1, 1, 1, 1]
# - e.g. Jan 1st, 2022 is Saturday. woy of first 8 days = [52, 52, 1, 1, 1, 1, 1, 1]
# - e.g. Jan 1st, 2023 is Sunday. woy of first 8 days = [52, 1, 1, 1, 1, 1, 1, 1]
woy = dt.strftime("%V").astype(int)
doy = dt.dayofyear
dom = dt.day
dow = dt.strftime("%u").astype(int)
str_dow = dt.strftime("%u-%a") # e.g. 1-Mon, 2-Tue, ..., 7-Sun
hour = dt.hour
minute = dt.minute
second = dt.second
# grouped time feature
str_doy = dt.strftime("%Y-%m-%d") # e.g. 2020-03-20 for March 20, 2020
year_month = dt.strftime("%Y-%m") # e.g. 2020-03 for March 2020
month_dom = dt.strftime("%m/%d") # e.g. 02/20 for February 20th
year_woy = dt.strftime("%Y_%V") # e.g. 2020_42 for 42nd week of 2020
year_woy_dow = dt.strftime("%Y_%V_%u") # e.g. 2020_03_6 for Saturday of 3rd week in 2020
woy_dow = dt.strftime("%W_%u") # e.g. 03_6 for Saturday of 3rd week
dow_hr = dt.strftime("%u_%H") # e.g. 4_09 for 9am on Thursday
dow_hr_min = dt.strftime("%u_%H_%M") # e.g. 4_09_10 for 9:10am on Thursday
# derived time features
tod = hour + (minute / 60.0) + (second / 3600.0)
tow = dow - 1 + (tod / 24.0)
tom = (dom - 1 + (tod / 24.0)) / month_length
toq = (doq - 1 + (tod / 24.0)) / quarter_length
# time of year, continuous, 0.0 to 1.0. e.g. Jan 1, 12 am = 0/365, Jan 2, 12 am = 1/365, ...
# To handle leap years, Feb 28 = 58/365 - 59/365, Feb 29 = 59/365, Mar 1 = 59/365 - 60/365
# offset term is nonzero only in leap years
# doy_offset reduces doy by 1 from from Mar 1st (doy > 60)
doy_offset = (year_length == 366) * 1.0 * (doy > 60)
# tod_offset sets tod to 0 on Feb 29th (doy == 60)
tod_offset = 1 - (year_length == 366) * 1.0 * (doy == 60)
toy = (doy - 1 - doy_offset + (tod / 24.0) * tod_offset) / 365.0
# year of date in continuous time, eg 2018.5 means middle of year 2018
# this is useful for modeling features that do not care about leap year e.g. environmental variables
conti_year = year + (doy - 1 + (tod / 24.0)) / year_length
is_weekend = pd.Series(dow).apply(lambda x: x in [6, 7]).values # weekend indicator
# categorical var with levels (Mon-Thu, Fri, Sat, Sun), could help when training data are sparse.
dow_grouped = pd.Series(str_dow).apply(lambda x: "1234-MTuWTh" if (x in ["1-Mon", "2-Tue", "3-Wed", "4-Thu"]) else x).values
# growth terms
ct1 = conti_year - conti_year_origin
ct2 = signed_pow(ct1, 2)
ct3 = signed_pow(ct1, 3)
ct_sqrt = signed_pow(ct1, 1/2)
ct_root3 = signed_pow(ct1, 1/3)
# All keys must be added to constants.
features_dict = {
"datetime": dt,
"date": date,
"year": year,
"year_length": year_length,
"quarter": quarter,
"quarter_start": quarter_start,
"quarter_length": quarter_length,
"month": month,
"month_length": month_length,
"woy": woy,
"doy": doy,
"doq": doq,
"dom": dom,
"dow": dow,
"str_dow": str_dow,
"str_doy": str_doy,
"hour": hour,
"minute": minute,
"second": second,
"year_month": year_month,
"year_woy": year_woy,
"month_dom": month_dom,
"year_woy_dow": year_woy_dow,
"woy_dow": woy_dow,
"dow_hr": dow_hr,
"dow_hr_min": dow_hr_min,
"tod": tod,
"tow": tow,
"tom": tom,
"toq": toq,
"toy": toy,
"conti_year": conti_year,
"is_weekend": is_weekend,
"dow_grouped": dow_grouped,
"ct1": ct1,
"ct2": ct2,
"ct3": ct3,
"ct_sqrt": ct_sqrt,
"ct_root3": ct_root3,
}
df = pd.DataFrame(features_dict)
return df
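# Hedged usage sketch (illustrative only, not from the source):
#   time_df = build_time_features_df(
#       pd.date_range(start="2020-01-01", periods=3, freq="D"), conti_year_origin=2020.0)
#   # time_df["doy"] == [1, 2, 3]; time_df["dow"] == [3, 4, 5] since Jan 1, 2020 is a Wednesday,
#   # and time_df["ct1"] starts at 0.0 because the origin equals the first timestamp's year.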
def add_time_features_df(df, time_col, conti_year_origin):
"""Adds a time feature data frame to a data frame
:param df: the input data frame
:param time_col: the name of the time column of interest
:param conti_year_origin: the origin of time for the continuous time variable
:return: the same data frame (df) augmented with new columns
"""
df = df.reset_index(drop=True)
time_df = build_time_features_df(
dt=df[time_col],
conti_year_origin=conti_year_origin)
time_df = time_df.reset_index(drop=True)
return pd.concat([df, time_df], axis=1)
def get_holidays(countries, year_start, year_end):
"""This function extracts a holiday data frame for the period of interest
[year_start to year_end] for the given countries.
This is done using the holidays libraries in pypi:fbprophet and pypi:holidays
Implementation resembles that of `~fbprophet.make_holidays.make_holidays_df`
Parameters
----------
countries : `list` [`str`]
countries for which we need holidays
year_start : `int`
first year of interest, inclusive
year_end : `int`
last year of interest, inclusive
Returns
-------
holiday_df_dict : `dict` [`str`, `pandas.DataFrame`]
- key: country name
- value: data frame with holidays for that country
Each data frame has two columns: EVENT_DF_DATE_COL, EVENT_DF_LABEL_COL
"""
country_holiday_dict = {}
year_list = list(range(year_start, year_end + 1))
for country in countries:
try:
# Fetch the holidays from fbprophet holiday set
# Suppress the following warning for India:
# "We only support Diwali and Holi holidays from 2010 to 2025"
if country in ["India", "IN"]:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
country_holidays = getattr(fbholidays, country)(years=year_list)
else:
country_holidays = getattr(fbholidays, country)(years=year_list)
except AttributeError:
# Fetch the holidays from pypi:holidays set
try:
country_holidays = getattr(holidays, country)(years=year_list)
except AttributeError:
raise AttributeError(f"Holidays in {country} are not currently supported!")
country_df = pd.DataFrame({
cst.EVENT_DF_DATE_COL: list(country_holidays.keys()),
cst.EVENT_DF_LABEL_COL: list(country_holidays.values())})
country_df[cst.EVENT_DF_DATE_COL] = pd.to_datetime(country_df[cst.EVENT_DF_DATE_COL])
country_holiday_dict[country] = country_df
return country_holiday_dict
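# Hedged usage sketch (exact holiday labels depend on the installed holidays/fbprophet versions):
#   holiday_df_dict = get_holidays(["UnitedStates", "India"], year_start=2019, year_end=2020)
#   us_df = holiday_df_dict["UnitedStates"]  # two columns: EVENT_DF_DATE_COL, EVENT_DF_LABEL_COL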
def get_available_holiday_lookup_countries(countries=None):
"""Returns list of available countries for modeling holidays
:param countries: List[str]
only look for available countries in this set
:return: List[str]
list of available countries for modeling holidays
"""
fb_countries = [
name for name, obj in inspect.getmembers(fbholidays)
if inspect.isclass(obj) and obj.__module__ == fbholidays.__name__]
holidays_countries = [
name for name, obj in inspect.getmembers(holidays)
if inspect.isclass(obj) and obj.__module__ == holidays.__name__]
all_countries = set(fb_countries + holidays_countries)
if countries is not None:
countries = set(countries)
found_countries = all_countries.intersection(countries)
else:
found_countries = all_countries
found_countries.discard("HolidayBase") # edge case, remove if found
return sorted(list(found_countries))
def get_available_holidays_in_countries(
countries,
year_start,
year_end):
"""Returns a dictionary mapping each country to its holidays
between the years specified.
:param countries: List[str]
countries for which we need holidays
:param year_start: int
first year of interest
:param year_end: int
last year of interest
:return: Dict[str, List[str]]
key: country name
value: list of holidays in that country between [year_start, year_end]
"""
country_holiday_dict = get_holidays(countries, year_start, year_end)
country_holiday_list = {country: list(sorted(set(df[cst.EVENT_DF_LABEL_COL].values)))
for country, df in country_holiday_dict.items()}
return country_holiday_list
def get_available_holidays_across_countries(
countries,
year_start,
year_end):
"""Returns a list of holidays that occur any of the countries
between the years specified.
:param countries: List[str]
countries for which we need holidays
:param year_start: int
first year of interest
:param year_end: int
last year of interest
:return: List[str]
names of holidays in any of the countries between [year_start, year_end]
"""
country_holiday_list = get_available_holidays_in_countries(
countries=countries,
year_start=year_start,
year_end=year_end)
holiday_across_countries = {
holiday for country, holiday_list in country_holiday_list.items()
for holiday in holiday_list}
return list(sorted(holiday_across_countries))
def add_daily_events(
df,
event_df_dict,
date_col=cst.EVENT_DF_DATE_COL,
regular_day_label=cst.EVENT_DEFAULT):
"""For each key of event_df_dict, it adds a new column to a data frame (df)
with a date column (date_col).
Each new column will represent the events given for that key.
Notes
-----
As a side effect, the columns in ``event_df_dict`` are renamed.
Parameters
----------
df : `pandas.DataFrame`
The data frame which has a date column.
event_df_dict : `dict` [`str`, `pandas.DataFrame`]
A dictionary of data frames, each representing events data
for the corresponding key.
Values are DataFrames with two columns:
- The first column contains the date. Must be at the same
frequency as ``df[date_col]`` for proper join. Must be in a
format recognized by `pandas.to_datetime`.
- The second column contains the event label for each date
date_col : `str`
Column name in ``df`` that contains the dates for joining against
the events in ``event_df_dict``.
regular_day_label : `str`
The label used for regular days which are not "events".
Returns
-------
df_daily_events : `pandas.DataFrame`
An augmented data frame version of df with new label columns --
one for each key of ``event_df_dict``.
"""
df[date_col] = pd.to_datetime(df[date_col])
for label, event_df in event_df_dict.items():
event_df = event_df.copy()
new_col = f"{cst.EVENT_PREFIX}_{label}"
event_df.columns = [date_col, new_col]
event_df[date_col] = pd.to_datetime(event_df[date_col])
df = df.merge(event_df, on=date_col, how="left")
df[new_col] = df[new_col].fillna(regular_day_label)
return df
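# Hedged usage sketch with a hypothetical one-event dictionary:
#   events = {"US": pd.DataFrame({"date": ["2020-12-25"], "label": ["Christmas Day"]})}
#   df_out = add_daily_events(df, events, date_col="date")
#   # df_out gains a column f"{cst.EVENT_PREFIX}_US" holding "Christmas Day" on that date
#   # and the regular_day_label everywhere else.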
def add_event_window(
df,
time_col,
label_col,
time_delta="1D",
pre_num=1,
post_num=1,
events_name=""):
"""For a data frame of events with a time_col and label_col
it adds shifted events
    prior to and after the given events.
For example if the event data frame includes the row
'2019-12-25, Christmas'
the function will produce dataframes with the events:
'2019-12-24, Christmas' and '2019-12-26, Christmas'
if pre_num and post_num are 1 or more.
:param df: pd.DataFrame
the events data frame with two columns 'time_col' and 'label_col'
:param time_col: str
The column with the timestamp of the events.
This can be daily but does not have to
:param label_col: str
the column with labels for the events
:param time_delta: str
the amount of the shift for each unit specified by a string
e.g. "1D" stands for one day delta
:param pre_num: int
the number of events to be added prior to the given event for each event in df
:param post_num: int
the number of events to be added after to the given event for each event in df
:param events_name: str
for each shift, we generate a new data frame
and those data frames will be stored in a dictionary with appropriate keys.
Each key starts with "events_name"
and follow up with:
"_minus_1", "_minus_2", "_plus_1", "_plus_2", ...
depending on pre_num and post_num
:return: dict[key: pd.Dataframe]
A dictionary of dataframes for each needed shift.
For example if pre_num=2 and post_num=3.
2 + 3 = 5 data frames will be stored in the return dictionary.
"""
df_dict = {}
pd_time_delta = pd.to_timedelta(time_delta)
for num in range(pre_num):
df0 = pd.DataFrame()
df0[time_col] = df[time_col] - (num + 1) * pd_time_delta
df0[label_col] = df[label_col]
df_dict[events_name + "_minus_" + f"{(num + 1):.0f}"] = df0
for num in range(post_num):
df0 = pd.DataFrame()
df0[time_col] = df[time_col] + (num + 1) * pd_time_delta
df0[label_col] = df[label_col]
df_dict[events_name + "_plus_" + f"{(num + 1):.0f}"] = df0
return df_dict
def get_evenly_spaced_changepoints_values(
df,
continuous_time_col="ct1",
n_changepoints=2):
"""Partitions interval into n_changepoints + 1 segments,
placing a changepoint at left endpoint of each segment.
The left most segment doesn't get a changepoint.
Changepoints should be determined from training data.
:param df: pd.DataFrame
training dataset. contains continuous_time_col
:param continuous_time_col: str
name of continuous time column (e.g. conti_year, ct1)
:param n_changepoints: int
number of changepoints requested
:return: np.array
values of df[continuous_time_col] at the changepoints
"""
if not n_changepoints > 0:
raise ValueError("n_changepoints must be > 0")
n = df.shape[0]
n_steps = n_changepoints + 1
step_size = n / n_steps
indices = np.floor(np.arange(start=1, stop=n_steps) * step_size)
return df[continuous_time_col][indices].values
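# Hedged worked example: with 100 training rows and n_changepoints=3, step_size is 25,
# so the changepoints are the "ct1" values at row indices floor([1, 2, 3] * 25) = [25, 50, 75];
# rows 0-24 (the first segment) get no changepoint.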
def get_evenly_spaced_changepoints_dates(
df,
time_col,
n_changepoints):
"""Partitions interval into n_changepoints + 1 segments,
placing a changepoint at left endpoint of each segment.
The left most segment doesn't get a changepoint.
Changepoints should be determined from training data.
:param df: pd.DataFrame
training dataset. contains continuous_time_col
:param time_col: str
name of time column
:param n_changepoints: int
number of changepoints requested
:return: pd.Series
values of df[time_col] at the changepoints
"""
if not n_changepoints >= 0:
raise ValueError("n_changepoints must be >= 0")
changepoint_indices = np.floor(np.arange(start=1, stop=n_changepoints + 1) * (df.shape[0] / (n_changepoints + 1)))
changepoint_indices = df.index[np.concatenate([[0], changepoint_indices.astype(int)])]
return df.loc[changepoint_indices, time_col]
def get_custom_changepoints_values(
df,
changepoint_dates,
time_col=cst.TIME_COL,
continuous_time_col="ct1"):
"""Returns the values of continuous_time_col at the
requested changepoint_dates.
:param df: pd.DataFrame
training dataset. contains continuous_time_col and time_col
:param changepoint_dates: Iterable[Union[int, float, str, datetime]]
Changepoint dates, interpreted by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset
:param time_col: str
The column name in `df` representing time for the time series data
The time column can be anything that can be parsed by pandas DatetimeIndex
:param continuous_time_col: str
name of continuous time column (e.g. conti_year, ct1)
:return: np.array
values of df[continuous_time_col] at the changepoints
"""
ts = pd.to_datetime(df[time_col])
changepoint_dates = pd.to_datetime(changepoint_dates)
# maps each changepoint to first date >= changepoint in the dataframe
# if there is no such date, the changepoint is dropped (it would not be useful anyway)
changepoint_ts = [ts[ts >= date].min() for date in changepoint_dates if any(ts >= date)]
indices = ts.isin(changepoint_ts)
changepoints = df[indices][continuous_time_col].values
if changepoints.shape[0] == 0:
changepoints = None
return changepoints
def get_changepoint_string(changepoint_dates):
"""Gets proper formatted strings for changepoint dates.
The default format is "_%Y_%m_%d_%H". When necessary, it appends "_%M" or "_%M_%S".
Parameters
----------
changepoint_dates : `list`
List of changepoint dates, parsable by `pandas.to_datetime`.
Returns
-------
date_strings : `list[`str`]`
List of string formatted changepoint dates.
"""
changepoint_dates = list(pd.to_datetime(changepoint_dates))
time_format = "_%Y_%m_%d_%H"
if any([stamp.second != 0 for stamp in changepoint_dates]):
time_format += "_%M_%S"
elif any([stamp.minute != 0 for stamp in changepoint_dates]):
time_format += "_%M"
date_strings = [date.strftime(time_format) for date in changepoint_dates]
return date_strings
def get_changepoint_features(
df,
changepoint_values,
continuous_time_col="ct1",
growth_func=None,
changepoint_dates=None):
"""Returns features for growth terms with continuous time origins at
the changepoint_values (locations) specified
Generates a time series feature for each changepoint:
Let t = continuous_time value, c = changepoint value
Then the changepoint feature value at time point t is
`growth_func(t - c) * I(t >= c)`, where I is the indicator function
This represents growth as a function of time, where the time origin is
the changepoint
In the typical case where growth_func(0) = 0 (has origin at 0),
the total effect of the changepoints is continuous in time.
If `growth_func` is the identity function, and `continuous_time`
represents the year in continuous time, these terms form the basis for a
continuous, piecewise linear curve to the growth trend.
    Fitting these terms with a linear model, the coefficients represent the slope
    change at each changepoint
Intended usage
----------
To make predictions (on test set)
Allow growth term as a function of time to change at these points.
Parameters
----------
:param df: pd.Dataframe
The dataset to make predictions. Contains column continuous_time_col.
:param changepoint_values: array-like
List of changepoint values (on same scale as df[continuous_time_col]).
Should be determined from training data
:param continuous_time_col: Optional[str]
Name of continuous time column in df
growth_func is applied to this column to generate growth term
If None, uses "ct1", linear growth
:param growth_func: Optional[callable]
Growth function for defining changepoints (scalar -> scalar).
If None, uses identity function to use continuous_time_col directly
as growth term
:param changepoint_dates: Optional[list]
List of change point dates, parsable by `pandas.to_datetime`.
:return: pd.DataFrame, shape (df.shape[0], len(changepoints))
Changepoint features, 0-indexed
"""
if continuous_time_col is None:
continuous_time_col = "ct1"
if growth_func is None:
def growth_func(x):
return x
if changepoint_dates is not None:
time_postfixes = get_changepoint_string(changepoint_dates)
else:
time_postfixes = [""] * len(changepoint_values)
changepoint_df = pd.DataFrame()
for i, changepoint in enumerate(changepoint_values):
time_feature = np.array(df[continuous_time_col]) - changepoint # shifted time column (t - c_i)
growth_term = np.array([growth_func(max(x, 0)) for x in time_feature]) # growth as a function of time
time_feature_ind = time_feature >= 0 # Indicator(t >= c_i), lets changepoint take effect starting at c_i
new_col = growth_term * time_feature_ind
new_changepoint = pd.Series(new_col, name=f"{cst.CHANGEPOINT_COL_PREFIX}{i}{time_postfixes[i]}")
changepoint_df = pd.concat([changepoint_df, new_changepoint], axis=1)
return changepoint_df
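# Hedged worked example: with a single changepoint value c and the default identity
# growth_func, the generated column equals max(ct1 - c, 0) -- zero before the
# changepoint and growing with slope 1 afterwards, so a fitted coefficient on this
# column is the slope change at c.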
def get_changepoint_values_from_config(
changepoints_dict,
time_features_df,
time_col=cst.TIME_COL):
"""Applies the changepoint method specified in `changepoints_dict` to return the changepoint values
:param changepoints_dict: Optional[Dict[str, any]]
Specifies the changepoint configuration.
"method": str
The method to locate changepoints. Valid options:
"uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
"custom". Places changepoints at the specified dates.
Additional keys to provide parameters for each particular method are described below.
"continuous_time_col": Optional[str]
Column to apply `growth_func` to, to generate changepoint features
Typically, this should match the growth term in the model
"growth_func": Optional[func]
Growth function (scalar -> scalar). Changepoint features are created
by applying `growth_func` to "continuous_time_col" with offsets.
If None, uses identity function to use `continuous_time_col` directly
as growth term
If changepoints_dict["method"] == "uniform", this other key is required:
"n_changepoints": int
number of changepoints to evenly space across training period
If changepoints_dict["method"] == "custom", this other key is required:
"dates": Iterable[Union[int, float, str, datetime]]
Changepoint dates. Must be parsable by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset.
:param time_features_df: pd.Dataframe
training dataset. contains column "continuous_time_col"
:param time_col: str
The column name in `time_features_df` representing time for the time series data
The time column can be anything that can be parsed by pandas DatetimeIndex
Used only in the "custom" method.
:return: np.array
values of df[continuous_time_col] at the changepoints
"""
changepoint_values = None
if changepoints_dict is not None:
valid_changepoint_methods = ["uniform", "custom"]
changepoint_method = changepoints_dict.get("method")
continuous_time_col = changepoints_dict.get("continuous_time_col")
if changepoint_method is None:
raise Exception("changepoint method must be specified")
if changepoint_method not in valid_changepoint_methods:
raise NotImplementedError(
f"changepoint method {changepoint_method} not recognized. "
f"Must be one of {valid_changepoint_methods}")
if changepoint_method == "uniform":
if changepoints_dict["n_changepoints"] > 0:
params = {"continuous_time_col": continuous_time_col} if continuous_time_col is not None else {}
changepoint_values = get_evenly_spaced_changepoints_values(
df=time_features_df,
n_changepoints=changepoints_dict["n_changepoints"],
**params)
elif changepoint_method == "custom":
params = {}
if time_col is not None:
params["time_col"] = time_col
if continuous_time_col is not None:
params["continuous_time_col"] = continuous_time_col
changepoint_values = get_custom_changepoints_values(
df=time_features_df,
changepoint_dates=changepoints_dict["dates"],
**params)
return changepoint_values
def get_changepoint_features_and_values_from_config(
df,
time_col,
changepoints_dict=None,
origin_for_time_vars=None):
"""Extracts changepoints from changepoint configuration and input data
:param df: pd.DataFrame
Training data. A data frame which includes the timestamp and value columns
:param time_col: str
The column name in `df` representing time for the time series data
The time column can be anything that can be parsed by pandas DatetimeIndex
:param changepoints_dict: Optional[Dict[str, any]]
Specifies the changepoint configuration.
"method": str
The method to locate changepoints. Valid options:
"uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
"custom". Places changepoints at the specified dates.
Additional keys to provide parameters for each particular method are described below.
"continuous_time_col": Optional[str]
Column to apply `growth_func` to, to generate changepoint features
Typically, this should match the growth term in the model
"growth_func": Optional[func]
Growth function (scalar -> scalar). Changepoint features are created
by applying `growth_func` to "continuous_time_col" with offsets.
If None, uses identity function to use `continuous_time_col` directly
as growth term
If changepoints_dict["method"] == "uniform", this other key is required:
"n_changepoints": int
number of changepoints to evenly space across training period
If changepoints_dict["method"] == "custom", this other key is required:
"dates": Iterable[Union[int, float, str, datetime]]
Changepoint dates. Must be parsable by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset.
:param origin_for_time_vars: Optional[float]
The time origin used to create continuous variables for time
:return: Dict[str, any]
Dictionary with the requested changepoints and associated information
changepoint_df: pd.DataFrame, shape (df.shape[0], len(changepoints))
Changepoint features for modeling the training data
changepoint_values: array-like
List of changepoint values (on same scale as df[continuous_time_col])
Can be used to generate changepoints for prediction.
continuous_time_col: Optional[str]
Name of continuous time column in df
growth_func is applied to this column to generate growth term.
If None, uses "ct1", linear growth
Can be used to generate changepoints for prediction.
growth_func: Optional[callable]
Growth function for defining changepoints (scalar -> scalar).
If None, uses identity function to use continuous_time_col directly
as growth term.
Can be used to generate changepoints for prediction.
changepoint_cols: List[str]
Names of the changepoint columns for modeling
"""
# extracts changepoint values
if changepoints_dict is None:
changepoint_values = None
continuous_time_col = None
growth_func = None
else:
if origin_for_time_vars is None:
origin_for_time_vars = get_default_origin_for_time_vars(df, time_col)
time_features_df = build_time_features_df(
df[time_col],
conti_year_origin=origin_for_time_vars)
changepoint_values = get_changepoint_values_from_config(
changepoints_dict=changepoints_dict,
time_features_df=time_features_df,
time_col="datetime") # datetime column generated by `build_time_features_df`
continuous_time_col = changepoints_dict.get("continuous_time_col")
growth_func = changepoints_dict.get("growth_func")
# extracts changepoint column names
if changepoint_values is None:
changepoint_df = None
changepoint_cols = []
else:
if changepoints_dict is None:
changepoint_dates = None
elif changepoints_dict["method"] == "custom":
changepoint_dates = list(pd.to_datetime(changepoints_dict["dates"]))
elif changepoints_dict["method"] == "uniform":
changepoint_dates = get_evenly_spaced_changepoints_dates(
df=df,
time_col=time_col,
n_changepoints=changepoints_dict["n_changepoints"]
).tolist()[1:] # the changepoint features does not include the growth term
else:
changepoint_dates = None
changepoint_df = get_changepoint_features(
df=time_features_df,
changepoint_values=changepoint_values,
continuous_time_col=continuous_time_col,
growth_func=growth_func,
changepoint_dates=changepoint_dates)
changepoint_cols = list(changepoint_df.columns)
return {
"changepoint_df": changepoint_df,
"changepoint_values": changepoint_values,
"continuous_time_col": continuous_time_col,
"growth_func": growth_func,
"changepoint_cols": changepoint_cols
}
def get_changepoint_dates_from_changepoints_dict(
changepoints_dict,
df=None,
time_col=None):
"""Gets the changepoint dates from ``changepoints_dict``
Parameters
----------
changepoints_dict : `dict` or `None`
The ``changepoints_dict`` which is compatible with
`~greykite.algo.forecast.silverkite.forecast_silverkite.SilverkiteForecast.forecast`
df : `pandas.DataFrame` or `None`, default `None`
The data df to put changepoints on.
time_col : `str` or `None`, default `None`
The column name of time column in ``df``.
Returns
-------
changepoint_dates : `list`
List of changepoint dates.
"""
if (changepoints_dict is None
or "method" not in changepoints_dict.keys()
or changepoints_dict["method"] not in ["auto", "uniform", "custom"]):
return None
method = changepoints_dict["method"]
if method == "custom":
# changepoints_dict["dates"] is `Iterable`, converts to list
changepoint_dates = list(changepoints_dict["dates"])
elif method == "uniform":
if df is None or time_col is None:
raise ValueError("When the method of ``changepoints_dict`` is 'uniform', ``df`` and "
"``time_col`` must be provided.")
changepoint_dates = get_evenly_spaced_changepoints_dates(
df=df,
time_col=time_col,
n_changepoints=changepoints_dict["n_changepoints"]
)
# the output is `pandas.Series`, converts to list
changepoint_dates = changepoint_dates.tolist()[1:]
else:
raise ValueError("The method of ``changepoints_dict`` can not be 'auto'. "
"Please specify or detect change points first.")
return changepoint_dates
def add_event_window_multi(
event_df_dict,
time_col,
label_col,
time_delta="1D",
pre_num=1,
post_num=1,
pre_post_num_dict=None):
"""For a given dictionary of events data frames with a time_col and label_col
    it adds shifted events prior to and after the given events.
For example if the event data frame includes the row '2019-12-25, Christmas' as a row
the function will produce dataframes with the events '2019-12-24, Christmas' and '2019-12-26, Christmas' if
pre_num and post_num are 1 or more.
Parameters
----------
event_df_dict: `dict` [`str`, `pandas.DataFrame`]
A dictionary of events data frames
with each having two columns: ``time_col`` and ``label_col``.
time_col: `str`
The column with the timestamp of the events.
This can be daily but does not have to be.
label_col : `str`
The column with labels for the events.
time_delta : `str`, default "1D"
The amount of the shift for each unit specified by a string
e.g. '1D' stands for one day delta
pre_num : `int`, default 1
The number of events to be added prior to the given event for each event in df.
post_num: `int`, default 1
The number of events to be added after to the given event for each event in df.
pre_post_num_dict : `dict` [`str`, (`int`, `int`)] or None, default None
Optionally override ``pre_num`` and ``post_num`` for each key in ``event_df_dict``.
For example, if ``event_df_dict`` has keys "US" and "India", this parameter
can be set to ``pre_post_num_dict = {"US": [1, 3], "India": [1, 2]}``,
denoting that the "US" ``pre_num`` is 1 and ``post_num`` is 3, and "India" ``pre_num`` is 1
and ``post_num`` is 2. Keys not specified by ``pre_post_num_dict`` use the default given by
``pre_num`` and ``post_num``.
Returns
-------
df : `dict` [`str`, `pandas.DataFrame`]
A dictionary of dataframes for each needed shift. For example if pre_num=2 and post_num=3.
2 + 3 = 5 data frames will be stored in the return dictionary.
"""
if pre_post_num_dict is None:
pre_post_num_dict = {}
shifted_df_dict = {}
for event_df_key, event_df in event_df_dict.items():
if event_df_key in pre_post_num_dict.keys():
pre_num0 = pre_post_num_dict[event_df_key][0]
post_num0 = pre_post_num_dict[event_df_key][1]
else:
pre_num0 = pre_num
post_num0 = post_num
df_dict0 = add_event_window(
df=event_df,
time_col=time_col,
label_col=label_col,
time_delta=time_delta,
pre_num=pre_num0,
post_num=post_num0,
events_name=event_df_key)
shifted_df_dict.update(df_dict0)
return shifted_df_dict
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None):
"""Returns column name corresponding to a particular fourier term, as returned by fourier_series_fcn
:param k: int
fourier term
:param col_name: str
column in the dataframe used to generate fourier series
:param function_name: str
sin or cos
    :param seas_name: str
appended to new column names added for fourier terms
:return: str
column name in DataFrame returned by fourier_series_fcn
"""
# patsy doesn't allow "." in formula term. Replace "." with "_" rather than quoting "Q()" all fourier terms
name = f"{function_name}{k:.0f}_{col_name}"
if seas_name is not None:
name = f"{name}_{seas_name}"
return name
def fourier_series_fcn(col_name, period=1.0, order=1, seas_name=None):
"""Generates a function which creates fourier series matrix for a column of an input df
:param col_name: str
is the column name in the dataframe which is to be used for
generating fourier series. It needs to be a continuous variable.
:param period: float
the period of the fourier series
:param order: int
the order of the fourier series
:param seas_name: Optional[str]
appended to new column names added for fourier terms.
Useful to distinguish multiple fourier
series on same col_name with different periods.
:return: callable
a function which can be applied to any data.frame df
with a column name being equal to col_name
"""
def fs_func(df):
out_df = pd.DataFrame()
out_cols = []
if col_name not in df.columns:
raise ValueError("The data frame does not have the column: " + col_name)
x = df[col_name]
x = np.array(x)
for i in range(order):
k = i + 1
sin_col_name = get_fourier_col_name(
k,
col_name,
function_name="sin",
seas_name=seas_name)
cos_col_name = get_fourier_col_name(
k,
col_name,
function_name="cos",
seas_name=seas_name)
out_cols.append(sin_col_name)
out_cols.append(cos_col_name)
omega = 2 * math.pi / period
u = omega * k * x
out_df[sin_col_name] = np.sin(u)
out_df[cos_col_name] = np.cos(u)
return {"df": out_df, "cols": out_cols}
return fs_func
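# Hedged usage sketch (illustrative only):
#   fs = fourier_series_fcn(col_name="toy", period=1.0, order=2, seas_name="yearly")
#   res = fs(time_df)   # time_df must contain a "toy" column
#   # res["cols"] == ["sin1_toy_yearly", "cos1_toy_yearly", "sin2_toy_yearly", "cos2_toy_yearly"]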
def fourier_series_multi_fcn(
col_names,
periods=None,
orders=None,
seas_names=None):
"""Generates a func which adds multiple fourier series with multiple periods.
Parameters
----------
col_names : `list` [`str`]
the column names which are to be used to generate Fourier series.
Each column can have its own period and order.
periods: `list` [`float`] or None
the periods corresponding to each column given in col_names
orders : `list` [`int`] or None
the orders for each of the Fourier series
seas_names : `list` [`str`] or None
Appended to the Fourier series name.
If not provided (None) col_names will be used directly.
"""
k = len(col_names)
if periods is None:
periods = [1.0] * k
if orders is None:
orders = [1] * k
if len(periods) != len(orders):
raise ValueError("periods and orders must have the same length.")
def fs_multi_func(df):
out_df = None
out_cols = []
for i in range(k):
col_name = col_names[i]
period = periods[i]
order = orders[i]
seas_name = None
if seas_names is not None:
seas_name = seas_names[i]
func0 = fourier_series_fcn(
col_name=col_name,
period=period,
order=order,
seas_name=seas_name)
res = func0(df)
fs_df = res["df"]
fs_cols = res["cols"]
out_df = pd.concat([out_df, fs_df], axis=1)
out_cols = out_cols + fs_cols
return {"df": out_df, "cols": out_cols}
return fs_multi_func
def signed_pow(x, y):
""" Takes the absolute value of x and raises it to power of y.
Then it multiplies the result by sign of x.
This guarantees this function is non-decreasing.
This is useful in many contexts e.g. statistical modeling.
:param x: the base number which can be any real number
:param y: the power which can be any real number
:return: returns abs(x) to power of y multiplied by sign of x
"""
return np.sign(x) * np.power(np.abs(x), y)
def signed_pow_fcn(y):
return lambda x: signed_pow(x, y)
signed_sqrt = signed_pow_fcn(1 / 2)
signed_sq = signed_pow_fcn(2)
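# Hedged worked examples: signed_pow(-4, 0.5) = sign(-4) * |-4|**0.5 = -2.0, so
# signed_sqrt(-4) == -2.0 and signed_sq(-3) == -9.0; unlike plain powers, both stay
# monotone (non-decreasing) across zero.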
def logistic(x, growth_rate=1.0, capacity=1.0, floor=0.0, inflection_point=0.0):
"""Evaluates the logistic function at x with the specified growth rate,
capacity, floor, and inflection point.
:param x: value to evaluate the logistic function
:type x: float
:param growth_rate: growth rate
:type growth_rate: float
:param capacity: max value (carrying capacity)
:type capacity: float
:param floor: min value (lower bound)
:type floor: float
:param inflection_point: the t value of the inflection point
:type inflection_point: float
:return: value of the logistic function at t
:rtype: float
"""
return floor + capacity * expit(growth_rate * (x - inflection_point))
def get_logistic_func(growth_rate=1.0, capacity=1.0, floor=0.0, inflection_point=0.0):
"""Returns a function that evaluates the logistic function at t with the
specified growth rate, capacity, floor, and inflection point.
f(x) = floor + capacity / (1 + exp(-growth_rate * (x - inflection_point)))
:param growth_rate: growth rate
:type growth_rate: float
:param capacity: max value (carrying capacity)
:type capacity: float
:param floor: min value (lower bound)
:type floor: float
:param inflection_point: the t value of the inflection point
:type inflection_point: float
:return: the logistic function with specified parameters
:rtype: callable
"""
return lambda t: logistic(t, growth_rate, capacity, floor, inflection_point)
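# Hedged worked example: with the defaults, logistic(0.0) = 0.0 + 1.0 * expit(0) = 0.5,
# and a curve from get_logistic_func(capacity=2.0) rises from 0.0 toward 2.0 as its
# argument moves from -inf to +inf.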
|
[
"numpy.abs",
"pandas.DatetimeIndex",
"numpy.sin",
"numpy.arange",
"inspect.getmembers",
"pandas.DataFrame",
"warnings.simplefilter",
"inspect.isclass",
"warnings.catch_warnings",
"pandas.concat",
"datetime.datetime",
"scipy.special.expit",
"pandas.to_timedelta",
"pandas.to_datetime",
"pandas.Series",
"numpy.cos",
"numpy.array",
"pandas.tseries.offsets.QuarterBegin",
"numpy.sign"
] |
[((2738, 2769), 'pandas.to_datetime', 'pd.to_datetime', (['df[time_col][0]'], {}), '(df[time_col][0])\n', (2752, 2769), True, 'import pandas as pd\n'), ((6696, 6716), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dt'], {}), '(dt)\n', (6712, 6716), True, 'import pandas as pd\n'), ((11951, 11978), 'pandas.DataFrame', 'pd.DataFrame', (['features_dict'], {}), '(features_dict)\n', (11963, 11978), True, 'import pandas as pd\n'), ((12559, 12591), 'pandas.concat', 'pd.concat', (['[df, time_df]'], {'axis': '(1)'}), '([df, time_df], axis=1)\n', (12568, 12591), True, 'import pandas as pd\n'), ((18829, 18857), 'pandas.to_datetime', 'pd.to_datetime', (['df[date_col]'], {}), '(df[date_col])\n', (18843, 18857), True, 'import pandas as pd\n'), ((21067, 21094), 'pandas.to_timedelta', 'pd.to_timedelta', (['time_delta'], {}), '(time_delta)\n', (21082, 21094), True, 'import pandas as pd\n'), ((24501, 24529), 'pandas.to_datetime', 'pd.to_datetime', (['df[time_col]'], {}), '(df[time_col])\n', (24515, 24529), True, 'import pandas as pd\n'), ((24554, 24587), 'pandas.to_datetime', 'pd.to_datetime', (['changepoint_dates'], {}), '(changepoint_dates)\n', (24568, 24587), True, 'import pandas as pd\n'), ((28436, 28450), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28448, 28450), True, 'import pandas as pd\n'), ((7166, 7214), 'pandas.tseries.offsets.QuarterBegin', 'pd.tseries.offsets.QuarterBegin', ([], {'startingMonth': '(1)'}), '(startingMonth=1)\n', (7197, 7214), True, 'import pandas as pd\n'), ((14607, 14656), 'pandas.to_datetime', 'pd.to_datetime', (['country_df[cst.EVENT_DF_DATE_COL]'], {}), '(country_df[cst.EVENT_DF_DATE_COL])\n', (14621, 14656), True, 'import pandas as pd\n'), ((19067, 19101), 'pandas.to_datetime', 'pd.to_datetime', (['event_df[date_col]'], {}), '(event_df[date_col])\n', (19081, 19101), True, 'import pandas as pd\n'), ((21140, 21154), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21152, 21154), True, 'import pandas as pd\n'), ((21374, 21388), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21386, 21388), True, 'import pandas as pd\n'), ((25506, 25539), 'pandas.to_datetime', 'pd.to_datetime', (['changepoint_dates'], {}), '(changepoint_dates)\n', (25520, 25539), True, 'import pandas as pd\n'), ((28912, 28990), 'pandas.Series', 'pd.Series', (['new_col'], {'name': 'f"""{cst.CHANGEPOINT_COL_PREFIX}{i}{time_postfixes[i]}"""'}), "(new_col, name=f'{cst.CHANGEPOINT_COL_PREFIX}{i}{time_postfixes[i]}')\n", (28921, 28990), True, 'import pandas as pd\n'), ((29016, 29068), 'pandas.concat', 'pd.concat', (['[changepoint_df, new_changepoint]'], {'axis': '(1)'}), '([changepoint_df, new_changepoint], axis=1)\n', (29025, 29068), True, 'import pandas as pd\n'), ((44484, 44498), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (44496, 44498), True, 'import pandas as pd\n'), ((44683, 44694), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (44691, 44694), True, 'import numpy as np\n'), ((47575, 47585), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (47582, 47585), True, 'import numpy as np\n'), ((15095, 15125), 'inspect.getmembers', 'inspect.getmembers', (['fbholidays'], {}), '(fbholidays)\n', (15113, 15125), False, 'import inspect\n'), ((15258, 15286), 'inspect.getmembers', 'inspect.getmembers', (['holidays'], {}), '(holidays)\n', (15276, 15286), False, 'import inspect\n'), ((22480, 22512), 'numpy.arange', 'np.arange', ([], {'start': '(1)', 'stop': 'n_steps'}), '(start=1, stop=n_steps)\n', (22489, 22512), True, 'import numpy as np\n'), ((23334, 23377), 'numpy.arange', 'np.arange', ([], 
{'start': '(1)', 'stop': '(n_changepoints + 1)'}), '(start=1, stop=n_changepoints + 1)\n', (23343, 23377), True, 'import numpy as np\n'), ((28531, 28564), 'numpy.array', 'np.array', (['df[continuous_time_col]'], {}), '(df[continuous_time_col])\n', (28539, 28564), True, 'import numpy as np\n'), ((45275, 45284), 'numpy.sin', 'np.sin', (['u'], {}), '(u)\n', (45281, 45284), True, 'import numpy as np\n'), ((45320, 45329), 'numpy.cos', 'np.cos', (['u'], {}), '(u)\n', (45326, 45329), True, 'import numpy as np\n'), ((46971, 47005), 'pandas.concat', 'pd.concat', (['[out_df, fs_df]'], {'axis': '(1)'}), '([out_df, fs_df], axis=1)\n', (46980, 47005), True, 'import pandas as pd\n'), ((47597, 47606), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (47603, 47606), True, 'import numpy as np\n'), ((48433, 48476), 'scipy.special.expit', 'expit', (['(growth_rate * (x - inflection_point))'], {}), '(growth_rate * (x - inflection_point))\n', (48438, 48476), False, 'from scipy.special import expit\n'), ((2000, 2025), 'datetime.datetime', 'datetime', (['dt.year', '(12)', '(31)'], {}), '(dt.year, 12, 31)\n', (2008, 2025), False, 'from datetime import datetime\n'), ((10327, 10341), 'pandas.Series', 'pd.Series', (['dow'], {}), '(dow)\n', (10336, 10341), True, 'import pandas as pd\n'), ((10519, 10537), 'pandas.Series', 'pd.Series', (['str_dow'], {}), '(str_dow)\n', (10528, 10537), True, 'import pandas as pd\n'), ((15137, 15157), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (15152, 15157), False, 'import inspect\n'), ((15298, 15318), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (15313, 15318), False, 'import inspect\n'), ((7386, 7407), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""1D"""'], {}), "('1D')\n", (7401, 7407), True, 'import pandas as pd\n'), ((13817, 13842), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (13840, 13842), False, 'import warnings\n'), ((13864, 13895), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (13885, 13895), False, 'import warnings\n'), ((37123, 37165), 'pandas.to_datetime', 'pd.to_datetime', (["changepoints_dict['dates']"], {}), "(changepoints_dict['dates'])\n", (37137, 37165), True, 'import pandas as pd\n')]
|
# coding: utf-8
# In[2]:
#start of code
#importing packages
import numpy as np
import scipy.signal as sp
import matplotlib.pyplot as plt
# In[3]:
def time_domain_output(f,H,t_start,t_end):
t = np.linspace(t_start,t_end,10*(t_end-t_start))
t2,y,svec=sp.lsim(H,f,t)
return y
# In[4]:
t_start = 0
t_end = 100
t = np.linspace(t_start,t_end,10*(t_end-t_start))
f1 = np.cos(1.5*t) * np.exp(-0.5*t)
#d2y + 2.25y = x
H=sp.lti([1],[1,0,2.25])
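# Hedged note: H(s) = 1/(s^2 + 2.25) is the zero-initial-condition transfer function of
# d2y/dt2 + 2.25*y = x(t), i.e. an undamped oscillator with natural frequency 1.5 rad/s;
# sp.lsim below convolves it with the decaying-cosine forcing f1.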
# In[5]:
y1 = time_domain_output(f1,H,t_start,t_end)
# In[14]:
plt.plot(t,y1)
plt.xlabel(r"t ---------->",size=15)
plt.ylabel(r"x ---------->",size=15)
plt.title(r"System with decay = 0.5",size=20)
plt.show()
# In[16]:
f2 = np.cos(1.5*t) * np.exp(-0.05*t)
# In[17]:
y2 = time_domain_output(f2,H,t_start,t_end)
# In[18]:
plt.plot(t,y2)
plt.xlabel(r"t ---------->",size=15)
plt.ylabel(r"x ---------->",size=15)
plt.title(r"System with decay = 0.05",size=20)
plt.show()
# In[19]:
def cos_input(freq,damp_fac):
t = np.linspace(0,100,1000)
return np.cos(freq*t) * np.exp(-damp_fac*t)
# In[25]:
n=5
t = np.linspace(0,100,1000)
freq_range =np.linspace(1.4,1.6,n)
for freq in freq_range:
    plt.plot(t,time_domain_output(cos_input(freq,0.05),H,0,100))
plt.xlabel("t -------->",size=15)
plt.ylabel("x -------->",size =15)
plt.title(r"System response with Different Frequencies",size=15)
plt.legend(["Freq = ${:.2f}$".format(f) for f in freq_range])
plt.show()
# In[62]:
w,S,phi=H.bode()
plt.semilogx(w,S)
plt.plot(1.5,28,"ro",label=r"Resonance Frequency")
plt.title(r"Magnitude Bode plot with resonance freq = 1.5",size=14)
plt.xlabel(r"Freq in rad/s log(w) -------->",size=15)
plt.ylabel("Mag in dB -------->",size =15)
plt.legend()
plt.show()
plt.semilogx(w,phi)
#plt.plot(1.5,28,"ro",label=r"Resonance Frequency")
plt.title(r"Phase Bode plot with resonance freq = 1.5",size=14)
plt.xlabel(r"Freq in rad/s log(w) -------->",size=15)
plt.ylabel("Phase in degrees -------->",size =15)
plt.show()
# In[11]:
#Eqn 1: x'' + x - y = 0
#Eqn 2: y'' + 2(y - x) = 0
# In[52]:
#from Eqn 1: y = x'' + x
#substituting into Eqn 2 gives x'''' + 3x'' = 0
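# Note: the rational functions below can be recovered by taking Laplace transforms
# with the (assumed) initial conditions x(0)=1, x'(0)=0, y(0)=0, y'(0)=0, which give
# (s^2+1)X - Y = s and (s^2+2)Y - 2X = 0, hence
# X(s) = (s^2+2)/(s^3+3s) and Y(s) = 2/(s^3+3s), matching the sp.lti calls below.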
xs = sp.lti([1,0,2],[1,0,3,0])
ys = sp.lti([2],[1,0,3,0])
# In[53]:
t = np.linspace(0,20,200)
# In[54]:
t1,x = sp.impulse(xs,None,t)
t2,y = sp.impulse(ys,None,t)
# In[72]:
plt.plot(t1,x,label=r"x(t)")
plt.plot(t2,y,label=r"y(t)")
plt.legend()
plt.xlabel("t ---------------->",size=15)
plt.title("Coupled Equation Response",size=15)
plt.show()
# In[77]:
H_circ1 = sp.lti(np.poly1d([10**12]),np.poly1d([1,10**8,10**12]))
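# Note: H_circ1 is the second-order low-pass transfer function
# 10^12 / (s^2 + 10^8*s + 10^12), with unit DC gain and poles at roughly
# -10^4 and -10^8 rad/s.  Of the two input tones used below, the 10^3 rad/s
# component passes essentially unattenuated while the 10^6 rad/s component
# is suppressed by a factor of about 100.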
w1,S1,phi1=H_circ1.bode()
plt.semilogx(w1,S1)
plt.xlabel("Frequency in rad/s",size=15)
plt.ylabel("Magnitude in dB",size=15)
plt.title("Magnitude plot",size=15)
plt.grid(True)
plt.show()
plt.semilogx(w1,phi1)
plt.xlabel("Frequency in rad/s",size=15)
plt.ylabel("Phase in degrees",size=15)
plt.title("Phase plot",size=15)
plt.grid(True)
plt.show()
# In[79]:
t_steady = np.linspace(0,10**-2,10**5)
in_steady = np.cos(10**3 * t_steady) - np.cos(10**6 * t_steady)
# In[80]:
t1,y_steady,svec1=sp.lsim(H_circ1,in_steady,t_steady)
# In[91]:
plt.plot(t1,y_steady)
plt.title("Steady state Response")
plt.ylabel(r"$V_{o}(t) --->$",size=15)
plt.xlabel(r"$t --->$",size=15)
plt.show()
# In[93]:
t_trans = np.linspace(0,35*10**-6,30*10**2+1)
in_trans = np.cos(10**3 * t_trans) - np.cos(10**6 * t_trans)
# In[94]:
t2,y_trans,svec2 = sp.lsim(H_circ1,in_trans,t_trans)
# In[95]:
plt.plot(t2,y_trans)
plt.title("Transient Response")
plt.ylabel(r"$V_{o}(t) --->$",size=15)
plt.xlabel(r"$t --->$",size=15)
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.signal.impulse",
"matplotlib.pyplot.legend",
"scipy.signal.lsim",
"matplotlib.pyplot.grid",
"numpy.exp",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.semilogx",
"scipy.signal.lti"
] |
[((340, 391), 'numpy.linspace', 'np.linspace', (['t_start', 't_end', '(10 * (t_end - t_start))'], {}), '(t_start, t_end, 10 * (t_end - t_start))\n', (351, 391), True, 'import numpy as np\n'), ((441, 466), 'scipy.signal.lti', 'sp.lti', (['[1]', '[1, 0, 2.25]'], {}), '([1], [1, 0, 2.25])\n', (447, 466), True, 'import scipy.signal as sp\n'), ((535, 550), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y1'], {}), '(t, y1)\n', (543, 550), True, 'import matplotlib.pyplot as plt\n'), ((550, 596), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t ---------->"""'], {'size': '(15)'}), "('t ---------->', size=15)\n", (560, 596), True, 'import matplotlib.pyplot as plt\n'), ((597, 644), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x ---------->"""'], {'size': '(15)'}), "('x ---------->', size=15)\n", (607, 644), True, 'import matplotlib.pyplot as plt\n'), ((645, 690), 'matplotlib.pyplot.title', 'plt.title', (['"""System with decay = 0.5"""'], {'size': '(20)'}), "('System with decay = 0.5', size=20)\n", (654, 690), True, 'import matplotlib.pyplot as plt\n'), ((691, 701), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (699, 701), True, 'import matplotlib.pyplot as plt\n'), ((825, 840), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y2'], {}), '(t, y2)\n', (833, 840), True, 'import matplotlib.pyplot as plt\n'), ((840, 886), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t ---------->"""'], {'size': '(15)'}), "('t ---------->', size=15)\n", (850, 886), True, 'import matplotlib.pyplot as plt\n'), ((887, 934), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x ---------->"""'], {'size': '(15)'}), "('x ---------->', size=15)\n", (897, 934), True, 'import matplotlib.pyplot as plt\n'), ((935, 981), 'matplotlib.pyplot.title', 'plt.title', (['"""System with decay = 0.05"""'], {'size': '(20)'}), "('System with decay = 0.05', size=20)\n", (944, 981), True, 'import matplotlib.pyplot as plt\n'), ((982, 992), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (990, 992), True, 'import matplotlib.pyplot as plt\n'), ((1136, 1161), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (1147, 1161), True, 'import numpy as np\n'), ((1172, 1196), 'numpy.linspace', 'np.linspace', (['(1.4)', '(1.6)', 'n'], {}), '(1.4, 1.6, n)\n', (1183, 1196), True, 'import numpy as np\n'), ((1280, 1320), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t -------->"""'], {'size': '(15)'}), "('t -------->', size=15)\n", (1290, 1320), True, 'import matplotlib.pyplot as plt\n'), ((1320, 1360), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x -------->"""'], {'size': '(15)'}), "('x -------->', size=15)\n", (1330, 1360), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1425), 'matplotlib.pyplot.title', 'plt.title', (['"""System response with Different Frequencies"""'], {'size': '(15)'}), "('System response with Different Frequencies', size=15)\n", (1370, 1425), True, 'import matplotlib.pyplot as plt\n'), ((1488, 1498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1496, 1498), True, 'import matplotlib.pyplot as plt\n'), ((1530, 1548), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['w', 'S'], {}), '(w, S)\n', (1542, 1548), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1600), 'matplotlib.pyplot.plot', 'plt.plot', (['(1.5)', '(28)', '"""ro"""'], {'label': '"""Resonance Frequency"""'}), "(1.5, 28, 'ro', label='Resonance Frequency')\n", (1556, 1600), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1666), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnitude Bode plot with 
resonance freq = 1.5"""'], {'size': '(14)'}), "('Magnitude Bode plot with resonance freq = 1.5', size=14)\n", (1608, 1666), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1726), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq in rad/s log(w) -------->"""'], {'size': '(15)'}), "('Freq in rad/s log(w) -------->', size=15)\n", (1677, 1726), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1775), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mag in dB -------->"""'], {'size': '(15)'}), "('Mag in dB -------->', size=15)\n", (1737, 1775), True, 'import matplotlib.pyplot as plt\n'), ((1776, 1788), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1786, 1788), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1799), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1797, 1799), True, 'import matplotlib.pyplot as plt\n'), ((1800, 1820), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['w', 'phi'], {}), '(w, phi)\n', (1812, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1935), 'matplotlib.pyplot.title', 'plt.title', (['"""Phase Bode plot with resonance freq = 1.5"""'], {'size': '(14)'}), "('Phase Bode plot with resonance freq = 1.5', size=14)\n", (1881, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1936, 1995), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Freq in rad/s log(w) -------->"""'], {'size': '(15)'}), "('Freq in rad/s log(w) -------->', size=15)\n", (1946, 1995), True, 'import matplotlib.pyplot as plt\n'), ((1996, 2051), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phase in degrees -------->"""'], {'size': '(15)'}), "('Phase in degrees -------->', size=15)\n", (2006, 2051), True, 'import matplotlib.pyplot as plt\n'), ((2052, 2062), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2060, 2062), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2217), 'scipy.signal.lti', 'sp.lti', (['[1, 0, 2]', '[1, 0, 3, 0]'], {}), '([1, 0, 2], [1, 0, 3, 0])\n', (2192, 2217), True, 'import scipy.signal as sp\n'), ((2217, 2242), 'scipy.signal.lti', 'sp.lti', (['[2]', '[1, 0, 3, 0]'], {}), '([2], [1, 0, 3, 0])\n', (2223, 2242), True, 'import scipy.signal as sp\n'), ((2257, 2280), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(200)'], {}), '(0, 20, 200)\n', (2268, 2280), True, 'import numpy as np\n'), ((2300, 2323), 'scipy.signal.impulse', 'sp.impulse', (['xs', 'None', 't'], {}), '(xs, None, t)\n', (2310, 2323), True, 'import scipy.signal as sp\n'), ((2329, 2352), 'scipy.signal.impulse', 'sp.impulse', (['ys', 'None', 't'], {}), '(ys, None, t)\n', (2339, 2352), True, 'import scipy.signal as sp\n'), ((2365, 2394), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'x'], {'label': '"""x(t)"""'}), "(t1, x, label='x(t)')\n", (2373, 2394), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2423), 'matplotlib.pyplot.plot', 'plt.plot', (['t2', 'y'], {'label': '"""y(t)"""'}), "(t2, y, label='y(t)')\n", (2402, 2423), True, 'import matplotlib.pyplot as plt\n'), ((2423, 2435), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2433, 2435), True, 'import matplotlib.pyplot as plt\n'), ((2436, 2495), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t ---------------->"""'], {'size': '(15)'}), "('t ---------------->', size=15)\n", (2446, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2542), 'matplotlib.pyplot.title', 'plt.title', (['"""Coupled Equation Response"""'], {'size': '(15)'}), "('Coupled Equation Response', size=15)\n", (2504, 2542), True, 'import matplotlib.pyplot as plt\n'), ((2542, 2552), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (2550, 2552), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2679), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['w1', 'S1'], {}), '(w1, S1)\n', (2671, 2679), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2720), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency in rad/s"""'], {'size': '(15)'}), "('Frequency in rad/s', size=15)\n", (2689, 2720), True, 'import matplotlib.pyplot as plt\n'), ((2720, 2758), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude in dB"""'], {'size': '(15)'}), "('Magnitude in dB', size=15)\n", (2730, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2758, 2794), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnitude plot"""'], {'size': '(15)'}), "('Magnitude plot', size=15)\n", (2767, 2794), True, 'import matplotlib.pyplot as plt\n'), ((2794, 2808), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2802, 2808), True, 'import matplotlib.pyplot as plt\n'), ((2809, 2819), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2817, 2819), True, 'import matplotlib.pyplot as plt\n'), ((2820, 2842), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['w1', 'phi1'], {}), '(w1, phi1)\n', (2832, 2842), True, 'import matplotlib.pyplot as plt\n'), ((2842, 2883), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency in rad/s"""'], {'size': '(15)'}), "('Frequency in rad/s', size=15)\n", (2852, 2883), True, 'import matplotlib.pyplot as plt\n'), ((2883, 2922), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phase in degrees"""'], {'size': '(15)'}), "('Phase in degrees', size=15)\n", (2893, 2922), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2954), 'matplotlib.pyplot.title', 'plt.title', (['"""Phase plot"""'], {'size': '(15)'}), "('Phase plot', size=15)\n", (2931, 2954), True, 'import matplotlib.pyplot as plt\n'), ((2954, 2968), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2962, 2968), True, 'import matplotlib.pyplot as plt\n'), ((2969, 2979), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2977, 2979), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3038), 'numpy.linspace', 'np.linspace', (['(0)', '(10 ** -2)', '(10 ** 5)'], {}), '(0, 10 ** -2, 10 ** 5)\n', (3016, 3038), True, 'import numpy as np\n'), ((3129, 3166), 'scipy.signal.lsim', 'sp.lsim', (['H_circ1', 'in_steady', 't_steady'], {}), '(H_circ1, in_steady, t_steady)\n', (3136, 3166), True, 'import scipy.signal as sp\n'), ((3179, 3201), 'matplotlib.pyplot.plot', 'plt.plot', (['t1', 'y_steady'], {}), '(t1, y_steady)\n', (3187, 3201), True, 'import matplotlib.pyplot as plt\n'), ((3201, 3235), 'matplotlib.pyplot.title', 'plt.title', (['"""Steady state Response"""'], {}), "('Steady state Response')\n", (3210, 3235), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3276), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$V_{o}(t) --->$"""'], {'size': '(15)'}), "('$V_{o}(t) --->$', size=15)\n", (3246, 3276), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t --->$"""'], {'size': '(15)'}), "('$t --->$', size=15)\n", (3287, 3309), True, 'import matplotlib.pyplot as plt\n'), ((3310, 3320), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3318, 3320), True, 'import matplotlib.pyplot as plt\n'), ((3345, 3392), 'numpy.linspace', 'np.linspace', (['(0)', '(35 * 10 ** -6)', '(30 * 10 ** 2 + 1)'], {}), '(0, 35 * 10 ** -6, 30 * 10 ** 2 + 1)\n', (3356, 3392), True, 'import numpy as np\n'), ((3475, 3510), 'scipy.signal.lsim', 'sp.lsim', (['H_circ1', 'in_trans', 
't_trans'], {}), '(H_circ1, in_trans, t_trans)\n', (3482, 3510), True, 'import scipy.signal as sp\n'), ((3523, 3544), 'matplotlib.pyplot.plot', 'plt.plot', (['t2', 'y_trans'], {}), '(t2, y_trans)\n', (3531, 3544), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3575), 'matplotlib.pyplot.title', 'plt.title', (['"""Transient Response"""'], {}), "('Transient Response')\n", (3553, 3575), True, 'import matplotlib.pyplot as plt\n'), ((3576, 3616), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$V_{o}(t) --->$"""'], {'size': '(15)'}), "('$V_{o}(t) --->$', size=15)\n", (3586, 3616), True, 'import matplotlib.pyplot as plt\n'), ((3617, 3649), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t --->$"""'], {'size': '(15)'}), "('$t --->$', size=15)\n", (3627, 3649), True, 'import matplotlib.pyplot as plt\n'), ((3650, 3660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3658, 3660), True, 'import matplotlib.pyplot as plt\n'), ((206, 257), 'numpy.linspace', 'np.linspace', (['t_start', 't_end', '(10 * (t_end - t_start))'], {}), '(t_start, t_end, 10 * (t_end - t_start))\n', (217, 257), True, 'import numpy as np\n'), ((266, 282), 'scipy.signal.lsim', 'sp.lsim', (['H', 'f', 't'], {}), '(H, f, t)\n', (273, 282), True, 'import scipy.signal as sp\n'), ((391, 406), 'numpy.cos', 'np.cos', (['(1.5 * t)'], {}), '(1.5 * t)\n', (397, 406), True, 'import numpy as np\n'), ((407, 423), 'numpy.exp', 'np.exp', (['(-0.5 * t)'], {}), '(-0.5 * t)\n', (413, 423), True, 'import numpy as np\n'), ((721, 736), 'numpy.cos', 'np.cos', (['(1.5 * t)'], {}), '(1.5 * t)\n', (727, 736), True, 'import numpy as np\n'), ((737, 754), 'numpy.exp', 'np.exp', (['(-0.05 * t)'], {}), '(-0.05 * t)\n', (743, 754), True, 'import numpy as np\n'), ((1041, 1066), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(1000)'], {}), '(0, 100, 1000)\n', (1052, 1066), True, 'import numpy as np\n'), ((2584, 2605), 'numpy.poly1d', 'np.poly1d', (['[10 ** 12]'], {}), '([10 ** 12])\n', (2593, 2605), True, 'import numpy as np\n'), ((2604, 2637), 'numpy.poly1d', 'np.poly1d', (['[1, 10 ** 8, 10 ** 12]'], {}), '([1, 10 ** 8, 10 ** 12])\n', (2613, 2637), True, 'import numpy as np\n'), ((3045, 3071), 'numpy.cos', 'np.cos', (['(10 ** 3 * t_steady)'], {}), '(10 ** 3 * t_steady)\n', (3051, 3071), True, 'import numpy as np\n'), ((3072, 3098), 'numpy.cos', 'np.cos', (['(10 ** 6 * t_steady)'], {}), '(10 ** 6 * t_steady)\n', (3078, 3098), True, 'import numpy as np\n'), ((3392, 3417), 'numpy.cos', 'np.cos', (['(10 ** 3 * t_trans)'], {}), '(10 ** 3 * t_trans)\n', (3398, 3417), True, 'import numpy as np\n'), ((3418, 3443), 'numpy.cos', 'np.cos', (['(10 ** 6 * t_trans)'], {}), '(10 ** 6 * t_trans)\n', (3424, 3443), True, 'import numpy as np\n'), ((1077, 1093), 'numpy.cos', 'np.cos', (['(freq * t)'], {}), '(freq * t)\n', (1083, 1093), True, 'import numpy as np\n'), ((1094, 1115), 'numpy.exp', 'np.exp', (['(-damp_fac * t)'], {}), '(-damp_fac * t)\n', (1100, 1115), True, 'import numpy as np\n')]
|
import sys, time, os, json
import numpy as np
import matplotlib.pylab as plt
from PIL import Image
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from google.colab import drive
def Unet(img_shape):
def conv2d(x, filters):
x = Conv2D(filters, 4, strides=2, padding='same')(x)
x = LeakyReLU(0.2)(x)
x = InstanceNormalization()(x)
return x
def deconv2d(x, contracting_path, filters, drop_rate=0):
x = UpSampling2D(2)(x)
x = Conv2D(filters, 4, padding='same', activation='relu')(x)
if drop_rate:
x = Dropout(drop_rate)(x)
x = InstanceNormalization()(x)
return Concatenate()([x, contracting_path])
img = Input(img_shape)
	#Encoder
c1 = conv2d(img, 32)
c2 = conv2d(c1, 64)
c3 = conv2d(c2, 128)
	#Bottleneck (middle layer)
x = conv2d(c3, 256)
	#Decoder
x = deconv2d(x, c3, 128)
x = deconv2d(x, c2, 64)
x = deconv2d(x, c1, 32)
	#Output at the original image size
x = UpSampling2D(2)(x)
x = Conv2D(img_shape[-1], 4, padding='same', activation='tanh')(x)
return Model(img, x)
def Discriminator(img_shape):
def d_layer(x, filters, bn=True):
x = Conv2D(filters, 4, strides=2, padding='same')(x)
x = LeakyReLU(0.2)(x)
if bn:
x = InstanceNormalization()(x)
return x
img = Input(img_shape)
	#Convolve down to the PatchGAN patch size
x = d_layer(img, 64, False)
x = d_layer(x, 128)
x = d_layer(x, 256)
x = d_layer(x, 512)
	#Output the 0-1 validity label map
x = Conv2D(1, 4, padding='same')(x)
return Model(img, x)
def CycleGAN(gen_AB, gen_BA, disc_A, disc_B, img_shape):
img_A = Input(img_shape)
img_B = Input(img_shape)
fake_B = gen_AB(img_A)
fake_A = gen_BA(img_B)
reconstr_A = gen_BA(fake_B)
reconstr_B = gen_AB(fake_A)
img_A_id = gen_BA(img_A)
img_B_id = gen_AB(img_B)
valid_A = disc_A(fake_A)
valid_B = disc_B(fake_B)
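	# The six outputs below are paired, in order, with the losses
	# ['mse','mse','mae','mae','mae','mae'] and weights
	# [1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id] used in train():
	# adversarial validity, cycle-consistency reconstruction, and identity mapping.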
return Model([img_A, img_B],
[valid_A, valid_B, reconstr_A, reconstr_B, img_A_id, img_B_id])
def load_datasets(path, train_num, img_shape):
return np.memmap(path, dtype=np.uint8, mode="r", shape=(train_num,)+img_shape)
def get_json(json_name, init_func):
if os.path.isfile(json_name):
with open(json_name) as f:
return json.load(f)
else:
return init_func()
def train():
	#Mount Google Drive and create the output folders
drive_root = '/content/drive'
drive.mount(drive_root)
datasets_dir = "%s/My Drive/datasets"%drive_root
train_dir = "%s/My Drive/train/cycle128"%drive_root
imgs_dir = "%s/imgs"%train_dir
os.makedirs(imgs_dir, exist_ok=True)
	#Training data
train_num = 30000
test_num = 6000
img_size = 128
data_num = train_num + test_num
img_shape = (img_size,img_size,3)
train_A = load_datasets("%s/color%d_%d.npy"%(datasets_dir,img_size,data_num), data_num, img_shape)
train_B = load_datasets("%s/gray%d_%d.npy"%(datasets_dir,img_size,data_num), data_num, (img_size,img_size))
	#Number of training iterations
epochs = 200
batch_size = 100
batch_num = train_num // batch_size
	#Training info carried over from previous runs
info_path = "%s/info.json"%train_dir
info = get_json(info_path, lambda: {"epoch":0})
last_epoch = info["epoch"]
#PatchGAN
patch_shape = (img_size//16, img_size//16, 1)
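	# The discriminator halves the spatial size four times (stride-2 convs),
	# so a 128x128 input yields an 8x8 PatchGAN output map: img_size//16.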
real = np.ones((batch_size,) + patch_shape)
fake = np.zeros((batch_size,) + patch_shape)
	#Models
lambda_cycle = 10.0
lambda_id = 0.1 * lambda_cycle
opt = Adam(0.0002, 0.5)
gen_AB_path = "%s/gen_AB.h5"%train_dir
gen_BA_path = "%s/gen_BA.h5"%train_dir
disc_A_path = "%s/disc_A.h5"%train_dir
disc_B_path = "%s/disc_B.h5"%train_dir
if os.path.isfile(disc_B_path):
gen_AB = load_model(gen_AB_path, custom_objects={'InstanceNormalization': InstanceNormalization})
gen_BA = load_model(gen_BA_path, custom_objects={'InstanceNormalization': InstanceNormalization})
disc_A = load_model(disc_A_path, custom_objects={'InstanceNormalization': InstanceNormalization})
disc_B = load_model(disc_B_path, custom_objects={'InstanceNormalization': InstanceNormalization})
		print_img(last_epoch, gen_BA, train_A, train_B, 0, train_num, "train", img_size, imgs_dir)
		print_img(last_epoch, gen_BA, train_A, train_B, train_num, test_num, "test", img_size, imgs_dir)
else:
gen_AB = Unet(img_shape)
gen_BA = Unet(img_shape)
disc_A = Discriminator(img_shape)
disc_B = Discriminator(img_shape)
disc_A.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
disc_B.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
disc_A.trainable = False
disc_B.trainable = False
cycle_gan = CycleGAN(gen_AB, gen_BA, disc_A, disc_B, img_shape)
cycle_gan.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],
loss_weights=[1, 1, lambda_cycle, lambda_cycle, lambda_id, lambda_id], optimizer=opt)
	#Epoch loop
for e in range(last_epoch, epochs):
start = time.time()
		#Mini-batch loop
for i in range(batch_num):
			#Randomly select batch indices
idx = np.random.choice(train_num, batch_size, replace=False)
imgs_A = train_A[idx].astype(np.float32) / 255
idx = np.random.choice(train_num, batch_size, replace=False)
imgs_B = convert_rgb(train_B[idx]).astype(np.float32) / 255
			#Train the discriminators
fake_B = gen_AB.predict(imgs_A)
fake_A = gen_BA.predict(imgs_B)
d_loss_real = disc_A.train_on_batch(imgs_A, real)
d_loss_fake = disc_A.train_on_batch(fake_A, fake)
d_loss_A = np.add(d_loss_real, d_loss_fake) * 0.5
d_loss_real = disc_B.train_on_batch(imgs_B, real)
d_loss_fake = disc_B.train_on_batch(fake_B, fake)
d_loss_B = np.add(d_loss_real, d_loss_fake) * 0.5
d_loss = np.add(d_loss_A, d_loss_B) * 0.5
			#Train the generators
g_loss = cycle_gan.train_on_batch([imgs_A, imgs_B],
[real, real, imgs_A, imgs_B, imgs_A, imgs_B])
			#Log progress
print("\repoch:%d/%d batch:%d/%d %ds d_loss:%s g_loss:%s" %
(e+1,epochs, (i+1),batch_num, (time.time()-start), d_loss[0], g_loss[0]), end="")
sys.stdout.flush()
print()
		#Image generation test
if (e+1) % 10 == 0 or e == 0:
			print_img(e+1, gen_BA, train_A, train_B, 0, train_num, "train", img_size, imgs_dir)
			print_img(e+1, gen_BA, train_A, train_B, train_num, test_num, "test", img_size, imgs_dir)
		#Save the weights
gen_AB.save(gen_AB_path)
gen_BA.save(gen_BA_path)
disc_A.save(disc_A_path)
disc_B.save(disc_B_path)
info["epoch"] += 1
with open(info_path, "w") as f:
json.dump(info, f)
def convert_rgb(train_B):
return np.array([np.asarray(Image.fromarray(x).convert("RGB")) for x in train_B])
def print_img(epoch, gen, train_A, train_B, offset, limit, title, img_size, imgs_dir):
	#Randomly select data
num = 10
idx = np.random.choice(limit, num, replace=False) + offset
imgs_A = train_A[idx]
imgs_B = convert_rgb(train_B[idx])
	#Try generating
fake_A = gen.predict(imgs_B.astype(np.float32) / 255)
fake_A = (fake_A * 255).clip(0).astype(np.uint8)
	#Concatenate into an image grid
imgs_A = np.concatenate(imgs_A, axis=1)
imgs_B = np.concatenate(imgs_B, axis=1)
fake_A = np.concatenate(fake_A, axis=1)
imgs = np.concatenate((imgs_B,imgs_A,fake_A), axis=0)
	#Plot
plt.figure(figsize=(20, 6))
plt.title(title)
plt.imshow(imgs)
plt.axis('off')
plt.show()
	#Save
Image.fromarray(imgs).save("%s/cycle%d_%d_%s.png"%(imgs_dir,img_size,epoch,title))
#Run training
train()
|
[
"matplotlib.pylab.imshow",
"numpy.ones",
"matplotlib.pylab.axis",
"os.path.isfile",
"sys.stdout.flush",
"matplotlib.pylab.title",
"matplotlib.pylab.show",
"matplotlib.pylab.figure",
"numpy.random.choice",
"numpy.add",
"json.dump",
"google.colab.drive.mount",
"numpy.memmap",
"numpy.concatenate",
"json.load",
"os.makedirs",
"numpy.zeros",
"time.time",
"keras_contrib.layers.normalization.instancenormalization.InstanceNormalization",
"PIL.Image.fromarray"
] |
[((2246, 2319), 'numpy.memmap', 'np.memmap', (['path'], {'dtype': 'np.uint8', 'mode': '"""r"""', 'shape': '((train_num,) + img_shape)'}), "(path, dtype=np.uint8, mode='r', shape=(train_num,) + img_shape)\n", (2255, 2319), True, 'import numpy as np\n'), ((2365, 2390), 'os.path.isfile', 'os.path.isfile', (['json_name'], {}), '(json_name)\n', (2379, 2390), False, 'import sys, time, os, json\n'), ((2581, 2604), 'google.colab.drive.mount', 'drive.mount', (['drive_root'], {}), '(drive_root)\n', (2592, 2604), False, 'from google.colab import drive\n'), ((2757, 2793), 'os.makedirs', 'os.makedirs', (['imgs_dir'], {'exist_ok': '(True)'}), '(imgs_dir, exist_ok=True)\n', (2768, 2793), False, 'import sys, time, os, json\n'), ((3476, 3512), 'numpy.ones', 'np.ones', (['((batch_size,) + patch_shape)'], {}), '((batch_size,) + patch_shape)\n', (3483, 3512), True, 'import numpy as np\n'), ((3525, 3562), 'numpy.zeros', 'np.zeros', (['((batch_size,) + patch_shape)'], {}), '((batch_size,) + patch_shape)\n', (3533, 3562), True, 'import numpy as np\n'), ((3847, 3874), 'os.path.isfile', 'os.path.isfile', (['disc_B_path'], {}), '(disc_B_path)\n', (3861, 3874), False, 'import sys, time, os, json\n'), ((7463, 7493), 'numpy.concatenate', 'np.concatenate', (['imgs_A'], {'axis': '(1)'}), '(imgs_A, axis=1)\n', (7477, 7493), True, 'import numpy as np\n'), ((7508, 7538), 'numpy.concatenate', 'np.concatenate', (['imgs_B'], {'axis': '(1)'}), '(imgs_B, axis=1)\n', (7522, 7538), True, 'import numpy as np\n'), ((7553, 7583), 'numpy.concatenate', 'np.concatenate', (['fake_A'], {'axis': '(1)'}), '(fake_A, axis=1)\n', (7567, 7583), True, 'import numpy as np\n'), ((7596, 7644), 'numpy.concatenate', 'np.concatenate', (['(imgs_B, imgs_A, fake_A)'], {'axis': '(0)'}), '((imgs_B, imgs_A, fake_A), axis=0)\n', (7610, 7644), True, 'import numpy as np\n'), ((7659, 7686), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 6)'}), '(figsize=(20, 6))\n', (7669, 7686), True, 'import matplotlib.pylab as plt\n'), ((7692, 7708), 'matplotlib.pylab.title', 'plt.title', (['title'], {}), '(title)\n', (7701, 7708), True, 'import matplotlib.pylab as plt\n'), ((7714, 7730), 'matplotlib.pylab.imshow', 'plt.imshow', (['imgs'], {}), '(imgs)\n', (7724, 7730), True, 'import matplotlib.pylab as plt\n'), ((7736, 7751), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7744, 7751), True, 'import matplotlib.pylab as plt\n'), ((7757, 7767), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7765, 7767), True, 'import matplotlib.pylab as plt\n'), ((5166, 5177), 'time.time', 'time.time', ([], {}), '()\n', (5175, 5177), False, 'import sys, time, os, json\n'), ((7193, 7236), 'numpy.random.choice', 'np.random.choice', (['limit', 'num'], {'replace': '(False)'}), '(limit, num, replace=False)\n', (7209, 7236), True, 'import numpy as np\n'), ((473, 496), 'keras_contrib.layers.normalization.instancenormalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (494, 496), False, 'from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n'), ((757, 780), 'keras_contrib.layers.normalization.instancenormalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (778, 780), False, 'from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n'), ((2448, 2460), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2457, 2460), False, 'import sys, time, os, json\n'), ((5276, 5330), 'numpy.random.choice', 'np.random.choice', (['train_num', 
'batch_size'], {'replace': '(False)'}), '(train_num, batch_size, replace=False)\n', (5292, 5330), True, 'import numpy as np\n'), ((5410, 5464), 'numpy.random.choice', 'np.random.choice', (['train_num', 'batch_size'], {'replace': '(False)'}), '(train_num, batch_size, replace=False)\n', (5426, 5464), True, 'import numpy as np\n'), ((6429, 6447), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6445, 6447), False, 'import sys, time, os, json\n'), ((6936, 6954), 'json.dump', 'json.dump', (['info', 'f'], {}), '(info, f)\n', (6945, 6954), False, 'import sys, time, os, json\n'), ((7782, 7803), 'PIL.Image.fromarray', 'Image.fromarray', (['imgs'], {}), '(imgs)\n', (7797, 7803), False, 'from PIL import Image\n'), ((1427, 1450), 'keras_contrib.layers.normalization.instancenormalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (1448, 1450), False, 'from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n'), ((5797, 5829), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (5803, 5829), True, 'import numpy as np\n'), ((5986, 6018), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (5992, 6018), True, 'import numpy as np\n'), ((6047, 6073), 'numpy.add', 'np.add', (['d_loss_A', 'd_loss_B'], {}), '(d_loss_A, d_loss_B)\n', (6053, 6073), True, 'import numpy as np\n'), ((7017, 7035), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (7032, 7035), False, 'from PIL import Image\n'), ((6365, 6376), 'time.time', 'time.time', ([], {}), '()\n', (6374, 6376), False, 'import sys, time, os, json\n')]
|
import argparse
import os
import sys
import cv2
import numpy as np
from matplotlib import pyplot as plt
from functools import cmp_to_key
from fhi_lib.geometry import Point, Line
class DistanceEstimator():
def __init__(self, img):
self.img = img
self.panel_length = 2235
self.scale_length = 100
def initialize(self):
self.__find_scales()
self.__form_reference_points()
self.__shift_accessory_coordinate_init()
print('Estimator initialized')
def initialize_with_pt(self, pt):
self.__find_scales()
self.__form_reference_points()
self.vertical_pt2 = Point(pt)
self.__shift_accessory_coordinate_init()
print('Estimator initialized')
def display_reference_pts(self, img):
img = cv2.circle(img, self.origin.get_point_tuple(), 20, (0,0,0), 3)
img = cv2.circle(img, self.horizontal_pt.get_point_tuple(), 20, (0,255,0), 3)
img = cv2.circle(img, self.vertical_pt.get_point_tuple(), 20, (255,0,0), 3)
img = cv2.circle(img, self.vertical_pt2.get_point_tuple(), 20, (255,0,0), 3)
img = cv2.circle(img, self.origin.get_point_tuple(), 0, (0,0,255), 3)
img = cv2.circle(img, self.horizontal_pt.get_point_tuple(), 0, (0,0,255), 3)
img = cv2.circle(img, self.vertical_pt.get_point_tuple(), 0, (0,0,255), 3)
img = cv2.circle(img, self.vertical_pt2.get_point_tuple(), 0, (0,0,255), 3)
return img
def estimate(self, pt_itr):
img_intersection = self.__shift_accessory_coordinate(pt_itr)
dist = self.__cross_ratio(img_intersection)
caption = '{}\n'.format(int(dist))
return caption
def __find_scales(self):
### Image Processing, convert rgb to hsv and find the scale by its color ###
blur = cv2.GaussianBlur(self.img, (5,5), 0)
img_hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
img_threshold = cv2.inRange(img_hsv, (45,20,230), (90,220,255))
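		# Note: OpenCV hue spans 0-179, so the (45,20,230)-(90,220,255) window keeps
		# bright pixels (V >= 230) with green-to-cyan hues, which is assumed to match
		# the colour of the scale being detected.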
morphology_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilation = cv2.dilate(img_threshold, morphology_kernel, iterations=3)
thresh = cv2.erode(dilation, morphology_kernel, iterations=3)
'''
compare_img = np.hstack((img_threshold,thresh))
plt.imshow(compare_img)
plt.show()
'''
### Crop the image as we know the scale is always on the left half of the image ###
cropped_thresh = thresh[:, 0:int(thresh.shape[1]/2)]
contours, _ = cv2.findContours(image=cropped_thresh,
mode=cv2.RETR_EXTERNAL,
method=cv2.CHAIN_APPROX_SIMPLE)
		### Discard contours that are not quadrilaterals or are smaller than the minimal area (1000 px) ###
result_contours = {}
epsilon = 30
minimal_area = 1000
for contour in contours:
contour_area = cv2.contourArea(contour)
if contour_area > minimal_area:
hull = cv2.convexHull(contour)
approxCurve = cv2.approxPolyDP(hull, epsilon, True)
if len(approxCurve) == 4:
result_contours.update({contour_area : [approxCurve, contour]})
self.__verify_shape(result_contours)
		# sort the contours by area in ascending order
		result_contours = sorted(result_contours.items())
		# pick the contour with the largest area as the near scale, and the second largest as the far scale
		self.near_scale = result_contours[-1]
		self.far_scale = result_contours[-2]
def __verify_shape(self, result_contours):
# For a parallel shape, the length of the two opposite sides should be approximately the same.
tolerance = 0.55
remove_keys = []
for key in result_contours.keys():
pts = result_contours[key][0]
pts = pts[:,0,:]
pt1 = Point(pts[0])
pt2 = Point(pts[1])
pt3 = Point(pts[2])
pt4 = Point(pts[3])
			dist1_2 = pt1.get_distance(pt2).astype(int)
			dist3_4 = pt3.get_distance(pt4).astype(int)
			dist1_4 = pt1.get_distance(pt4).astype(int)
			dist2_3 = pt2.get_distance(pt3).astype(int)
if np.absolute(dist1_2 - dist3_4) / np.min([dist1_2, dist3_4])> tolerance:
remove_keys.append(key)
continue
elif np.absolute(dist1_4 - dist2_3) / np.min([dist1_4, dist2_3])> tolerance:
remove_keys.append(key)
continue
for remove_key in remove_keys:
del result_contours[remove_key]
def __form_reference_points(self):
self.near_scale[1][0] = self.near_scale[1][0][:,0,:]
self.far_scale[1][0] = self.far_scale[1][0][:,0,:]
self.far_scale[1][0] = self.__set_orientation_hull(self.far_scale[1][0])
self.near_scale[1][0] = self.__set_orientation_hull(self.near_scale[1][0])
self.origin = Point(self.near_scale[1][0][1])
self.vertical_pt = Point(self.near_scale[1][0][0])
self.horizontal_pt = Point(self.near_scale[1][0][3])
self.vertical_pt2 = Point(self.far_scale[1][0][0])
def __set_orientation_hull(self, scale):
# Assuming the scale is placed on the left half of the image.
# The first vertex should be top left. If it's not the case, then reorder the verticies.
order = scale[:,0].argsort()
if order[0].astype(int) == 0:
## 1 2 ##
## 0 3 ##
# The first vertex is at bottom left instead of top left. Reorder the verticies.
scale = scale[[1,0,3,2]]
elif order[0].astype(int) == 1:
## 2 3 ##
## 1 0 ##
# The first vertex is at bottom left instead of top left. Reorder the verticies.
scale = scale[[2,1,0,3]]
elif order[0].astype(int) == 2:
## 3 0 ##
## 2 1 ##
scale = scale[[3,2,1,0]]
elif order[0].astype(int) == 3:
## 0 1 ##
## 3 2 ##
scale = scale[[0,3,2,1]]
return scale
def __shift_accessory_coordinate_init(self):
math_origin = self.origin.switch_coordinate_system(self.img)
math_horizontal_pt = self.horizontal_pt.switch_coordinate_system(self.img)
math_vertical_pt2 = self.vertical_pt2.switch_coordinate_system(self.img)
math_vertical_pt = self.vertical_pt.switch_coordinate_system(self.img)
self.vertical_reference_line = Line(math_origin, math_vertical_pt2)
self.horizontal_reference_line = Line(math_vertical_pt, math_horizontal_pt)
def __shift_accessory_coordinate(self, pt):
math_pt = pt.switch_coordinate_system(self.img)
slope_proj, intercept_proj = math_pt.get_projected_line(self.horizontal_reference_line.get_slope())
math_intersection = self.vertical_reference_line.calculate_intersection(slope_proj, intercept_proj)
img_intersection = math_intersection.switch_coordinate_system(self.img)
return img_intersection
def __cross_ratio(self, intersection):
### AC*BD/(CD*AB) = A'C'*B'D'/(C'D'*A'B') ###
# Image cross ratio
# AB(scale_length): origin to vertical_pt (scale_pixel_dist)
# CD: accessory_pt to vertical_pt2
# BD: vertical_pt to vertical_pt2
# AC(interested_length): origin to accessory_pt
AB = self.origin.get_distance(self.vertical_pt.get_point())
CD = intersection.get_distance(self.vertical_pt2.get_point())
BD = self.vertical_pt.get_distance(self.vertical_pt2.get_point())
AC = self.origin.get_distance(intersection.get_point())
image_ratio = AC*BD/CD/AB
# World cross ratio
ABw = self.scale_length
ADw = self.panel_length
BDw = self.panel_length - self.scale_length
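		# The projective cross ratio is preserved, so ACw*BDw/(CDw*ABw) must equal
		# image_ratio with CDw = ADw - ACw; solving that equation for ACw gives the
		# closed form below.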
ACw = image_ratio*ABw*ADw/(BDw+image_ratio*ABw)
return ACw
|
[
"cv2.GaussianBlur",
"cv2.contourArea",
"numpy.absolute",
"cv2.dilate",
"cv2.cvtColor",
"cv2.getStructuringElement",
"cv2.approxPolyDP",
"fhi_lib.geometry.Line",
"numpy.min",
"fhi_lib.geometry.Point",
"cv2.convexHull",
"cv2.erode",
"cv2.inRange",
"cv2.findContours"
] |
[((574, 583), 'fhi_lib.geometry.Point', 'Point', (['pt'], {}), '(pt)\n', (579, 583), False, 'from fhi_lib.geometry import Point, Line\n'), ((1635, 1672), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['self.img', '(5, 5)', '(0)'], {}), '(self.img, (5, 5), 0)\n', (1651, 1672), False, 'import cv2\n'), ((1684, 1721), 'cv2.cvtColor', 'cv2.cvtColor', (['blur', 'cv2.COLOR_BGR2HSV'], {}), '(blur, cv2.COLOR_BGR2HSV)\n', (1696, 1721), False, 'import cv2\n'), ((1741, 1792), 'cv2.inRange', 'cv2.inRange', (['img_hsv', '(45, 20, 230)', '(90, 220, 255)'], {}), '(img_hsv, (45, 20, 230), (90, 220, 255))\n', (1752, 1792), False, 'import cv2\n'), ((1812, 1861), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 5)'], {}), '(cv2.MORPH_RECT, (5, 5))\n', (1837, 1861), False, 'import cv2\n'), ((1874, 1932), 'cv2.dilate', 'cv2.dilate', (['img_threshold', 'morphology_kernel'], {'iterations': '(3)'}), '(img_threshold, morphology_kernel, iterations=3)\n', (1884, 1932), False, 'import cv2\n'), ((1944, 1996), 'cv2.erode', 'cv2.erode', (['dilation', 'morphology_kernel'], {'iterations': '(3)'}), '(dilation, morphology_kernel, iterations=3)\n', (1953, 1996), False, 'import cv2\n'), ((2257, 2356), 'cv2.findContours', 'cv2.findContours', ([], {'image': 'cropped_thresh', 'mode': 'cv2.RETR_EXTERNAL', 'method': 'cv2.CHAIN_APPROX_SIMPLE'}), '(image=cropped_thresh, mode=cv2.RETR_EXTERNAL, method=cv2.\n CHAIN_APPROX_SIMPLE)\n', (2273, 2356), False, 'import cv2\n'), ((4327, 4358), 'fhi_lib.geometry.Point', 'Point', (['self.near_scale[1][0][1]'], {}), '(self.near_scale[1][0][1])\n', (4332, 4358), False, 'from fhi_lib.geometry import Point, Line\n'), ((4380, 4411), 'fhi_lib.geometry.Point', 'Point', (['self.near_scale[1][0][0]'], {}), '(self.near_scale[1][0][0])\n', (4385, 4411), False, 'from fhi_lib.geometry import Point, Line\n'), ((4435, 4466), 'fhi_lib.geometry.Point', 'Point', (['self.near_scale[1][0][3]'], {}), '(self.near_scale[1][0][3])\n', (4440, 4466), False, 'from fhi_lib.geometry import Point, Line\n'), ((4489, 4519), 'fhi_lib.geometry.Point', 'Point', (['self.far_scale[1][0][0]'], {}), '(self.far_scale[1][0][0])\n', (4494, 4519), False, 'from fhi_lib.geometry import Point, Line\n'), ((5652, 5688), 'fhi_lib.geometry.Line', 'Line', (['math_origin', 'math_vertical_pt2'], {}), '(math_origin, math_vertical_pt2)\n', (5656, 5688), False, 'from fhi_lib.geometry import Point, Line\n'), ((5726, 5768), 'fhi_lib.geometry.Line', 'Line', (['math_vertical_pt', 'math_horizontal_pt'], {}), '(math_vertical_pt, math_horizontal_pt)\n', (5730, 5768), False, 'from fhi_lib.geometry import Point, Line\n'), ((2565, 2589), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (2580, 2589), False, 'import cv2\n'), ((3414, 3427), 'fhi_lib.geometry.Point', 'Point', (['pts[0]'], {}), '(pts[0])\n', (3419, 3427), False, 'from fhi_lib.geometry import Point, Line\n'), ((3437, 3450), 'fhi_lib.geometry.Point', 'Point', (['pts[1]'], {}), '(pts[1])\n', (3442, 3450), False, 'from fhi_lib.geometry import Point, Line\n'), ((3460, 3473), 'fhi_lib.geometry.Point', 'Point', (['pts[2]'], {}), '(pts[2])\n', (3465, 3473), False, 'from fhi_lib.geometry import Point, Line\n'), ((3483, 3496), 'fhi_lib.geometry.Point', 'Point', (['pts[3]'], {}), '(pts[3])\n', (3488, 3496), False, 'from fhi_lib.geometry import Point, Line\n'), ((2636, 2659), 'cv2.convexHull', 'cv2.convexHull', (['contour'], {}), '(contour)\n', (2650, 2659), False, 'import cv2\n'), ((2678, 2715), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['hull', 'epsilon', 
'(True)'], {}), '(hull, epsilon, True)\n', (2694, 2715), False, 'import cv2\n'), ((3705, 3735), 'numpy.absolute', 'np.absolute', (['(dist1_2 - dist3_4)'], {}), '(dist1_2 - dist3_4)\n', (3716, 3735), True, 'import numpy as np\n'), ((3738, 3764), 'numpy.min', 'np.min', (['[dist1_2, dist3_4]'], {}), '([dist1_2, dist3_4])\n', (3744, 3764), True, 'import numpy as np\n'), ((3826, 3856), 'numpy.absolute', 'np.absolute', (['(dist1_4 - dist2_3)'], {}), '(dist1_4 - dist2_3)\n', (3837, 3856), True, 'import numpy as np\n'), ((3859, 3885), 'numpy.min', 'np.min', (['[dist1_4, dist2_3]'], {}), '([dist1_4, dist2_3])\n', (3865, 3885), True, 'import numpy as np\n')]
|
from distutils.core import setup, Extension
import glob
import numpy
import config
import sys
import os
from config import ROOT
includes = [os.path.join(ROOT,"Include"),os.path.join(ROOT,"PrivateInclude"),os.path.join("cmsisdsp_pkg","src")]
if sys.platform == 'win32':
cflags = ["-DWIN",config.cflags,"-DUNALIGNED_SUPPORT_DISABLE"]
# Custom because a customized arm_math.h is required to build on windows
# since the visual compiler and the win platform are
# not supported by default in arm_math.h
else:
cflags = ["-Wno-unused-variable","-Wno-implicit-function-declaration",config.cflags,"-D__GNUC_PYTHON__"]
transform = glob.glob(os.path.join(ROOT,"Source","TransformFunctions","*.c"))
#transform.remove(os.path.join(ROOT,"Source","TransformFunctions","arm_dct4_init_q15.c"))
#transform.remove(os.path.join(ROOT,"Source","TransformFunctions","arm_rfft_init_q15.c"))
transform.remove(os.path.join(ROOT,"Source","TransformFunctions","TransformFunctions.c"))
support = glob.glob(os.path.join(ROOT,"Source","SupportFunctions","*.c"))
support.remove(os.path.join(ROOT,"Source","SupportFunctions","SupportFunctions.c"))
fastmath = glob.glob(os.path.join(ROOT,"Source","FastMathFunctions","*.c"))
fastmath.remove(os.path.join(ROOT,"Source","FastMathFunctions","FastMathFunctions.c"))
filtering = glob.glob(os.path.join(ROOT,"Source","FilteringFunctions","*.c"))
filtering.remove(os.path.join(ROOT,"Source","FilteringFunctions","FilteringFunctions.c"))
matrix = glob.glob(os.path.join(ROOT,"Source","MatrixFunctions","*.c"))
matrix.remove(os.path.join(ROOT,"Source","MatrixFunctions","MatrixFunctions.c"))
statistics = glob.glob(os.path.join(ROOT,"Source","StatisticsFunctions","*.c"))
statistics.remove(os.path.join(ROOT,"Source","StatisticsFunctions","StatisticsFunctions.c"))
complexf = glob.glob(os.path.join(ROOT,"Source","ComplexMathFunctions","*.c"))
complexf.remove(os.path.join(ROOT,"Source","ComplexMathFunctions","ComplexMathFunctions.c"))
basic = glob.glob(os.path.join(ROOT,"Source","BasicMathFunctions","*.c"))
basic.remove(os.path.join(ROOT,"Source","BasicMathFunctions","BasicMathFunctions.c"))
controller = glob.glob(os.path.join(ROOT,"Source","ControllerFunctions","*.c"))
controller.remove(os.path.join(ROOT,"Source","ControllerFunctions","ControllerFunctions.c"))
common = glob.glob(os.path.join(ROOT,"Source","CommonTables","*.c"))
common.remove(os.path.join(ROOT,"Source","CommonTables","CommonTables.c"))
#modulesrc = glob.glob(os.path.join("cmsisdsp_pkg","src","*.c"))
modulesrc = []
modulesrc.append(os.path.join("cmsisdsp_pkg","src","cmsismodule.c"))
module1 = Extension(config.extensionName,
sources = (support
+ fastmath
+ filtering
+ matrix
+ statistics
+ complexf
+ basic
+ controller
+ transform
+ modulesrc
+ common
)
,
include_dirs = includes + [numpy.get_include()],
#extra_compile_args = ["-Wno-unused-variable","-Wno-implicit-function-declaration",config.cflags]
extra_compile_args = cflags
)
setup (name = config.setupName,
version = '0.0.1',
description = config.setupDescription,
ext_modules = [module1],
author = 'Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.',
url="https://github.com/ARM-software/CMSIS_5",
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
])
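# Typical usage (assumed): the sibling config.py must define ROOT, cflags,
# extensionName, setupName and setupDescription; the extension can then be
# built in place with "python setup.py build_ext --inplace".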
|
[
"numpy.get_include",
"os.path.join",
"distutils.core.setup"
] |
[((3420, 3826), 'distutils.core.setup', 'setup', ([], {'name': 'config.setupName', 'version': '"""0.0.1"""', 'description': 'config.setupDescription', 'ext_modules': '[module1]', 'author': '"""Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved."""', 'url': '"""https://github.com/ARM-software/CMSIS_5"""', 'classifiers': "['Programming Language :: Python',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent']"}), "(name=config.setupName, version='0.0.1', description=config.\n setupDescription, ext_modules=[module1], author=\n 'Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved.'\n , url='https://github.com/ARM-software/CMSIS_5', classifiers=[\n 'Programming Language :: Python',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent'])\n", (3425, 3826), False, 'from distutils.core import setup, Extension\n'), ((141, 170), 'os.path.join', 'os.path.join', (['ROOT', '"""Include"""'], {}), "(ROOT, 'Include')\n", (153, 170), False, 'import os\n'), ((170, 206), 'os.path.join', 'os.path.join', (['ROOT', '"""PrivateInclude"""'], {}), "(ROOT, 'PrivateInclude')\n", (182, 206), False, 'import os\n'), ((206, 241), 'os.path.join', 'os.path.join', (['"""cmsisdsp_pkg"""', '"""src"""'], {}), "('cmsisdsp_pkg', 'src')\n", (218, 241), False, 'import os\n'), ((646, 703), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""TransformFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'TransformFunctions', '*.c')\n", (658, 703), False, 'import os\n'), ((899, 973), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""TransformFunctions"""', '"""TransformFunctions.c"""'], {}), "(ROOT, 'Source', 'TransformFunctions', 'TransformFunctions.c')\n", (911, 973), False, 'import os\n'), ((993, 1048), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""SupportFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'SupportFunctions', '*.c')\n", (1005, 1048), False, 'import os\n'), ((1062, 1132), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""SupportFunctions"""', '"""SupportFunctions.c"""'], {}), "(ROOT, 'Source', 'SupportFunctions', 'SupportFunctions.c')\n", (1074, 1132), False, 'import os\n'), ((1153, 1209), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""FastMathFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'FastMathFunctions', '*.c')\n", (1165, 1209), False, 'import os\n'), ((1224, 1296), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""FastMathFunctions"""', '"""FastMathFunctions.c"""'], {}), "(ROOT, 'Source', 'FastMathFunctions', 'FastMathFunctions.c')\n", (1236, 1296), False, 'import os\n'), ((1318, 1375), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""FilteringFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'FilteringFunctions', '*.c')\n", (1330, 1375), False, 'import os\n'), ((1391, 1465), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""FilteringFunctions"""', '"""FilteringFunctions.c"""'], {}), "(ROOT, 'Source', 'FilteringFunctions', 'FilteringFunctions.c')\n", (1403, 1465), False, 'import os\n'), ((1484, 1538), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""MatrixFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'MatrixFunctions', '*.c')\n", (1496, 1538), False, 'import os\n'), ((1551, 1619), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""MatrixFunctions"""', '"""MatrixFunctions.c"""'], {}), "(ROOT, 'Source', 'MatrixFunctions', 'MatrixFunctions.c')\n", 
(1563, 1619), False, 'import os\n'), ((1642, 1700), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""StatisticsFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'StatisticsFunctions', '*.c')\n", (1654, 1700), False, 'import os\n'), ((1717, 1793), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""StatisticsFunctions"""', '"""StatisticsFunctions.c"""'], {}), "(ROOT, 'Source', 'StatisticsFunctions', 'StatisticsFunctions.c')\n", (1729, 1793), False, 'import os\n'), ((1814, 1873), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""ComplexMathFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'ComplexMathFunctions', '*.c')\n", (1826, 1873), False, 'import os\n'), ((1888, 1966), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""ComplexMathFunctions"""', '"""ComplexMathFunctions.c"""'], {}), "(ROOT, 'Source', 'ComplexMathFunctions', 'ComplexMathFunctions.c')\n", (1900, 1966), False, 'import os\n'), ((1984, 2041), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""BasicMathFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'BasicMathFunctions', '*.c')\n", (1996, 2041), False, 'import os\n'), ((2053, 2127), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""BasicMathFunctions"""', '"""BasicMathFunctions.c"""'], {}), "(ROOT, 'Source', 'BasicMathFunctions', 'BasicMathFunctions.c')\n", (2065, 2127), False, 'import os\n'), ((2150, 2208), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""ControllerFunctions"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'ControllerFunctions', '*.c')\n", (2162, 2208), False, 'import os\n'), ((2225, 2301), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""ControllerFunctions"""', '"""ControllerFunctions.c"""'], {}), "(ROOT, 'Source', 'ControllerFunctions', 'ControllerFunctions.c')\n", (2237, 2301), False, 'import os\n'), ((2320, 2371), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""CommonTables"""', '"""*.c"""'], {}), "(ROOT, 'Source', 'CommonTables', '*.c')\n", (2332, 2371), False, 'import os\n'), ((2384, 2446), 'os.path.join', 'os.path.join', (['ROOT', '"""Source"""', '"""CommonTables"""', '"""CommonTables.c"""'], {}), "(ROOT, 'Source', 'CommonTables', 'CommonTables.c')\n", (2396, 2446), False, 'import os\n'), ((2543, 2595), 'os.path.join', 'os.path.join', (['"""cmsisdsp_pkg"""', '"""src"""', '"""cmsismodule.c"""'], {}), "('cmsisdsp_pkg', 'src', 'cmsismodule.c')\n", (2555, 2595), False, 'import os\n'), ((3199, 3218), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3216, 3218), False, 'import numpy\n')]
|
# Copyright 2021, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.keras_models import dp_keras_model
def get_data():
# Data is for hidden weights of [3, 1] and bias of 2.
# With mean squared loss, we expect loss = 15^2 = 225, gradients of
# weights = [90, 120], and gradient of bias = 30.
data = np.array([[3, 4]])
labels = np.matmul(data, [[3], [1]]) + 2
return data, labels
class DPKerasModelTest(tf.test.TestCase, parameterized.TestCase):
def testBaseline(self):
"""Tests that DPSequential works when DP-SGD has no effect."""
train_data, train_labels = get_data()
# Simple linear model returns w * x + b.
model = dp_keras_model.DPSequential(
l2_norm_clip=1.0e9,
noise_multiplier=0.0,
layers=[
tf.keras.layers.InputLayer(input_shape=(2,)),
tf.keras.layers.Dense(
1, kernel_initializer='zeros', bias_initializer='zeros')
])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
loss = tf.keras.losses.MeanSquaredError()
model.compile(optimizer=optimizer, loss=loss)
model.fit(train_data, train_labels, epochs=1, batch_size=1)
model_weights = model.get_weights()
# Check parameters are as expected, taking into account the learning rate.
self.assertAllClose(model_weights[0], [[0.90], [1.20]])
self.assertAllClose(model_weights[1], [0.30])
@parameterized.named_parameters(
('l2_norm_clip 10.0', 10.0),
('l2_norm_clip 40.0', 40.0),
('l2_norm_clip 200.0', 200.0),
)
def testClippingNorm(self, l2_norm_clip):
"""Tests that clipping norm works."""
train_data, train_labels = get_data()
# Simple linear model returns w * x + b.
model = dp_keras_model.DPSequential(
l2_norm_clip=l2_norm_clip,
noise_multiplier=0.0,
layers=[
tf.keras.layers.InputLayer(input_shape=(2,)),
tf.keras.layers.Dense(
1, kernel_initializer='zeros', bias_initializer='zeros')
])
learning_rate = 0.01
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
loss = tf.keras.losses.MeanSquaredError()
model.compile(optimizer=optimizer, loss=loss)
model.fit(train_data, train_labels, epochs=1, batch_size=1)
model_weights = model.get_weights()
unclipped_gradient = np.sqrt(90**2 + 120**2 + 30**2)
scale = min(1.0, l2_norm_clip / unclipped_gradient)
expected_weights = np.array([[90], [120]]) * scale * learning_rate
expected_bias = np.array([30]) * scale * learning_rate
# Check parameters are as expected, taking into account the learning rate.
self.assertAllClose(model_weights[0], expected_weights)
self.assertAllClose(model_weights[1], expected_bias)
def _compute_expected_gradients(self, data, labels, w, l2_norm_clip,
num_microbatches):
batch_size = data.shape[0]
if num_microbatches is None:
num_microbatches = batch_size
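    # Mirrors the DP-SGD pipeline: per-example update directions 2*x*(y - w.x)
    # for squared error with a linear, bias-free model are grouped into
    # microbatches, averaged within each microbatch, clipped to l2_norm_clip,
    # and finally averaged across microbatches.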
preds = np.matmul(data, w)
grads = 2 * data * (labels - preds)[:, np.newaxis]
grads = np.reshape(grads,
[num_microbatches, batch_size // num_microbatches, -1])
mb_grads = np.mean(grads, axis=1)
mb_grad_norms = np.linalg.norm(mb_grads, axis=1)
scale = np.minimum(l2_norm_clip / mb_grad_norms, 1.0)
mb_grads = mb_grads * scale[:, np.newaxis]
final_grads = np.mean(mb_grads, axis=0)
return final_grads
@parameterized.named_parameters(
('mb_test 0', 1.0, None),
('mb_test 1', 1.0, 1),
('mb_test 2', 1.0, 2),
('mb_test 4', 1.0, 4),
)
def testMicrobatches(self, l2_norm_clip, num_microbatches):
train_data = np.array([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])
w = np.zeros((2))
train_labels = np.array([1.0, 3.0, -2.0, -4.0])
learning_rate = 1.0
expected_grads = self._compute_expected_gradients(train_data, train_labels,
w, l2_norm_clip,
num_microbatches)
expected_weights = np.squeeze(learning_rate * expected_grads)
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
loss = tf.keras.losses.MeanSquaredError()
# Simple linear model returns w * x + b.
model = dp_keras_model.DPSequential(
l2_norm_clip=l2_norm_clip,
noise_multiplier=0.0,
num_microbatches=num_microbatches,
layers=[
tf.keras.layers.InputLayer(input_shape=(2,)),
tf.keras.layers.Dense(
1, use_bias=False, kernel_initializer='zeros')
])
model.compile(optimizer=optimizer, loss=loss)
model.fit(train_data, train_labels, epochs=1, batch_size=4, shuffle=False)
model_weights = np.squeeze(model.get_weights())
self.assertAllClose(model_weights, expected_weights)
@parameterized.named_parameters(
('noise_multiplier 3 2 1', 3.0, 2.0, 1),
('noise_multiplier 5 4 1', 5.0, 4.0, 1),
('noise_multiplier 3 2 2', 3.0, 2.0, 2),
('noise_multiplier 5 4 2', 5.0, 4.0, 2),
('noise_multiplier 3 2 4', 3.0, 2.0, 4),
('noise_multiplier 5 4 4', 5.0, 4.0, 4),
)
def testNoiseMultiplier(self, l2_norm_clip, noise_multiplier,
num_microbatches):
# The idea behind this test is to start with a model whose parameters
# are set to zero. We then run one step of a model that produces
# an un-noised gradient of zero, and then compute the standard deviation
# of the resulting weights to see if it matches the expected standard
# deviation.
# Data is one example of length 1000, set to zero, with label zero.
train_data = np.zeros((4, 1000))
train_labels = np.array([0.0, 0.0, 0.0, 0.0])
learning_rate = 1.0
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
loss = tf.keras.losses.MeanSquaredError()
# Simple linear model returns w * x + b.
model = dp_keras_model.DPSequential(
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
num_microbatches=num_microbatches,
layers=[
tf.keras.layers.InputLayer(input_shape=(1000,)),
tf.keras.layers.Dense(
1, kernel_initializer='zeros', bias_initializer='zeros')
])
model.compile(optimizer=optimizer, loss=loss)
model.fit(train_data, train_labels, epochs=1, batch_size=4)
model_weights = model.get_weights()
measured_std = np.std(model_weights[0])
expected_std = l2_norm_clip * noise_multiplier / num_microbatches
# Test standard deviation is close to l2_norm_clip * noise_multiplier.
self.assertNear(measured_std, expected_std, 0.1 * expected_std)
# Simple check to make sure dimensions are correct when output has
# dimension > 1.
@parameterized.named_parameters(
('mb_test None 1', None, 1),
('mb_test 1 2', 1, 2),
('mb_test 2 2', 2, 2),
('mb_test 4 4', 4, 4),
)
def testMultiDimensionalOutput(self, num_microbatches, output_dimension):
train_data = np.array([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])
train_labels = np.array([0, 1, 1, 0])
learning_rate = 1.0
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model = dp_keras_model.DPSequential(
l2_norm_clip=1.0e9,
noise_multiplier=0.0,
num_microbatches=num_microbatches,
layers=[
tf.keras.layers.InputLayer(input_shape=(2,)),
tf.keras.layers.Dense(
output_dimension, use_bias=False, kernel_initializer='zeros')
])
model.compile(optimizer=optimizer, loss=loss_fn)
model.fit(train_data, train_labels, epochs=1, batch_size=4, shuffle=False)
# Checks that calls to earlier API using `use_xla` as a positional argument
# raise an exception.
@parameterized.named_parameters(
('earlier API True', True),
('earlier API False', False),
)
def testEarlierAPIFails(self, use_xla):
with self.assertRaises(ValueError):
_ = dp_keras_model.DPSequential(
1.0e9,
0.0,
use_xla,
layers=[
tf.keras.layers.InputLayer(input_shape=(2,)),
tf.keras.layers.Dense(
2, use_bias=False, kernel_initializer='zeros')
])
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"numpy.minimum",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.layers.Dense",
"numpy.std",
"tensorflow.keras.optimizers.SGD",
"numpy.zeros",
"tensorflow.keras.layers.InputLayer",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.squeeze",
"absl.testing.parameterized.named_parameters",
"numpy.sqrt"
] |
[((946, 964), 'numpy.array', 'np.array', (['[[3, 4]]'], {}), '([[3, 4]])\n', (954, 964), True, 'import numpy as np\n'), ((2029, 2153), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('l2_norm_clip 10.0', 10.0)", "('l2_norm_clip 40.0', 40.0)", "('l2_norm_clip 200.0', 200.0)"], {}), "(('l2_norm_clip 10.0', 10.0), (\n 'l2_norm_clip 40.0', 40.0), ('l2_norm_clip 200.0', 200.0))\n", (2059, 2153), False, 'from absl.testing import parameterized\n'), ((4076, 4205), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('mb_test 0', 1.0, None)", "('mb_test 1', 1.0, 1)", "('mb_test 2', 1.0, 2)", "('mb_test 4', 1.0, 4)"], {}), "(('mb_test 0', 1.0, None), ('mb_test 1', 1.0,\n 1), ('mb_test 2', 1.0, 2), ('mb_test 4', 1.0, 4))\n", (4106, 4205), False, 'from absl.testing import parameterized\n'), ((5495, 5789), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('noise_multiplier 3 2 1', 3.0, 2.0, 1)", "('noise_multiplier 5 4 1', 5.0, 4.0, 1)", "('noise_multiplier 3 2 2', 3.0, 2.0, 2)", "('noise_multiplier 5 4 2', 5.0, 4.0, 2)", "('noise_multiplier 3 2 4', 3.0, 2.0, 4)", "('noise_multiplier 5 4 4', 5.0, 4.0, 4)"], {}), "(('noise_multiplier 3 2 1', 3.0, 2.0, 1), (\n 'noise_multiplier 5 4 1', 5.0, 4.0, 1), ('noise_multiplier 3 2 2', 3.0,\n 2.0, 2), ('noise_multiplier 5 4 2', 5.0, 4.0, 2), (\n 'noise_multiplier 3 2 4', 3.0, 2.0, 4), ('noise_multiplier 5 4 4', 5.0,\n 4.0, 4))\n", (5525, 5789), False, 'from absl.testing import parameterized\n'), ((7443, 7575), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('mb_test None 1', None, 1)", "('mb_test 1 2', 1, 2)", "('mb_test 2 2', 2, 2)", "('mb_test 4 4', 4, 4)"], {}), "(('mb_test None 1', None, 1), ('mb_test 1 2',\n 1, 2), ('mb_test 2 2', 2, 2), ('mb_test 4 4', 4, 4))\n", (7473, 7575), False, 'from absl.testing import parameterized\n'), ((8547, 8640), 'absl.testing.parameterized.named_parameters', 'parameterized.named_parameters', (["('earlier API True', True)", "('earlier API False', False)"], {}), "(('earlier API True', True), (\n 'earlier API False', False))\n", (8577, 8640), False, 'from absl.testing import parameterized\n'), ((9049, 9063), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (9061, 9063), True, 'import tensorflow as tf\n'), ((976, 1003), 'numpy.matmul', 'np.matmul', (['data', '[[3], [1]]'], {}), '(data, [[3], [1]])\n', (985, 1003), True, 'import numpy as np\n'), ((1589, 1632), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (1612, 1632), True, 'import tensorflow as tf\n'), ((1644, 1678), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (1676, 1678), True, 'import tensorflow as tf\n'), ((2687, 2739), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2710, 2739), True, 'import tensorflow as tf\n'), ((2751, 2785), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (2783, 2785), True, 'import tensorflow as tf\n'), ((2968, 3005), 'numpy.sqrt', 'np.sqrt', (['(90 ** 2 + 120 ** 2 + 30 ** 2)'], {}), '(90 ** 2 + 120 ** 2 + 30 ** 2)\n', (2975, 3005), True, 'import numpy as np\n'), ((3621, 3639), 'numpy.matmul', 'np.matmul', (['data', 'w'], {}), '(data, w)\n', (3630, 3639), True, 'import numpy as np\n'), ((3708, 3781), 'numpy.reshape', 'np.reshape', 
(['grads', '[num_microbatches, batch_size // num_microbatches, -1]'], {}), '(grads, [num_microbatches, batch_size // num_microbatches, -1])\n', (3718, 3781), True, 'import numpy as np\n'), ((3821, 3843), 'numpy.mean', 'np.mean', (['grads'], {'axis': '(1)'}), '(grads, axis=1)\n', (3828, 3843), True, 'import numpy as np\n'), ((3864, 3896), 'numpy.linalg.norm', 'np.linalg.norm', (['mb_grads'], {'axis': '(1)'}), '(mb_grads, axis=1)\n', (3878, 3896), True, 'import numpy as np\n'), ((3910, 3955), 'numpy.minimum', 'np.minimum', (['(l2_norm_clip / mb_grad_norms)', '(1.0)'], {}), '(l2_norm_clip / mb_grad_norms, 1.0)\n', (3920, 3955), True, 'import numpy as np\n'), ((4023, 4048), 'numpy.mean', 'np.mean', (['mb_grads'], {'axis': '(0)'}), '(mb_grads, axis=0)\n', (4030, 4048), True, 'import numpy as np\n'), ((4310, 4368), 'numpy.array', 'np.array', (['[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]'], {}), '([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])\n', (4318, 4368), True, 'import numpy as np\n'), ((4377, 4388), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4385, 4388), True, 'import numpy as np\n'), ((4410, 4442), 'numpy.array', 'np.array', (['[1.0, 3.0, -2.0, -4.0]'], {}), '([1.0, 3.0, -2.0, -4.0])\n', (4418, 4442), True, 'import numpy as np\n'), ((4714, 4756), 'numpy.squeeze', 'np.squeeze', (['(learning_rate * expected_grads)'], {}), '(learning_rate * expected_grads)\n', (4724, 4756), True, 'import numpy as np\n'), ((4774, 4826), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4797, 4826), True, 'import tensorflow as tf\n'), ((4838, 4872), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (4870, 4872), True, 'import tensorflow as tf\n'), ((6323, 6342), 'numpy.zeros', 'np.zeros', (['(4, 1000)'], {}), '((4, 1000))\n', (6331, 6342), True, 'import numpy as np\n'), ((6362, 6392), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (6370, 6392), True, 'import numpy as np\n'), ((6434, 6486), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (6457, 6486), True, 'import tensorflow as tf\n'), ((6498, 6532), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (6530, 6532), True, 'import tensorflow as tf\n'), ((7112, 7136), 'numpy.std', 'np.std', (['model_weights[0]'], {}), '(model_weights[0])\n', (7118, 7136), True, 'import numpy as np\n'), ((7694, 7752), 'numpy.array', 'np.array', (['[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]'], {}), '([[2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]])\n', (7702, 7752), True, 'import numpy as np\n'), ((7772, 7794), 'numpy.array', 'np.array', (['[0, 1, 1, 0]'], {}), '([0, 1, 1, 0])\n', (7780, 7794), True, 'import numpy as np\n'), ((7836, 7888), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (7859, 7888), True, 'import tensorflow as tf\n'), ((7903, 7966), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (7948, 7966), True, 'import tensorflow as tf\n'), ((3079, 3102), 'numpy.array', 'np.array', (['[[90], [120]]'], {}), '([[90], [120]])\n', (3087, 3102), True, 'import numpy as np\n'), ((3147, 3161), 'numpy.array', 'np.array', (['[30]'], {}), '([30])\n', 
(3155, 3161), True, 'import numpy as np\n'), ((1408, 1452), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (1434, 1452), True, 'import tensorflow as tf\n'), ((1466, 1544), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(1, kernel_initializer='zeros', bias_initializer='zeros')\n", (1487, 1544), True, 'import tensorflow as tf\n'), ((2481, 2525), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (2507, 2525), True, 'import tensorflow as tf\n'), ((2539, 2617), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(1, kernel_initializer='zeros', bias_initializer='zeros')\n", (2560, 2617), True, 'import tensorflow as tf\n'), ((5097, 5141), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (5123, 5141), True, 'import tensorflow as tf\n'), ((5155, 5223), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'use_bias': '(False)', 'kernel_initializer': '"""zeros"""'}), "(1, use_bias=False, kernel_initializer='zeros')\n", (5176, 5223), True, 'import tensorflow as tf\n'), ((6770, 6817), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(1000,)'}), '(input_shape=(1000,))\n', (6796, 6817), True, 'import tensorflow as tf\n'), ((6831, 6909), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(1, kernel_initializer='zeros', bias_initializer='zeros')\n", (6852, 6909), True, 'import tensorflow as tf\n'), ((8139, 8183), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (8165, 8183), True, 'import tensorflow as tf\n'), ((8197, 8285), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dimension'], {'use_bias': '(False)', 'kernel_initializer': '"""zeros"""'}), "(output_dimension, use_bias=False, kernel_initializer=\n 'zeros')\n", (8218, 8285), True, 'import tensorflow as tf\n'), ((8858, 8902), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(2,)'}), '(input_shape=(2,))\n', (8884, 8902), True, 'import tensorflow as tf\n'), ((8918, 8986), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'use_bias': '(False)', 'kernel_initializer': '"""zeros"""'}), "(2, use_bias=False, kernel_initializer='zeros')\n", (8939, 8986), True, 'import tensorflow as tf\n')]
|
"""
Here I am going to convert each row of pixel values back into an image and save it in the
matching class directory, for both the train and the test set.
train set -------> [A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z]
test set -------> [A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z]
"""
# Import required packages
import os
import numpy as np
import cv2
word_dict = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J', 10: 'K', 11: 'L',
12: 'M', 13: 'N', 14: 'O', 15: 'P', 16: 'Q', 17: 'R', 18: 'S', 19: 'T', 20: 'U', 21: 'V', 22: 'W', 23: 'X',
24: 'Y', 25: 'Z'}
def test_images_creation():
# Open file of test.csv in read mode
file = open('test.csv', 'r')
count = 0
labels = []
    # Directory where the test images are saved
parent_dir = os.path.join(os.getcwd(), 'test')
while True:
# read line of file
line = file.readline()
# Break if line not found
if not line:
break
# Split line on ',' and create list of row values
row = line.split(',')
        # extract the label and the pixel values from the row
        # label = str(row[0])  -- original
        lab_num = int(row[0])
        label = word_dict.get(lab_num)
        pixel = row[1:]
        # Convert the pixel values into a 28 x 28 numpy array
        pixel = np.asarray(pixel, dtype=np.uint8).reshape((28, 28, 1))
# join path of directories
path = os.path.join(parent_dir, label)
# count line number and use with image name
count += 1
# list of contents(directory and file both) in directory
labels = os.listdir(parent_dir)
if label in labels:
# save image in its directory
cv2.imwrite(f'{path}/image_{count}.png', pixel)
print(f"{count} - not created directory only image add")
else:
try:
os.mkdir(path)
except OSError as error:
print(error)
# save image in its directory
cv2.imwrite(f'{path}/image_{count}.png', pixel)
print(f"{count} - created directory and image add")
file.close()
test_images_creation()
def train_images_creation():
# Open file of train.csv in read mode
file = open('train.csv', 'r')
count = 0
labels = []
    # Directory where the train images are saved
parent_dir = os.path.join(os.getcwd(), 'train')
while True:
# read line of file
line = file.readline()
# Break if line not found
if not line:
break
# Split line on ',' and create list of row values
row = line.split(',')
        # extract the label and the pixel values from the row
        # label = str(row[0])  -- original
        lab_num = int(row[0])
        label = word_dict.get(lab_num)
        pixel = row[1:]
        # Convert the pixel values into a 28 x 28 numpy array
        pixel = np.asarray(pixel, dtype=np.uint8).reshape((28, 28, 1))
# join path of directories
path = os.path.join(parent_dir, label)
# count line number and use with image name
count += 1
# list of contents(directory and file both) in directory
labels = os.listdir(parent_dir)
if label in labels:
# save image in its directory
cv2.imwrite(f'{path}/image_{count}.png', pixel)
print(f"{count} - not created directory only image add")
else:
try:
os.mkdir(path)
except OSError as error:
print(error)
# save image in its directory
cv2.imwrite(f'{path}/image_{count}.png', pixel)
print(f"{count} - created directory and image add")
file.close()
# train_images_creation()
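# --- Editor's sketch (not part of the original script): the two functions above differ only
# in the CSV name and the output directory, so they could be collapsed into one parameterised
# helper.  Shown purely as a refactoring suggestion; it assumes the same CSV layout as above.
def images_creation(csv_name, split_dir):
    count = 0
    parent_dir = os.path.join(os.getcwd(), split_dir)
    with open(csv_name, 'r') as file:
        for line in file:
            if not line.strip():
                continue
            row = line.split(',')
            label = word_dict.get(int(row[0]))
            pixel = np.asarray(row[1:], dtype=np.uint8).reshape((28, 28, 1))
            path = os.path.join(parent_dir, label)
            os.makedirs(path, exist_ok=True)   # creates the class directory if needed
            count += 1
            cv2.imwrite(f'{path}/image_{count}.png', pixel)
# images_creation('test.csv', 'test'); images_creation('train.csv', 'train')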
|
[
"os.mkdir",
"os.getcwd",
"cv2.imwrite",
"numpy.asarray",
"os.path.join",
"os.listdir"
] |
[((841, 852), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (850, 852), False, 'import os\n'), ((1457, 1488), 'os.path.join', 'os.path.join', (['parent_dir', 'label'], {}), '(parent_dir, label)\n', (1469, 1488), False, 'import os\n'), ((1644, 1666), 'os.listdir', 'os.listdir', (['parent_dir'], {}), '(parent_dir)\n', (1654, 1666), False, 'import os\n'), ((2411, 2422), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2420, 2422), False, 'import os\n'), ((3028, 3059), 'os.path.join', 'os.path.join', (['parent_dir', 'label'], {}), '(parent_dir, label)\n', (3040, 3059), False, 'import os\n'), ((3215, 3237), 'os.listdir', 'os.listdir', (['parent_dir'], {}), '(parent_dir)\n', (3225, 3237), False, 'import os\n'), ((1750, 1797), 'cv2.imwrite', 'cv2.imwrite', (['f"""{path}/image_{count}.png"""', 'pixel'], {}), "(f'{path}/image_{count}.png', pixel)\n", (1761, 1797), False, 'import cv2\n'), ((2049, 2096), 'cv2.imwrite', 'cv2.imwrite', (['f"""{path}/image_{count}.png"""', 'pixel'], {}), "(f'{path}/image_{count}.png', pixel)\n", (2060, 2096), False, 'import cv2\n'), ((3321, 3368), 'cv2.imwrite', 'cv2.imwrite', (['f"""{path}/image_{count}.png"""', 'pixel'], {}), "(f'{path}/image_{count}.png', pixel)\n", (3332, 3368), False, 'import cv2\n'), ((3620, 3667), 'cv2.imwrite', 'cv2.imwrite', (['f"""{path}/image_{count}.png"""', 'pixel'], {}), "(f'{path}/image_{count}.png', pixel)\n", (3631, 3667), False, 'import cv2\n'), ((1351, 1384), 'numpy.asarray', 'np.asarray', (['pixel'], {'dtype': 'np.uint8'}), '(pixel, dtype=np.uint8)\n', (1361, 1384), True, 'import numpy as np\n'), ((1914, 1928), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1922, 1928), False, 'import os\n'), ((2922, 2955), 'numpy.asarray', 'np.asarray', (['pixel'], {'dtype': 'np.uint8'}), '(pixel, dtype=np.uint8)\n', (2932, 2955), True, 'import numpy as np\n'), ((3485, 3499), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (3493, 3499), False, 'import os\n')]
|
import argparse
import numpy as np
from astropy.io import fits
from numba import jit
class DragonPedestal:
n_pixels = 7
roisize = 40
size4drs = 4*1024
high_gain = 0
low_gain = 1
def __init__(self):
self.first_capacitor = np.zeros((2, 8))
self.meanped = np.zeros((2, self.n_pixels, self.size4drs))
self.numped = np.zeros((2, self.n_pixels, self.size4drs))
def fill_pedestal_event(self, event, nr):
first_cap = event.lst.tel[0].evt.first_capacitor_id[nr * 8:(nr + 1) * 8]
for i, j in zip([0, 1, 2, 3, 4, 5, 6], [0, 0, 1, 1, 2, 2, 3]):
self.first_capacitor[self.high_gain, i] = first_cap[j]
for i, j in zip([0, 1, 2, 3, 4, 5, 6], [4, 4, 5, 5, 6, 6, 7]):
self.first_capacitor[self.low_gain, i] = first_cap[j]
waveform = event.r0.tel[0].waveform[:, :, :]
expected_pixel_id = event.lst.tel[0].svc.pixel_ids
self._fill_pedestal_event_jit(nr, waveform, expected_pixel_id, self.first_capacitor, self.meanped, self.numped)
@staticmethod
@jit(parallel=True)
def _fill_pedestal_event_jit(nr, waveform, expected_pixel_id, first_cap, meanped, numped):
size4drs = 4096
roisize = 40
for i in range(0, 2):
for j in range(0, 7):
fc = int(first_cap[i, j])
pixel = expected_pixel_id[nr*7 + j]
posads0 = int((2+fc)%size4drs)
if posads0 + 40 < 4096:
meanped[i, j, posads0:(posads0+36)] += waveform[i, pixel, 2:38]
numped[i, j, posads0:(posads0 + 36)] += 1
else:
for k in range(2, roisize-2):
posads = int((k+fc)%size4drs)
val = waveform[i, pixel, k]
meanped[i, j, posads] += val
numped[i, j, posads] += 1
def finalize_pedestal(self):
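        # Note (editor): with numpy's default error settings, dividing by zero here produces
        # nan/inf plus a RuntimeWarning rather than an exception, so capacitor cells that never
        # received data end up as nan in meanped; the except below only catches genuine errors.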
try:
self.meanped = self.meanped/self.numped
except Exception as err:
print("Not enough events to coverage all capacitor. Please use more events to create pedestal file.")
print(err)
def get_first_capacitor(event, nr):
hg = 0
lg = 1
fc = np.zeros((2, 8))
first_cap = event.lst.tel[0].evt.first_capacitor_id[nr * 8:(nr + 1) * 8]
    # First capacitor order according to the Dragon v5 board data format
for i, j in zip([0, 1, 2, 3, 4, 5, 6], [0, 0, 1, 1, 2, 2, 3]):
fc[hg, i] = first_cap[j]
for i, j in zip([0, 1, 2, 3, 4, 5, 6], [4, 4, 5, 5, 6, 6, 7]):
fc[lg, i] = first_cap[j]
return fc
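# Note (editor): get_first_capacitor repeats the same channel mapping that
# DragonPedestal.fill_pedestal_event applies internally; if the Dragon board data format
# changes, both places need to be updated together.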
|
[
"numpy.zeros",
"numba.jit"
] |
[((1068, 1086), 'numba.jit', 'jit', ([], {'parallel': '(True)'}), '(parallel=True)\n', (1071, 1086), False, 'from numba import jit\n'), ((2238, 2254), 'numpy.zeros', 'np.zeros', (['(2, 8)'], {}), '((2, 8))\n', (2246, 2254), True, 'import numpy as np\n'), ((256, 272), 'numpy.zeros', 'np.zeros', (['(2, 8)'], {}), '((2, 8))\n', (264, 272), True, 'import numpy as np\n'), ((296, 339), 'numpy.zeros', 'np.zeros', (['(2, self.n_pixels, self.size4drs)'], {}), '((2, self.n_pixels, self.size4drs))\n', (304, 339), True, 'import numpy as np\n'), ((362, 405), 'numpy.zeros', 'np.zeros', (['(2, self.n_pixels, self.size4drs)'], {}), '((2, self.n_pixels, self.size4drs))\n', (370, 405), True, 'import numpy as np\n')]
|
#TODO: move this to pioneer.das.acquisition
from pioneer.das.api import platform
try:
import folium #pip3 install folium
except:
pass
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import tqdm
import utm
def easting_northing_from_lat_long(latitude, longitude):
easting, northing, _, _ = utm.from_latlon(latitude, longitude)
return easting, northing
def distance_traj_step(easting, northing, t=None):
    d_e = np.diff(easting)
    d_n = np.diff(northing)
    if t is not None:
        d_t = np.diff(t)
        return (d_e**2 + d_n**2)**0.5/d_t
    # without timestamps, return the raw step distance between consecutive points
    return (d_e**2 + d_n**2)**0.5
def get_trajectory(pfsynch:platform.Synchronized,
ref_ts_sensor:str='flir_bfc_img',
imu_nav:str='sbgekinox_bcc_navposvel'):
'''simple: return easting, northing, points list, and time of the trajectory following the timestamps of ref_ts_sensor
'''
n = len(pfsynch)
easting, northing, ts = [], [], []
points = []
for mu in tqdm.tqdm(range(n)):
ref_ts = pfsynch[mu][ref_ts_sensor].timestamp
imu = pfsynch.platform[imu_nav].get_at_timestamp(ref_ts).raw
lati, longi = imu['latitude'], imu['longitude']
eg, ng = easting_northing_from_lat_long(lati, longi)
easting.append(eg)
northing.append(ng)
ts.append(ref_ts/1e6)
points.append([lati, longi])
return np.array(easting, dtype=np.float64), np.array(northing, dtype=np.float64), points, np.array(ts, dtype=np.float64)-ts[0]
def compute_neighbour_step_ratio(xt, yt, t, min_epsilon_precision=1e-5):
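    # For each interior point, compare the per-second step sizes towards the previous and the
    # next sample (easting and northing separately) and return the norm of the two max/min
    # ratios: values well above sqrt(2) flag an abrupt change in apparent speed.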
step_ratio_norm = []
step_ratio_norm.append(0)
for i in range(1,len(xt)-1):
d_t_l = np.abs(t[i-1]-t[i])
d_t_r = np.abs(t[i+1]-t[i])
d_xt_l = np.maximum(np.abs(xt[i-1]-xt[i])/d_t_l, min_epsilon_precision)
d_xt_r = np.maximum(np.abs(xt[i+1]-xt[i])/d_t_r, min_epsilon_precision)
d_yt_l = np.maximum(np.abs(yt[i-1]-yt[i])/d_t_l, min_epsilon_precision)
d_yt_r = np.maximum(np.abs(yt[i+1]-yt[i])/d_t_r, min_epsilon_precision)
step_ratio_xt = np.maximum(d_xt_l, d_xt_r) / np.minimum(d_xt_l, d_xt_r)
step_ratio_yt = np.maximum(d_yt_l, d_yt_r) / np.minimum(d_yt_l, d_yt_r)
step_ratio_norm.append((step_ratio_xt**2 + step_ratio_yt**2)**0.5)
step_ratio_norm.append(0)
return np.array(step_ratio_norm, dtype=np.float)
def compute_standard_score(x, seq_memory: int=200, start_at_zero: bool=True, outliers_threshold: float=100.0):
    '''return the standard score based on a memory sequence of a certain length.
'''
m = len(x)
epsilon_ = 1e-4 # 0.1 mm precision
z_score = []
z_score.append(0)
flag_outliers = np.zeros_like(x, dtype=bool)
for mu in tqdm.tqdm(range(1, m)):
a, b = np.maximum(mu - seq_memory, 0), mu
if mu < seq_memory and not start_at_zero:
z_score.append(0)
continue
window_seq = x[a:b][~flag_outliers[a:b]]
# if mu > seq_memory and len(window_seq) < 0.25*seq_memory:
# z_score.append(0)
# continue
seq_mean = np.mean(window_seq)
seq_std = np.std(window_seq)
z_ = np.abs((x[mu] - seq_mean)/(seq_std + epsilon_))
if z_ > outliers_threshold:
flag_outliers[mu] = 1
z_score.append(np.copy(z_))
return np.array(z_score)
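# --- Editor's sketch (not part of the original module): a quick, self-contained sanity check
# of compute_standard_score on toy data.  The numbers are made up; the point is that a single
# large jump in an otherwise steady step sequence receives a very high score.
def _standard_score_toy_check():
    steps = np.ones(300)             # steady 1 m/frame steps
    steps[250] = 50.0                 # one artificial position jump
    scores = compute_standard_score(steps, seq_memory=200, start_at_zero=False)
    return scores[250] > 10.0         # the jump should stand out clearly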
def get_trajectory_standard_score(pfsynch:platform.Synchronized,
ref_ts_sensor:str='flir_bfc_img',
imu_nav:str='sbgekinox_bcc_navposvel',
traj_seq_memory:int=200):
'''estimation of the smoothness of a trajectory based on the standard score.
'''
easting, northing, _, t = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
traj_step = distance_traj_step(easting, northing, t)
z_scores = np.zeros_like(easting)
z_scores[1:] = compute_standard_score(traj_step, traj_seq_memory-1, False)
return z_scores
def get_trajectory_step_ratio(pfsynch:platform.Synchronized,
ref_ts_sensor:str='flir_bfc_img',
imu_nav:str='sbgekinox_bcc_navposvel',
traj_min_epsilon_precision:float=1e-6):
    '''estimation of the smoothness of the trajectory based on the ratio of left/right step sizes
'''
easting, northing, _, t = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
return compute_neighbour_step_ratio(easting, northing, t, traj_min_epsilon_precision)
def find_trajectory_jump(pfsynch:platform.Synchronized,
ref_ts_sensor:str='flir_bfc_img',
imu_nav:str='sbgekinox_bcc_navposvel',
traj_seq_memory:int=200,
traj_jump_threshold:float=15.5,
show_result:bool=True):
    '''Compute the list of interval ranges from pfsynch which are smooth according to traj_jump_threshold.
'''
print('Computing trajectory')
easting, northing, points, t = get_trajectory(pfsynch, ref_ts_sensor, imu_nav)
traj_step = distance_traj_step(easting, northing)
print('Validate trajectory')
z_scores = np.zeros_like(easting)
z_scores[1:] = compute_standard_score(traj_step, traj_seq_memory-1, False)
jump_flag = (z_scores > traj_jump_threshold).astype(bool)
list_intervals = []
ids = np.arange(len(jump_flag))[jump_flag]
for mu in range(len(ids)):
if mu == 0:
list_intervals.append([0 , ids[mu]-1])
continue
if ids[mu]-ids[mu-1] >= traj_seq_memory:
list_intervals.append([ids[mu-1], ids[mu]-1])
if show_result:
t = np.arange(len(easting))
fig, ax = plt.subplots(2, 1, figsize=(9,10))
fig.suptitle('Trajectory positions and jumps')
ax[0].scatter(t, easting)
ax[0].scatter(t[jump_flag], easting[jump_flag], label='jump flags')
ax[0].legend()
ax[0].set_xlabel('Frame number')
ax[0].set_ylabel('Easting')
ax[1].scatter(t, northing)
ax[1].scatter(t[jump_flag], northing[jump_flag], label='jump flags')
ax[1].legend()
ax[1].set_xlabel('Frame number')
ax[1].set_ylabel('Northing')
plt.show()
my_map = folium.Map(location=points[0], zoom_start=15)
folium.PolyLine(points).add_to(my_map)
for mu in ids:
folium.CircleMarker(
location=points[mu],
radius=5.5,
popup='IMU jump: '+ str(mu),
color='red',
fill=True,
fill_color='red'
).add_to(my_map)
return jump_flag, list_intervals, my_map
return jump_flag, list_intervals
if __name__ == '__main__':
#example of use:
#see this dataset:
_dataset = '/nas/pixset/exportedDataset/20200610_195655_rec_dataset_quartier_pierre_exported'
_ignore = ['radarTI_bfc']
pf = platform.Platform(_dataset, ignore=_ignore)
# get the platform synchronized:
sync_labels = ['*ech*', '*_img*', '*_trr*', '*_trf*',' *_ftrr*', '*xyzit-*']
interp_labels = ['*_xyzit', 'sbgekinox_*', 'peakcan_*', '*temp', '*_pos*', '*_agc*']
synch = pf.synchronized(sync_labels=sync_labels, interp_labels=interp_labels, tolerance_us=1e3)
flags, inters, my_map = find_trajectory_jump(synch,
ref_ts_sensor='flir_bfc_img',
imu_nav='sbgekinox_bcc_navposvel',
traj_seq_memory=200,
traj_jump_threshold=4.0,
show_result=True)
print('Intervals:', inters)
|
[
"utm.from_latlon",
"numpy.zeros_like",
"numpy.abs",
"matplotlib.pyplot.show",
"numpy.maximum",
"numpy.minimum",
"numpy.std",
"numpy.copy",
"pioneer.das.api.platform.Platform",
"numpy.diff",
"numpy.array",
"numpy.mean",
"folium.Map",
"folium.PolyLine",
"matplotlib.pyplot.subplots"
] |
[((327, 363), 'utm.from_latlon', 'utm.from_latlon', (['latitude', 'longitude'], {}), '(latitude, longitude)\n', (342, 363), False, 'import utm\n'), ((455, 471), 'numpy.diff', 'np.diff', (['easting'], {}), '(easting)\n', (462, 471), True, 'import numpy as np\n'), ((482, 499), 'numpy.diff', 'np.diff', (['northing'], {}), '(northing)\n', (489, 499), True, 'import numpy as np\n'), ((2351, 2392), 'numpy.array', 'np.array', (['step_ratio_norm'], {'dtype': 'np.float'}), '(step_ratio_norm, dtype=np.float)\n', (2359, 2392), True, 'import numpy as np\n'), ((2708, 2736), 'numpy.zeros_like', 'np.zeros_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (2721, 2736), True, 'import numpy as np\n'), ((3396, 3413), 'numpy.array', 'np.array', (['z_score'], {}), '(z_score)\n', (3404, 3413), True, 'import numpy as np\n'), ((3933, 3955), 'numpy.zeros_like', 'np.zeros_like', (['easting'], {}), '(easting)\n', (3946, 3955), True, 'import numpy as np\n'), ((5306, 5328), 'numpy.zeros_like', 'np.zeros_like', (['easting'], {}), '(easting)\n', (5319, 5328), True, 'import numpy as np\n'), ((7117, 7160), 'pioneer.das.api.platform.Platform', 'platform.Platform', (['_dataset'], {'ignore': '_ignore'}), '(_dataset, ignore=_ignore)\n', (7134, 7160), False, 'from pioneer.das.api import platform\n'), ((536, 546), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (543, 546), True, 'import numpy as np\n'), ((1375, 1410), 'numpy.array', 'np.array', (['easting'], {'dtype': 'np.float64'}), '(easting, dtype=np.float64)\n', (1383, 1410), True, 'import numpy as np\n'), ((1412, 1448), 'numpy.array', 'np.array', (['northing'], {'dtype': 'np.float64'}), '(northing, dtype=np.float64)\n', (1420, 1448), True, 'import numpy as np\n'), ((1674, 1697), 'numpy.abs', 'np.abs', (['(t[i - 1] - t[i])'], {}), '(t[i - 1] - t[i])\n', (1680, 1697), True, 'import numpy as np\n'), ((1710, 1733), 'numpy.abs', 'np.abs', (['(t[i + 1] - t[i])'], {}), '(t[i + 1] - t[i])\n', (1716, 1733), True, 'import numpy as np\n'), ((3153, 3172), 'numpy.mean', 'np.mean', (['window_seq'], {}), '(window_seq)\n', (3160, 3172), True, 'import numpy as np\n'), ((3191, 3209), 'numpy.std', 'np.std', (['window_seq'], {}), '(window_seq)\n', (3197, 3209), True, 'import numpy as np\n'), ((3224, 3273), 'numpy.abs', 'np.abs', (['((x[mu] - seq_mean) / (seq_std + epsilon_))'], {}), '((x[mu] - seq_mean) / (seq_std + epsilon_))\n', (3230, 3273), True, 'import numpy as np\n'), ((5855, 5890), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(9, 10)'}), '(2, 1, figsize=(9, 10))\n', (5867, 5890), True, 'import matplotlib.pyplot as plt\n'), ((6377, 6387), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6385, 6387), True, 'import matplotlib.pyplot as plt\n'), ((6406, 6451), 'folium.Map', 'folium.Map', ([], {'location': 'points[0]', 'zoom_start': '(15)'}), '(location=points[0], zoom_start=15)\n', (6416, 6451), False, 'import folium\n'), ((1458, 1488), 'numpy.array', 'np.array', (['ts'], {'dtype': 'np.float64'}), '(ts, dtype=np.float64)\n', (1466, 1488), True, 'import numpy as np\n'), ((2085, 2111), 'numpy.maximum', 'np.maximum', (['d_xt_l', 'd_xt_r'], {}), '(d_xt_l, d_xt_r)\n', (2095, 2111), True, 'import numpy as np\n'), ((2114, 2140), 'numpy.minimum', 'np.minimum', (['d_xt_l', 'd_xt_r'], {}), '(d_xt_l, d_xt_r)\n', (2124, 2140), True, 'import numpy as np\n'), ((2165, 2191), 'numpy.maximum', 'np.maximum', (['d_yt_l', 'd_yt_r'], {}), '(d_yt_l, d_yt_r)\n', (2175, 2191), True, 'import numpy as np\n'), ((2194, 2220), 'numpy.minimum', 'np.minimum', (['d_yt_l', 
'd_yt_r'], {}), '(d_yt_l, d_yt_r)\n', (2204, 2220), True, 'import numpy as np\n'), ((2790, 2820), 'numpy.maximum', 'np.maximum', (['(mu - seq_memory)', '(0)'], {}), '(mu - seq_memory, 0)\n', (2800, 2820), True, 'import numpy as np\n'), ((3367, 3378), 'numpy.copy', 'np.copy', (['z_'], {}), '(z_)\n', (3374, 3378), True, 'import numpy as np\n'), ((1759, 1784), 'numpy.abs', 'np.abs', (['(xt[i - 1] - xt[i])'], {}), '(xt[i - 1] - xt[i])\n', (1765, 1784), True, 'import numpy as np\n'), ((1839, 1864), 'numpy.abs', 'np.abs', (['(xt[i + 1] - xt[i])'], {}), '(xt[i + 1] - xt[i])\n', (1845, 1864), True, 'import numpy as np\n'), ((1928, 1953), 'numpy.abs', 'np.abs', (['(yt[i - 1] - yt[i])'], {}), '(yt[i - 1] - yt[i])\n', (1934, 1953), True, 'import numpy as np\n'), ((2008, 2033), 'numpy.abs', 'np.abs', (['(yt[i + 1] - yt[i])'], {}), '(yt[i + 1] - yt[i])\n', (2014, 2033), True, 'import numpy as np\n'), ((6461, 6484), 'folium.PolyLine', 'folium.PolyLine', (['points'], {}), '(points)\n', (6476, 6484), False, 'import folium\n')]
|
import os.path as osp
import numpy as np
import mmcv
from . import XMLDataset
from .builder import DATASETS
import xml.etree.ElementTree as ET
from PIL import Image
@DATASETS.register_module()
class LogosDataset(XMLDataset):
def load_annotations(self, ann_file):
"""Load annotation from XML style ann_file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
if not self.CLASSES:
self.CLASSES = set()
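        # Note (editor): the class list is discovered on the fly from the XML annotations and
        # sorted at the end of this method, so the label-to-index mapping stays deterministic.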
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'JPEGImages/{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
# Get image size data
size = root.find('size')
if size is not None:
width = int(size.find('width').text)
height = int(size.find('height').text)
else:
img_path = osp.join(self.img_prefix, 'JPEGImages',
'{}.jpg'.format(img_id))
img = Image.open(img_path)
width, height = img.size
# Get object classes
self.CLASSES |= {x.text for x in tree.findall("object/name")}
data_infos.append(
dict(id=img_id, filename=filename, width=width, height=height))
self.CLASSES = sorted(list(self.CLASSES))
return data_infos
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05)):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float]): IoU threshold used for evaluating
recalls/mAPs. If set to a list, the average of all IoUs will
also be computed. Default: np.arange(0.5, 0.96, 0.05).
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
return dict()
|
[
"xml.etree.ElementTree.parse",
"PIL.Image.open",
"numpy.arange",
"mmcv.list_from_file",
"os.path.join"
] |
[((578, 607), 'mmcv.list_from_file', 'mmcv.list_from_file', (['ann_file'], {}), '(ann_file)\n', (597, 607), False, 'import mmcv\n'), ((1875, 1901), 'numpy.arange', 'np.arange', (['(0.5)', '(0.96)', '(0.05)'], {}), '(0.5, 0.96, 0.05)\n', (1884, 1901), True, 'import numpy as np\n'), ((712, 769), 'os.path.join', 'osp.join', (['self.img_prefix', '"""Annotations"""', 'f"""{img_id}.xml"""'], {}), "(self.img_prefix, 'Annotations', f'{img_id}.xml')\n", (720, 769), True, 'import os.path as osp\n'), ((821, 839), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path'], {}), '(xml_path)\n', (829, 839), True, 'import xml.etree.ElementTree as ET\n'), ((1255, 1275), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1265, 1275), False, 'from PIL import Image\n')]
|
import csv
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
#from sklearn.neighbors import NearestNeighbors
from path import Path
from vector_math import *
from find_matches import *
import search_matches
#********************
#**** this compares two sets of angles to see how close the two paths are
#********************
#@profile
def compare_two_sets_of_angles(path1, path2):
match_comparison = []
max_distance = 0
    if len(path2.angles) > 0:
        angles2 = path2.angles[:,0]
        distances1_2 = path2.angles[:,1]
        distances2_2 = path2.angles[:,2]
        path2_angles = path2.angles[:,0:3]
        path2_angles_test = path2.angles[:,0:3].tolist()
    else:
        return # if we don't have any angles, return and move on to the next path
    if len(path1.angles) > 0:
        path1_angles = path1.angles[:,0:3]
        path1_angles_test = path1.angles[:,0:3].tolist()
    else:
        return # if we don't have any angles, return and move on to the next path
angle_tolerance = 4.2
distance_tolerance = 18.0
#cdef int cnt2
matching_angles, num_matched = search_matches.match_angles(path1_angles, path2_angles, angle_tolerance, distance_tolerance)
match_comparison = matching_angles[0:num_matched,:]
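    # Note (editor): judging from the commented-out reference implementation below, each matched
    # row is [match flag, angle index in path1, angle index in path2, score, angle in degrees].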
## path1 is being compared against path 2
#for cnt in xrange (0, len(path1.angles)):
# angle1 = path1.angles[cnt,0]
# matches = np.where( (abs(path2_angles[:,0]-angle1) <= angle_tolerance) & ( abs(path2_angles[:,1]-path1_angles[cnt,1]) <= 16) & ( abs(path2_angles[:,2]-path1_angles[cnt,2]) <= 16) )
# if (len(matches[0]) > 0):
# match_score = [1, cnt, matches[0][0], 1.0, angle1] # remember this angle
# match_comparison.append(match_score)
#
#
##while( path1_angles_test and path2_angles_test ): # look for matches and pop from the list
## if ( path1_angles_test[-1][0] > path2_angles_test[-1][0] ): # pop the highest angle
## path1_angles_test.pop()
## else:
## path2_angles_test.pop()
#
#for cnt, match_check in enumerate(match_comparison):
# if ( abs(match_check[0]-matching_angles[cnt,0]) > .01 ):
# print("here 1",abs(match_check[0]-matching_angles[cnt,0]) )
# print(0,cnt, match_check[0], matching_angles[cnt,0])
# sys.exit(0)
# if ( match_check[1] != matching_angles[cnt,1] ):
# print(1,cnt, match_check[1], matching_angles[cnt,1])
# sys.exit(0)
# if ( match_check[2] != matching_angles[cnt,2] ):
# print(2, cnt, match_check[2], matching_angles[cnt,2])
# sys.exit(0)
# if ( match_check[3] != matching_angles[cnt,3] ):
# print(3, cnt, match_check[3], matching_angles[cnt,3])
# sys.exit(0)
# if ( abs(match_check[4] - matching_angles[cnt,4]) > .01 ):
# print(4, cnt, match_check[4], matching_angles[cnt,4])
# sys.exit(0)
#
#
#
exact_match_cnt = 0
matched_points1 = []
matched_points2 = []
for cnt, match_score in enumerate(match_comparison):
if (match_score[0] ==1):
exact_match_cnt += 1
loc1 = match_score[1]
loc2 = match_score[2]
# remember all of the matching points
matched_points1.append( match_score[1])
matched_points2.append( match_score[2])
match_found =0
if ( exact_match_cnt >= 2 ):
path1_matching = [path2.routeid, 1, loc1]
for match_point in matched_points1:
path1_matching.append(match_point)
path2_matching = [path1.routeid, 1, loc2]
for match_point in matched_points2:
path2_matching.append(match_point)
path1_matching_angle_list = path1_matching[3:]
path2_matching_angle_list = path2_matching[3:]
if ( exact_match_cnt >= 3 ): # we need at least 3 points to check for an exact match
# loop through each of the angles that was a good match and see how many of the points line up
match_found = 0
for cnt, angle1 in enumerate(path1_matching_angle_list):
angle2 = path2_matching_angle_list[cnt]
if (match_found ==0):
#print
#print
#print
match_found = align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list )
#path1, path2, match_found = align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list )
#print
#print
#print
#if (match_found != match_found2):
# print("***************** no match*******************",match_found,match_found2)
if (match_found == 1):
path1.comparison.append( path1_matching ) # remember that we matched and remember which RDP points had a good match
path2.comparison.append( path2_matching )
if (path1.matched<0):
path1.matched=0
if (path2.matched<0):
path2.matched = 0
path1.matched += 1
path2.matched += 1
path1.print_flag = 1
# if we don't have a match, check 2 points to see if we have anything resembling a match
if ( (path1.matched < 0 or path2.matched < 0) and exact_match_cnt >=2):
#if (match_found ==1):
if (len(path1_matching_angle_list) < 5):
pass
# find the distances between each of these angles and see if we can get any matching pairs
for cnt1 in range(0,len(path1_matching_angle_list)-1):
for cnt2 in range(cnt1+1,len(path1_matching_angle_list)):
angle_id_1_1 = path1_matching_angle_list[cnt1]
angle_id_1_2 = path1_matching_angle_list[cnt2]
angle_id_2_1 = path2_matching_angle_list[cnt1]
angle_id_2_2 = path2_matching_angle_list[cnt2]
distance1 = path1.angle_distances[angle_id_1_1, angle_id_1_2]
distance2 = path2.angle_distances[angle_id_2_1, angle_id_2_2]
#if( abs(distance1-distance2) < 30): # if these angles are the same distance, count it
# print ("here 1")
if(distance1 != 0 and distance2 != 0 and abs(distance1-distance2) < 30): # if these angles are the same distance, count it
if (path1.matched < 0):
path1.matched = 0 # these could be a match, so move them off of the definitely not matched list
if (path2.matched < 0):
path2.matched = 0 # these could be match
return
#********************
#**** end this compares two sets of angles to see how close the two paths are
#********************
#**************************************************************************************
#***** this gets the x, y, location of an rdp point
#**************************************************************************************
def get_RDP_xy(path, RDP_point):
#x = path.route[path.feature_loc[RDP_point,2], 0]
#y = path.route[path.feature_loc[RDP_point,2], 1]
# saves time to not assign them to another variable
return path.route[path.feature_loc[RDP_point,2], 0], path.route[path.feature_loc[RDP_point,2], 1]
# ****************************************************************************
# This returns 3 RDP points for each angle
# ********************************************************************
def get_RDP_point_from_angle(path, angle_num):
#path_rdp1 = path.angles[angle_num, 3] # the is the before point
#path_rdp2 = path.angles[angle_num, 4] # center point
#path_rdp3 = path.angles[angle_num, 5] # after point
#return path_rdp1, path_rdp2, path_rdp3
return path.angles[angle_num, 3], path.angles[angle_num, 4], path.angles[angle_num, 5]
# ****************************************************************************
# This returns 3 RDP points for each angle
# ********************************************************************
def get_one_RDP_point_from_angle(path, angle_num):
#path_rdp1 = path.angles[angle_num, 3] # the is the before point
#path_rdp2 = path.angles[angle_num, 4] # center point
#path_rdp3 = path.angles[angle_num, 5] # after point
#return path_rdp1, path_rdp2, path_rdp3
return path.angles[angle_num, 4]
#********************
#**** this aligns two paths and gives a score of that alignment
#********************
#@profile
def align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list ):
    # assign criteria for how closely we have to match the vector and distance depending on how close the angle is;
    # each row is [max angle difference (deg), vector tolerance (deg), distance tolerance (m)] -- the closer the
    # two angles agree, the looser the vector/distance tolerances that are accepted
    matching_criteria = [ [2.0, 4.5, 30.0], [3.0, 3.0, 20.0], [4.0, 2.5, 17.0], [15.0, 2.0, 15.0] ]
# find out which feature to center on for point 1
path1_rdp2 = get_one_RDP_point_from_angle(path1, angle1)
# find out which feature to center on for point 2
path2_rdp2 = get_one_RDP_point_from_angle(path2, angle2)
path1_rdp2_x, path1_rdp2_y = get_RDP_xy(path1, path1_rdp2)
path2_rdp2_x, path2_rdp2_y = get_RDP_xy(path2, path2_rdp2)
# center the path1
index_array = np.array([path1_rdp2_x, path1_rdp2_y])
path1.route = np.subtract(path1.route, index_array)
# center the path2
index_array = np.array([path2_rdp2_x, path2_rdp2_y])
path2.route = np.subtract(path2.route, index_array)
path1_rdp2_x, path1_rdp2_y = get_RDP_xy(path1, path1_rdp2)
path2_rdp2_x, path2_rdp2_y = get_RDP_xy(path2, path2_rdp2)
match_found = 0
# try aligning with the other RDP points
for cnt3, path1_aligning_angle in enumerate(path1_matching_angle_list):
good_angle_found_2 = 0
good_distance = 1
if (match_found ==0):
path2_aligning_angle = path2_matching_angle_list[cnt3] # find the MSE error between all of our points
# find out which feature to center on for point 1
path1_aligning_rdp2 = get_one_RDP_point_from_angle(path1, path1_aligning_angle)
# find out which feature to center on for point 2
path2_aligning_rdp2 = get_one_RDP_point_from_angle(path2, path2_aligning_angle)
path1_aligning_rdp2_x, path1_aligning_rdp2_y = get_RDP_xy(path1, path1_aligning_rdp2) #
path2_aligning_rdp2_x, path2_aligning_rdp2_y = get_RDP_xy(path2, path2_aligning_rdp2) #
distance1 = get_distance(path1_rdp2_x, path1_rdp2_y, path1_aligning_rdp2_x, path1_aligning_rdp2_y)
distance2 = get_distance(path2_rdp2_x, path2_rdp2_y, path2_aligning_rdp2_x, path2_aligning_rdp2_y)
if (match_found == 0 and abs(distance1 - distance2) < matching_criteria[0][2]+5 and
path1_rdp2 != path1_aligning_rdp2 and path2_rdp2 != path2_aligning_rdp2 and
path1_rdp2_x != path1_aligning_rdp2_x and path2_rdp2_x != path2_aligning_rdp2_x ):
path1_angle = np.arctan( (path1_rdp2_y-path1_aligning_rdp2_y) / (path1_rdp2_x-path1_aligning_rdp2_x) )
path2_angle = np.arctan( (path2_rdp2_y-path2_aligning_rdp2_y) / (path2_rdp2_x-path2_aligning_rdp2_x) )
path1.rotate_path(path1_angle)
path2.rotate_path(path2_angle) # rotate the paths to the same angle
path1_aligning_rdp2_x, path1_aligning_rdp2_y = get_RDP_xy(path1, path1_aligning_rdp2) #
path2_aligning_rdp2_x, path2_aligning_rdp2_y = get_RDP_xy(path2, path2_aligning_rdp2) #
# if the x signs values of our aligning points don't match, flip the x of number 2
if ( np.sign(path1_aligning_rdp2_x) != np.sign(path2_aligning_rdp2_x) ):
path2.flip_x_coords()
for rotation in range(0,2):
if ( rotation== 1 or rotation== 3): # on the second loop, flip the y coordinates of the second path
path2.flip_y_coords()
close_count = 0
good_angle_found = 0
close_list = []
close_list2 = []
close_list3 = []
for cnt, path1_angle in enumerate(path1_matching_angle_list):
path2_angle = path2_matching_angle_list[cnt] # find the MSE error between all of our points
path1_angle_degrees = path1.angles[path1_angle][0]
path2_angle_degrees = path2.angles[path2_angle][0]
angle_diff = abs(path1_angle_degrees - path2_angle_degrees)
                        distance_criteria = 30.0 # default: assume it needs to be within 30 meters
                        vector_criteria = 6.0 # default: assume it needs to be within 6 degrees
for criteria in matching_criteria:
if (angle_diff <= criteria[0]): # if the angle is less than the criteria, assign the distance and vector criteria
vector_criteria = criteria[1]
distance_criteria = criteria[2]
break
path1_test_rdp1, path1_test_rdp2, path1_test_rdp3 = get_RDP_point_from_angle(path1, path1_angle)
path2_test_rdp1, path2_test_rdp2, path2_test_rdp3 = get_RDP_point_from_angle(path2, path2_angle)
# get the location of the center points of the angle
path1_test_rdp2_x, path1_test_rdp2_y = get_RDP_xy(path1, path1_test_rdp2)
path2_test_rdp2_x, path2_test_rdp2_y = get_RDP_xy(path2, path2_test_rdp2)
# see how close the center points are
distance_off = get_distance(path1_test_rdp2_x, path1_test_rdp2_y, path2_test_rdp2_x, path2_test_rdp2_y)
# see how many points are close to matching, but make sure not to double count any
if ( distance_off < distance_criteria and path1_test_rdp2 not in close_list and path2_test_rdp2 not in close_list2):
if (path1_test_rdp1 < path1_test_rdp2):
path1_test_rdp1 = path1_test_rdp2 - 1
path1_test_rdp3 = path1_test_rdp2 + 1
else:
path1_test_rdp1 = path1_test_rdp2 + 1
path1_test_rdp3 = path1_test_rdp2 - 1
if (path2_test_rdp1 < path2_test_rdp2):
path2_test_rdp1 = path2_test_rdp2 - 1
path2_test_rdp3 = path2_test_rdp2 + 1
else:
path2_test_rdp1 = path2_test_rdp2 + 1
path2_test_rdp3 = path2_test_rdp2 - 1
# the the location of the rdp points adjacent to the center for each angle, to calculate vectors
path1_test_rdp1_x, path1_test_rdp1_y = get_RDP_xy(path1, path1_test_rdp1)
path1_test_rdp3_x, path1_test_rdp3_y = get_RDP_xy(path1, path1_test_rdp3)
path2_test_rdp1_x, path2_test_rdp1_y = get_RDP_xy(path2, path2_test_rdp1)
path2_test_rdp3_x, path2_test_rdp3_y = get_RDP_xy(path2, path2_test_rdp3)
# get the unit vectors for the path
path1_vector1 = [ path1_test_rdp2_x - path1_test_rdp1_x, path1_test_rdp2_y - path1_test_rdp1_y]
path1_vector2 = [ path1_test_rdp2_x - path1_test_rdp3_x, path1_test_rdp2_y - path1_test_rdp3_y]
path2_vector1 = [ path2_test_rdp2_x - path2_test_rdp1_x, path2_test_rdp2_y - path2_test_rdp1_y]
path2_vector2 = [ path2_test_rdp2_x - path2_test_rdp3_x, path2_test_rdp2_y - path2_test_rdp3_y]
                            # get the angles between path1 vector1 and path2 vectors 1 and 2,
                            # and between path1 vector2 and path2 vectors 1 and 2 (57.2957... converts radians to degrees)
                            angle1_1 = angle_between(path1_vector1, path2_vector1) * 57.2957795130823 # in degrees
                            angle2_1 = angle_between(path1_vector2, path2_vector1) * 57.2957795130823 # in degrees
                            angle1_2 = angle_between(path1_vector1, path2_vector2) * 57.2957795130823 # in degrees
                            angle2_2 = angle_between(path1_vector2, path2_vector2) * 57.2957795130823 # in degrees
not_a_match=1
# see if the first vector and the vector from path 2 are mostly aligned
if ( angle1_1 < vector_criteria or angle1_1 > (180-vector_criteria) or angle1_2 < vector_criteria or angle1_2 > (180-vector_criteria)):
# see if the second vector from path1 is mostly aligned with a vector from path 1
if ( angle2_1 < vector_criteria or angle2_1 > (180-vector_criteria) or angle2_2 < vector_criteria or angle2_2 > (180-vector_criteria)):
not_a_match=0 # this is a good enough match to continue
if (not_a_match ==0): # if the vectors are properly aligned
close_count += 1
close_list.append( path1_test_rdp2)
close_list2.append( path2_test_rdp2)
close_list3.append( [path1_test_rdp2, path2_test_rdp2] )
if (path1_angle_degrees < 135): # look for angles that aren't completely flat
good_angle_found =1
#if (path1_angle_degrees < 160): # look for angles that aren't completely flat
# good_angle_found_2 = 1
#if ( angle1_1 > 6 and angle1_1 < (180-6) and angle1_2 > 6 and angle1_2 < (180-6)):
# good_distance = 0
if ( close_count >= 3): # hold onto the lowest error case
#close_list3.sort()
#matching_distance_count = 0
#for rdp_cnt in range(0,len(close_list3)-1):
# rdp1_1 = close_list3[rdp_cnt][0] # get the path distance betwee these points
# rdp1_2 = close_list3[rdp_cnt+1][0]
#
# rdp2_1 = close_list3[rdp_cnt][1]
# rdp2_2 = close_list3[rdp_cnt+1][1]
#
# route_distance1 = path1.get_route_distance(int(path1.feature_loc[rdp1_1,2]), int(path1.feature_loc[rdp1_2,2]))
# route_distance2 = path2.get_route_distance(int(path2.feature_loc[rdp2_1,2]), int(path2.feature_loc[rdp2_2,2]))
#
# max_distance = max(route_distance1,route_distance2)
# min_distance = min(route_distance1,route_distance2)
#
# if (max_distance/min_distance < 1.25 or max_distance-min_distance < 20):
# matching_distance_count+=1
#
#if (matching_distance_count < 2):
# path1.print_flag = 1
matching_distance_count = 0
diff1 = max(close_list) - min(close_list)
diff2 = max(close_list2) - min(close_list2)
if (close_count >=5 or good_angle_found==1 or diff1 > 5 or diff2>5):
close_list3.sort()
matching_distance_count = 0
#print(path1.routeid, path2.routeid)
for rdp_cnt in range(0,len(close_list3)-1):
                                    rdp1_1 = close_list3[rdp_cnt][0] # get the path distance between these points
rdp1_2 = close_list3[rdp_cnt+1][0]
rdp2_1 = close_list3[rdp_cnt][1]
rdp2_2 = close_list3[rdp_cnt+1][1]
#route_distance1 = path1.get_route_distance(int(path1.feature_loc[rdp1_1,2]), int(path1.feature_loc[rdp1_2,2]))
#route_distance2 = path2.get_route_distance(int(path2.feature_loc[rdp2_1,2]), int(path2.feature_loc[rdp2_2,2]))
path1_segment_start = int(path1.feature_loc[rdp1_1,2])
path1_segment_end = int(path1.feature_loc[rdp1_2,2])
path2_segment_start = int(path2.feature_loc[rdp2_1,2])
path2_segment_end = int(path2.feature_loc[rdp2_2,2])
max_distance = 0
max_distance = search_matches.max_distance_between_segments(path1.route, path2.route, path1_segment_start, path1_segment_end, \
path2_segment_start, path2_segment_end)
#print("Max distance is ",max_distance)
if ( max_distance < 18):
matching_distance_count+=1
#if (matching_distance_count < 2):
# path1.print_flag = 1
if (matching_distance_count >= 2):
# the current RDP has a problem with matching up gentle curves
# to combat this, we will look for either, 4 matching points, or 1 point with a sharp enough turn
# which I am starting to SWAG at 145 degrees, or that the three matching RDP points aren't all in a row
# for either path1 or path2
if (close_count >=5 or good_angle_found==1): # if we have at least 4 matches, or 1 of them was a good angle, count it
match_found = 1
#if (good_distance ==0):
# path1.print_flag = 1
# #print("here1")
return match_found
else:
diff1 = max(close_list) - min(close_list)
diff2 = max(close_list2) - min(close_list2)
if (diff1 > 5 or diff2>5): # if all of the RDP points aren't sequential then count it
match_found = 1
#if (good_distance ==0):
# path1.print_flag = 1
# #print("here2")
return match_found
return match_found
#********************
#**** this aligns and orients two matching paths the same way before plotting and saving them to a file for viewing
#********************
def align_two_paths(path1, path2,driver_id,rdp_tolerance):
path1_matching_angle_list = path1.comparison[-1][3:]
path2_matching_angle_list = path2.comparison[-1][3:]
# loop through each of the angles that was a good match, and see which one makes the lowest error when they are aligned
match_found = 0
for cnt, angle1 in enumerate(path1_matching_angle_list):
angle2 = path2_matching_angle_list[cnt]
if (match_found ==0):
match_found = align_and_score_two_paths(path1, path2, angle1, angle2, path1_matching_angle_list, path2_matching_angle_list )
#print ("here2")
#print("match_found is ",match_found)
if (match_found == 1):
# if one path is a lot longer than the other, zoom in on the shorter one
#if (path1.distance < path2.distance / 5.0 or path2.distance < path1.distance / 5.0):
x1_max = np.amax ( path1.route[:,0] )
x1_min = np.amin ( path1.route[:,0] )
x2_max = np.amax ( path2.route[:,0] )
x2_min = np.amin ( path2.route[:,0] )
y1_max = np.amax ( path1.route[:,1] )
y1_min = np.amin ( path1.route[:,1] )
y2_max = np.amax ( path2.route[:,1] )
y2_min = np.amin ( path2.route[:,1] )
x_upper_bound = min( x1_max, x2_max) + 500
x_lower_bound = max( x1_min, x2_min) - 500
y_upper_bound = min( y1_max, y2_max) + 500
y_lower_bound = max( y1_min, y2_min) - 500
x_upper_bound2 = min( x1_max + 250, x2_max + 250, 1000)
x_lower_bound2 = max( x1_min - 250, x2_min - 250, -1000)
y_upper_bound2 = min( y1_max + 250, y2_max + 250, 1000)
y_lower_bound2 = max( y1_min - 250, y2_min - 250, -1000)
plt.figure()
plt.plot(path1.route[:,0],path1.route[:,1],markersize=2.0)
plt.plot(path2.route[:,0],path2.route[:,1],markersize=2.0)
feature_list1 = []
feature_list2 = []
for cnt, path1_angle in enumerate(path1_matching_angle_list):
path2_angle = path2_matching_angle_list[cnt] # find the MSE error between all of our points
path1_test_rdp1, path1_test_rdp2, path1_test_rdp3 = get_RDP_point_from_angle(path1, path1_angle)
path2_test_rdp1, path2_test_rdp2, path2_test_rdp3 = get_RDP_point_from_angle(path2, path2_angle)
path1_test_rdp2_x, path1_test_rdp2_y = get_RDP_xy(path1, path1_test_rdp2)
path2_test_rdp2_x, path2_test_rdp2_y = get_RDP_xy(path2, path2_test_rdp2)
feature_list1.append( [path1_test_rdp2_x, path1_test_rdp2_y] )
feature_list2.append( [path2_test_rdp2_x, path2_test_rdp2_y] )
# #* Temporary
path1.update_feature_loc()
path2.update_feature_loc()
path1_features = path1.feature_loc[:,0:2]
path2_features = path2.feature_loc[:,0:2]
#plt.scatter(path1_features[:,0],path1_features[:,1])
#plt.scatter(path2_features[:,0],path2_features[:,1])
# #* Temporary
#file1 = open("test1.csv",'wb')
#file1_csv = csv.writer(file1)
#for angle in path1.angles:
# file1_csv.writerow(angle)
#file1.close()
#file2 = open("test2.csv",'wb')
#file2_csv = csv.writer(file2)
#for angle in path2.angles:
# file2_csv.writerow(angle)
#file2.close()
feature_list1 = np.array(feature_list1)
plt.scatter(feature_list1[:,0],feature_list1[:,1],c='red')
feature_list2 = np.array(feature_list2)
plt.scatter(feature_list2[:,0],feature_list2[:,1],c='red')
plt.show()
#print ("here 3")
# if one path is a lot longer than the other, zoom in on the shorter one
if (path1.distance < path2.distance / 5.0 or path2.distance < path1.distance / 5.0):
plt.axis( (x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound) )
#else:
# plt.axis( (x_lower_bound2, x_upper_bound2, y_lower_bound2, y_upper_bound2) )
#plt.show()
plt.savefig("Test_Set\\Driver_" + str(driver_id)+"_" + str(path1.routeid) + "__" + str(path2.routeid) +"__"+ str(rdp_tolerance)+"m.png")
#plt.savefig("Test_Set\\Driver_1_" + str(path2.routeid) + "__" + str(path1.routeid) +".png")
plt.close()
return
#********************
#**** end aligns and orients two matching paths the same way before plotting and saving them to a file for viewing
#********************
|
[
"search_matches.match_angles",
"matplotlib.pyplot.show",
"numpy.amin",
"numpy.subtract",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.amax",
"matplotlib.pyplot.figure",
"search_matches.max_distance_between_segments",
"numpy.array",
"numpy.sign",
"numpy.arctan"
] |
[((1257, 1353), 'search_matches.match_angles', 'search_matches.match_angles', (['path1_angles', 'path2_angles', 'angle_tolerance', 'distance_tolerance'], {}), '(path1_angles, path2_angles, angle_tolerance,\n distance_tolerance)\n', (1284, 1353), False, 'import search_matches\n'), ((9746, 9784), 'numpy.array', 'np.array', (['[path1_rdp2_x, path1_rdp2_y]'], {}), '([path1_rdp2_x, path1_rdp2_y])\n', (9754, 9784), True, 'import numpy as np\n'), ((9803, 9840), 'numpy.subtract', 'np.subtract', (['path1.route', 'index_array'], {}), '(path1.route, index_array)\n', (9814, 9840), True, 'import numpy as np\n'), ((9884, 9922), 'numpy.array', 'np.array', (['[path2_rdp2_x, path2_rdp2_y]'], {}), '([path2_rdp2_x, path2_rdp2_y])\n', (9892, 9922), True, 'import numpy as np\n'), ((9941, 9978), 'numpy.subtract', 'np.subtract', (['path2.route', 'index_array'], {}), '(path2.route, index_array)\n', (9952, 9978), True, 'import numpy as np\n'), ((24342, 24368), 'numpy.amax', 'np.amax', (['path1.route[:, 0]'], {}), '(path1.route[:, 0])\n', (24349, 24368), True, 'import numpy as np\n'), ((24387, 24413), 'numpy.amin', 'np.amin', (['path1.route[:, 0]'], {}), '(path1.route[:, 0])\n', (24394, 24413), True, 'import numpy as np\n'), ((24432, 24458), 'numpy.amax', 'np.amax', (['path2.route[:, 0]'], {}), '(path2.route[:, 0])\n', (24439, 24458), True, 'import numpy as np\n'), ((24477, 24503), 'numpy.amin', 'np.amin', (['path2.route[:, 0]'], {}), '(path2.route[:, 0])\n', (24484, 24503), True, 'import numpy as np\n'), ((24531, 24557), 'numpy.amax', 'np.amax', (['path1.route[:, 1]'], {}), '(path1.route[:, 1])\n', (24538, 24557), True, 'import numpy as np\n'), ((24576, 24602), 'numpy.amin', 'np.amin', (['path1.route[:, 1]'], {}), '(path1.route[:, 1])\n', (24583, 24602), True, 'import numpy as np\n'), ((24621, 24647), 'numpy.amax', 'np.amax', (['path2.route[:, 1]'], {}), '(path2.route[:, 1])\n', (24628, 24647), True, 'import numpy as np\n'), ((24666, 24692), 'numpy.amin', 'np.amin', (['path2.route[:, 1]'], {}), '(path2.route[:, 1])\n', (24673, 24692), True, 'import numpy as np\n'), ((25220, 25232), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (25230, 25232), True, 'import matplotlib.pyplot as plt\n'), ((25240, 25302), 'matplotlib.pyplot.plot', 'plt.plot', (['path1.route[:, 0]', 'path1.route[:, 1]'], {'markersize': '(2.0)'}), '(path1.route[:, 0], path1.route[:, 1], markersize=2.0)\n', (25248, 25302), True, 'import matplotlib.pyplot as plt\n'), ((25306, 25368), 'matplotlib.pyplot.plot', 'plt.plot', (['path2.route[:, 0]', 'path2.route[:, 1]'], {'markersize': '(2.0)'}), '(path2.route[:, 0], path2.route[:, 1], markersize=2.0)\n', (25314, 25368), True, 'import matplotlib.pyplot as plt\n'), ((26892, 26915), 'numpy.array', 'np.array', (['feature_list1'], {}), '(feature_list1)\n', (26900, 26915), True, 'import numpy as np\n'), ((26923, 26985), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_list1[:, 0]', 'feature_list1[:, 1]'], {'c': '"""red"""'}), "(feature_list1[:, 0], feature_list1[:, 1], c='red')\n", (26934, 26985), True, 'import matplotlib.pyplot as plt\n'), ((27013, 27036), 'numpy.array', 'np.array', (['feature_list2'], {}), '(feature_list2)\n', (27021, 27036), True, 'import numpy as np\n'), ((27044, 27106), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_list2[:, 0]', 'feature_list2[:, 1]'], {'c': '"""red"""'}), "(feature_list2[:, 0], feature_list2[:, 1], c='red')\n", (27055, 27106), True, 'import matplotlib.pyplot as plt\n'), ((27115, 27125), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(27123, 27125), True, 'import matplotlib.pyplot as plt\n'), ((27817, 27828), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27826, 27828), True, 'import matplotlib.pyplot as plt\n'), ((11578, 11672), 'numpy.arctan', 'np.arctan', (['((path1_rdp2_y - path1_aligning_rdp2_y) / (path1_rdp2_x -\n path1_aligning_rdp2_x))'], {}), '((path1_rdp2_y - path1_aligning_rdp2_y) / (path1_rdp2_x -\n path1_aligning_rdp2_x))\n', (11587, 11672), True, 'import numpy as np\n'), ((11691, 11785), 'numpy.arctan', 'np.arctan', (['((path2_rdp2_y - path2_aligning_rdp2_y) / (path2_rdp2_x -\n path2_aligning_rdp2_x))'], {}), '((path2_rdp2_y - path2_aligning_rdp2_y) / (path2_rdp2_x -\n path2_aligning_rdp2_x))\n', (11700, 11785), True, 'import numpy as np\n'), ((27355, 27425), 'matplotlib.pyplot.axis', 'plt.axis', (['(x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound)'], {}), '((x_lower_bound, x_upper_bound, y_lower_bound, y_upper_bound))\n', (27363, 27425), True, 'import matplotlib.pyplot as plt\n'), ((12251, 12281), 'numpy.sign', 'np.sign', (['path1_aligning_rdp2_x'], {}), '(path1_aligning_rdp2_x)\n', (12258, 12281), True, 'import numpy as np\n'), ((12285, 12315), 'numpy.sign', 'np.sign', (['path2_aligning_rdp2_x'], {}), '(path2_aligning_rdp2_x)\n', (12292, 12315), True, 'import numpy as np\n'), ((21339, 21497), 'search_matches.max_distance_between_segments', 'search_matches.max_distance_between_segments', (['path1.route', 'path2.route', 'path1_segment_start', 'path1_segment_end', 'path2_segment_start', 'path2_segment_end'], {}), '(path1.route, path2.route,\n path1_segment_start, path1_segment_end, path2_segment_start,\n path2_segment_end)\n', (21383, 21497), False, 'import search_matches\n')]
|
import os
import glob
from shutil import copy2
from PIL import Image
import json
import numpy as np
import argparse
import shutil
from skimage import io
from tqdm import tqdm
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def copy_file(src, dst):
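    # Note (editor): os.rmdir only removes *empty* directories, so copytree can still fail if
    # dst exists and is non-empty; this helper is only referenced from the commented-out call
    # in process_files below.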
if os.path.exists(dst):
os.rmdir(dst)
shutil.copytree(src, dst)
def construct_box(inst_root, label_root, dst):
inst_list = os.listdir(inst_root)
cls_list = os.listdir(label_root)
for inst, cls in zip(*(inst_list, cls_list)):
inst_map = Image.open(os.path.join(inst_root, inst))
# inst_map = Image.open(inst)
inst_map = np.array(inst_map, dtype=np.int32)
cls_map = Image.open(os.path.join(label_root, cls))
# cls_map = Image.open(cls)
cls_map = np.array(cls_map, dtype=np.int32)
H, W = inst_map.shape
# get a list of unique instances
inst_info = {'imgHeight':H, 'imgWidth':W, 'objects':{}}
inst_ids = np.unique(inst_map)
for iid in inst_ids:
if int(iid) <=0: # filter out non-instance masks
continue
ys,xs = np.where(inst_map==iid)
ymin, ymax, xmin, xmax = \
ys.min(), ys.max(), xs.min(), xs.max()
cls_label = np.median(cls_map[inst_map==iid])
inst_info['objects'][str(iid)] = {'bbox': [xmin, ymin, xmax, ymax], 'cls':int(cls_label)}
# write a file to path
filename = os.path.splitext(os.path.basename(inst))[0]
savename = os.path.join(dst, filename + '.json')
with open(savename, 'w') as f:
json.dump(inst_info, f, cls=NpEncoder)
print('wrote a bbox summary of %s to %s' % (inst, savename))
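# copy_label: for every .png under src_path, remap pixel value 255 to 30 and write an
# 8-bit copy to dst_path1, then write a 16-bit copy to dst_path2 with that label scaled by 1000.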
def copy_label(src_path, dst_path1, dst_path2):
for img_name in tqdm(os.listdir(src_path)):
if '.png' in img_name:
img = io.imread(os.path.join(src_path, img_name))
img[img == 255] = 30
io.imsave(os.path.join(dst_path1, img_name), img)
img = img.astype('uint16')
img[img == 30] = 30*1000
io.imsave(os.path.join(dst_path2, img_name), img)
def process_files(source_base_path, target_base_pth, subset, COCO_path):
dst_path = {}
for name in ['img','label','inst','bbox']:
cur_path = os.path.join(target_base_pth, subset + '_' + name)
if not os.path.exists(cur_path):
os.makedirs(cur_path)
dst_path[name] = cur_path
print('process label and inst copy')
copy_label(source_base_path, dst_path['label'], dst_path['inst'])
### copy_file(dst_path['label'], dst_path['inst'])
print('process img copy')
if COCO_path:
copy_img_file(source_base_path, dst_path['img'], COCO_path+'/'+subset+'2017')
construct_box(dst_path['inst'], dst_path['label'], dst_path['bbox'])
def copy_img_file(source_base_path, target_base_path, COCO_path):
    print(target_base_path)
for filepath in tqdm(os.listdir(source_base_path)):
if ('.png' in filepath) or ('.jpg' in filepath):
basename = os.path.basename(filepath).split('.')[0]
filename = basename.split('_')[0]
indexid = basename.split('_')[1]
if os.path.isfile(COCO_path + '/' + filename + '.jpg'):
os.symlink(COCO_path + '/' + filename + '.jpg', target_base_path + '/' + filename+'_'+indexid+'.jpg')
else:
            print('File %s.jpg not found. Please check manually.' %filename)
# organize image
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='List the content of a folder')
parser.add_argument('-s', '--subset', help='class for training the model', type=str)
parser.add_argument('-d', '--datapath',default='/home/yam28/Documents/phdYoop/datasets/COCO', type=str)
args = parser.parse_args()
source_base_path_train = 'dataset/train/' + args.subset
source_base_path_train_aug = 'dataset/train/' + args.subset+'_silvia'
source_base_path_valid = 'dataset/val/' + args.subset
target_base_pth = 'datasets/stamp_' + args.subset + '_aug'
COCO_path = args.datapath
process_files(source_base_path_train_aug, target_base_pth, 'train', None)
|
[
"json.dump",
"shutil.copytree",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.median",
"os.path.basename",
"os.path.exists",
"os.path.isfile",
"numpy.where",
"numpy.array",
"os.rmdir",
"os.symlink",
"os.path.join",
"os.listdir",
"numpy.unique"
] |
[((558, 577), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (572, 577), False, 'import os\n'), ((605, 630), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (620, 630), False, 'import shutil\n'), ((696, 717), 'os.listdir', 'os.listdir', (['inst_root'], {}), '(inst_root)\n', (706, 717), False, 'import os\n'), ((733, 755), 'os.listdir', 'os.listdir', (['label_root'], {}), '(label_root)\n', (743, 755), False, 'import os\n'), ((3839, 3906), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""List the content of a folder"""'}), "(description='List the content of a folder')\n", (3862, 3906), False, 'import argparse\n'), ((587, 600), 'os.rmdir', 'os.rmdir', (['dst'], {}), '(dst)\n', (595, 600), False, 'import os\n'), ((924, 958), 'numpy.array', 'np.array', (['inst_map'], {'dtype': 'np.int32'}), '(inst_map, dtype=np.int32)\n', (932, 958), True, 'import numpy as np\n'), ((1073, 1106), 'numpy.array', 'np.array', (['cls_map'], {'dtype': 'np.int32'}), '(cls_map, dtype=np.int32)\n', (1081, 1106), True, 'import numpy as np\n'), ((1261, 1280), 'numpy.unique', 'np.unique', (['inst_map'], {}), '(inst_map)\n', (1270, 1280), True, 'import numpy as np\n'), ((1812, 1849), 'os.path.join', 'os.path.join', (['dst', "(filename + '.json')"], {}), "(dst, filename + '.json')\n", (1824, 1849), False, 'import os\n'), ((2083, 2103), 'os.listdir', 'os.listdir', (['src_path'], {}), '(src_path)\n', (2093, 2103), False, 'import os\n'), ((2591, 2641), 'os.path.join', 'os.path.join', (['target_base_pth', "(subset + '_' + name)"], {}), "(target_base_pth, subset + '_' + name)\n", (2603, 2641), False, 'import os\n'), ((3247, 3275), 'os.listdir', 'os.listdir', (['source_base_path'], {}), '(source_base_path)\n', (3257, 3275), False, 'import os\n'), ((836, 865), 'os.path.join', 'os.path.join', (['inst_root', 'inst'], {}), '(inst_root, inst)\n', (848, 865), False, 'import os\n'), ((988, 1017), 'os.path.join', 'os.path.join', (['label_root', 'cls'], {}), '(label_root, cls)\n', (1000, 1017), False, 'import os\n'), ((1417, 1442), 'numpy.where', 'np.where', (['(inst_map == iid)'], {}), '(inst_map == iid)\n', (1425, 1442), True, 'import numpy as np\n'), ((1563, 1598), 'numpy.median', 'np.median', (['cls_map[inst_map == iid]'], {}), '(cls_map[inst_map == iid])\n', (1572, 1598), True, 'import numpy as np\n'), ((1901, 1939), 'json.dump', 'json.dump', (['inst_info', 'f'], {'cls': 'NpEncoder'}), '(inst_info, f, cls=NpEncoder)\n', (1910, 1939), False, 'import json\n'), ((2657, 2681), 'os.path.exists', 'os.path.exists', (['cur_path'], {}), '(cur_path)\n', (2671, 2681), False, 'import os\n'), ((2695, 2716), 'os.makedirs', 'os.makedirs', (['cur_path'], {}), '(cur_path)\n', (2706, 2716), False, 'import os\n'), ((3505, 3556), 'os.path.isfile', 'os.path.isfile', (["(COCO_path + '/' + filename + '.jpg')"], {}), "(COCO_path + '/' + filename + '.jpg')\n", (3519, 3556), False, 'import os\n'), ((1766, 1788), 'os.path.basename', 'os.path.basename', (['inst'], {}), '(inst)\n', (1782, 1788), False, 'import os\n'), ((2165, 2197), 'os.path.join', 'os.path.join', (['src_path', 'img_name'], {}), '(src_path, img_name)\n', (2177, 2197), False, 'import os\n'), ((2254, 2287), 'os.path.join', 'os.path.join', (['dst_path1', 'img_name'], {}), '(dst_path1, img_name)\n', (2266, 2287), False, 'import os\n'), ((2392, 2425), 'os.path.join', 'os.path.join', (['dst_path2', 'img_name'], {}), '(dst_path2, img_name)\n', (2404, 2425), False, 'import os\n'), ((3574, 3685), 'os.symlink', 'os.symlink', 
(["(COCO_path + '/' + filename + '.jpg')", "(target_base_path + '/' + filename + '_' + indexid + '.jpg')"], {}), "(COCO_path + '/' + filename + '.jpg', target_base_path + '/' +\n filename + '_' + indexid + '.jpg')\n", (3584, 3685), False, 'import os\n'), ((3358, 3384), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (3374, 3384), False, 'import os\n')]
|
import torch
import scipy.fft
import numpy as np
from functools import lru_cache
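# The transforms below are implemented as cached matrix multiplications: an orthonormal
# DCT matrix of the required size is built once per (n, device, dtype) via scipy and then
# applied along the last one, two, or three axes with einsum.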
@lru_cache()
def compute_dct_mat(n: int, device: str, dtype: torch.dtype) -> torch.Tensor:
m = scipy.fft.dct(np.eye(n), norm="ortho")
return torch.tensor(m, device=device, dtype=dtype)
@lru_cache()
def compute_idct_mat(n: int, device: str, dtype: torch.dtype) -> torch.Tensor:
m = scipy.fft.idct(np.eye(n), norm="ortho")
return torch.tensor(m, device=device, dtype=dtype)
def dct(t: torch.Tensor) -> torch.Tensor:
m = compute_dct_mat(t.shape[-2], device=t.device, dtype=t.dtype)
return torch.einsum("...id,ij->jd", t, m)
def idct(t: torch.Tensor) -> torch.Tensor:
m = compute_idct_mat(t.shape[-2], device=t.device, dtype=t.dtype)
return torch.einsum("...id,ij->jd", t, m)
def dct2(t: torch.Tensor) -> torch.Tensor:
h, w = t.shape[-2:]
mh = compute_dct_mat(h, device=t.device, dtype=t.dtype)
mw = compute_dct_mat(w, device=t.device, dtype=t.dtype)
return torch.einsum("...hw,hi,wj->...ij", t, mh, mw)
def idct2(t: torch.Tensor) -> torch.Tensor:
h, w = t.shape[-2:]
mh = compute_idct_mat(h, device=t.device, dtype=t.dtype)
mw = compute_idct_mat(w, device=t.device, dtype=t.dtype)
return torch.einsum("...hw,hi,wj->...ij", t, mh, mw)
def dct3(t: torch.Tensor) -> torch.Tensor:
l, h, w = t.shape[-3:]
ml = compute_dct_mat(l, device=t.device, dtype=t.dtype)
mh = compute_dct_mat(h, device=t.device, dtype=t.dtype)
mw = compute_dct_mat(w, device=t.device, dtype=t.dtype)
return torch.einsum("...lhw,li,hj,wk->...ijk", t, ml, mh, mw)
def idct3(t: torch.Tensor) -> torch.Tensor:
l, h, w = t.shape[-3:]
ml = compute_idct_mat(l, device=t.device, dtype=t.dtype)
mh = compute_idct_mat(h, device=t.device, dtype=t.dtype)
mw = compute_idct_mat(w, device=t.device, dtype=t.dtype)
return torch.einsum("...lhw,li,hj,wk->...ijk", t, ml, mh, mw)
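# A minimal round-trip check, kept as a comment (assumes a plain 2-D input, where the
# helpers above apply): idct2(dct2(x)) should reconstruct x up to floating-point error.
#   x = torch.randn(8, 8)
#   assert torch.allclose(idct2(dct2(x)), x, atol=1e-5)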
|
[
"torch.einsum",
"functools.lru_cache",
"numpy.eye",
"torch.tensor"
] |
[((84, 95), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (93, 95), False, 'from functools import lru_cache\n'), ((279, 290), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (288, 290), False, 'from functools import lru_cache\n'), ((232, 275), 'torch.tensor', 'torch.tensor', (['m'], {'device': 'device', 'dtype': 'dtype'}), '(m, device=device, dtype=dtype)\n', (244, 275), False, 'import torch\n'), ((429, 472), 'torch.tensor', 'torch.tensor', (['m'], {'device': 'device', 'dtype': 'dtype'}), '(m, device=device, dtype=dtype)\n', (441, 472), False, 'import torch\n'), ((597, 631), 'torch.einsum', 'torch.einsum', (['"""...id,ij->jd"""', 't', 'm'], {}), "('...id,ij->jd', t, m)\n", (609, 631), False, 'import torch\n'), ((758, 792), 'torch.einsum', 'torch.einsum', (['"""...id,ij->jd"""', 't', 'm'], {}), "('...id,ij->jd', t, m)\n", (770, 792), False, 'import torch\n'), ((993, 1038), 'torch.einsum', 'torch.einsum', (['"""...hw,hi,wj->...ij"""', 't', 'mh', 'mw'], {}), "('...hw,hi,wj->...ij', t, mh, mw)\n", (1005, 1038), False, 'import torch\n'), ((1242, 1287), 'torch.einsum', 'torch.einsum', (['"""...hw,hi,wj->...ij"""', 't', 'mh', 'mw'], {}), "('...hw,hi,wj->...ij', t, mh, mw)\n", (1254, 1287), False, 'import torch\n'), ((1551, 1605), 'torch.einsum', 'torch.einsum', (['"""...lhw,li,hj,wk->...ijk"""', 't', 'ml', 'mh', 'mw'], {}), "('...lhw,li,hj,wk->...ijk', t, ml, mh, mw)\n", (1563, 1605), False, 'import torch\n'), ((1873, 1927), 'torch.einsum', 'torch.einsum', (['"""...lhw,li,hj,wk->...ijk"""', 't', 'ml', 'mh', 'mw'], {}), "('...lhw,li,hj,wk->...ijk', t, ml, mh, mw)\n", (1885, 1927), False, 'import torch\n'), ((196, 205), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (202, 205), True, 'import numpy as np\n'), ((393, 402), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (399, 402), True, 'import numpy as np\n')]
|
import numpy as np
def day_1(file: str):
"""Read in day 1 part 1 input and count increasing values"""
with open(file) as f:
data_in = f.read()
# convert data to float
data = [float(i) for i in data_in.split()]
# Part 1
print(sum(np.diff(np.array(data)) > 0))
# Part 2
convolution = []
for i in range(len(data) - 2):
convolution.append(data[i] + data[i + 1] + data[i + 2])
print(sum(np.diff(convolution) > 0))
pass
def main():
day_1("inputs/day_1_1.txt")
if __name__ == "__main__":
main()
|
[
"numpy.diff",
"numpy.array"
] |
[((443, 463), 'numpy.diff', 'np.diff', (['convolution'], {}), '(convolution)\n', (450, 463), True, 'import numpy as np\n'), ((272, 286), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (280, 286), True, 'import numpy as np\n')]
|
from util import get_dataset
from sklearn.metrics import classification_report
import numpy as np
from keras.layers import Dense, Input, concatenate
from keras.models import Model
from keras import backend as K
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def build_model(bert_dim=768, profile_dim=32):
'''
bert network
'''
bert_input = Input(shape=(bert_dim,))
bert_output = Dense(256, activation='relu')(bert_input)
bert_output = Dense(256, activation='relu')(bert_output)
bert_output = Dense(256, activation='relu')(bert_output)
bert_output = Dense(32, activation='relu')(bert_output)
'''
input for profile network
'''
profile_input = Input(shape=(profile_dim,))
'''
model for combined features
'''
x = concatenate([profile_input, bert_output])
output = Dense(32, activation='relu')(x)
output = Dense(16, activation='relu')(output)
output = Dense(1, activation='sigmoid')(output)
model = Model(inputs=[profile_input, bert_input], outputs=[output])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc', f1_m])
return model
if __name__ == "__main__":
cross_val = None
for i in range(5):
'''
get data with bert embeddings
'''
train_x, train_y, test_x, test_y = get_dataset('bert')
'''
build neural network model
'''
model = build_model()
model.summary()
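        # The flat feature matrix is split column-wise: the first 32 columns are the profile
        # features, columns 32:800 are the 768-dim BERT embedding (matching build_model's inputs).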
train_split = np.hsplit(train_x, np.array([32, 800]))[:2]
test_split = np.hsplit(test_x, np.array([32, 800]))[:2]
model.fit(x=train_split, y=train_y, batch_size=32, shuffle=True, epochs=100)
        if cross_val is None:
            cross_val = np.array(model.evaluate(test_split, test_y))
        else:
            cross_val += np.array(model.evaluate(test_split, test_y))
print([metric/5 for metric in cross_val])
model.save('bert_sent_parallel_cross_val.h5')
|
[
"keras.backend.epsilon",
"util.get_dataset",
"keras.models.Model",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Input",
"keras.layers.concatenate",
"keras.backend.clip"
] |
[((988, 1012), 'keras.layers.Input', 'Input', ([], {'shape': '(bert_dim,)'}), '(shape=(bert_dim,))\n', (993, 1012), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1326, 1353), 'keras.layers.Input', 'Input', ([], {'shape': '(profile_dim,)'}), '(shape=(profile_dim,))\n', (1331, 1353), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1415, 1456), 'keras.layers.concatenate', 'concatenate', (['[profile_input, bert_output]'], {}), '([profile_input, bert_output])\n', (1426, 1456), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1617, 1676), 'keras.models.Model', 'Model', ([], {'inputs': '[profile_input, bert_input]', 'outputs': '[output]'}), '(inputs=[profile_input, bert_input], outputs=[output])\n', (1622, 1676), False, 'from keras.models import Model\n'), ((1031, 1060), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1036, 1060), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1091, 1120), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1096, 1120), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1152, 1181), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1157, 1181), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1213, 1241), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1218, 1241), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1470, 1498), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1475, 1498), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1515, 1543), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (1520, 1543), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1565, 1595), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1570, 1595), False, 'from keras.layers import Dense, Input, concatenate\n'), ((1968, 1987), 'util.get_dataset', 'get_dataset', (['"""bert"""'], {}), "('bert')\n", (1979, 1987), False, 'from util import get_dataset\n'), ((282, 311), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (288, 311), True, 'from keras import backend as K\n'), ((353, 373), 'keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (359, 373), True, 'from keras import backend as K\n'), ((428, 439), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (437, 439), True, 'from keras import backend as K\n'), ((527, 556), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (533, 556), True, 'from keras import backend as K\n'), ((599, 619), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (605, 619), True, 'from keras import backend as K\n'), ((678, 689), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (687, 689), True, 'from keras import backend as K\n'), ((872, 883), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (881, 883), True, 'from keras import backend as K\n'), ((2148, 2167), 'numpy.array', 'np.array', (['[32, 800]'], {}), '([32, 800])\n', (2156, 2167), True, 'import numpy as np\n'), ((2212, 2231), 'numpy.array', 'np.array', (['[32, 800]'], {}), '([32, 
800])\n', (2220, 2231), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created in September 2017
@author: mmalekzadeh
"""
import numpy as np
from keras.models import model_from_json
from keras import backend as K
def mcor(y_true, y_pred):
#matthews_correlation
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tp = K.sum(y_pos * y_pred_pos)
tn = K.sum(y_neg * y_pred_neg)
fp = K.sum(y_neg * y_pred_pos)
fn = K.sum(y_pos * y_pred_neg)
numerator = (tp * tn - fp * fn)
denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return numerator / (denominator + K.epsilon())
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
### Global Variables ###
## Load Lists of Inferences
all_inferences = np.load("all_inferences.npy")
white_list = np.asarray(all_inferences[0].tolist()[0])
black_list = np.asarray(all_inferences[1].tolist()[0])
gray_list = np.asarray(all_inferences[2].tolist()[0])
num_classes = len(white_list)+len(black_list)+len(gray_list)
## Load Original Test Data of each list
## w -> white, b -> black, g -> gray
o_w_test_data = np.load("data_test_white.npy")
o_b_test_data = np.load("data_test_black.npy")
o_g_test_data = np.load("data_test_gray.npy")
o_w_test_data = np.reshape(o_w_test_data,
(len(o_w_test_data),
o_w_test_data.shape[1],
o_w_test_data.shape[2], 1))
o_b_test_data = np.reshape(o_b_test_data,
(len(o_b_test_data),
o_b_test_data.shape[1],
o_b_test_data.shape[2], 1))
o_g_test_data = np.reshape(o_g_test_data,
(len(o_g_test_data),
o_g_test_data.shape[1],
o_g_test_data.shape[2], 1))
## Load Transformed Test Data of each list
tr_w_test_data = np.load("transformed_w_test_data.npy")
tr_b_test_data = np.load("transformed_b_test_data.npy")
tr_g_test_data = np.load("transformed_g_test_data.npy")
## Load Labels of each list
w_test_label = np.load("label_test_white.npy")
b_test_label = np.load("label_test_black.npy")
g_test_label = np.load("label_test_gray.npy")
## Build one-hot codes for each list
y_white = np.zeros((w_test_label.shape[0], num_classes))
for i in range(w_test_label.shape[0]):
y_white[i, int(w_test_label[i])] = 1
y_black = np.zeros((b_test_label.shape[0], num_classes))
for i in range(b_test_label.shape[0]):
y_black[i, int(b_test_label[i])] = 1
y_gray = np.zeros((g_test_label.shape[0], num_classes))
for i in range(g_test_label.shape[0]):
y_gray[i, int(g_test_label[i])] = 1
# load json and create model
json_file = open('server_cnn_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("server_cnn_model_weights.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy',precision ,recall])
## Evaluate Original Data
o_w_scores = loaded_model.evaluate(o_w_test_data, y_white, verbose=1)
o_b_scores = loaded_model.evaluate(o_b_test_data, y_black, verbose=1)
o_g_scores = loaded_model.evaluate(o_g_test_data, y_gray, verbose=1)
## Predict Original Data
o_w_predict = loaded_model.predict(o_w_test_data, verbose=1)
o_b_predict = loaded_model.predict(o_b_test_data, verbose=1)
o_g_predict = loaded_model.predict(o_g_test_data, verbose=1)
## Evaluate Transformed Data
tr_w_scores = loaded_model.evaluate(tr_w_test_data, y_white, verbose=1)
tr_b_scores = loaded_model.evaluate(tr_b_test_data, y_black, verbose=1)
tr_g_scores = loaded_model.evaluate(tr_g_test_data, y_gray, verbose=1)
## Predict Transformed Data
tr_w_predict = loaded_model.predict(tr_w_test_data, verbose=1)
tr_b_predict = loaded_model.predict(tr_b_test_data, verbose=1)
tr_g_predict = loaded_model.predict(tr_g_test_data, verbose=1)
print("\n ~~~ Result: F1-Score on Original Data:")
print("\n on white-listed: %.2f%%"%
((2*((o_w_scores[2]*o_w_scores[3])/(o_w_scores[2]+o_w_scores[3])))*100))
print("\n on black-listed %.2f%%"%
((2*((o_b_scores[2]*o_b_scores[3])/(o_b_scores[2]+o_b_scores[3])))*100))
print("\n on gray-listed %.2f%%"%
((2*((o_g_scores[2]*o_g_scores[3])/(o_g_scores[2]+o_g_scores[3])))*100))
print("\n ~~~ Result: F1-Score on Transformed Data:")
print("\n on white-listed: %.2f%%"%
((2*((tr_w_scores[2]*tr_w_scores[3])/(tr_w_scores[2]+tr_w_scores[3])))*100))
print("\n on black-listed %.2f%%"%
((2*((tr_b_scores[2]*tr_b_scores[3])/(tr_b_scores[2]+tr_b_scores[3])))*100))
print("\n on gray-listed %.2f%%"%
((2*((tr_g_scores[2]*tr_g_scores[3])/(tr_g_scores[2]+tr_g_scores[3])))*100))
#
########### Calculating Confusion Matrix ###########
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.GnBu):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
fontsize=18,
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
o_w_pred = np.argmax(o_w_predict,axis=1)
o_b_pred = np.argmax(o_b_predict,axis=1)
o_g_pred = np.argmax(o_g_predict,axis=1)
tr_w_pred = np.argmax(tr_w_predict,axis=1)
tr_b_pred = np.argmax(tr_b_predict,axis=1)
tr_g_pred = np.argmax(tr_g_predict,axis=1)
w_true = np.zeros(o_w_pred.shape[0])
b_true = np.zeros(o_b_pred.shape[0])
g_true = np.zeros(o_g_pred.shape[0])
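# Map every predicted class index onto one of three list buckets:
# 0 = black-listed, 1 = white-listed, 2 = gray-listed (matching class_names = ["B", "W", "G"]).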
for i in range(o_w_pred.shape[0]):
w_true[i]= 1
if o_w_pred[i] in gray_list:
o_w_pred[i]= 2
elif o_w_pred[i] in white_list:
o_w_pred[i]= 1
else:
o_w_pred[i]= 0
for i in range(o_b_pred.shape[0]):
b_true[i]= 0
if o_b_pred[i] in gray_list:
o_b_pred[i]= 2
elif o_b_pred[i] in white_list:
o_b_pred[i]= 1
else:
o_b_pred[i]= 0
for i in range(o_g_pred.shape[0]):
g_true[i]= 2
if o_g_pred[i] in gray_list:
o_g_pred[i]= 2
elif o_g_pred[i] in white_list:
o_g_pred[i]= 1
else:
o_g_pred[i]= 0
for i in range(tr_w_pred.shape[0]):
if tr_w_pred[i] in gray_list:
tr_w_pred[i]= 2
elif tr_w_pred[i] in white_list:
tr_w_pred[i]= 1
else:
tr_w_pred[i]= 0
for i in range(tr_b_pred.shape[0]):
if tr_b_pred[i] in gray_list:
tr_b_pred[i]= 2
elif tr_b_pred[i] in white_list:
tr_b_pred[i]= 1
else:
tr_b_pred[i]= 0
for i in range(tr_g_pred.shape[0]):
if tr_g_pred[i] in gray_list:
tr_g_pred[i]= 2
elif tr_g_pred[i] in white_list:
tr_g_pred[i]= 1
else:
tr_g_pred[i]= 0
class_names =["B", "W", "G"]
ycf_test = np.append(w_true, g_true, axis=0)
ycf_test = np.append(ycf_test, b_true, axis=0)
ycf_o_pred = np.append(o_w_pred, o_g_pred, axis=0)
ycf_o_pred = np.append(ycf_o_pred, o_b_pred, axis=0)
ycf_tr_pred = np.append(tr_w_pred, tr_g_pred, axis=0)
ycf_tr_pred = np.append(ycf_tr_pred, tr_b_pred, axis=0)
## Compute confusion matrix for Original Data
o_cnf_matrix = confusion_matrix(ycf_test, ycf_o_pred)
np.set_printoptions(precision=3)
## Plot non-normalized confusion matrix
plot_confusion_matrix(o_cnf_matrix, classes=class_names, normalize=True,
title='Confusion Matrix of Original Data')
plt.savefig('OCF.pdf',bbox_inches='tight')
plt.gcf().clear()
## Compute confusion matrix for Transformed Data
tr_cnf_matrix = confusion_matrix(ycf_test, ycf_tr_pred)
np.set_printoptions(precision=3)
## Plot non-normalized confusion matrix
plot_confusion_matrix(tr_cnf_matrix, classes=class_names, normalize=True,
title='Confusion Matrix of Transformed Data')
plt.savefig('TrCF.pdf',bbox_inches='tight')
|
[
"matplotlib.pyplot.title",
"numpy.load",
"numpy.argmax",
"keras.backend.epsilon",
"matplotlib.pyplot.tight_layout",
"keras.backend.sqrt",
"numpy.set_printoptions",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.colorbar",
"numpy.append",
"matplotlib.pyplot.xticks",
"keras.backend.clip",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"numpy.zeros",
"keras.backend.sum",
"keras.models.model_from_json",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((1666, 1695), 'numpy.load', 'np.load', (['"""all_inferences.npy"""'], {}), "('all_inferences.npy')\n", (1673, 1695), True, 'import numpy as np\n'), ((2014, 2044), 'numpy.load', 'np.load', (['"""data_test_white.npy"""'], {}), "('data_test_white.npy')\n", (2021, 2044), True, 'import numpy as np\n'), ((2061, 2091), 'numpy.load', 'np.load', (['"""data_test_black.npy"""'], {}), "('data_test_black.npy')\n", (2068, 2091), True, 'import numpy as np\n'), ((2108, 2137), 'numpy.load', 'np.load', (['"""data_test_gray.npy"""'], {}), "('data_test_gray.npy')\n", (2115, 2137), True, 'import numpy as np\n'), ((2792, 2830), 'numpy.load', 'np.load', (['"""transformed_w_test_data.npy"""'], {}), "('transformed_w_test_data.npy')\n", (2799, 2830), True, 'import numpy as np\n'), ((2848, 2886), 'numpy.load', 'np.load', (['"""transformed_b_test_data.npy"""'], {}), "('transformed_b_test_data.npy')\n", (2855, 2886), True, 'import numpy as np\n'), ((2904, 2942), 'numpy.load', 'np.load', (['"""transformed_g_test_data.npy"""'], {}), "('transformed_g_test_data.npy')\n", (2911, 2942), True, 'import numpy as np\n'), ((2987, 3018), 'numpy.load', 'np.load', (['"""label_test_white.npy"""'], {}), "('label_test_white.npy')\n", (2994, 3018), True, 'import numpy as np\n'), ((3034, 3065), 'numpy.load', 'np.load', (['"""label_test_black.npy"""'], {}), "('label_test_black.npy')\n", (3041, 3065), True, 'import numpy as np\n'), ((3081, 3111), 'numpy.load', 'np.load', (['"""label_test_gray.npy"""'], {}), "('label_test_gray.npy')\n", (3088, 3111), True, 'import numpy as np\n'), ((3161, 3207), 'numpy.zeros', 'np.zeros', (['(w_test_label.shape[0], num_classes)'], {}), '((w_test_label.shape[0], num_classes))\n', (3169, 3207), True, 'import numpy as np\n'), ((3299, 3345), 'numpy.zeros', 'np.zeros', (['(b_test_label.shape[0], num_classes)'], {}), '((b_test_label.shape[0], num_classes))\n', (3307, 3345), True, 'import numpy as np\n'), ((3436, 3482), 'numpy.zeros', 'np.zeros', (['(g_test_label.shape[0], num_classes)'], {}), '((g_test_label.shape[0], num_classes))\n', (3444, 3482), True, 'import numpy as np\n'), ((3709, 3743), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (3724, 3743), False, 'from keras.models import model_from_json\n'), ((7110, 7140), 'numpy.argmax', 'np.argmax', (['o_w_predict'], {'axis': '(1)'}), '(o_w_predict, axis=1)\n', (7119, 7140), True, 'import numpy as np\n'), ((7151, 7181), 'numpy.argmax', 'np.argmax', (['o_b_predict'], {'axis': '(1)'}), '(o_b_predict, axis=1)\n', (7160, 7181), True, 'import numpy as np\n'), ((7192, 7222), 'numpy.argmax', 'np.argmax', (['o_g_predict'], {'axis': '(1)'}), '(o_g_predict, axis=1)\n', (7201, 7222), True, 'import numpy as np\n'), ((7234, 7265), 'numpy.argmax', 'np.argmax', (['tr_w_predict'], {'axis': '(1)'}), '(tr_w_predict, axis=1)\n', (7243, 7265), True, 'import numpy as np\n'), ((7277, 7308), 'numpy.argmax', 'np.argmax', (['tr_b_predict'], {'axis': '(1)'}), '(tr_b_predict, axis=1)\n', (7286, 7308), True, 'import numpy as np\n'), ((7320, 7351), 'numpy.argmax', 'np.argmax', (['tr_g_predict'], {'axis': '(1)'}), '(tr_g_predict, axis=1)\n', (7329, 7351), True, 'import numpy as np\n'), ((7361, 7388), 'numpy.zeros', 'np.zeros', (['o_w_pred.shape[0]'], {}), '(o_w_pred.shape[0])\n', (7369, 7388), True, 'import numpy as np\n'), ((7398, 7425), 'numpy.zeros', 'np.zeros', (['o_b_pred.shape[0]'], {}), '(o_b_pred.shape[0])\n', (7406, 7425), True, 'import numpy as np\n'), ((7435, 7462), 'numpy.zeros', 'np.zeros', (['o_g_pred.shape[0]'], {}), 
'(o_g_pred.shape[0])\n', (7443, 7462), True, 'import numpy as np\n'), ((8679, 8712), 'numpy.append', 'np.append', (['w_true', 'g_true'], {'axis': '(0)'}), '(w_true, g_true, axis=0)\n', (8688, 8712), True, 'import numpy as np\n'), ((8724, 8759), 'numpy.append', 'np.append', (['ycf_test', 'b_true'], {'axis': '(0)'}), '(ycf_test, b_true, axis=0)\n', (8733, 8759), True, 'import numpy as np\n'), ((8773, 8810), 'numpy.append', 'np.append', (['o_w_pred', 'o_g_pred'], {'axis': '(0)'}), '(o_w_pred, o_g_pred, axis=0)\n', (8782, 8810), True, 'import numpy as np\n'), ((8824, 8863), 'numpy.append', 'np.append', (['ycf_o_pred', 'o_b_pred'], {'axis': '(0)'}), '(ycf_o_pred, o_b_pred, axis=0)\n', (8833, 8863), True, 'import numpy as np\n'), ((8878, 8917), 'numpy.append', 'np.append', (['tr_w_pred', 'tr_g_pred'], {'axis': '(0)'}), '(tr_w_pred, tr_g_pred, axis=0)\n', (8887, 8917), True, 'import numpy as np\n'), ((8932, 8973), 'numpy.append', 'np.append', (['ycf_tr_pred', 'tr_b_pred'], {'axis': '(0)'}), '(ycf_tr_pred, tr_b_pred, axis=0)\n', (8941, 8973), True, 'import numpy as np\n'), ((9036, 9074), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['ycf_test', 'ycf_o_pred'], {}), '(ycf_test, ycf_o_pred)\n', (9052, 9074), False, 'from sklearn.metrics import confusion_matrix\n'), ((9075, 9107), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (9094, 9107), True, 'import numpy as np\n'), ((9286, 9329), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""OCF.pdf"""'], {'bbox_inches': '"""tight"""'}), "('OCF.pdf', bbox_inches='tight')\n", (9297, 9329), True, 'import matplotlib.pyplot as plt\n'), ((9414, 9453), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['ycf_test', 'ycf_tr_pred'], {}), '(ycf_test, ycf_tr_pred)\n', (9430, 9453), False, 'from sklearn.metrics import confusion_matrix\n'), ((9454, 9486), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (9473, 9486), True, 'import numpy as np\n'), ((9669, 9713), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""TrCF.pdf"""'], {'bbox_inches': '"""tight"""'}), "('TrCF.pdf', bbox_inches='tight')\n", (9680, 9713), True, 'import matplotlib.pyplot as plt\n'), ((407, 432), 'keras.backend.sum', 'K.sum', (['(y_pos * y_pred_pos)'], {}), '(y_pos * y_pred_pos)\n', (412, 432), True, 'from keras import backend as K\n'), ((443, 468), 'keras.backend.sum', 'K.sum', (['(y_neg * y_pred_neg)'], {}), '(y_neg * y_pred_neg)\n', (448, 468), True, 'from keras import backend as K\n'), ((481, 506), 'keras.backend.sum', 'K.sum', (['(y_neg * y_pred_pos)'], {}), '(y_neg * y_pred_pos)\n', (486, 506), True, 'from keras import backend as K\n'), ((517, 542), 'keras.backend.sum', 'K.sum', (['(y_pos * y_pred_neg)'], {}), '(y_pos * y_pred_neg)\n', (522, 542), True, 'from keras import backend as K\n'), ((601, 654), 'keras.backend.sqrt', 'K.sqrt', (['((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))'], {}), '((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n', (607, 654), True, 'from keras import backend as K\n'), ((6464, 6514), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (6474, 6514), True, 'import matplotlib.pyplot as plt\n'), ((6519, 6535), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6528, 6535), True, 'import matplotlib.pyplot as plt\n'), ((6540, 6554), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6552, 6554), True, 'import matplotlib.pyplot as 
plt\n'), ((6600, 6644), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (6610, 6644), True, 'import matplotlib.pyplot as plt\n'), ((6649, 6680), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (6659, 6680), True, 'import matplotlib.pyplot as plt\n'), ((7016, 7034), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7032, 7034), True, 'import matplotlib.pyplot as plt\n'), ((7039, 7063), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (7049, 7063), True, 'import matplotlib.pyplot as plt\n'), ((7068, 7097), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (7078, 7097), True, 'import matplotlib.pyplot as plt\n'), ((274, 294), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (280, 294), True, 'from keras import backend as K\n'), ((351, 371), 'keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (357, 371), True, 'from keras import backend as K\n'), ((9330, 9339), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9337, 9339), True, 'import matplotlib.pyplot as plt\n'), ((696, 707), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (705, 707), True, 'from keras import backend as K\n'), ((977, 1006), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (983, 1006), True, 'from keras import backend as K\n'), ((1049, 1069), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (1055, 1069), True, 'from keras import backend as K\n'), ((1128, 1139), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1137, 1139), True, 'from keras import backend as K\n'), ((1418, 1447), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (1424, 1447), True, 'from keras import backend as K\n'), ((1489, 1509), 'keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (1495, 1509), True, 'from keras import backend as K\n'), ((1564, 1575), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1573, 1575), True, 'from keras import backend as K\n')]
|
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
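# Integrate the ODE dy/dt = -2*y with initial condition y(0) = 1 over t in [0, 4] and plot y(t).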
def calc_derivative(ypos, time):
return -2*ypos
time_vec = np.linspace(0, 4, 40)
yvec = odeint(calc_derivative, 1, time_vec)
plt.figure(figsize=(4, 3))
plt.plot(time_vec, yvec)
plt.xlabel('t: Time')
plt.ylabel('y: Position')
plt.tight_layout()
|
[
"matplotlib.pyplot.plot",
"scipy.integrate.odeint",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] |
[((156, 177), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(40)'], {}), '(0, 4, 40)\n', (167, 177), True, 'import numpy as np\n'), ((185, 221), 'scipy.integrate.odeint', 'odeint', (['calc_derivative', '(1)', 'time_vec'], {}), '(calc_derivative, 1, time_vec)\n', (191, 221), False, 'from scipy.integrate import odeint\n'), ((223, 249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 3)'}), '(figsize=(4, 3))\n', (233, 249), True, 'from matplotlib import pyplot as plt\n'), ((250, 274), 'matplotlib.pyplot.plot', 'plt.plot', (['time_vec', 'yvec'], {}), '(time_vec, yvec)\n', (258, 274), True, 'from matplotlib import pyplot as plt\n'), ((275, 296), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t: Time"""'], {}), "('t: Time')\n", (285, 296), True, 'from matplotlib import pyplot as plt\n'), ((297, 322), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y: Position"""'], {}), "('y: Position')\n", (307, 322), True, 'from matplotlib import pyplot as plt\n'), ((323, 341), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (339, 341), True, 'from matplotlib import pyplot as plt\n')]
|
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import scipy.io as sio
import seaborn as sns
import matplotlib.pyplot as plt
import wget
import os
from random import shuffle
import cv2
from PIL import Image
sns.set_style("white")
# ----------------------Downloading DATA--------------------------
folder_name = 'svhn_data'
filename_list = ['train_32x32.mat', 'test_32x32.mat', 'extra_32x32.mat']
print('\n')
print('Checking if ' + folder_name + ' directory exists')
print('\n')
if not os.path.exists(folder_name):
print('Directory does not exist. Creating ' + folder_name + ' directory now')
print('\n')
os.mkdir(folder_name)
print('Directory ' + folder_name + ' created')
else:
print('Directory ' + folder_name + ' already exists.')
print('\n')
print('Downloading svhn data files...')
print('\n')
for filename in filename_list:
filepath = './svhn_data/' + filename
if not os.path.exists(filepath):
print('Downloading ' + filename + ' file')
print('\n')
url = 'http://ufldl.stanford.edu/housenumbers/' + filename
wget.download(url, filepath)
else:
print('File ' + filename + ' already exists.')
print('\n')
print(20*"+")
print('Downloading done')
# ------------------------------------------------------------------------
def image_compare(img,lab,fig_name):
plt.figure(str(fig_name))
for i in range(1, 10):
plt.subplot(3, 3, i)
plt.imshow(img[:,:,:,i])
plt.title('Num ' + str(lab[i]))
plt.xticks()
plt.yticks()
plt.tight_layout()
plt.show(block=False)
return
# ---------------------------LOADING SVHN DATA----------------------------
# These files contain dictionaries.
# The dictionaries keys are: dict_keys(['y', 'X', '__version__', '__header__', '__globals__'])
# We are only concerned with the 'y' and 'X'.
# The 'y' key contains the labels (What the number is in the image)
# The 'X' key contains the actual images.
train_data = sio.loadmat('svhn_data/train_32x32.mat')
test_data = sio.loadmat('svhn_data/test_32x32.mat')
extra_data = sio.loadmat('svhn_data/extra_32x32.mat')
# Combining X from train, test & extra & stacking them one above the other
x_train = np.array(train_data['X'])
x_test = np.array(test_data['X'])
x_extra = np.array(extra_data['X'])
x = np.concatenate((x_train,x_test,x_extra),axis=-1)
print(20*"+")
print("Combined all image matrices!")
# Combining y from train, test & extra & converting label 10 to 0 across the entire target variable
y_train = train_data['y']
y_test = test_data['y']
y_extra = extra_data['y']
y = np.concatenate((y_train,y_test,y_extra))
y[y == 10] = 0 # label 10 has been converted to 0
print(20*"+")
print("Combined all labels!")
ind_list = [i for i in range(len(x[1,1,1,:]))]
shuffle(ind_list)
x_s = x[:,:,:,ind_list]
y_s = y[ind_list,]
print(20*"+")
print("Data Shuffled!")
# Splitting into train & test
train_pct_index = int(0.8 * (len(x[1,1,1,:])))
X_train, X_test = x_s[:,:,:,:train_pct_index], x_s[:,:,:,train_pct_index:]
y_train, y_test = y_s[:train_pct_index], y_s[train_pct_index:]
#####################################################################
unique1, train_counts = np.unique(y_train, return_counts=True)
train_counts = np.asarray( (unique1, train_counts) ).T
unique2, test_counts = np.unique(y_test, return_counts=True)
test_counts = np.asarray( (unique2, test_counts) ).T
ax1 = plt.subplot(121)
ax1.grid(False)
sns.set_style("white")
sns.barplot(np.arange(0,len(train_counts)),train_counts[:,-1])
plt.xlabel("Categories")
plt.ylabel("Counts")
plt.title("Labels distribution in Train Dataset")
ax2 = plt.subplot(122,sharey=ax1)
ax2.grid(False)
sns.set_style("white")
sns.barplot(np.arange(0,len(test_counts)),test_counts[:,-1])
plt.xlabel("Categories")
plt.ylabel("Counts")
plt.title("Labels distribution in Test Dataset")
plt.show()
#####################################################################
print(20*"+")
print("Data Splitting Completed!")
# PLOTTING IMAGES
# Normalizing images
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
def transform_img(img, img_width, img_height):
#Histogram Equalization
img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)
return img
x_train_normalized = []
x_test_normalized = []
image_compare(X_train,y_train,"before normalizing")
tot_train_images = len(X_train[1,1,1,:])
for i in range(tot_train_images):
image = X_train[:,:,:,i]
img = transform_img(image, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
x_train_normalized.append(img)
x_train_normalized = np.array(x_train_normalized)
x_train_normalized = np.transpose(x_train_normalized,(1,2,3,0))
image_compare(x_train_normalized,y_train,"after normalizing")
print(20*"+")
print("Normalized Training Images!")
tot_test_images = len(X_test[1,1,1,:])
for i in range(tot_test_images):
image = X_test[:,:,:,i]
img = transform_img(image, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)
x_test_normalized.append(img)
x_test_normalized = np.array(x_test_normalized)
x_test_normalized = np.transpose(x_test_normalized,(1,2,3,0))
print(20*"+")
print("Normalized Testing Images!")
print(20*"+")
print("Preprocessing Completed!")
# Note - Data has been combined, shuffled, split, normalized here. Also label 10 has been converted to 0
# Now we work on Frameworks
#### SHAPE OF X_TRAIN_NORMALIZED IS (32, 32, 3, 504336)
#### SHAPE OF X_TEST_NORMALIZED IS (32, 32, 3, 126084)
#### SHAPE OF Y_TRAIN IS (504336, 1)
#### SHAPE OF Y_TEST IS (126084, 1)
dict_train = {'x_train':x_train_normalized,'y_train':y_train}
dict_test = {'x_test':x_test_normalized,'y_test':y_test}
sio.savemat('./svhn_data/train_processed.mat',dict_train,format='5')
sio.savemat('./svhn_data/test_processed.mat',dict_test,format='5')
print(20*"+")
print("Files Created!")
print("Execute 02_LMDB_Creator now!")
print(20*"+")
|
[
"matplotlib.pyplot.title",
"os.mkdir",
"scipy.io.loadmat",
"random.shuffle",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"numpy.transpose",
"os.path.exists",
"matplotlib.pyplot.xticks",
"cv2.resize",
"seaborn.set_style",
"cv2.equalizeHist",
"matplotlib.pyplot.show",
"numpy.asarray",
"wget.download",
"matplotlib.pyplot.ylabel",
"numpy.concatenate",
"matplotlib.pyplot.subplot",
"warnings.filterwarnings",
"scipy.io.savemat",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((17, 50), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (40, 50), False, 'import warnings\n'), ((241, 263), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (254, 263), True, 'import seaborn as sns\n'), ((2080, 2120), 'scipy.io.loadmat', 'sio.loadmat', (['"""svhn_data/train_32x32.mat"""'], {}), "('svhn_data/train_32x32.mat')\n", (2091, 2120), True, 'import scipy.io as sio\n'), ((2134, 2173), 'scipy.io.loadmat', 'sio.loadmat', (['"""svhn_data/test_32x32.mat"""'], {}), "('svhn_data/test_32x32.mat')\n", (2145, 2173), True, 'import scipy.io as sio\n'), ((2188, 2228), 'scipy.io.loadmat', 'sio.loadmat', (['"""svhn_data/extra_32x32.mat"""'], {}), "('svhn_data/extra_32x32.mat')\n", (2199, 2228), True, 'import scipy.io as sio\n'), ((2320, 2345), 'numpy.array', 'np.array', (["train_data['X']"], {}), "(train_data['X'])\n", (2328, 2345), True, 'import numpy as np\n'), ((2356, 2380), 'numpy.array', 'np.array', (["test_data['X']"], {}), "(test_data['X'])\n", (2364, 2380), True, 'import numpy as np\n'), ((2392, 2417), 'numpy.array', 'np.array', (["extra_data['X']"], {}), "(extra_data['X'])\n", (2400, 2417), True, 'import numpy as np\n'), ((2423, 2474), 'numpy.concatenate', 'np.concatenate', (['(x_train, x_test, x_extra)'], {'axis': '(-1)'}), '((x_train, x_test, x_extra), axis=-1)\n', (2437, 2474), True, 'import numpy as np\n'), ((2717, 2759), 'numpy.concatenate', 'np.concatenate', (['(y_train, y_test, y_extra)'], {}), '((y_train, y_test, y_extra))\n', (2731, 2759), True, 'import numpy as np\n'), ((2908, 2925), 'random.shuffle', 'shuffle', (['ind_list'], {}), '(ind_list)\n', (2915, 2925), False, 'from random import shuffle\n'), ((3336, 3374), 'numpy.unique', 'np.unique', (['y_train'], {'return_counts': '(True)'}), '(y_train, return_counts=True)\n', (3345, 3374), True, 'import numpy as np\n'), ((3455, 3492), 'numpy.unique', 'np.unique', (['y_test'], {'return_counts': '(True)'}), '(y_test, return_counts=True)\n', (3464, 3492), True, 'import numpy as np\n'), ((3556, 3572), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (3567, 3572), True, 'import matplotlib.pyplot as plt\n'), ((3591, 3613), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (3604, 3613), True, 'import seaborn as sns\n'), ((3681, 3705), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Categories"""'], {}), "('Categories')\n", (3691, 3705), True, 'import matplotlib.pyplot as plt\n'), ((3707, 3727), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (3717, 3727), True, 'import matplotlib.pyplot as plt\n'), ((3729, 3778), 'matplotlib.pyplot.title', 'plt.title', (['"""Labels distribution in Train Dataset"""'], {}), "('Labels distribution in Train Dataset')\n", (3738, 3778), True, 'import matplotlib.pyplot as plt\n'), ((3788, 3816), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {'sharey': 'ax1'}), '(122, sharey=ax1)\n', (3799, 3816), True, 'import matplotlib.pyplot as plt\n'), ((3834, 3856), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (3847, 3856), True, 'import seaborn as sns\n'), ((3920, 3944), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Categories"""'], {}), "('Categories')\n", (3930, 3944), True, 'import matplotlib.pyplot as plt\n'), ((3946, 3966), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (3956, 3966), True, 'import matplotlib.pyplot as plt\n'), ((3968, 4016), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Labels distribution in Test Dataset"""'], {}), "('Labels distribution in Test Dataset')\n", (3977, 4016), True, 'import matplotlib.pyplot as plt\n'), ((4020, 4030), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4028, 4030), True, 'import matplotlib.pyplot as plt\n'), ((4929, 4957), 'numpy.array', 'np.array', (['x_train_normalized'], {}), '(x_train_normalized)\n', (4937, 4957), True, 'import numpy as np\n'), ((4980, 5026), 'numpy.transpose', 'np.transpose', (['x_train_normalized', '(1, 2, 3, 0)'], {}), '(x_train_normalized, (1, 2, 3, 0))\n', (4992, 5026), True, 'import numpy as np\n'), ((5386, 5413), 'numpy.array', 'np.array', (['x_test_normalized'], {}), '(x_test_normalized)\n', (5394, 5413), True, 'import numpy as np\n'), ((5435, 5480), 'numpy.transpose', 'np.transpose', (['x_test_normalized', '(1, 2, 3, 0)'], {}), '(x_test_normalized, (1, 2, 3, 0))\n', (5447, 5480), True, 'import numpy as np\n'), ((6059, 6129), 'scipy.io.savemat', 'sio.savemat', (['"""./svhn_data/train_processed.mat"""', 'dict_train'], {'format': '"""5"""'}), "('./svhn_data/train_processed.mat', dict_train, format='5')\n", (6070, 6129), True, 'import scipy.io as sio\n'), ((6129, 6197), 'scipy.io.savemat', 'sio.savemat', (['"""./svhn_data/test_processed.mat"""', 'dict_test'], {'format': '"""5"""'}), "('./svhn_data/test_processed.mat', dict_test, format='5')\n", (6140, 6197), True, 'import scipy.io as sio\n'), ((534, 561), 'os.path.exists', 'os.path.exists', (['folder_name'], {}), '(folder_name)\n', (548, 561), False, 'import os\n'), ((668, 689), 'os.mkdir', 'os.mkdir', (['folder_name'], {}), '(folder_name)\n', (676, 689), False, 'import os\n'), ((1634, 1652), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1650, 1652), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1679), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1666, 1679), True, 'import matplotlib.pyplot as plt\n'), ((3391, 3426), 'numpy.asarray', 'np.asarray', (['(unique1, train_counts)'], {}), '((unique1, train_counts))\n', (3401, 3426), True, 'import numpy as np\n'), ((3508, 3542), 'numpy.asarray', 'np.asarray', (['(unique2, test_counts)'], {}), '((unique2, test_counts))\n', (3518, 3542), True, 'import numpy as np\n'), ((4338, 4368), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (4354, 4368), False, 'import cv2\n'), ((4389, 4419), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img[:, :, 1]'], {}), '(img[:, :, 1])\n', (4405, 4419), False, 'import cv2\n'), ((4440, 4470), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img[:, :, 2]'], {}), '(img[:, :, 2])\n', (4456, 4470), False, 'import cv2\n'), ((4482, 4553), 'cv2.resize', 'cv2.resize', (['img', '(img_width, img_height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (img_width, img_height), interpolation=cv2.INTER_CUBIC)\n', (4492, 4553), False, 'import cv2\n'), ((968, 992), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (982, 992), False, 'import os\n'), ((1144, 1172), 'wget.download', 'wget.download', (['url', 'filepath'], {}), '(url, filepath)\n', (1157, 1172), False, 'import wget\n'), ((1489, 1509), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', 'i'], {}), '(3, 3, i)\n', (1500, 1509), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1546), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img[:, :, :, i]'], {}), '(img[:, :, :, i])\n', (1529, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1594, 1606), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), 
'()\n', (1604, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1616, 1628), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (1626, 1628), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines.formating import Collect
from ssod.core import TrimapMasks
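# ExtraAttrs injects fixed key/value pairs into each result dict, ExtraCollect extends
# mmdet's Collect with extra meta keys, and PseudoSamples adds empty/fake annotations so
# unlabeled samples stay aligned with labeled ones.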
@PIPELINES.register_module()
class ExtraAttrs(object):
def __init__(self, **attrs):
self.attrs = attrs
def __call__(self, results):
for k, v in self.attrs.items():
assert k not in results
results[k] = v
return results
@PIPELINES.register_module()
class ExtraCollect(Collect):
def __init__(self, *args, extra_meta_keys=[], **kwargs):
super().__init__(*args, **kwargs)
self.meta_keys = self.meta_keys + tuple(extra_meta_keys)
@PIPELINES.register_module()
class PseudoSamples(object):
def __init__(
self, with_bbox=False, with_mask=False, with_seg=False, fill_value=255
):
"""
        Replace gt labels in the original data with fake labels, or add extra fake labels for unlabeled data.
        This removes the effect of labeled data and keeps each sample's elements aligned with the other samples.
        Args:
            with_bbox: add empty fake bbox and label annotations.
            with_mask: add fake instance masks filled with `fill_value`.
            with_seg: add a fake semantic segmentation map filled with `fill_value`.
            fill_value: fill value used for the fake masks / segmentation map.
"""
self.with_bbox = with_bbox
self.with_mask = with_mask
self.with_seg = with_seg
self.fill_value = fill_value
def __call__(self, results):
if self.with_bbox:
results["gt_bboxes"] = np.zeros((0, 4))
results["gt_labels"] = np.zeros((0,))
if "bbox_fields" not in results:
results["bbox_fields"] = []
if "gt_bboxes" not in results["bbox_fields"]:
results["bbox_fields"].append("gt_bboxes")
if self.with_mask:
num_inst = len(results["gt_bboxes"])
h, w = results["img"].shape[:2]
results["gt_masks"] = TrimapMasks(
[
self.fill_value * np.ones((h, w), dtype=np.uint8)
for _ in range(num_inst)
],
h,
w,
)
if "mask_fields" not in results:
results["mask_fields"] = []
if "gt_masks" not in results["mask_fields"]:
results["mask_fields"].append("gt_masks")
if self.with_seg:
results["gt_semantic_seg"] = self.fill_value * np.ones(
results["img"].shape[:2], dtype=np.uint8
)
if "seg_fields" not in results:
results["seg_fields"] = []
if "gt_semantic_seg" not in results["seg_fields"]:
results["seg_fields"].append("gt_semantic_seg")
return results
|
[
"numpy.zeros",
"mmdet.datasets.PIPELINES.register_module",
"numpy.ones"
] |
[((149, 176), 'mmdet.datasets.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (174, 176), False, 'from mmdet.datasets import PIPELINES\n'), ((426, 453), 'mmdet.datasets.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (451, 453), False, 'from mmdet.datasets import PIPELINES\n'), ((654, 681), 'mmdet.datasets.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (679, 681), False, 'from mmdet.datasets import PIPELINES\n'), ((1393, 1409), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (1401, 1409), True, 'import numpy as np\n'), ((1445, 1459), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (1453, 1459), True, 'import numpy as np\n'), ((2327, 2376), 'numpy.ones', 'np.ones', (["results['img'].shape[:2]"], {'dtype': 'np.uint8'}), "(results['img'].shape[:2], dtype=np.uint8)\n", (2334, 2376), True, 'import numpy as np\n'), ((1889, 1920), 'numpy.ones', 'np.ones', (['(h, w)'], {'dtype': 'np.uint8'}), '((h, w), dtype=np.uint8)\n', (1896, 1920), True, 'import numpy as np\n')]
|
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:<EMAIL>)
import logging
import numpy
import pandas
import os
import os.path
import requests
import time
import typing
from datetime import datetime, timedelta
from dateutil import parser
from decimal import Decimal
from solana.publickey import PublicKey
from .account import Account
from .context import Context
# # 🥭 TradeHistory class
#
# Downloads and unifies trade history data.
#
class TradeHistory:
COLUMNS = ["Timestamp", "Market", "Side", "MakerOrTaker", "Change", "Price", "Quantity", "Fee",
"SequenceNumber", "FeeTier", "MarketType", "OrderId"]
__perp_column_name_mapper = {
"loadTimestamp": "Timestamp",
"seqNum": "SequenceNumber",
"price": "Price",
"quantity": "Quantity"
}
__spot_column_name_mapper = {
"loadTimestamp": "Timestamp",
"seqNum": "SequenceNumber",
"price": "Price",
"size": "Quantity",
"side": "Side",
"feeCost": "Fee",
"feeTier": "FeeTier",
"orderId": "OrderId"
}
__decimal_spot_columns = [
"openOrderSlot",
"feeTier",
"nativeQuantityReleased",
"nativeQuantityPaid",
"nativeFeeOrRebate",
"orderId",
"clientOrderId",
"source",
"seqNum",
"baseTokenDecimals",
"quoteTokenDecimals",
"price",
"feeCost",
"size"
]
__decimal_perp_columns = [
"seqNum",
"makerFee",
"takerFee",
"makerOrderId",
"takerOrderId",
"price",
"quantity"
]
__column_converters = {
"Timestamp": lambda value: parser.parse(value),
"SequenceNumber": lambda value: Decimal(value),
"Price": lambda value: Decimal(value),
"Change": lambda value: Decimal(value),
"Quantity": lambda value: Decimal(value),
"Fee": lambda value: Decimal(value),
"FeeTier": lambda value: Decimal(value),
"OrderId": lambda value: Decimal(value)
}
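    # Note: load() below passes these converters to pandas.read_csv() so that
    # Decimal- and datetime-valued columns survive a CSV round-trip intact.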
def __init__(self, seconds_pause_between_rest_calls: int = 1) -> None:
self._logger: logging.Logger = logging.getLogger(self.__class__.__name__)
self.__seconds_pause_between_rest_calls: int = seconds_pause_between_rest_calls
self.__trades: pandas.DataFrame = pandas.DataFrame(columns=TradeHistory.COLUMNS)
@staticmethod
def __market_lookup(context: Context) -> typing.Callable[[pandas.Series], str]:
def __safe_lookup(row: pandas.Series) -> str:
address: PublicKey = PublicKey(row["address"])
market = context.market_lookup.find_by_address(address)
if market is None:
raise Exception(f"No market found with address {address}")
return market.symbol
return __safe_lookup
@staticmethod
def __download_json(url: str) -> typing.Any:
response = requests.get(url)
response.raise_for_status()
return response.json()
@staticmethod
def __download_all_perps(context: Context, account: Account) -> pandas.DataFrame:
url = f"https://event-history-api.herokuapp.com/perp_trades/{account.address}?page=all"
data = TradeHistory.__download_json(url)
trades: pandas.DataFrame = TradeHistory.__perp_data_to_dataframe(context, account, data)
return trades
@staticmethod
def __download_updated_perps(context: Context, account: Account, newer_than: typing.Optional[datetime], seconds_pause_between_rest_calls: int) -> pandas.DataFrame:
trades: pandas.DataFrame = pandas.DataFrame(columns=TradeHistory.COLUMNS)
page: int = 0
complete: bool = False
while not complete:
page += 1
url = f"https://event-history-api.herokuapp.com/perp_trades/{account.address}?page={page}"
data = TradeHistory.__download_json(url)
frame: pandas.DataFrame = TradeHistory.__perp_data_to_dataframe(context, account, data)
if len(frame) == 0:
complete = True
else:
trades = trades.append(frame)
if (newer_than is not None) and (frame.loc[frame.index[-1], "Timestamp"] < newer_than):
complete = True
else:
time.sleep(seconds_pause_between_rest_calls)
return trades
@staticmethod
def __perp_data_to_dataframe(context: Context, account: Account, data: typing.Any) -> pandas.DataFrame:
# Perp data is an array of JSON packages like:
# {
# "loadTimestamp": "2021-09-02T10:54:56.000Z",
# "address": <PUBLIC-KEY-STRING>,
# "seqNum": "2831",
# "makerFee": "0",
# "takerFee": "0.0004999999999988347",
# "takerSide": "sell",
# "maker": <PUBLIC-KEY-STRING>,
# "makerOrderId": <BIG-INT>,
# "taker": <PUBLIC-KEY-STRING>,
# "takerOrderId": <BIG-INT>,
# "price": "50131.9",
# "quantity": "0.019"
# },
def __side_lookup(row: pandas.Series) -> str:
if row["MakerOrTaker"] == "taker":
return str(row["takerSide"])
elif row["takerSide"] == "buy":
return "sell"
else:
return "buy"
def __fee_calculator(row: pandas.Series) -> Decimal:
price: Decimal = row["Price"]
quantity: Decimal = row["Quantity"]
fee_rate: Decimal
if row["MakerOrTaker"] == "maker":
fee_rate = row["makerFee"]
else:
fee_rate = row["takerFee"]
return price * quantity * fee_rate
if len(data["data"]) <= 1:
return pandas.DataFrame(columns=TradeHistory.COLUMNS)
trade_data = data["data"][:-1]
for trade in trade_data:
for column_name in TradeHistory.__decimal_perp_columns:
trade[column_name] = Decimal(trade[column_name])
frame = pandas.DataFrame(trade_data).rename(mapper=TradeHistory.__perp_column_name_mapper, axis=1, copy=True)
frame["Timestamp"] = frame["Timestamp"].apply(lambda timestamp: parser.parse(timestamp).replace(microsecond=0))
frame["Market"] = frame.apply(TradeHistory.__market_lookup(context), axis=1)
frame["MarketType"] = "perp"
this_address = f"{account.address}"
frame["MakerOrTaker"] = frame["maker"].apply(lambda addy: "maker" if addy == this_address else "taker")
frame["FeeTier"] = -1
frame["Fee"] = frame.apply(__fee_calculator, axis=1)
frame["Side"] = frame.apply(__side_lookup, axis=1)
frame["Change"] = (frame["Price"] * frame["Quantity"]) - frame["Fee"]
frame["Change"] = frame["Change"].where(frame["Side"] == "sell", other=-frame["Change"])
frame["OrderId"] = numpy.where(frame["MakerOrTaker"] == "maker",
frame["makerOrderId"], frame["takerOrderId"])
return frame[TradeHistory.COLUMNS]
@staticmethod
def __download_all_spots(context: Context, account: Account) -> pandas.DataFrame:
trades: pandas.DataFrame = pandas.DataFrame(columns=TradeHistory.COLUMNS)
for spot_open_orders_address in account.spot_open_orders:
url = f"https://event-history-api.herokuapp.com/trades/open_orders/{spot_open_orders_address}?page=all"
data = TradeHistory.__download_json(url)
frame = TradeHistory.__spot_data_to_dataframe(context, account, data)
trades = trades.append(frame)
return trades
@staticmethod
def __download_updated_spots(context: Context, account: Account, newer_than: typing.Optional[datetime], seconds_pause_between_rest_calls: int) -> pandas.DataFrame:
trades: pandas.DataFrame = pandas.DataFrame(columns=TradeHistory.COLUMNS)
for spot_open_orders_address in account.spot_open_orders:
page: int = 0
complete: bool = False
while not complete:
page += 1
url = f"https://event-history-api.herokuapp.com/trades/open_orders/{spot_open_orders_address}?page={page}"
data = TradeHistory.__download_json(url)
frame = TradeHistory.__spot_data_to_dataframe(context, account, data)
if len(frame) == 0:
complete = True
else:
trades = trades.append(frame)
earliest_in_frame = frame.loc[frame.index[-1], "Timestamp"]
if (newer_than is not None) and (earliest_in_frame < newer_than):
complete = True
else:
time.sleep(seconds_pause_between_rest_calls)
return trades
@staticmethod
def __spot_data_to_dataframe(context: Context, account: Account, data: typing.Any) -> pandas.DataFrame:
# Spot data is an array of JSON packages like:
# {
# "loadTimestamp": "2021-10-05T16:04:50.717Z",
# "address": <PUBLIC-KEY-STRING>,
# "programId": <PUBLIC-KEY-STRING>,
# "baseCurrency": "SOL",
# "quoteCurrency": "USDC",
# "fill": true,
# "out": false,
# "bid": true,
# "maker": true,
# "openOrderSlot": "0",
# "feeTier": "4",
# "nativeQuantityReleased": "3000000000",
# "nativeQuantityPaid": "487482712",
# "nativeFeeOrRebate": "146288",
# "orderId": <BIG-INT>,
# "openOrders": <PUBLIC-KEY-STRING>,
# "clientOrderId": <BIG-INT>,
# "uuid": <LONG-OPAQUE-UUID-STRING>,
# "source": "2",
# "seqNum": "24827175",
# "baseTokenDecimals": 9,
# "quoteTokenDecimals": 6,
# "side": "buy",
# "price": 162.543,
# "feeCost": -0.146288,
# "size": 3
# }
if len(data["data"]) == 0:
return pandas.DataFrame(columns=TradeHistory.COLUMNS)
else:
trade_data = data["data"]
for trade in trade_data:
for column_name in TradeHistory.__decimal_spot_columns:
trade[column_name] = Decimal(trade[column_name])
frame = pandas.DataFrame(trade_data).rename(
mapper=TradeHistory.__spot_column_name_mapper, axis=1, copy=True)
frame["Timestamp"] = frame["Timestamp"].apply(
lambda timestamp: parser.parse(timestamp).replace(microsecond=0))
frame["Market"] = frame.apply(TradeHistory.__market_lookup(context), axis=1)
frame["MakerOrTaker"] = numpy.where(frame["maker"], "maker", "taker")
frame["Change"] = (frame["Price"] * frame["Quantity"]) - frame["Fee"]
frame["Change"] = frame["Change"].where(frame["Side"] == "sell", other=-frame["Change"])
frame["MarketType"] = "spot"
return frame[TradeHistory.COLUMNS]
@property
def trades(self) -> pandas.DataFrame:
return self.__trades.copy(deep=True)
def download_latest(self, context: Context, account: Account, cutoff: datetime) -> None:
# Go back further than we need to so we can be sure we're not skipping any trades due to race conditions.
# We remove duplicates a few lines further down.
self._logger.info(f"Downloading spot trades from {cutoff}")
spot: pandas.DataFrame = TradeHistory.__download_updated_spots(context,
account,
cutoff,
self.__seconds_pause_between_rest_calls)
self._logger.info(f"Downloading perp trades from {cutoff}")
perp: pandas.DataFrame = TradeHistory.__download_updated_perps(context,
account,
cutoff,
self.__seconds_pause_between_rest_calls)
all_trades: pandas.DataFrame = pandas.concat([self.__trades, spot, perp])
all_trades = all_trades[all_trades["Timestamp"] >= cutoff]
distinct_trades = all_trades.drop_duplicates()
sorted_trades = distinct_trades.sort_values(["Timestamp", "Market", "SequenceNumber"], axis=0, ascending=True)
self._logger.info(f"Download complete. Data contains {len(sorted_trades)} trades.")
self.__trades = sorted_trades
def update(self, context: Context, account: Account) -> None:
latest_trade: typing.Optional[datetime] = self.__trades.loc[self.__trades.index[-1],
"Timestamp"] if len(self.__trades) > 0 else None
spot: pandas.DataFrame
perp: pandas.DataFrame
if latest_trade is None:
self._logger.info("Downloading all spot trades.")
spot = TradeHistory.__download_all_spots(context, account)
self._logger.info("Downloading all perp trades.")
perp = TradeHistory.__download_all_perps(context, account)
else:
# Go back further than we need to so we can be sure we're not skipping any trades due to race conditions.
# We remove duplicates a few lines further down.
cutoff_safety_margin: timedelta = timedelta(hours=1)
cutoff: datetime = latest_trade - cutoff_safety_margin
self._logger.info(
f"Downloading spot trades from {cutoff}, {cutoff_safety_margin} before latest stored trade at {latest_trade}")
spot = TradeHistory.__download_updated_spots(context, account,
cutoff, self.__seconds_pause_between_rest_calls)
self._logger.info(
f"Downloading perp trades from {cutoff}, {cutoff_safety_margin} before latest stored trade at {latest_trade}")
perp = TradeHistory.__download_updated_perps(context, account,
cutoff, self.__seconds_pause_between_rest_calls)
all_trades = pandas.concat([self.__trades, spot, perp])
distinct_trades = all_trades.drop_duplicates()
sorted_trades = distinct_trades.sort_values(["Timestamp", "Market", "SequenceNumber"], axis=0, ascending=True)
self._logger.info(f"Download complete. Data contains {len(sorted_trades)} trades.")
self.__trades = sorted_trades
def load(self, filename: str, ok_if_missing: bool = False) -> None:
if not os.path.isfile(filename):
if not ok_if_missing:
raise Exception(f"File {filename} does not exist or is not a file.")
else:
existing = pandas.read_csv(filename,
float_precision="round_trip",
converters=TradeHistory.__column_converters)
self.__trades = self.__trades.append(existing)
def save(self, filename: str) -> None:
self.__trades.to_csv(filename, index=False, mode="w")
def __str__(self) -> str:
return f"« TradeHistory containing {len(self.__trades)} trades »"
def __repr__(self) -> str:
return f"{self}"
|
[
"pandas.DataFrame",
"dateutil.parser.parse",
"decimal.Decimal",
"pandas.read_csv",
"time.sleep",
"solana.publickey.PublicKey",
"os.path.isfile",
"numpy.where",
"datetime.timedelta",
"requests.get",
"pandas.concat",
"logging.getLogger"
] |
[((2888, 2930), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (2905, 2930), False, 'import logging\n'), ((3061, 3107), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': 'TradeHistory.COLUMNS'}), '(columns=TradeHistory.COLUMNS)\n', (3077, 3107), False, 'import pandas\n'), ((3647, 3664), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3659, 3664), False, 'import requests\n'), ((4324, 4370), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': 'TradeHistory.COLUMNS'}), '(columns=TradeHistory.COLUMNS)\n', (4340, 4370), False, 'import pandas\n'), ((7633, 7729), 'numpy.where', 'numpy.where', (["(frame['MakerOrTaker'] == 'maker')", "frame['makerOrderId']", "frame['takerOrderId']"], {}), "(frame['MakerOrTaker'] == 'maker', frame['makerOrderId'], frame[\n 'takerOrderId'])\n", (7644, 7729), False, 'import numpy\n'), ((7948, 7994), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': 'TradeHistory.COLUMNS'}), '(columns=TradeHistory.COLUMNS)\n', (7964, 7994), False, 'import pandas\n'), ((8599, 8645), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': 'TradeHistory.COLUMNS'}), '(columns=TradeHistory.COLUMNS)\n', (8615, 8645), False, 'import pandas\n'), ((13066, 13108), 'pandas.concat', 'pandas.concat', (['[self.__trades, spot, perp]'], {}), '([self.__trades, spot, perp])\n', (13079, 13108), False, 'import pandas\n'), ((15144, 15186), 'pandas.concat', 'pandas.concat', (['[self.__trades, spot, perp]'], {}), '([self.__trades, spot, perp])\n', (15157, 15186), False, 'import pandas\n'), ((2403, 2422), 'dateutil.parser.parse', 'parser.parse', (['value'], {}), '(value)\n', (2415, 2422), False, 'from dateutil import parser\n'), ((2464, 2478), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (2471, 2478), False, 'from decimal import Decimal\n'), ((2511, 2525), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (2518, 2525), False, 'from decimal import Decimal\n'), ((2559, 2573), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (2566, 2573), False, 'from decimal import Decimal\n'), ((2609, 2623), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (2616, 2623), False, 'from decimal import Decimal\n'), ((2654, 2668), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (2661, 2668), False, 'from decimal import Decimal\n'), ((2703, 2717), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (2710, 2717), False, 'from decimal import Decimal\n'), ((2752, 2766), 'decimal.Decimal', 'Decimal', (['value'], {}), '(value)\n', (2759, 2766), False, 'from decimal import Decimal\n'), ((3298, 3323), 'solana.publickey.PublicKey', 'PublicKey', (["row['address']"], {}), "(row['address'])\n", (3307, 3323), False, 'from solana.publickey import PublicKey\n'), ((6509, 6555), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': 'TradeHistory.COLUMNS'}), '(columns=TradeHistory.COLUMNS)\n', (6525, 6555), False, 'import pandas\n'), ((10820, 10866), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': 'TradeHistory.COLUMNS'}), '(columns=TradeHistory.COLUMNS)\n', (10836, 10866), False, 'import pandas\n'), ((11503, 11548), 'numpy.where', 'numpy.where', (["frame['maker']", '"""maker"""', '"""taker"""'], {}), "(frame['maker'], 'maker', 'taker')\n", (11514, 11548), False, 'import numpy\n'), ((14358, 14376), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (14367, 14376), False, 'from datetime import datetime, timedelta\n'), ((15579, 15603), 'os.path.isfile', 
'os.path.isfile', (['filename'], {}), '(filename)\n', (15593, 15603), False, 'import os\n'), ((15761, 15866), 'pandas.read_csv', 'pandas.read_csv', (['filename'], {'float_precision': '"""round_trip"""', 'converters': 'TradeHistory.__column_converters'}), "(filename, float_precision='round_trip', converters=\n TradeHistory.__column_converters)\n", (15776, 15866), False, 'import pandas\n'), ((6734, 6761), 'decimal.Decimal', 'Decimal', (['trade[column_name]'], {}), '(trade[column_name])\n', (6741, 6761), False, 'from decimal import Decimal\n'), ((6779, 6807), 'pandas.DataFrame', 'pandas.DataFrame', (['trade_data'], {}), '(trade_data)\n', (6795, 6807), False, 'import pandas\n'), ((5040, 5084), 'time.sleep', 'time.sleep', (['seconds_pause_between_rest_calls'], {}), '(seconds_pause_between_rest_calls)\n', (5050, 5084), False, 'import time\n'), ((11069, 11096), 'decimal.Decimal', 'Decimal', (['trade[column_name]'], {}), '(trade[column_name])\n', (11076, 11096), False, 'from decimal import Decimal\n'), ((11118, 11146), 'pandas.DataFrame', 'pandas.DataFrame', (['trade_data'], {}), '(trade_data)\n', (11134, 11146), False, 'import pandas\n'), ((6953, 6976), 'dateutil.parser.parse', 'parser.parse', (['timestamp'], {}), '(timestamp)\n', (6965, 6976), False, 'from dateutil import parser\n'), ((9498, 9542), 'time.sleep', 'time.sleep', (['seconds_pause_between_rest_calls'], {}), '(seconds_pause_between_rest_calls)\n', (9508, 9542), False, 'import time\n'), ((11330, 11353), 'dateutil.parser.parse', 'parser.parse', (['timestamp'], {}), '(timestamp)\n', (11342, 11353), False, 'from dateutil import parser\n')]
|
import numpy as np
import pandas as pd
import os
from collections import Counter
from scipy.stats import hypergeom
fdr_threshold = 0.05
def main():
os.makedirs('results/enrichment', exist_ok=True)
os.makedirs('results/GO', exist_ok=True)
# LOAD
# single cell gene data
all_gene_data = pd.read_csv('data/gene_lists/all-scRNA-data.csv')
# normalised RPKM bulk data corrected for age, sex, etc
bulk_data = pd.read_csv('data/processed_psychencode/PsychENCODE-prenatal-bulk-RPKM-data-scRNA-filtered-Winsor-log2-residualised.csv')
# gene-wise correlation with PC components
correlation_results = pd.read_csv('results/gene_correlations/PCA_correlations-KendallTau-residualisedRPKM.csv')
# fetal background geneset = all filtered genes in bulk data
background_genes = pd.read_csv('data/gene_lists/background_genes.txt', header=None)[0]
print('number of background genes: {:}'.format(len(background_genes)))
# get gene lists
print('gathering gene lists')
# genes differentially expressed by classes or categories, returning all genes, as well as those that are unique to each class
# CELL TIMING: PRECURSOR OR MATURE
cell_timing, cell_timing_genes, cell_timing_unique_genes = get_gene_lists(all_gene_data, background_genes, class_type='timing')
# CELL CLASS
cell_classes, cell_class_genes, cell_class_unique_genes = get_gene_lists(all_gene_data, background_genes, class_type='class')
# CELL TYPE
cell_types, cell_type_genes, cell_type_unique_genes = get_gene_lists(all_gene_data, background_genes, class_type='cluster_study')
# get significant genes
significant_genes = pd.read_csv('results/gene_correlations/PCA_correlations-KendallTau-PC-significant_genes-p' + str(fdr_threshold) + '.csv')
# genes positively correlated to PC component
positive_significant_genes_list = list(significant_genes.loc[significant_genes['PC1_tau']>0,'symbol'])
# genes negatively correlated to PC component
negative_significant_genes_list = list(significant_genes.loc[significant_genes['PC1_tau']<0,'symbol'])
# ENRICHMENT
print("cell enrichments")
cell_timing_enrichment_results = run_enrichment(cell_timing, cell_timing_genes, cell_timing_unique_genes, positive_significant_genes_list, negative_significant_genes_list, background_genes)
cell_timing_enrichment_results.to_csv('results/enrichment/cell_timing_enrichment-PC1-significant_genes-p' + str(fdr_threshold) + '.csv', index=False)
print("see results/enrichment/cell_timing_enrichment-significant_genes-p" + str(fdr_threshold) + ".csv")
cell_class_enrichment_results = run_enrichment(cell_classes, cell_class_genes, cell_class_unique_genes, positive_significant_genes_list, negative_significant_genes_list, background_genes)
cell_class_enrichment_results.to_csv('results/enrichment/cell_class_enrichment-PC1-significant_genes-p' + str(fdr_threshold) + '.csv', index=False)
print("see results/enrichment/cell_class_enrichment-significant_genes-p" + str(fdr_threshold) + ".csv")
cell_type_enrichment_results = run_enrichment(cell_types, cell_type_genes, cell_type_unique_genes, positive_significant_genes_list, negative_significant_genes_list, background_genes)
cell_type_enrichment_results.to_csv('results/enrichment/cell_type_enrichment-PC1-significant_genes-p' + str(fdr_threshold) + '.csv', index=False)
print("see results/enrichment/cell_type_enrichment-significant_genes-p" + str(fdr_threshold) + ".csv")
# save out lists for webgestalt
np.savetxt('results/GO/positive_genes.txt', positive_significant_genes_list, fmt='%s')
np.savetxt('results/GO/negative_genes.txt', negative_significant_genes_list, fmt='%s')
### HELPERS ##############################################################################################
def get_gene_lists(data, background_genes, class_type='class'):
# ALL GENES EXPRESSED IN A GIVEN CELL CLASS
class_genes = []
classes = np.unique(data[class_type])
# remove neuron class if it's there (not split into excit. and inhib.),
classes = list(set(classes) - set(['neuron']))
for cell_class in classes:
# genes in cell class
class_data = np.array(data.loc[data[class_type]==cell_class, 'gene'].values, dtype=str).reshape(-1)
# only keep genes that are also present in bulk data
class_data = class_data[pd.DataFrame(class_data).isin(list(background_genes)).values.reshape(-1)]
class_genes.append(np.unique(class_data))
# ALL GENES *UNIQUELY* EXPRESSED IN A GIVEN CELL CLASS
ctr = Counter(np.hstack(class_genes))
gene_count = pd.DataFrame([list(ctr.keys()), list(ctr.values())]).T
repeat_genes = gene_count.loc[(gene_count[1]>1),0].values
class_unique_genes = class_genes.copy()
for n,cell_class in enumerate(classes):
# remove shared
class_unique_genes[n] = list(set(class_unique_genes[n])-set(repeat_genes))
return classes, class_genes, class_unique_genes
def safe_div(x,y):
if y == 0:
return np.array([0])
return x / y
def calculate_enrichment(hit_list, top_genes, full_gene_list):
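    # Fold enrichment compares the hit rate within the top genes (x / N) against the rate in
    # the remaining background ((n - x) / (M - N)); the p-value is the hypergeometric survival
    # function P(X >= x) for x hits in N draws from a population of M genes containing n hits.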
x = sum(pd.DataFrame(top_genes).isin(hit_list).values) # how many top genes in cell list
n = sum(pd.DataFrame(hit_list).isin(full_gene_list).values)[0] # how many cell genes in full list
N = len(top_genes) # number of samples
M = len(full_gene_list) # total number in population
enrichment = safe_div( (x/N) , ((n-x) / (M-N)) )
p = hypergeom.sf(x-1, M, n, N)
return enrichment, p
def run_enrichment(classes, gene_lists, unique_gene_lists, positive_genes, negative_genes, background_genes):
enrichment_results = []
num_genes = []
# for each cell class/type
for i in np.arange(len(classes)):
# for full and unique gene lists
for gl in [gene_lists, unique_gene_lists]:
# as long as there are some genes
if len(gl[i])>0:
# calculate enrichment in the postively and negatively correlated lists
for g in [positive_genes, negative_genes]:
enrichment_results.append(calculate_enrichment(list(gl[i]), list(g), list(background_genes)))
num_genes.append(len(gl[i]))
# otherwise is nan
else:
for g in [positive_genes, negative_genes]:
enrichment_results.append((np.array([np.nan]),np.array([np.nan])))
num_genes.append(np.array([0]))
# collate into dataframe
results = pd.DataFrame(np.hstack(enrichment_results).T)
results.columns=['enrichment', 'p']
results['class'] = np.repeat(classes, 4)
results['loading'] = ['positive', 'negative']*(len(classes)*2)
results['gene_list'] = np.hstack([np.repeat(['all', 'unique'], 2)]*len(classes))
results['num_genes'] = np.squeeze(num_genes)
results = results.loc[:,['class','loading','gene_list','num_genes','enrichment','p']]
return results
if __name__ == '__main__':
main()
|
[
"scipy.stats.hypergeom.sf",
"pandas.DataFrame",
"os.makedirs",
"pandas.read_csv",
"numpy.savetxt",
"numpy.hstack",
"numpy.array",
"numpy.squeeze",
"numpy.unique",
"numpy.repeat"
] |
[((156, 204), 'os.makedirs', 'os.makedirs', (['"""results/enrichment"""'], {'exist_ok': '(True)'}), "('results/enrichment', exist_ok=True)\n", (167, 204), False, 'import os\n'), ((209, 249), 'os.makedirs', 'os.makedirs', (['"""results/GO"""'], {'exist_ok': '(True)'}), "('results/GO', exist_ok=True)\n", (220, 249), False, 'import os\n'), ((310, 359), 'pandas.read_csv', 'pd.read_csv', (['"""data/gene_lists/all-scRNA-data.csv"""'], {}), "('data/gene_lists/all-scRNA-data.csv')\n", (321, 359), True, 'import pandas as pd\n'), ((437, 568), 'pandas.read_csv', 'pd.read_csv', (['"""data/processed_psychencode/PsychENCODE-prenatal-bulk-RPKM-data-scRNA-filtered-Winsor-log2-residualised.csv"""'], {}), "(\n 'data/processed_psychencode/PsychENCODE-prenatal-bulk-RPKM-data-scRNA-filtered-Winsor-log2-residualised.csv'\n )\n", (448, 568), True, 'import pandas as pd\n'), ((633, 732), 'pandas.read_csv', 'pd.read_csv', (['"""results/gene_correlations/PCA_correlations-KendallTau-residualisedRPKM.csv"""'], {}), "(\n 'results/gene_correlations/PCA_correlations-KendallTau-residualisedRPKM.csv'\n )\n", (644, 732), True, 'import pandas as pd\n'), ((3544, 3634), 'numpy.savetxt', 'np.savetxt', (['"""results/GO/positive_genes.txt"""', 'positive_significant_genes_list'], {'fmt': '"""%s"""'}), "('results/GO/positive_genes.txt', positive_significant_genes_list,\n fmt='%s')\n", (3554, 3634), True, 'import numpy as np\n'), ((3635, 3725), 'numpy.savetxt', 'np.savetxt', (['"""results/GO/negative_genes.txt"""', 'negative_significant_genes_list'], {'fmt': '"""%s"""'}), "('results/GO/negative_genes.txt', negative_significant_genes_list,\n fmt='%s')\n", (3645, 3725), True, 'import numpy as np\n'), ((3977, 4004), 'numpy.unique', 'np.unique', (['data[class_type]'], {}), '(data[class_type])\n', (3986, 4004), True, 'import numpy as np\n'), ((5511, 5539), 'scipy.stats.hypergeom.sf', 'hypergeom.sf', (['(x - 1)', 'M', 'n', 'N'], {}), '(x - 1, M, n, N)\n', (5523, 5539), False, 'from scipy.stats import hypergeom\n'), ((6659, 6680), 'numpy.repeat', 'np.repeat', (['classes', '(4)'], {}), '(classes, 4)\n', (6668, 6680), True, 'import numpy as np\n'), ((6860, 6881), 'numpy.squeeze', 'np.squeeze', (['num_genes'], {}), '(num_genes)\n', (6870, 6881), True, 'import numpy as np\n'), ((812, 876), 'pandas.read_csv', 'pd.read_csv', (['"""data/gene_lists/background_genes.txt"""'], {'header': 'None'}), "('data/gene_lists/background_genes.txt', header=None)\n", (823, 876), True, 'import pandas as pd\n'), ((4598, 4620), 'numpy.hstack', 'np.hstack', (['class_genes'], {}), '(class_genes)\n', (4607, 4620), True, 'import numpy as np\n'), ((5056, 5069), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5064, 5069), True, 'import numpy as np\n'), ((4497, 4518), 'numpy.unique', 'np.unique', (['class_data'], {}), '(class_data)\n', (4506, 4518), True, 'import numpy as np\n'), ((6563, 6592), 'numpy.hstack', 'np.hstack', (['enrichment_results'], {}), '(enrichment_results)\n', (6572, 6592), True, 'import numpy as np\n'), ((4216, 4292), 'numpy.array', 'np.array', (["data.loc[data[class_type] == cell_class, 'gene'].values"], {'dtype': 'str'}), "(data.loc[data[class_type] == cell_class, 'gene'].values, dtype=str)\n", (4224, 4292), True, 'import numpy as np\n'), ((6786, 6817), 'numpy.repeat', 'np.repeat', (["['all', 'unique']", '(2)'], {}), "(['all', 'unique'], 2)\n", (6795, 6817), True, 'import numpy as np\n'), ((5164, 5187), 'pandas.DataFrame', 'pd.DataFrame', (['top_genes'], {}), '(top_genes)\n', (5176, 5187), True, 'import pandas as pd\n'), ((5257, 5279), 
'pandas.DataFrame', 'pd.DataFrame', (['hit_list'], {}), '(hit_list)\n', (5269, 5279), True, 'import pandas as pd\n'), ((6491, 6504), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (6499, 6504), True, 'import numpy as np\n'), ((6414, 6432), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (6422, 6432), True, 'import numpy as np\n'), ((6433, 6451), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (6441, 6451), True, 'import numpy as np\n'), ((4396, 4420), 'pandas.DataFrame', 'pd.DataFrame', (['class_data'], {}), '(class_data)\n', (4408, 4420), True, 'import pandas as pd\n')]
|
"""
Mask R-CNN
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 auto.py train --dataset=/path/to/auto/dataset --weights=coco
# Resume training a model that you had trained earlier
python3 auto.py train --dataset=/path/to/auto/dataset --weights=last
# Train a new model starting from ImageNet weights
python3 auto.py train --dataset=/path/to/auto/dataset --weights=imagenet
# Apply color overlay to an image
python3 auto.py overlay --weights=/path/to/weights/file.h5 --image=<URL or path to file>
# Apply color overlay to video using the last weights you trained
python3 auto.py overlay --weights=last --video=<URL or path to file>
"""
import os
import sys
import datetime
import enum
import numpy as np
import skimage.draw
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from math import isnan
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
#classchange
class ClassName(enum.Enum):
lane = 1
pedestrian = 2
vehicle = 3
sign_board = 4
street_light = 5
class AutoConfig(Config):
"""Configuration for training on the auto dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "auto"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# Number of classes (including background)
NUM_CLASSES = 1 + 5 # Background + 5 custom classes
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
global dataset_dir
############################################################
# Dataset
############################################################
class AutoDataset(utils.Dataset):
def load_auto(self, dataset_dir, subset):
"""Load a subset of the Auto dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. #classchange
self.add_class("auto", 1, "lane")
self.add_class("auto", 2, "pedestrian")
self.add_class("auto", 3, "vehicle")
self.add_class("auto", 4, "sign_board")
self.add_class("auto", 5, "street_light")
# Train or validation dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
images = os.listdir(dataset_dir)
for i in images:
if i == ".directory":
continue
image_path = os.path.join(dataset_dir, i)
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
source='auto',
image_id=i, # use file name as a unique image id
path=image_path,
width=width, height=height,
)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
info = self.image_info[image_id]
mask_dir = os.path.join(dataset_dir, "../masks")
image_path = os.path.join(mask_dir, str(info["id"]))
image = skimage.io.imread(image_path)
#classchange :start:
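        # Each class is encoded in the mask image as a distinct RGB colour; build one boolean
        # channel per class by matching every pixel against that colour.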
lane = np.all(image == (0, 255, 0), axis=-1)
pedestrian = np.all(image == (255, 0, 255), axis=-1)
vehicle = np.all(image == (0, 255, 255), axis=-1)
sign_board = np.all(image == (255, 0, 0), axis=-1)
street_light = np.all(image == (255, 255, 0), axis=-1)
mask = np.stack((lane, pedestrian, vehicle, sign_board, street_light), axis=2).astype(np.bool)
        class_ids = np.arange(1, 6).astype(np.int32) #classchange (the five foreground classes; background 0 is excluded)
#classchange :end:
return mask, class_ids
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = AutoDataset()
dataset_train.load_auto(args.dataset, "train")
dataset_train.prepare()
# Validation dataset
dataset_val = AutoDataset()
dataset_val.load_auto(args.dataset, "val")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=30,
layers='heads')
def color_overlay(image, mask, class_ids):
"""Apply color overlay.
image: RGB image [height, width, 3]
mask: segmentation mask [height, width, #classes]
Returns result image.
"""
overlayed = image
print("Found classes: ", [ClassName(class_id).name for class_id in class_ids])
if mask.shape[-1] > 0:
for i in range(mask.shape[-1]):
m = mask[:, :, i]
m = np.stack((m, m, m), axis=2)
# classchange
if class_ids[i] == 1:
overlayed = np.where(m, (115, 255, 115), overlayed).astype(np.uint8)
elif class_ids[i] == 2:
overlayed = np.where(m, (255, 115, 255), overlayed).astype(np.uint8)
elif class_ids[i] == 3:
overlayed = np.where(m, (115, 255, 255), overlayed).astype(np.uint8)
elif class_ids[i] == 4:
overlayed = np.where(m, (255, 115, 115), overlayed).astype(np.uint8)
elif class_ids[i] == 5:
overlayed = np.where(m, (255, 255, 115), overlayed).astype(np.uint8)
else:
overlayed = overlayed.astype(np.uint8)
return overlayed
def detect_and_overlay(model, image_path=None, video_path=None):
assert image_path or video_path
# Image or video?
if image_path:
# Run model detection and generate the color overlay
print("Running on {}".format(args.image))
# Read image
image = skimage.io.imread(args.image)
# Convert grayscale images to 3D
if len(image.shape) == 2:
image = np.stack((image, image, image), axis=2)
# Detect objects
r = model.detect([image], verbose=1)[0]
# Color overlay
overlay = color_overlay(image, r['masks'], r['class_ids'])
# Save output
file_name = "overlay_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
skimage.io.imsave(file_name, overlay)
elif video_path:
import cv2
# Video capture
vcapture = cv2.VideoCapture(video_path)
width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = vcapture.get(cv2.CAP_PROP_FPS)
# Define codec and create video writer
file_name = "overlay_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
vwriter = cv2.VideoWriter(file_name,
cv2.VideoWriter_fourcc(*'MJPG'),
fps, (width, height))
count = 0
success = True
while success:
print("frame: ", count)
# Read next image
success, image = vcapture.read()
if success:
# OpenCV returns images as BGR, convert to RGB
image = image[..., ::-1]
# Detect objects
r = model.detect([image], verbose=0)[0]
# Color overlay
overlay = color_overlay(image, r['masks'], r['class_ids'])
# RGB -> BGR to save image to video
overlay = overlay[..., ::-1]
# Add image to video writer
vwriter.write(overlay)
count += 1
vwriter.release()
print("Saved to ", file_name)
############################################################
# Evaluate
############################################################
def get_mask(image, mask, class_ids):
"""Apply color overlay.
image: RGB image [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result image.
"""
overlay = image
pd_mask = np.zeros([overlay.shape[0], overlay.shape[1], 5], dtype=np.uint8) #classchange
if mask.shape[-1] > 0:
for i in range(mask.shape[-1]):
m = mask[:, :, i:i+1]
pd_mask[:,:,class_ids[i]-1:class_ids[i]] = np.where(m, True, pd_mask[:,:,class_ids[i]-1:class_ids[i]]).astype(np.uint8)
############## For visualizing mask ##############
# pd_mask = np.zeros([overlay.shape[0], overlay.shape[1], 3], dtype=np.uint8)
# if mask.shape[-1] > 0:
# for i in range(mask.shape[-1]):
# m = mask[:, :, i]
# m = np.stack((m, m, m), axis=2)
# #classchange
# if class_ids[i] == 1:
# pd_mask = np.where(m, (0, 255, 0), pd_mask).astype(np.uint8)
# elif class_ids[i] == 2:
# pd_mask = np.where(m, (255, 0, 255), pd_mask).astype(np.uint8)
# elif class_ids[i] == 3:
# pd_mask = np.where(m, (0, 255, 255), pd_mask).astype(np.uint8)
# elif class_ids[i] == 4:
# pd_mask = np.where(m, (255, 0, 0), pd_mask).astype(np.uint8)
# elif class_ids[i] == 5:
# pd_mask = np.where(m, (255, 255, 0), pd_mask).astype(np.uint8)
#################################################
return pd_mask
def evaluate(model, dataset, limit=0, image_ids=None):
"""Evaluates a set of data for IOU scores.
dataset: A Dataset object with validation data
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
import time
t_prediction = 0
t_start = time.time()
results = []
total_iou_score = 0
total_class_iou = np.zeros(5) #classchange
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
gt_mask, class_ids = dataset.load_mask(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
################ Save predicted images ##############
# Color overlay
overlay = color_overlay(image, r['masks'], r['class_ids'])
# Save output
file_name = "overlay_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
skimage.io.imsave("dataset/images/predicted/" + file_name, overlay)
#####################################################
pd_mask = get_mask(image, r['masks'], r['class_ids'])
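        # Pixel-wise IoU over all class channels: |prediction AND ground truth| / |prediction OR ground truth|.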
intersection = np.logical_and(gt_mask, pd_mask)
union = np.logical_or(gt_mask, pd_mask)
iou_score = np.sum(intersection) / np.sum(union)
total_iou_score += iou_score
class_iou = np.zeros(5) #classchange
for j in range(5): #classchange
inter = np.logical_and(gt_mask[:,:,j], pd_mask[:,:,j])
un = np.logical_or(gt_mask[:,:,j], pd_mask[:,:,j])
class_iou[j] = np.sum(inter) / np.sum(un)
if not isnan(class_iou[j]):
total_class_iou[j] += class_iou[j]
class_names = [ClassName(class_id).name for class_id in class_ids]
print(f"Class IOU scores")
for j in range(5): #classchange
print(class_names[j].ljust(14) + ": " + str(class_iou[j]))
print(f"IOU score for {image_id} = {iou_score}")
print("".ljust(50,'-'))
results.extend((image_id, iou_score))
print("IOUs = ", results)
print()
print("".ljust(50,'-'))
class_names = [ClassName(class_id).name for class_id in class_ids]
print(f"Average Class IOU scores")
for j in range(5): #classchange
print(class_names[j].ljust(14) + ": " + str((total_class_iou[j]/len(image_ids))))
print(f"------ Average IOU score = {total_iou_score/len(image_ids)} ------\n".ljust(50,'-'))
print("Prediction time: {}. \nAverage time: {}/image".format(
t_prediction, t_prediction / len(image_ids)))
print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN to detect classes for autonomous driving.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'overlay'")
parser.add_argument('--dataset', required=False,
metavar="/path/to/auto/dataset/",
help='Directory of the required dataset')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--image', required=False,
metavar="path or URL to image",
help='Image to apply the color overlay on')
parser.add_argument('--video', required=False,
metavar="path or URL to video",
help='Video to apply the color overlay on')
args = parser.parse_args()
# Validate arguments
if args.command == "train":
assert args.dataset, "Argument --dataset is required for training"
elif args.command == "overlay":
assert args.image or args.video, \
"Provide --image or --video to apply color overlay"
print("Weights: ", args.weights)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
dataset_dir = args.dataset
# Configurations
if args.command == "train":
config = AutoConfig()
else:
class InferenceConfig(AutoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.weights.lower() == "coco":
weights_path = COCO_WEIGHTS_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif args.weights.lower() == "last":
# Find last trained weights
weights_path = model.find_last()
elif args.weights.lower() == "imagenet":
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
else:
weights_path = args.weights
# Load weights
print("Loading weights ", weights_path)
if args.weights.lower() == "coco":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
# Train or evaluate
if args.command == "train":
train(model)
elif args.command == "overlay":
detect_and_overlay(model, image_path=args.image,
video_path=args.video)
elif args.command == "evaluate":
# Validation dataset
dataset_val = AutoDataset()
dataset_val.load_auto(args.dataset, "val")
dataset_val.prepare()
# print("Running COCO evaluation on {} images.".format(args.limit))
evaluate(model, dataset_val)
else:
print("'{}' is not recognized. "
"Use 'train' or 'overlay'".format(args.command))
|
[
"numpy.sum",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"numpy.arange",
"mrcnn.model.MaskRCNN",
"os.path.join",
"sys.path.append",
"mrcnn.utils.download_trained_weights",
"os.path.abspath",
"os.path.exists",
"datetime.datetime.now",
"numpy.stack",
"math.isnan",
"os.listdir",
"numpy.all",
"numpy.logical_and",
"numpy.zeros",
"time.time",
"cv2.VideoCapture",
"numpy.where",
"numpy.logical_or"
] |
[((1002, 1024), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (1017, 1024), False, 'import os\n'), ((1045, 1070), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (1060, 1070), False, 'import sys\n'), ((1262, 1305), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (1274, 1305), False, 'import os\n'), ((1433, 1463), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (1445, 1463), False, 'import os\n'), ((9139, 9204), 'numpy.zeros', 'np.zeros', (['[overlay.shape[0], overlay.shape[1], 5]'], {'dtype': 'np.uint8'}), '([overlay.shape[0], overlay.shape[1], 5], dtype=np.uint8)\n', (9147, 9204), True, 'import numpy as np\n'), ((10876, 10887), 'time.time', 'time.time', ([], {}), '()\n', (10885, 10887), False, 'import time\n'), ((10952, 10963), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (10960, 10963), True, 'import numpy as np\n'), ((13476, 13578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Mask R-CNN to detect classes for autonomous driving."""'}), "(description=\n 'Train Mask R-CNN to detect classes for autonomous driving.')\n", (13499, 13578), False, 'import argparse\n'), ((3112, 3145), 'os.path.join', 'os.path.join', (['dataset_dir', 'subset'], {}), '(dataset_dir, subset)\n', (3124, 3145), False, 'import os\n'), ((3163, 3186), 'os.listdir', 'os.listdir', (['dataset_dir'], {}), '(dataset_dir)\n', (3173, 3186), False, 'import os\n'), ((3987, 4024), 'os.path.join', 'os.path.join', (['dataset_dir', '"""../masks"""'], {}), "(dataset_dir, '../masks')\n", (3999, 4024), False, 'import os\n'), ((4177, 4214), 'numpy.all', 'np.all', (['(image == (0, 255, 0))'], {'axis': '(-1)'}), '(image == (0, 255, 0), axis=-1)\n', (4183, 4214), True, 'import numpy as np\n'), ((4236, 4275), 'numpy.all', 'np.all', (['(image == (255, 0, 255))'], {'axis': '(-1)'}), '(image == (255, 0, 255), axis=-1)\n', (4242, 4275), True, 'import numpy as np\n'), ((4294, 4333), 'numpy.all', 'np.all', (['(image == (0, 255, 255))'], {'axis': '(-1)'}), '(image == (0, 255, 255), axis=-1)\n', (4300, 4333), True, 'import numpy as np\n'), ((4355, 4392), 'numpy.all', 'np.all', (['(image == (255, 0, 0))'], {'axis': '(-1)'}), '(image == (255, 0, 0), axis=-1)\n', (4361, 4392), True, 'import numpy as np\n'), ((4416, 4455), 'numpy.all', 'np.all', (['(image == (255, 255, 0))'], {'axis': '(-1)'}), '(image == (255, 255, 0), axis=-1)\n', (4422, 4455), True, 'import numpy as np\n'), ((11184, 11195), 'time.time', 'time.time', ([], {}), '()\n', (11193, 11195), False, 'import time\n'), ((11770, 11802), 'numpy.logical_and', 'np.logical_and', (['gt_mask', 'pd_mask'], {}), '(gt_mask, pd_mask)\n', (11784, 11802), True, 'import numpy as np\n'), ((11819, 11850), 'numpy.logical_or', 'np.logical_or', (['gt_mask', 'pd_mask'], {}), '(gt_mask, pd_mask)\n', (11832, 11850), True, 'import numpy as np\n'), ((11966, 11977), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (11974, 11977), True, 'import numpy as np\n'), ((15554, 15624), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""training"""', 'config': 'config', 'model_dir': 'args.logs'}), "(mode='training', config=config, model_dir=args.logs)\n", (15571, 15624), True, 'from mrcnn import model as modellib, utils\n'), ((15685, 15756), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'config', 'model_dir': 'args.logs'}), "(mode='inference', config=config, 
model_dir=args.logs)\n", (15702, 15756), True, 'from mrcnn import model as modellib, utils\n'), ((3297, 3325), 'os.path.join', 'os.path.join', (['dataset_dir', 'i'], {}), '(dataset_dir, i)\n', (3309, 3325), False, 'import os\n'), ((5903, 5930), 'numpy.stack', 'np.stack', (['(m, m, m)'], {'axis': '(2)'}), '((m, m, m), axis=2)\n', (5911, 5930), True, 'import numpy as np\n'), ((7056, 7095), 'numpy.stack', 'np.stack', (['(image, image, image)'], {'axis': '(2)'}), '((image, image, image), axis=2)\n', (7064, 7095), True, 'import numpy as np\n'), ((7342, 7365), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7363, 7365), False, 'import datetime\n'), ((7496, 7524), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (7512, 7524), False, 'import cv2\n'), ((11269, 11280), 'time.time', 'time.time', ([], {}), '()\n', (11278, 11280), False, 'import time\n'), ((11520, 11543), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11541, 11543), False, 'import datetime\n'), ((11871, 11891), 'numpy.sum', 'np.sum', (['intersection'], {}), '(intersection)\n', (11877, 11891), True, 'import numpy as np\n'), ((11894, 11907), 'numpy.sum', 'np.sum', (['union'], {}), '(union)\n', (11900, 11907), True, 'import numpy as np\n'), ((12052, 12102), 'numpy.logical_and', 'np.logical_and', (['gt_mask[:, :, j]', 'pd_mask[:, :, j]'], {}), '(gt_mask[:, :, j], pd_mask[:, :, j])\n', (12066, 12102), True, 'import numpy as np\n'), ((12116, 12165), 'numpy.logical_or', 'np.logical_or', (['gt_mask[:, :, j]', 'pd_mask[:, :, j]'], {}), '(gt_mask[:, :, j], pd_mask[:, :, j])\n', (12129, 12165), True, 'import numpy as np\n'), ((13220, 13231), 'time.time', 'time.time', ([], {}), '()\n', (13229, 13231), False, 'import time\n'), ((15953, 15981), 'os.path.exists', 'os.path.exists', (['weights_path'], {}), '(weights_path)\n', (15967, 15981), False, 'import os\n'), ((15995, 16039), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['weights_path'], {}), '(weights_path)\n', (16025, 16039), False, 'from mrcnn import model as modellib, utils\n'), ((4472, 4543), 'numpy.stack', 'np.stack', (['(lane, pedestrian, vehicle, sign_board, street_light)'], {'axis': '(2)'}), '((lane, pedestrian, vehicle, sign_board, street_light), axis=2)\n', (4480, 4543), True, 'import numpy as np\n'), ((4580, 4595), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (4589, 4595), True, 'import numpy as np\n'), ((7798, 7821), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7819, 7821), False, 'import datetime\n'), ((7902, 7933), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (7924, 7933), False, 'import cv2\n'), ((12189, 12202), 'numpy.sum', 'np.sum', (['inter'], {}), '(inter)\n', (12195, 12202), True, 'import numpy as np\n'), ((12205, 12215), 'numpy.sum', 'np.sum', (['un'], {}), '(un)\n', (12211, 12215), True, 'import numpy as np\n'), ((12235, 12254), 'math.isnan', 'isnan', (['class_iou[j]'], {}), '(class_iou[j])\n', (12240, 12254), False, 'from math import isnan\n'), ((9375, 9438), 'numpy.where', 'np.where', (['m', '(True)', 'pd_mask[:, :, class_ids[i] - 1:class_ids[i]]'], {}), '(m, True, pd_mask[:, :, class_ids[i] - 1:class_ids[i]])\n', (9383, 9438), True, 'import numpy as np\n'), ((6019, 6058), 'numpy.where', 'np.where', (['m', '(115, 255, 115)', 'overlayed'], {}), '(m, (115, 255, 115), overlayed)\n', (6027, 6058), True, 'import numpy as np\n'), ((6140, 6179), 'numpy.where', 'np.where', (['m', '(255, 115, 
255)', 'overlayed'], {}), '(m, (255, 115, 255), overlayed)\n', (6148, 6179), True, 'import numpy as np\n'), ((6261, 6300), 'numpy.where', 'np.where', (['m', '(115, 255, 255)', 'overlayed'], {}), '(m, (115, 255, 255), overlayed)\n', (6269, 6300), True, 'import numpy as np\n'), ((6382, 6421), 'numpy.where', 'np.where', (['m', '(255, 115, 115)', 'overlayed'], {}), '(m, (255, 115, 115), overlayed)\n', (6390, 6421), True, 'import numpy as np\n'), ((6503, 6542), 'numpy.where', 'np.where', (['m', '(255, 255, 115)', 'overlayed'], {}), '(m, (255, 255, 115), overlayed)\n', (6511, 6542), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
from skimage.viewer import ImageViewer
def remove_rows(image, rows, cols):
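    # Halves the image height by keeping every second row (simple vertical downsampling).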
newrows = int(rows / 2)
newimg = np.zeros((newrows, cols), np.uint8)
for r in range(1, newrows + 1):
newimg[r - 1:r, :] = image[r * 2 - 1:r * 2, :]
return newimg
img = cv2.imread('pirate.jpg', cv2.IMREAD_GRAYSCALE)
print(img.shape)
img = remove_rows(img, img.shape[0], img.shape[1])
viewer = ImageViewer(img)
viewer.show()
|
[
"cv2.imread",
"skimage.viewer.ImageViewer",
"numpy.zeros"
] |
[((301, 347), 'cv2.imread', 'cv2.imread', (['"""pirate.jpg"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('pirate.jpg', cv2.IMREAD_GRAYSCALE)\n", (311, 347), False, 'import cv2\n'), ((426, 442), 'skimage.viewer.ImageViewer', 'ImageViewer', (['img'], {}), '(img)\n', (437, 442), False, 'from skimage.viewer import ImageViewer\n'), ((148, 183), 'numpy.zeros', 'np.zeros', (['(newrows, cols)', 'np.uint8'], {}), '((newrows, cols), np.uint8)\n', (156, 183), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as p
import numpy as n
import pylab
import scipy.stats as stats
import networkx as nwx
import glob
import builtins
from matplotlib.pyplot import margins
import os.path
import json
from sklearn import svm, cross_validation, datasets
def classify(data, cls):
    y = data[0, :]
    # assuming samples are stored column-wise (first row = labels, remaining rows =
    # features), transpose so X is (n_samples, n_features) as sklearn expects
    X = data[1:, :].T
    if cls == 'svm':
        model = svm.SVC(kernel='linear')
        # split the data 80/20, fit on the training part and return the held-out accuracy
        X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
        model.fit(X_train, y_train)
        return model.score(X_test, y_test)
for path in sorted(glob.glob('motive.*.csv')):
data = n.genfromtxt(path, delimiter=',')
svmresult = classify(data, 'svm')
for path in sorted(glob.glob('motive.*.csv')):
data = n.genfromtxt(path, delimiter=',')
svmresult = classify(data, 'svm')
ax = p.subplot(111)
ax.plot(data[:, :], alpha=0.5)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(True)
ax.spines["left"].set_visible(True)
ax.get_xaxis().set_tick_params(which='both', top='off')
ax.set_xlabel('iterations')
ax.set_ylabel('perturbation')
p.savefig('am.perturbation.png')
|
[
"matplotlib.pyplot.subplot",
"sklearn.cross_validation.train_test_split",
"sklearn.svm.SVC",
"numpy.genfromtxt",
"matplotlib.use",
"glob.glob",
"matplotlib.pyplot.savefig"
] |
[((50, 64), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (57, 64), True, 'import matplotlib as mpl\n'), ((870, 884), 'matplotlib.pyplot.subplot', 'p.subplot', (['(111)'], {}), '(111)\n', (879, 884), True, 'import matplotlib.pyplot as p\n'), ((1182, 1214), 'matplotlib.pyplot.savefig', 'p.savefig', (['"""am.perturbation.png"""'], {}), "('am.perturbation.png')\n", (1191, 1214), True, 'import matplotlib.pyplot as p\n'), ((618, 643), 'glob.glob', 'glob.glob', (['"""motive.*.csv"""'], {}), "('motive.*.csv')\n", (627, 643), False, 'import glob\n'), ((657, 690), 'numpy.genfromtxt', 'n.genfromtxt', (['path'], {'delimiter': '""","""'}), "(path, delimiter=',')\n", (669, 690), True, 'import numpy as n\n'), ((749, 774), 'glob.glob', 'glob.glob', (['"""motive.*.csv"""'], {}), "('motive.*.csv')\n", (758, 774), False, 'import glob\n'), ((788, 821), 'numpy.genfromtxt', 'n.genfromtxt', (['path'], {'delimiter': '""","""'}), "(path, delimiter=',')\n", (800, 821), True, 'import numpy as n\n'), ((433, 457), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (440, 457), False, 'from sklearn import svm, cross_validation, datasets\n'), ((502, 556), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (535, 556), False, 'from sklearn import svm, cross_validation, datasets\n')]
|
# MIT License.
# Copyright (c) 2021 by BioicDL. All rights reserved.
# Created by LiuXb on 2021/1/5
# -*- coding:utf-8 -*-
"""
@Modified:
@Description:
"""
import time
import cv2
import numpy as np
class BackgroundDetector(object):
def __init__(self):
self.fgmask = None
self.fgbg = None
def diffGround(self, groundImg, currrentImg, img_threshold=10, show_mask=False):
""" generate mask from a background image"""
# transfer to gray image
groundImg_gray = cv2.cvtColor(groundImg, cv2.COLOR_BGR2GRAY)
groundBlur = cv2.GaussianBlur(groundImg_gray, (3, 3), 1)
groundBlur.dtype = 'int16'
currrentImg_gray = cv2.cvtColor(currrentImg, cv2.COLOR_BGR2GRAY)
currrentImgBlur = cv2.GaussianBlur(currrentImg_gray, (3, 3), 1)
currrentImgBlur.dtype = 'int16'
# subtraction
dGrayBlur = abs(groundBlur-currrentImgBlur)
dGrayBlur.dtype = 'uint8'
dGrayMidBlur = cv2.medianBlur(dGrayBlur, 5)
ret, thresh = cv2.threshold(dGrayMidBlur, img_threshold, 255, cv2.THRESH_BINARY)
if show_mask:
cv2.imshow('diff img', dGrayMidBlur)
cv2.imshow('binary img from diff', thresh)
cv2.waitKey()
return thresh
def filterColor(self, currrentImg, lower=np.array([10, 20, 0]), upper=np.array([60, 80, 40]), show_result=False):
""" BGR channels"""
mask = cv2.inRange(currrentImg, lower, upper)
mask = cv2.bitwise_not(mask)
if show_result:
cv2.imshow('binary img from color', mask)
cv2.waitKey()
return mask
def multiFrameLoader(self, ref_video='outpy.avi', ref_num=500, mog_threshold=20):
""" load background video"""
cap = cv2.VideoCapture(ref_video)
self.fgbg = cv2.createBackgroundSubtractorMOG2(history=ref_num, detectShadows=True, varThreshold=mog_threshold)
cnt = 0
while (cnt < ref_num):
ret, frame = cap.read()
if not ret:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
self.fgmask = self.fgbg.apply(gray)
# self.fgmask = cv2.medianBlur(self.fgmask, 5)
def multiFrameFilter(self, color_img, show_mask=False):
""" create Gaussian Mixture Model from multi images as background"""
gray = cv2.cvtColor(color_img, cv2.COLOR_BGR2GRAY)
mask = self.fgbg.apply(gray, self.fgmask, 0)
# median filter
mask = cv2.medianBlur(mask, 5)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
mask = cv2.dilate(mask, kernel)
mask = cv2.erode(mask, kernel, iterations=1)
if show_mask:
cv2.imshow('binary img from mog', mask)
cv2.waitKey()
return mask
def grabCut_rect(self, color_img, rect=[200, 0, 900, 720]):
""" rect = [col_min, row_min, col_max, row_max]"""
mask = np.zeros(color_img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
# rect = (200, 0, 900, 720)
cv2.grabCut(color_img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
return mask2
def grabCut_mask(self, color_img, mask):
# wherever it is marked white (sure foreground), change mask=1
# wherever it is marked black (sure background), change mask=0
mask[mask == 0] = 0
mask[mask != 0] = 1
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
mask, bgdModel, fgdModel = cv2.grabCut(color_img, mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
return mask
def getConnectedDomain(self, binary_img, connectivity=4, region_area=1000, show_label=False):
""" obtain connected domain"""
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(binary_img, connectivity, cv2.CV_32S)
# delete small regions
label_index = []
for i in range(num_labels):
if stats[i][4] < region_area:
labels[labels == i] = 0
else:
label_index.append(i)
# Map component labels to hue val, 0-179 is the hue range in OpenCV
if np.max(labels) == 0:
label_hue = np.uint8(labels)
else:
label_hue = np.uint8(179*labels/np.max(labels))
blank_ch = 255*np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
# Converting cvt to BGR
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue == 0] = 0
if show_label:
cv2.imshow('color labels', labeled_img)
cv2.waitKey()
return labels, label_index, labeled_img
def getConvexHull(self, img, show_convex=False):
# convex hull
result = cv2.findContours(np.uint8(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        # hierarchy[0][i][3] is the index of the parent contour; -1 means contour i is
        # an outermost contour. len(contours[i]) is the number of points in contour i.
contours = []
if len(result) == 2:
contours = result[0]
hierarchy = result[1]
elif len(result) == 3:
contours = result[1]
hierarchy = result[2]
hull = []
for i in range(len(contours)):
# creating convex hull object for each contour
hull.append(cv2.convexHull(contours[i], False))
if show_convex:
# create an empty black image
drawing = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
# draw contours and hull points
for i in range(len(contours)):
color_contours = (0, 255, 0) # green - color for contours
color = (255, 0, 0) # blue - color for convex hull
# draw ith contour
cv2.drawContours(drawing, contours, i, color_contours, 1, 8, hierarchy)
# draw ith convex hull object
cv2.drawContours(drawing, hull, i, color, 1, 8)
cv2.imshow('convex', drawing)
cv2.waitKey()
return hull
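# Illustrative sketch (not part of the original class): the grabCut_rect / grabCut_mask
# methods above are not exercised in the demo block below. A typical refinement step
# could look like this; the rectangle values and the uint8 rough_mask are placeholders.
def demo_grabcut_refine(detector, color_img, rough_mask=None):
    if rough_mask is None:
        # coarse segmentation: keep everything inside a rough bounding rectangle
        return detector.grabCut_rect(color_img, rect=[200, 0, 900, 720])
    # refine an existing uint8 mask (non-zero pixels are treated as foreground seeds)
    return detector.grabCut_mask(color_img, rough_mask.copy())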
if __name__ == '__main__':
from deepclaw.driver.sensors.camera.Realsense_L515 import Realsense
camera = Realsense('./configs/basic_config/camera_rs_d435_c1.yaml')
frame = camera.get_frame()
color = frame.color_image[0]
time.sleep(1)
frame = camera.get_frame()
color = frame.color_image[0]
save_ref_img = False
if save_ref_img:
cv2.imwrite('./projects/ICRA2020/ref_img.jpg', color)
refImg = cv2.imread('./projects/ICRA2020/ref_img.jpg')
cv2.imshow("a", refImg)
cv2.waitKey()
bd_test = BackgroundDetector()
# generate mask
# thresh = bd_test.diffGround(refImg, color, img_threshold=20, show_mask=True)
thresh = bd_test.filterColor(color, show_result=True)
# bd_test.multiFrameLoader(ref_video='d435qi_v40.avi', ref_num=500, mog_threshold=16)
# thresh = bd_test.multiFrameFilter(color, show_mask=True)
labels, labels_index, color_labels = bd_test.getConnectedDomain(thresh, show_label=True, region_area=2000)
hul = bd_test.getConvexHull(labels, show_convex=True)
|
[
"cv2.GaussianBlur",
"cv2.medianBlur",
"deepclaw.driver.sensors.camera.Realsense_L515.Realsense",
"cv2.erode",
"cv2.imshow",
"cv2.inRange",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.max",
"cv2.drawContours",
"numpy.uint8",
"cv2.bitwise_not",
"numpy.ones_like",
"cv2.waitKey",
"time.sleep",
"cv2.connectedComponentsWithStats",
"cv2.convexHull",
"cv2.merge",
"cv2.grabCut",
"cv2.createBackgroundSubtractorMOG2",
"cv2.getStructuringElement",
"cv2.threshold",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.imread",
"numpy.where",
"numpy.array"
] |
[((6489, 6547), 'deepclaw.driver.sensors.camera.Realsense_L515.Realsense', 'Realsense', (['"""./configs/basic_config/camera_rs_d435_c1.yaml"""'], {}), "('./configs/basic_config/camera_rs_d435_c1.yaml')\n", (6498, 6547), False, 'from deepclaw.driver.sensors.camera.Realsense_L515 import Realsense\n'), ((6616, 6629), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6626, 6629), False, 'import time\n'), ((6816, 6861), 'cv2.imread', 'cv2.imread', (['"""./projects/ICRA2020/ref_img.jpg"""'], {}), "('./projects/ICRA2020/ref_img.jpg')\n", (6826, 6861), False, 'import cv2\n'), ((6866, 6889), 'cv2.imshow', 'cv2.imshow', (['"""a"""', 'refImg'], {}), "('a', refImg)\n", (6876, 6889), False, 'import cv2\n'), ((6894, 6907), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (6905, 6907), False, 'import cv2\n'), ((510, 553), 'cv2.cvtColor', 'cv2.cvtColor', (['groundImg', 'cv2.COLOR_BGR2GRAY'], {}), '(groundImg, cv2.COLOR_BGR2GRAY)\n', (522, 553), False, 'import cv2\n'), ((575, 618), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['groundImg_gray', '(3, 3)', '(1)'], {}), '(groundImg_gray, (3, 3), 1)\n', (591, 618), False, 'import cv2\n'), ((681, 726), 'cv2.cvtColor', 'cv2.cvtColor', (['currrentImg', 'cv2.COLOR_BGR2GRAY'], {}), '(currrentImg, cv2.COLOR_BGR2GRAY)\n', (693, 726), False, 'import cv2\n'), ((753, 798), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['currrentImg_gray', '(3, 3)', '(1)'], {}), '(currrentImg_gray, (3, 3), 1)\n', (769, 798), False, 'import cv2\n'), ((970, 998), 'cv2.medianBlur', 'cv2.medianBlur', (['dGrayBlur', '(5)'], {}), '(dGrayBlur, 5)\n', (984, 998), False, 'import cv2\n'), ((1022, 1088), 'cv2.threshold', 'cv2.threshold', (['dGrayMidBlur', 'img_threshold', '(255)', 'cv2.THRESH_BINARY'], {}), '(dGrayMidBlur, img_threshold, 255, cv2.THRESH_BINARY)\n', (1035, 1088), False, 'import cv2\n'), ((1309, 1330), 'numpy.array', 'np.array', (['[10, 20, 0]'], {}), '([10, 20, 0])\n', (1317, 1330), True, 'import numpy as np\n'), ((1338, 1360), 'numpy.array', 'np.array', (['[60, 80, 40]'], {}), '([60, 80, 40])\n', (1346, 1360), True, 'import numpy as np\n'), ((1425, 1463), 'cv2.inRange', 'cv2.inRange', (['currrentImg', 'lower', 'upper'], {}), '(currrentImg, lower, upper)\n', (1436, 1463), False, 'import cv2\n'), ((1479, 1500), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (1494, 1500), False, 'import cv2\n'), ((1763, 1790), 'cv2.VideoCapture', 'cv2.VideoCapture', (['ref_video'], {}), '(ref_video)\n', (1779, 1790), False, 'import cv2\n'), ((1811, 1914), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {'history': 'ref_num', 'detectShadows': '(True)', 'varThreshold': 'mog_threshold'}), '(history=ref_num, detectShadows=True,\n varThreshold=mog_threshold)\n', (1845, 1914), False, 'import cv2\n'), ((2359, 2402), 'cv2.cvtColor', 'cv2.cvtColor', (['color_img', 'cv2.COLOR_BGR2GRAY'], {}), '(color_img, cv2.COLOR_BGR2GRAY)\n', (2371, 2402), False, 'import cv2\n'), ((2495, 2518), 'cv2.medianBlur', 'cv2.medianBlur', (['mask', '(5)'], {}), '(mask, 5)\n', (2509, 2518), False, 'import cv2\n'), ((2536, 2585), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(5, 5)'], {}), '(cv2.MORPH_RECT, (5, 5))\n', (2561, 2585), False, 'import cv2\n'), ((2601, 2625), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel'], {}), '(mask, kernel)\n', (2611, 2625), False, 'import cv2\n'), ((2641, 2678), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(1)'}), '(mask, kernel, iterations=1)\n', (2650, 2678), False, 'import cv2\n'), ((2939, 2978), 
'numpy.zeros', 'np.zeros', (['color_img.shape[:2]', 'np.uint8'], {}), '(color_img.shape[:2], np.uint8)\n', (2947, 2978), True, 'import numpy as np\n'), ((2998, 3027), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (3006, 3027), True, 'import numpy as np\n'), ((3047, 3076), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (3055, 3076), True, 'import numpy as np\n'), ((3121, 3206), 'cv2.grabCut', 'cv2.grabCut', (['color_img', 'mask', 'rect', 'bgdModel', 'fgdModel', '(5)', 'cv2.GC_INIT_WITH_RECT'], {}), '(color_img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT\n )\n', (3132, 3206), False, 'import cv2\n'), ((3560, 3589), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (3568, 3589), True, 'import numpy as np\n'), ((3609, 3638), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (3617, 3638), True, 'import numpy as np\n'), ((3674, 3759), 'cv2.grabCut', 'cv2.grabCut', (['color_img', 'mask', 'None', 'bgdModel', 'fgdModel', '(5)', 'cv2.GC_INIT_WITH_MASK'], {}), '(color_img, mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK\n )\n', (3685, 3759), False, 'import cv2\n'), ((4033, 4103), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['binary_img', 'connectivity', 'cv2.CV_32S'], {}), '(binary_img, connectivity, cv2.CV_32S)\n', (4065, 4103), False, 'import cv2\n'), ((4627, 4669), 'cv2.merge', 'cv2.merge', (['[label_hue, blank_ch, blank_ch]'], {}), '([label_hue, blank_ch, blank_ch])\n', (4636, 4669), False, 'import cv2\n'), ((4724, 4768), 'cv2.cvtColor', 'cv2.cvtColor', (['labeled_img', 'cv2.COLOR_HSV2BGR'], {}), '(labeled_img, cv2.COLOR_HSV2BGR)\n', (4736, 4768), False, 'import cv2\n'), ((6748, 6801), 'cv2.imwrite', 'cv2.imwrite', (['"""./projects/ICRA2020/ref_img.jpg"""', 'color'], {}), "('./projects/ICRA2020/ref_img.jpg', color)\n", (6759, 6801), False, 'import cv2\n'), ((1123, 1159), 'cv2.imshow', 'cv2.imshow', (['"""diff img"""', 'dGrayMidBlur'], {}), "('diff img', dGrayMidBlur)\n", (1133, 1159), False, 'import cv2\n'), ((1172, 1214), 'cv2.imshow', 'cv2.imshow', (['"""binary img from diff"""', 'thresh'], {}), "('binary img from diff', thresh)\n", (1182, 1214), False, 'import cv2\n'), ((1227, 1240), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1238, 1240), False, 'import cv2\n'), ((1537, 1578), 'cv2.imshow', 'cv2.imshow', (['"""binary img from color"""', 'mask'], {}), "('binary img from color', mask)\n", (1547, 1578), False, 'import cv2\n'), ((1591, 1604), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1602, 1604), False, 'import cv2\n'), ((2059, 2098), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (2071, 2098), False, 'import cv2\n'), ((2714, 2753), 'cv2.imshow', 'cv2.imshow', (['"""binary img from mog"""', 'mask'], {}), "('binary img from mog', mask)\n", (2724, 2753), False, 'import cv2\n'), ((2766, 2779), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2777, 2779), False, 'import cv2\n'), ((4422, 4436), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (4428, 4436), True, 'import numpy as np\n'), ((4467, 4483), 'numpy.uint8', 'np.uint8', (['labels'], {}), '(labels)\n', (4475, 4483), True, 'import numpy as np\n'), ((4581, 4604), 'numpy.ones_like', 'np.ones_like', (['label_hue'], {}), '(label_hue)\n', (4593, 4604), True, 'import numpy as np\n'), ((4876, 4915), 'cv2.imshow', 'cv2.imshow', (['"""color labels"""', 'labeled_img'], {}), 
"('color labels', labeled_img)\n", (4886, 4915), False, 'import cv2\n'), ((4928, 4941), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (4939, 4941), False, 'import cv2\n'), ((5100, 5113), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (5108, 5113), True, 'import numpy as np\n'), ((5774, 5825), 'numpy.zeros', 'np.zeros', (['(img.shape[0], img.shape[1], 3)', 'np.uint8'], {}), '((img.shape[0], img.shape[1], 3), np.uint8)\n', (5782, 5825), True, 'import numpy as np\n'), ((6299, 6328), 'cv2.imshow', 'cv2.imshow', (['"""convex"""', 'drawing'], {}), "('convex', drawing)\n", (6309, 6328), False, 'import cv2\n'), ((6341, 6354), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (6352, 6354), False, 'import cv2\n'), ((3218, 3259), 'numpy.where', 'np.where', (['((mask == 2) | (mask == 0))', '(0)', '(1)'], {}), '((mask == 2) | (mask == 0), 0, 1)\n', (3226, 3259), True, 'import numpy as np\n'), ((3770, 3811), 'numpy.where', 'np.where', (['((mask == 2) | (mask == 0))', '(0)', '(1)'], {}), '((mask == 2) | (mask == 0), 0, 1)\n', (3778, 3811), True, 'import numpy as np\n'), ((5649, 5683), 'cv2.convexHull', 'cv2.convexHull', (['contours[i]', '(False)'], {}), '(contours[i], False)\n', (5663, 5683), False, 'import cv2\n'), ((6105, 6176), 'cv2.drawContours', 'cv2.drawContours', (['drawing', 'contours', 'i', 'color_contours', '(1)', '(8)', 'hierarchy'], {}), '(drawing, contours, i, color_contours, 1, 8, hierarchy)\n', (6121, 6176), False, 'import cv2\n'), ((6239, 6286), 'cv2.drawContours', 'cv2.drawContours', (['drawing', 'hull', 'i', 'color', '(1)', '(8)'], {}), '(drawing, hull, i, color, 1, 8)\n', (6255, 6286), False, 'import cv2\n'), ((4542, 4556), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (4548, 4556), True, 'import numpy as np\n')]
|
from pydata import Data, h5todata
import numpy as np
import os
import h5py
def test_Data(tmpdir):
o = Data(x=np.ones(3), y=np.ones(3), a=5, b='hh')
assert o.b=='hh'
assert o['a']==5
o.append(np.ones(5),np.ones(5))
o.save(os.path.join(tmpdir, 'test.txt'))
o.save(os.path.join(tmpdir, 'test.h5'))
def test_h5todata(tmpdir):
o = Data(x=np.ones(3), y=np.ones(3), a=5, b='hh')
o.save(os.path.join(tmpdir, 'test.h5'))
f = h5py.File(os.path.join(tmpdir, 'test.h5'),'r')
d = h5todata(f['data0000'])
assert d.a==5
assert d.x[0]==1
|
[
"os.path.join",
"pydata.h5todata",
"numpy.ones"
] |
[((510, 533), 'pydata.h5todata', 'h5todata', (["f['data0000']"], {}), "(f['data0000'])\n", (518, 533), False, 'from pydata import Data, h5todata\n'), ((208, 218), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (215, 218), True, 'import numpy as np\n'), ((219, 229), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (226, 229), True, 'import numpy as np\n'), ((242, 274), 'os.path.join', 'os.path.join', (['tmpdir', '"""test.txt"""'], {}), "(tmpdir, 'test.txt')\n", (254, 274), False, 'import os\n'), ((287, 318), 'os.path.join', 'os.path.join', (['tmpdir', '"""test.h5"""'], {}), "(tmpdir, 'test.h5')\n", (299, 318), False, 'import os\n'), ((414, 445), 'os.path.join', 'os.path.join', (['tmpdir', '"""test.h5"""'], {}), "(tmpdir, 'test.h5')\n", (426, 445), False, 'import os\n'), ((465, 496), 'os.path.join', 'os.path.join', (['tmpdir', '"""test.h5"""'], {}), "(tmpdir, 'test.h5')\n", (477, 496), False, 'import os\n'), ((114, 124), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (121, 124), True, 'import numpy as np\n'), ((128, 138), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (135, 138), True, 'import numpy as np\n'), ((364, 374), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (371, 374), True, 'import numpy as np\n'), ((378, 388), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (385, 388), True, 'import numpy as np\n')]
|
__author__ = "<NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
"""
Todo:
[Organized HIGH to LOW priority...]
Organize in Functions better -
Remove Redundant Code & Optimize -
"""
# Modules required
import pygame
import time
import numpy
from typing import List
# Initialize global variables
pygame.init()
SQ_DIM = 480
top = 48
left = 86
y = 32
screen = pygame.display.set_mode((SQ_DIM,SQ_DIM))
clock = pygame.time.Clock()
COLOR_INACTIVE = pygame.Color(156,156,156)
COLOR_ACTIVE = pygame.Color(255,255,255)
COLOR_TAKEN = pygame.Color(255,89,89)
ALLOWED = ['1','2','3','4','5','6','7','8','9']
FONT = pygame.font.Font("font/Roboto-Medium.ttf", 32)
FONT_SMALL = pygame.font.Font("font/Roboto-Medium.ttf", 16)
# Predefined Sudoku game boards
def initialboard(difficulty: int = 3) -> List:
if difficulty == 1:
easy_1 = [[(0,1),1],[(0,3),9],[(0,4),4],[(0,7),6],
[(1,1),2],[(1,3),7],[(1,5),6],[(1,6),1],[(1,8),3],
[(2,0),6],[(2,2),9],[(2,3),1],[(2,6),7],[(2,8),4],
[(3,2),7],[(3,3),4],
[(4,0),4],[(4,2),3],[(4,6),8],[(4,8),9],
[(5,5),8],[(5,6),4],
[(6,0),9],[(6,2),6],[(6,5),4],[(6,6),2],[(6,8),7],
[(7,0),2],[(7,2),1],[(7,3),6],[(7,5),5],[(7,7),3],
[(8,1),7],[(8,4),2],[(8,5),9],[(8,7),4]]
return isomorphic_board(easy_1)
elif difficulty == 2:
med_1 = [[(0,0),6],[(0,1),4],[(0,3),9],
[(1,5),1],[(1,6),3],
[(2,8),2],
[(3,1),3],[(3,6),9],[(3,8),6],
[(4,0),1],[(4,6),7],[(4,7),5],
[(5,1),2],[(5,4),8],[(5,5),5],[(5,8),1],
[(6,5),8],
[(7,7),9],[(7,8),7],
[(8,0),2],[(8,1),7],[(8,2),9],[(8,7),6],[(8,8),4]]
return isomorphic_board(med_1)
elif difficulty == 3:
hard_1 = [[(0,3),4],
[(1,5),8],[(1,7),9],[(1,8),6],
[(2,4),5],[(2,5),3],[(2,7),8],
[(3,1),4],[(3,2),8],
[(4,0),2],[(4,4),4],[(4,5),9],[(4,8),1],
[(5,0),6],[(5,6),5],[(5,8),9],
[(6,0),4],[(6,3),1],[(6,6),7],
[(7,1),8],[(7,3),9],[(7,6),4],
[(8,1),1],[(8,4),7],[(8,7),2]]
return isomorphic_board(hard_1)
    else:
        # unknown difficulty values fall back to the easy predefined board
        return initialboard(1)
# Creates an isomorphic sudoku board
# Only symbol, row, column permutations implemented
# TODO: Implement block and stack permutations
def isomorphic_board(board: List) -> List:
iso_board = []
permute_symbols = numpy.random.permutation(9)
row_permutations = [numpy.random.permutation(range(3*i,3*(i+1))) for i in range(3)]
col_permutations = [numpy.random.permutation(range(3*i,3*(i+1))) for i in range(3)]
#block_permutation = numpy.random.permutation(range(3))
#stack_permutation = numpy.random.permutation(range(3))
for entry in board:
pos = entry[0]
val = entry[1]
r_perm = row_permutations[pos[0]//3]
c_perm = col_permutations[pos[1]//3]
iso_board.append([(r_perm.item(pos[0]%3),c_perm.item(pos[1]%3)),permute_symbols.item(val-1)+1])
return iso_board
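# Illustrative sketch (not part of the original code): relabelling the symbols of a
# valid row with a permutation, exactly as isomorphic_board() does with
# permute_symbols, still yields the digits 1..9, which is why the permuted board
# remains a valid Sudoku instance. This helper is for illustration only and is not
# called anywhere.
def _demo_symbol_relabel():
    perm = numpy.random.permutation(9)
    row = list(range(1, 10))
    relabelled = [perm.item(v - 1) + 1 for v in row]
    assert sorted(relabelled) == row
    return relabelled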
# Create number boxes and user input boxes based on the initial board chosen
def create_board(taken_positions: List, number_boxes: List, input_boxes: List, board: List, difficulty: int) -> None:
init_board = initialboard(difficulty)
for position in init_board:
pos = position[0]
taken_positions.append(pos)
num = position[1]
number_boxes.append(NumBox(left+pos[1]*(y+1),top+pos[0]*(y+1),y,y,board_coordinates=(pos[0],pos[1]),value=num,text=str(num)))
board[pos[0],pos[1]] = num
for i in range(9):
for j in range(9):
if (i,j) not in taken_positions:
input_boxes.append(InputBox(left+j*(y+1),top+i*(y+1),y,y,board_coordinates=(i,j)))
# Make the sudoku board look nice with borders and such
def borders(screen):
shift = 32
total = 296
for i in range(9):
pygame.draw.line(screen, COLOR_INACTIVE, (left,top+i*(shift+1)-1), (left+total,top+i*(shift+1)-1),4)
pygame.draw.line(screen, COLOR_ACTIVE, (left-1,top-1), (left+total+2,top-1),4)
pygame.draw.line(screen, COLOR_ACTIVE, (left,top), (left,top+total+2),4)
pygame.draw.line(screen, COLOR_ACTIVE, (left+total,top), (left+total,top+total+2),4)
pygame.draw.line(screen, COLOR_ACTIVE, (left,top+total), (left+total,top+total),4)
pygame.draw.line(screen, COLOR_ACTIVE, (left,top+3*(shift+1)-1), (left+total,top+3*(shift+1)-1),4)
pygame.draw.line(screen, COLOR_ACTIVE, (left,top+6*(shift+1)-1), (left+total,top+6*(shift+1)-1),4)
pygame.draw.line(screen, COLOR_ACTIVE, (left+3*(shift+1)-1,top), (left+3*(shift+1)-1,top+total),4)
pygame.draw.line(screen, COLOR_ACTIVE, (left+6*(shift+1)-1,top), (left+6*(shift+1)-1,top+total),4)
# Check whether the board has Sudoku Properties
# 1) Whole Board is Latin Square
# 2) Each subsquare has a distinct entries
def win(board: List) -> bool:
comp = numpy.array(list(range(1,10)))
# Latin Square Check
for i in range(9):
if not (numpy.array_equal(numpy.sort(board[i]),comp) and numpy.array_equal(numpy.sort(board[:,i]),comp)):
return False
# Subsquare Checks
for offset_x in range(0,7,3):
for offset_y in range(0,7,3):
box_xy = []
for smallbox_x in range(3):
for smallbox_y in range(3):
box_xy.append(board.item(offset_x+smallbox_x,offset_y+smallbox_y))
if not numpy.array_equal(numpy.sort(numpy.array(box_xy)),comp):
return False
return True
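# Illustrative note (not part of the original code): the sort-and-compare test in
# win() checks that a group of nine cells contains each digit exactly once; an
# equivalent set-based formulation is shown here for reference only.
def _is_complete_group(values):
    return set(int(v) for v in values) == set(range(1, 10))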
# Is this a valid number placement, i.e., does it maintain the Latin Square
# property and the subsquare property?
def is_taken(coord: tuple, num: int, board) -> bool:
# 0's are default values, do not check them
if num != 0:
# Latin Square rows
for i in range(9):
if board.item(i,coord[1]) == num and coord[0] != i:
return True
# Latin Square columns
for j in range(9):
if board.item(coord[0],j) == num and coord[1] != j:
return True
startx = coord[0]//3
starty = coord[1]//3
# Subsquare property?
for i in range(startx*3,startx*3+3,1):
for j in range(starty*3,starty*3+3,1):
if board.item(i,j) == num and coord[0] != i and coord[1] != j:
return True
return False
def find_first_empty_location(sboard) -> bool:
for r in range(9):
for c in range(9):
if sboard.item(r,c) == 0:
return (r,c)
return (-1,-1)
# Solve Sudoku by backtracking
def sudoku_backtracking_solver(sboard) -> bool:
loc = find_first_empty_location(sboard)
if loc[0] == loc[1] == -1:
return True
(row,col) = loc
for number in range(1,10):
if not is_taken((row,col),number,sboard):
sboard[row,col] = number
if sudoku_backtracking_solver(sboard):
return True
sboard[row,col] = 0
return False
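# Illustrative sketch (not part of the original code): the backtracking solver above
# works on any 9x9 numpy array with 0 marking empty cells, e.g. one filled from a
# predefined board; it mutates the array in place and returns True once solved.
def _demo_solve(difficulty=3):
    demo_board = numpy.zeros((9, 9), dtype=int)
    for (r, c), value in initialboard(difficulty):
        demo_board[r, c] = value
    solved = sudoku_backtracking_solver(demo_board)
    return solved, demo_board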
# Class defining user input boxes
class InputBox:
def __init__(self, x, y, w, h, text='', cursor_visible=True, max_string_length=1, board_coordinates=(0,0)):
self.rect = pygame.Rect(x, y, w, h)
self.color = COLOR_INACTIVE
self.text = text
self.txt_surface = FONT.render(text, True, self.color)
self.active = False
self.cursor_color = (0, 0, 1)
self.cursor_visible = cursor_visible
self.max_string_length = max_string_length # set to -1 for no limit
self.board_coordinates = board_coordinates
self.value = 0
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
if self.rect.collidepoint(event.pos):
self.active = not self.active
else:
self.active = False
self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
if event.type == pygame.KEYDOWN:
if self.active:
if event.key == pygame.K_RETURN:
print(self.text)
elif event.key == pygame.K_BACKSPACE:
self.text = self.text[:-1]
elif len(self.text) < self.max_string_length or self.max_string_length == -1:
self.text += event.unicode
if(self.text == ''):
self.value = 0
elif(self.text in ALLOWED):
self.value = int(self.text)
else:
self.text = ''
self.value = 0
self.txt_surface = FONT.render(self.text, True, self.color)
def get_attr(self):
return (self.board_coordinates,self.value)
def draw(self, screen):
screen.blit(self.txt_surface, (self.rect.x+6, self.rect.y-2))
pygame.draw.rect(screen, self.color, self.rect, 2)
# Number boxes from predefined board, not user interactive.
class NumBox:
def __init__(self, x, y, w, h, text='', value=0, board_coordinates=(0,0)):
self.rect = pygame.Rect(x, y, w, h)
self.text = text
self.color = COLOR_INACTIVE
self.hint = 0
self.board_coordinates = board_coordinates
self.value = value
self.txt_surface = FONT.render(text, True, self.color)
def draw(self, screen):
surf = self.txt_surface.get_rect()
surf.center = (self.rect.x+int(self.rect.w/2), (self.rect.y + int(self.rect.h/2)))
screen.blit(self.txt_surface, surf)
pygame.draw.rect(screen, self.color, self.rect, 2)
# Messages to inform player
class MessageBox:
def __init__(self, x, y, w, h, text, font=FONT):
self.rect = pygame.Rect(x, y, w, h)
self.text = text
self.color = COLOR_ACTIVE
self.hint = 0
self.font = font
self.txt_surface = font.render(text, True, self.color)
def __draw__(self, screen):
surf = self.txt_surface.get_rect()
surf.center = (self.rect.x+int(self.rect.w/2), (self.rect.y + int(self.rect.h/2)))
screen.blit(self.txt_surface, surf)
pygame.draw.rect(screen, self.color, self.rect, 2)
# Message to indicate whether the move just made was valid
class TextBox(MessageBox):
def __init__(self, x, y, w, h, text='',font=FONT_SMALL):
super().__init__(x,y,w,h,text,font)
def update(self,hint: bool):
if hint:
self.text="Try again"
else:
self.text="Go!"
self.txt_surface = self.font.render(self.text, True, self.color)
def draw(self, screen):
super().__draw__(screen)
# Message to indicate whether the board is properly completed
class WinBox(MessageBox):
def __init__(self, x, y, w, h, text='',font=FONT_SMALL):
super().__init__(x,y,w,h,text,font)
self.win = False
self.score_changed = False
def update(self,board):
if win(board):
self.text="You Win!"
self.win = True
else:
self.text="Not done"
self.txt_surface = self.font.render(self.text, True, self.color)
def draw(self, screen):
super().__draw__(screen)
class ScoreBox(MessageBox):
def __init__(self, x, y, w, h, text='',font=FONT_SMALL):
super().__init__(x,y,w,h,text,font)
self.value = 0
def update(self,move):
self.value += move
self.text = str(self.value)
self.txt_surface = self.font.render(self.text, True, self.color)
def draw(self, screen):
super().__draw__(screen)
def text_objects(text, font):
textSurface = font.render(text, True, pygame.Color(0,0,0))
return textSurface, textSurface.get_rect()
def button(msg,x,y,w,h,ic,ac,action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if x+w > mouse[0] > x and y+h > mouse[1] > y:
pygame.draw.rect(screen, ac,(x,y,w,h))
if click[0] == 1 and action != None:
difficulty = int(msg)
action(difficulty)
else:
pygame.draw.rect(screen, ic,(x,y,w,h))
textSurf, textRect = text_objects(msg, FONT_SMALL)
textRect.center = ( (x+int(w/2)), (y+int(h/2)) )
screen.blit(textSurf, textRect)
# Get the list of Highscores from the file: highscores.txt
def get_highscores():
scores = []
scoreBoxes = []
with open('data/highscores.txt') as f:
scores = f.readlines()
scores = sorted([int(score.rstrip()) for score in scores],reverse=True)
f.close()
space = 10
height = 20
i = 0
for score in scores:
scoreBoxes.append(MessageBox(200,330+space*i+height*(i-1),80,height,text="{}".format(score),font=FONT_SMALL))
i += 1
return scoreBoxes
# Update the highscores.txt file
def update_leaderboard(new_score: int) -> None:
scores = []
with open('data/highscores.txt') as f:
scores = f.readlines()
scores = sorted([int(score.rstrip()) for score in scores])
f.close()
if len(scores) != 0:
i = 0
if new_score <= scores[i]:
return
while new_score > scores[i] and i < len(scores)-1:
i += 1
if i > 0:
for j in range(i):
scores[j] = scores[j+1]
scores[i-1] = new_score
with open('data/highscores.txt','w') as f:
scores = sorted(scores,reverse=True)
f.seek(0)
f.truncate()
for score in scores:
f.write("{}\n".format(score))
f.close()
# Update user score
def update_score(lastboard,board,Hint,change_to_zero,changed_up_one,scorebox1,screen):
if numpy.array_equal(lastboard,board):
scorebox1.update(0)
elif (not numpy.array_equal(lastboard,board)) and (not change_to_zero):
(r,c) = matrix_not_equal(lastboard,board)
if (not Hint) and (not changed_up_one.item(r,c)):
scorebox1.update(1)
# Only allow a box to increase the score once
changed_up_one[r,c] = True
elif Hint:
scorebox1.update(-1)
else:
scorebox1.update(0)
else:
scorebox1.update(0)
scorebox1.draw(screen)
# Returns a (row, col) position at which same-shaped matrices A and B differ, or (-1, -1) otherwise
def matrix_not_equal(A,B):
row = -1
col = -1
if A.shape == B.shape:
(nrows,ncols) = A.shape
for i in range(nrows):
if not numpy.array_equal(A[i],B[i]):
row = i
for j in range(ncols):
if not numpy.array_equal(A[:,j],B[:,j]):
col = j
return (row,col)
else:
return (row,col)
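# Illustrative example (not part of the original code): with exactly one changed cell,
# matrix_not_equal() reports that cell's coordinates.
def _demo_matrix_not_equal():
    A = numpy.zeros((2, 2), dtype=int)
    B = A.copy()
    B[1, 0] = 5
    return matrix_not_equal(A, B)  # (1, 0)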
def main():
intro = True
while intro:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
else:
screen.fill((0, 0, 0))
sudoku = MessageBox(140,50,200,50,text="SUDOKU!")
sudoku.__draw__(screen)
selectdiff = MessageBox(165,150,150,30,text="Select Difficulty",font=FONT_SMALL)
selectdiff.__draw__(screen)
button("1",140,200,40,50,COLOR_INACTIVE,COLOR_ACTIVE,action=game)
button("2",220,200,40,50,COLOR_INACTIVE,COLOR_ACTIVE,action=game)
button("3",300,200,40,50,COLOR_INACTIVE,COLOR_ACTIVE,action=game)
highscores = MessageBox(165,270,150,30,text="Highscores",font=FONT_SMALL)
scoreBoxes = get_highscores()
for scoreBox in scoreBoxes:
scoreBox.__draw__(screen)
highscores.__draw__(screen)
pygame.display.update()
clock.tick(40)
return
def game(difficulty):
# Initialize board components
board = numpy.zeros((9,9),dtype=int)
Taken = numpy.zeros((9,9),dtype=bool)
# lastlastboard = numpy.zeros((9,9),dtype=int)
lastboard = numpy.zeros((9,9),dtype=int)
number_boxes = []
taken_positions = []
input_boxes = []
changed_up_one = numpy.zeros((9,9),dtype=bool)
create_board(taken_positions,number_boxes,input_boxes,board,difficulty)
sboard = numpy.copy(board)
sudoku_backtracking_solver(sboard)
print(sboard)
# Create Progress Messages
resetbox1 = WinBox(left,top+310,150,40,text='Not done',font=FONT)
hintbox1 = TextBox(left,top+360,150,40,text='GO!',font=FONT)
scorebox1 = ScoreBox(left+170,top+310,100,40,text='0',font=FONT)
# Run until user asks to quit
running = True
while running:
lastboard = numpy.copy(board)
change_to_zero = False
# Did user click window close button?
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
for box in input_boxes:
box.handle_event(event)
# Check whether moves were valid
for box in input_boxes:
(coord,number) = box.get_attr()
board[coord[0],coord[1]] = number
if lastboard[coord[0],coord[1]] != 0 and number == 0:
change_to_zero = True
toggle = is_taken(coord,number,board)
if toggle:
Taken[coord[0],coord[1]] = True
else:
Taken[coord[0],coord[1]] = False
# Draw the number the user inputed
screen.fill((0, 0, 0))
for numbox in number_boxes:
numbox.draw(screen)
for box in input_boxes:
box.draw(screen)
# Are there any invalid moves on the board?
Hint = numpy.any(Taken)
# Update Hint Message
hintbox1.update(Hint)
hintbox1.draw(screen)
# Update user score
update_score(lastboard,board,Hint,change_to_zero,changed_up_one,scorebox1,screen)
# Indicate to user whether game is finished
resetbox1.update(board)
resetbox1.draw(screen)
# Edit highscores if user won and score merits leaderboard
if resetbox1.win and not resetbox1.score_changed:
new_score = int(scorebox1.text)
update_leaderboard(new_score)
resetbox1.score_changed = True
borders(screen)
pygame.display.update()
clock.tick(40)
screen.fill((0, 0, 0))
pygame.display.update()
|
[
"pygame.draw.line",
"pygame.event.get",
"pygame.Rect",
"pygame.display.update",
"pygame.font.Font",
"pygame.mouse.get_pos",
"numpy.copy",
"pygame.display.set_mode",
"pygame.quit",
"pygame.mouse.get_pressed",
"pygame.draw.rect",
"pygame.init",
"numpy.sort",
"numpy.random.permutation",
"pygame.time.Clock",
"pygame.Color",
"numpy.zeros",
"numpy.any",
"numpy.array",
"numpy.array_equal"
] |
[((314, 327), 'pygame.init', 'pygame.init', ([], {}), '()\n', (325, 327), False, 'import pygame\n'), ((376, 417), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(SQ_DIM, SQ_DIM)'], {}), '((SQ_DIM, SQ_DIM))\n', (399, 417), False, 'import pygame\n'), ((425, 444), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (442, 444), False, 'import pygame\n'), ((462, 489), 'pygame.Color', 'pygame.Color', (['(156)', '(156)', '(156)'], {}), '(156, 156, 156)\n', (474, 489), False, 'import pygame\n'), ((503, 530), 'pygame.Color', 'pygame.Color', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (515, 530), False, 'import pygame\n'), ((543, 568), 'pygame.Color', 'pygame.Color', (['(255)', '(89)', '(89)'], {}), '(255, 89, 89)\n', (555, 568), False, 'import pygame\n'), ((622, 668), 'pygame.font.Font', 'pygame.font.Font', (['"""font/Roboto-Medium.ttf"""', '(32)'], {}), "('font/Roboto-Medium.ttf', 32)\n", (638, 668), False, 'import pygame\n'), ((682, 728), 'pygame.font.Font', 'pygame.font.Font', (['"""font/Roboto-Medium.ttf"""', '(16)'], {}), "('font/Roboto-Medium.ttf', 16)\n", (698, 728), False, 'import pygame\n'), ((2688, 2715), 'numpy.random.permutation', 'numpy.random.permutation', (['(9)'], {}), '(9)\n', (2712, 2715), False, 'import numpy\n'), ((4266, 4361), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_ACTIVE', '(left - 1, top - 1)', '(left + total + 2, top - 1)', '(4)'], {}), '(screen, COLOR_ACTIVE, (left - 1, top - 1), (left + total +\n 2, top - 1), 4)\n', (4282, 4361), False, 'import pygame\n'), ((4349, 4428), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_ACTIVE', '(left, top)', '(left, top + total + 2)', '(4)'], {}), '(screen, COLOR_ACTIVE, (left, top), (left, top + total + 2), 4)\n', (4365, 4428), False, 'import pygame\n'), ((4426, 4526), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_ACTIVE', '(left + total, top)', '(left + total, top + total + 2)', '(4)'], {}), '(screen, COLOR_ACTIVE, (left + total, top), (left + total, \n top + total + 2), 4)\n', (4442, 4526), False, 'import pygame\n'), ((4515, 4611), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_ACTIVE', '(left, top + total)', '(left + total, top + total)', '(4)'], {}), '(screen, COLOR_ACTIVE, (left, top + total), (left + total, \n top + total), 4)\n', (4531, 4611), False, 'import pygame\n'), ((4603, 4727), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_ACTIVE', '(left, top + 3 * (shift + 1) - 1)', '(left + total, top + 3 * (shift + 1) - 1)', '(4)'], {}), '(screen, COLOR_ACTIVE, (left, top + 3 * (shift + 1) - 1), (\n left + total, top + 3 * (shift + 1) - 1), 4)\n', (4619, 4727), False, 'import pygame\n'), ((4706, 4830), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_ACTIVE', '(left, top + 6 * (shift + 1) - 1)', '(left + total, top + 6 * (shift + 1) - 1)', '(4)'], {}), '(screen, COLOR_ACTIVE, (left, top + 6 * (shift + 1) - 1), (\n left + total, top + 6 * (shift + 1) - 1), 4)\n', (4722, 4830), False, 'import pygame\n'), ((4810, 4934), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_ACTIVE', '(left + 3 * (shift + 1) - 1, top)', '(left + 3 * (shift + 1) - 1, top + total)', '(4)'], {}), '(screen, COLOR_ACTIVE, (left + 3 * (shift + 1) - 1, top), (\n left + 3 * (shift + 1) - 1, top + total), 4)\n', (4826, 4934), False, 'import pygame\n'), ((4913, 5037), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_ACTIVE', '(left + 6 * (shift + 1) - 1, top)', '(left + 6 * (shift + 1) - 1, top + total)', '(4)'], {}), '(screen, COLOR_ACTIVE, (left 
+ 6 * (shift + 1) - 1, top), (\n left + 6 * (shift + 1) - 1, top + total), 4)\n', (4929, 5037), False, 'import pygame\n'), ((12012, 12034), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (12032, 12034), False, 'import pygame\n'), ((12047, 12073), 'pygame.mouse.get_pressed', 'pygame.mouse.get_pressed', ([], {}), '()\n', (12071, 12073), False, 'import pygame\n'), ((13909, 13944), 'numpy.array_equal', 'numpy.array_equal', (['lastboard', 'board'], {}), '(lastboard, board)\n', (13926, 13944), False, 'import numpy\n'), ((16060, 16090), 'numpy.zeros', 'numpy.zeros', (['(9, 9)'], {'dtype': 'int'}), '((9, 9), dtype=int)\n', (16071, 16090), False, 'import numpy\n'), ((16101, 16132), 'numpy.zeros', 'numpy.zeros', (['(9, 9)'], {'dtype': 'bool'}), '((9, 9), dtype=bool)\n', (16112, 16132), False, 'import numpy\n'), ((16198, 16228), 'numpy.zeros', 'numpy.zeros', (['(9, 9)'], {'dtype': 'int'}), '((9, 9), dtype=int)\n', (16209, 16228), False, 'import numpy\n'), ((16316, 16347), 'numpy.zeros', 'numpy.zeros', (['(9, 9)'], {'dtype': 'bool'}), '((9, 9), dtype=bool)\n', (16327, 16347), False, 'import numpy\n'), ((16435, 16452), 'numpy.copy', 'numpy.copy', (['board'], {}), '(board)\n', (16445, 16452), False, 'import numpy\n'), ((18565, 18588), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (18586, 18588), False, 'import pygame\n'), ((4160, 4285), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'COLOR_INACTIVE', '(left, top + i * (shift + 1) - 1)', '(left + total, top + i * (shift + 1) - 1)', '(4)'], {}), '(screen, COLOR_INACTIVE, (left, top + i * (shift + 1) - 1),\n (left + total, top + i * (shift + 1) - 1), 4)\n', (4176, 4285), False, 'import pygame\n'), ((7470, 7493), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (7481, 7493), False, 'import pygame\n'), ((9099, 9149), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self.color', 'self.rect', '(2)'], {}), '(screen, self.color, self.rect, 2)\n', (9115, 9149), False, 'import pygame\n'), ((9325, 9348), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (9336, 9348), False, 'import pygame\n'), ((9788, 9838), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self.color', 'self.rect', '(2)'], {}), '(screen, self.color, self.rect, 2)\n', (9804, 9838), False, 'import pygame\n'), ((9960, 9983), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (9971, 9983), False, 'import pygame\n'), ((10372, 10422), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self.color', 'self.rect', '(2)'], {}), '(screen, self.color, self.rect, 2)\n', (10388, 10422), False, 'import pygame\n'), ((11887, 11908), 'pygame.Color', 'pygame.Color', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (11899, 11908), False, 'import pygame\n'), ((12132, 12174), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'ac', '(x, y, w, h)'], {}), '(screen, ac, (x, y, w, h))\n', (12148, 12174), False, 'import pygame\n'), ((12299, 12341), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'ic', '(x, y, w, h)'], {}), '(screen, ic, (x, y, w, h))\n', (12315, 12341), False, 'import pygame\n'), ((14997, 15015), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (15013, 15015), False, 'import pygame\n'), ((16840, 16857), 'numpy.copy', 'numpy.copy', (['board'], {}), '(board)\n', (16850, 16857), False, 'import numpy\n'), ((16957, 16975), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (16973, 16975), False, 'import pygame\n'), ((17855, 17871), 'numpy.any', 'numpy.any', 
(['Taken'], {}), '(Taken)\n', (17864, 17871), False, 'import numpy\n'), ((18487, 18510), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (18508, 18510), False, 'import pygame\n'), ((13987, 14022), 'numpy.array_equal', 'numpy.array_equal', (['lastboard', 'board'], {}), '(lastboard, board)\n', (14004, 14022), False, 'import numpy\n'), ((14706, 14735), 'numpy.array_equal', 'numpy.array_equal', (['A[i]', 'B[i]'], {}), '(A[i], B[i])\n', (14723, 14735), False, 'import numpy\n'), ((14810, 14845), 'numpy.array_equal', 'numpy.array_equal', (['A[:, j]', 'B[:, j]'], {}), '(A[:, j], B[:, j])\n', (14827, 14845), False, 'import numpy\n'), ((15075, 15088), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (15086, 15088), False, 'import pygame\n'), ((15925, 15948), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (15946, 15948), False, 'import pygame\n'), ((5296, 5316), 'numpy.sort', 'numpy.sort', (['board[i]'], {}), '(board[i])\n', (5306, 5316), False, 'import numpy\n'), ((5345, 5368), 'numpy.sort', 'numpy.sort', (['board[:, i]'], {}), '(board[:, i])\n', (5355, 5368), False, 'import numpy\n'), ((5739, 5758), 'numpy.array', 'numpy.array', (['box_xy'], {}), '(box_xy)\n', (5750, 5758), False, 'import numpy\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
The basic algorithm simulation architecture of the C/C++ system
"""
import numpy as np
PRIME= np.array([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013,
1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091,
1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181,
1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277,
1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361,
1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451,
1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531,
1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609,
1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699,
1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789,
1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889,
1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997,
1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083,
2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161,
2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273,
2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357,
2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441,
2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551,
2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663,
2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729,
2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819,
2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917,
2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023,
3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137,
3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251,
3253, 3257, 3259, 3271]);
print(PRIME[::-1])
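# Illustrative sketch (not part of the original file): PRIME is sorted, so membership
# of a small integer can be checked with a binary search instead of a linear scan.
def is_tabulated_prime(n):
    i = np.searchsorted(PRIME, n)
    return bool(i < PRIME.size and PRIME[i] == n)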
|
[
"numpy.array"
] |
[((142, 2882), 'numpy.array', 'np.array', (['[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,\n 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, \n 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, \n 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, \n 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, \n 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, \n 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, \n 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, \n 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, \n 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, \n 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, \n 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, \n 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109,\n 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213,\n 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291,\n 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399,\n 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481,\n 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559,\n 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627,\n 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733,\n 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831,\n 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931,\n 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017,\n 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111,\n 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213,\n 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297,\n 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383,\n 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473,\n 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593,\n 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687,\n 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753,\n 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851,\n 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957,\n 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061,\n 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181,\n 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271]'], {}), '([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, \n 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,\n 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, \n 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, \n 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, \n 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, \n 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, \n 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, \n 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, \n 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, \n 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, \n 941, 947, 953, 967, 971, 977, 983, 991, 
997, 1009, 1013, 1019, 1021, \n 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097,\n 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193,\n 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283,\n 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373,\n 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459,\n 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549,\n 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619,\n 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721,\n 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811,\n 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907,\n 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003,\n 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089,\n 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203,\n 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287,\n 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377,\n 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459,\n 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579,\n 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677,\n 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741,\n 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837,\n 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939,\n 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041,\n 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167,\n 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257,\n 3259, 3271])\n', (150, 2882), True, 'import numpy as np\n')]
|
import numpy as np
import scipy as sp
import scipy.stats
from ngboost.distns import Normal, Laplace, LogNormal, LogLaplace
from ngboost.ngboost import NGBoost
from ngboost.scores import MLE, CRPS, MLE_SURV, CRPS_SURV
from ngboost.learners import default_tree_learner, default_linear_learner
from ngboost.evaluation import *
from sklearn.metrics import r2_score
from matplotlib import pyplot as plt
from argparse import ArgumentParser
if __name__ == '__main__':
argparser = ArgumentParser()
argparser.add_argument("--dist", type=str, default="Laplace")
argparser.add_argument("--noise-dist", type=str, default="Normal")
args = argparser.parse_args()
m, n = 1000, 50
if args.noise_dist == "Normal":
noise = np.random.randn(*(m, 1))
elif args.noise_dist == "Laplace":
noise = sp.stats.laplace.rvs(size=(m, 1))
beta = np.random.randn(n, 1)
X = np.random.randn(m, n) / np.sqrt(n)
Y = np.exp(X @ beta + 0.5 * noise)
print(X.shape, Y.shape)
dist = eval("Log" + args.dist)
ngb = NGBoost(n_estimators=50, learning_rate=0.5,
Dist=dist,
Base=default_linear_learner,
natural_gradient=False,
minibatch_frac=1.0,
Score=CRPS())
losses = ngb.fit(X, Y)
preds = ngb.pred_dist(X)
print(f"R2: {r2_score(Y, np.exp(preds.loc)):.4f}")
pctles, observed, slope, intercept = calibration_regression(preds, Y)
plt.figure(figsize = (8, 3))
plt.subplot(1, 2, 1)
plot_pit_histogram(pctles, observed)
plt.title("Original scale")
Y = np.log(Y)
dist = eval(args.dist)
ngb = NGBoost(n_estimators=50, learning_rate=0.5,
Dist=dist,
Base=default_linear_learner,
natural_gradient=False,
minibatch_frac=1.0,
Score=CRPS())
losses = ngb.fit(X, Y)
preds = ngb.pred_dist(X)
print(f"R2: {r2_score(Y, np.exp(preds.loc)):.4f}")
pctles, observed, slope, intercept = calibration_regression(preds, Y)
plt.subplot(1, 2, 2)
plot_pit_histogram(pctles, observed)
plt.title("Log-scale")
plt.tight_layout()
plt.savefig("./figures/pit_logscale.pdf")
plt.show()
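    # Illustrative sketch (not part of the original script): PIT values u = F(y) of a
    # correctly specified predictive distribution are uniform on [0, 1]; this synthetic
    # check uses the true distribution as the "model", so the values are uniform by
    # construction.
    u_demo = sp.stats.norm.cdf(np.random.randn(10000))
    print("synthetic PIT mean (should be close to 0.5): {:.3f}".format(u_demo.mean()))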
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"scipy.stats.laplace.rvs",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.log",
"numpy.random.randn",
"matplotlib.pyplot.figure",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"ngboost.scores.CRPS",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((480, 496), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (494, 496), False, 'from argparse import ArgumentParser\n'), ((866, 887), 'numpy.random.randn', 'np.random.randn', (['n', '(1)'], {}), '(n, 1)\n', (881, 887), True, 'import numpy as np\n'), ((939, 969), 'numpy.exp', 'np.exp', (['(X @ beta + 0.5 * noise)'], {}), '(X @ beta + 0.5 * noise)\n', (945, 969), True, 'import numpy as np\n'), ((1469, 1495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (1479, 1495), True, 'from matplotlib import pyplot as plt\n'), ((1502, 1522), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1513, 1522), True, 'from matplotlib import pyplot as plt\n'), ((1568, 1595), 'matplotlib.pyplot.title', 'plt.title', (['"""Original scale"""'], {}), "('Original scale')\n", (1577, 1595), True, 'from matplotlib import pyplot as plt\n'), ((1605, 1614), 'numpy.log', 'np.log', (['Y'], {}), '(Y)\n', (1611, 1614), True, 'import numpy as np\n'), ((2077, 2097), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2088, 2097), True, 'from matplotlib import pyplot as plt\n'), ((2143, 2165), 'matplotlib.pyplot.title', 'plt.title', (['"""Log-scale"""'], {}), "('Log-scale')\n", (2152, 2165), True, 'from matplotlib import pyplot as plt\n'), ((2170, 2188), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2186, 2188), True, 'from matplotlib import pyplot as plt\n'), ((2193, 2234), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figures/pit_logscale.pdf"""'], {}), "('./figures/pit_logscale.pdf')\n", (2204, 2234), True, 'from matplotlib import pyplot as plt\n'), ((2239, 2249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2247, 2249), True, 'from matplotlib import pyplot as plt\n'), ((741, 765), 'numpy.random.randn', 'np.random.randn', (['*(m, 1)'], {}), '(*(m, 1))\n', (756, 765), True, 'import numpy as np\n'), ((896, 917), 'numpy.random.randn', 'np.random.randn', (['m', 'n'], {}), '(m, n)\n', (911, 917), True, 'import numpy as np\n'), ((920, 930), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (927, 930), True, 'import numpy as np\n'), ((821, 854), 'scipy.stats.laplace.rvs', 'sp.stats.laplace.rvs', ([], {'size': '(m, 1)'}), '(size=(m, 1))\n', (841, 854), True, 'import scipy as sp\n'), ((1269, 1275), 'ngboost.scores.CRPS', 'CRPS', ([], {}), '()\n', (1273, 1275), False, 'from ngboost.scores import MLE, CRPS, MLE_SURV, CRPS_SURV\n'), ((1877, 1883), 'ngboost.scores.CRPS', 'CRPS', ([], {}), '()\n', (1881, 1883), False, 'from ngboost.scores import MLE, CRPS, MLE_SURV, CRPS_SURV\n'), ((1364, 1381), 'numpy.exp', 'np.exp', (['preds.loc'], {}), '(preds.loc)\n', (1370, 1381), True, 'import numpy as np\n'), ((1972, 1989), 'numpy.exp', 'np.exp', (['preds.loc'], {}), '(preds.loc)\n', (1978, 1989), True, 'import numpy as np\n')]
|
from __future__ import print_function
import sys
import os
import numpy as np
from multiprocessing import Pool, freeze_support
import tempfile
from impy.definitions import *
from impy.constants import *
from impy.kinematics import EventKinematics
from impy import impy_config, pdata
from impy.util import info
# AF: This is what the user interaction has to yield.
# It is the typical expected configuration that one
# wants to run (read pp-mode at energies not exceeding
# 7 TeV). If you want cosmic ray energies, this should
# be rather p-N at 10 EeV and lab frame (not yet defined).
event_kinematics = EventKinematics(ecm=7000 * GeV,
p1pdg=2212,
p2pdg=2212
# nuc2_prop=(14,7)
)
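# Illustrative sketch (not part of the original script): a cosmic-ray style setup, as
# mentioned in the comment above, would collide a proton with a nitrogen nucleus in
# the lab frame. The 'elab' keyword and the (14, 7) nucleus tuple below are
# assumptions about the impy EventKinematics API and are therefore left commented out.
# event_kinematics_cr = EventKinematics(elab=1e10 * GeV, p1pdg=2212, nuc2_prop=(14, 7))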
impy_config["user_frame"] = 'center-of-mass'
gen_list = [
'SIBYLL23D',
'SIBYLL23C',
'SIBYLL23',
'SIBYLL21',
'DPMJETIII306',
'DPMJETIII191',
'EPOSLHC',
'PHOJET112',
'PHOJET191',
'URQMD34',
# 'PYTHIA8',
'QGSJET01C',
'QGSJETII03',
'QGSJETII04'
]
xlab_bins = np.linspace(0,1,21)
xlab_widths = xlab_bins[1:] - xlab_bins[:-1]
xlab_centers = 0.5*(xlab_bins[1:] + xlab_bins[:-1])
nevents = 5000
norm = 1./float(nevents)/xlab_widths
def run_generator(gen,*args):
print('Testing',gen)
hist_p = np.zeros(len(xlab_centers))
hist_pi = np.zeros(len(xlab_centers))
try:
log = tempfile.mkstemp()[1]
generator = make_generator_instance(interaction_model_by_tag[gen])
generator.init_generator(event_kinematics,logfname=log)
for event in generator.event_generator(event_kinematics, nevents):
event.filter_final_state_charged()
hist_p += np.histogram(event.xlab[event.p_ids == 2212],
bins=xlab_bins,
weights=event.xlab[event.p_ids == 2212]**1.7)[0]
hist_pi += np.histogram(event.xlab[np.abs(event.p_ids) == 211],
bins=xlab_bins,
weights=event.xlab[np.abs(event.p_ids) == 211]**1.7)[0]
return True, gen, log, hist_p, hist_pi
except:
return False, gen, log, hist_p, hist_pi
if __name__ in ['__main__', '__test__']:
freeze_support()
pool = Pool(processes=32)
result = [pool.apply_async(run_generator, (gen,)) for gen in gen_list]
result = [res.get(timeout=100000) for res in result]
logs = {}
xlab_protons = {}
xlab_piplus = {}
failed = []
passed = []
for r, gen, log, hist_p, hist_pi in result:
if r:
passed.append(gen)
xlab_protons[gen] = hist_p
xlab_piplus[gen] = hist_pi
else:
failed.append(gen)
with open(log) as f:
logs[gen] = f.read()
    info(0, 'Test results for 7 TeV pp collisions in the center-of-mass frame:\n')
info(0, 'Passed:', '\n', '\n '.join(passed))
info(0, '\nFailed:', '\n', '\n '.join(failed))
import pickle
pickle.dump((xlab_bins, xlab_protons, xlab_piplus, logs),
open(os.path.splitext(__file__)[0] + '.pkl','wb'), protocol=-1)
|
[
"numpy.abs",
"impy.util.info",
"tempfile.mkstemp",
"impy.kinematics.EventKinematics",
"numpy.histogram",
"os.path.splitext",
"numpy.linspace",
"multiprocessing.Pool",
"multiprocessing.freeze_support"
] |
[((608, 663), 'impy.kinematics.EventKinematics', 'EventKinematics', ([], {'ecm': '(7000 * GeV)', 'p1pdg': '(2212)', 'p2pdg': '(2212)'}), '(ecm=7000 * GeV, p1pdg=2212, p2pdg=2212)\n', (623, 663), False, 'from impy.kinematics import EventKinematics\n'), ((1144, 1165), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(21)'], {}), '(0, 1, 21)\n', (1155, 1165), True, 'import numpy as np\n'), ((2335, 2351), 'multiprocessing.freeze_support', 'freeze_support', ([], {}), '()\n', (2349, 2351), False, 'from multiprocessing import Pool, freeze_support\n'), ((2363, 2381), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(32)'}), '(processes=32)\n', (2367, 2381), False, 'from multiprocessing import Pool, freeze_support\n'), ((2918, 2983), 'impy.util.info', 'info', (['(0)', '"""Test results for 158 GeV pC collisions in lab frame:\n"""'], {}), "(0, 'Test results for 158 GeV pC collisions in lab frame:\\n')\n", (2922, 2983), False, 'from impy.util import info\n'), ((1476, 1494), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (1492, 1494), False, 'import tempfile\n'), ((1782, 1896), 'numpy.histogram', 'np.histogram', (['event.xlab[event.p_ids == 2212]'], {'bins': 'xlab_bins', 'weights': '(event.xlab[event.p_ids == 2212] ** 1.7)'}), '(event.xlab[event.p_ids == 2212], bins=xlab_bins, weights=event\n .xlab[event.p_ids == 2212] ** 1.7)\n', (1794, 1896), True, 'import numpy as np\n'), ((3186, 3212), 'os.path.splitext', 'os.path.splitext', (['__file__'], {}), '(__file__)\n', (3202, 3212), False, 'import os\n'), ((2003, 2022), 'numpy.abs', 'np.abs', (['event.p_ids'], {}), '(event.p_ids)\n', (2009, 2022), True, 'import numpy as np\n'), ((2131, 2150), 'numpy.abs', 'np.abs', (['event.p_ids'], {}), '(event.p_ids)\n', (2137, 2150), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
@author: abhilash
"""
import numpy as np
import cv2
# load the image to detect, get width, height
# resize to match input size, convert to blob to pass into model
img_to_detect = cv2.imread('images/testing/scene3.jpg')
img_height = img_to_detect.shape[0]
img_width = img_to_detect.shape[1]
resized_img_to_detect = cv2.resize(img_to_detect,(300,300))
img_blob = cv2.dnn.blobFromImage(resized_img_to_detect,0.007843,(300,300),127.5)
# recommended scale factor is 0.007843 (about 1/127.5); the blob width and height are 300x300; the mean subtraction value is 127.5
# set of 21 class labels in alphabetical order (background + rest of 20 classes)
class_labels = ["background", "aeroplane", "bicycle", "bird", "boat","bottle", "bus", "car", "cat", "chair", "cow", "diningtable","dog", "horse", "motorbike", "person", "pottedplant", "sheep","sofa", "train", "tvmonitor"]
# Loading pretrained model from prototext and caffemodel files
# input preprocessed blob into model and pass through the model
# obtain the detection predictions by the model using forward() method
mobilenetssd = cv2.dnn.readNetFromCaffe('dataset/mobilenetssd.prototext','dataset/mobilenetssd.caffemodel')
mobilenetssd.setInput(img_blob)
obj_detections = mobilenetssd.forward()
# in the returned array, obj_detections[0, 0, index, 1] holds the predicted class index,
# [0, 0, index, 2] the confidence, and [0, 0, index, 3:7] the bounding box coordinates
no_of_detections = obj_detections.shape[2]
# loop over the detections
for index in np.arange(0, no_of_detections):
prediction_confidence = obj_detections[0, 0, index, 2]
# take only predictions with confidence more than 20%
if prediction_confidence > 0.20:
#get the predicted label
predicted_class_index = int(obj_detections[0, 0, index, 1])
predicted_class_label = class_labels[predicted_class_index]
#obtain the bounding box co-oridnates for actual image from resized image size
bounding_box = obj_detections[0, 0, index, 3:7] * np.array([img_width, img_height, img_width, img_height])
(start_x_pt, start_y_pt, end_x_pt, end_y_pt) = bounding_box.astype("int")
# print the prediction in console
predicted_class_label = "{}: {:.2f}%".format(class_labels[predicted_class_index], prediction_confidence * 100)
print("predicted object {}: {}".format(index+1, predicted_class_label))
# draw rectangle and text in the image
cv2.rectangle(img_to_detect, (start_x_pt, start_y_pt), (end_x_pt, end_y_pt), (0,255,0), 2)
cv2.putText(img_to_detect, predicted_class_label, (start_x_pt, start_y_pt-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1)
cv2.imshow("Detection Output", img_to_detect)
|
[
"cv2.putText",
"cv2.dnn.blobFromImage",
"cv2.rectangle",
"cv2.imread",
"numpy.arange",
"numpy.array",
"cv2.dnn.readNetFromCaffe",
"cv2.imshow",
"cv2.resize"
] |
[((223, 262), 'cv2.imread', 'cv2.imread', (['"""images/testing/scene3.jpg"""'], {}), "('images/testing/scene3.jpg')\n", (233, 262), False, 'import cv2\n'), ((361, 398), 'cv2.resize', 'cv2.resize', (['img_to_detect', '(300, 300)'], {}), '(img_to_detect, (300, 300))\n', (371, 398), False, 'import cv2\n'), ((409, 482), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['resized_img_to_detect', '(0.007843)', '(300, 300)', '(127.5)'], {}), '(resized_img_to_detect, 0.007843, (300, 300), 127.5)\n', (430, 482), False, 'import cv2\n'), ((1101, 1198), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['"""dataset/mobilenetssd.prototext"""', '"""dataset/mobilenetssd.caffemodel"""'], {}), "('dataset/mobilenetssd.prototext',\n 'dataset/mobilenetssd.caffemodel')\n", (1125, 1198), False, 'import cv2\n'), ((1523, 1553), 'numpy.arange', 'np.arange', (['(0)', 'no_of_detections'], {}), '(0, no_of_detections)\n', (1532, 1553), True, 'import numpy as np\n'), ((2740, 2785), 'cv2.imshow', 'cv2.imshow', (['"""Detection Output"""', 'img_to_detect'], {}), "('Detection Output', img_to_detect)\n", (2750, 2785), False, 'import cv2\n'), ((2512, 2608), 'cv2.rectangle', 'cv2.rectangle', (['img_to_detect', '(start_x_pt, start_y_pt)', '(end_x_pt, end_y_pt)', '(0, 255, 0)', '(2)'], {}), '(img_to_detect, (start_x_pt, start_y_pt), (end_x_pt, end_y_pt),\n (0, 255, 0), 2)\n', (2525, 2608), False, 'import cv2\n'), ((2612, 2742), 'cv2.putText', 'cv2.putText', (['img_to_detect', 'predicted_class_label', '(start_x_pt, start_y_pt - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 0)', '(1)'], {}), '(img_to_detect, predicted_class_label, (start_x_pt, start_y_pt -\n 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n', (2623, 2742), False, 'import cv2\n'), ((2051, 2107), 'numpy.array', 'np.array', (['[img_width, img_height, img_width, img_height]'], {}), '([img_width, img_height, img_width, img_height])\n', (2059, 2107), True, 'import numpy as np\n')]
|
from utils.data_loader import MNIST
from models.gru import GRU
from models.tcn import TCN
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
# Train the model for one epoch
def train(model, optimizer, train_loader, log_interval=10):
model.train()
loss_in_log_interval = 0
n_samples_processed = 0
for i_batch, (x, y) in enumerate(train_loader):
x, y = Variable(x), Variable(y)
optimizer.zero_grad()
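        # The TCN consumes the whole sequence in a single forward pass, while
        # the recurrent model needs a fresh initial hidden state for each batch.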
if type(model) is TCN:
y_hat = model(x)
else:
hidden = model.generate_initial_hidden(x.size()[0])
y_hat, hidden = model(x, hidden)
loss = F.nll_loss(y_hat, y)
loss.backward()
optimizer.step()
loss_in_log_interval += loss.item()
n_samples_processed += x.size()[0]
if (i_batch + 1) % log_interval == 0:
            print('{}/{} batches ({}/{} samples) processed; mean loss over last {} batches: {}'.format(
i_batch + 1, len(train_loader),
n_samples_processed, len(train_loader.dataset),
log_interval, loss_in_log_interval / log_interval
))
loss_in_log_interval = 0
# Evaluate the model on the test data
def test(model, test_loader):
model.eval()
n_total = len(test_loader.dataset)
test_loss = 0
n_correct = 0.0
with torch.no_grad():
for x, y in test_loader:
if type(model) is TCN:
y_hat = model(x)
else:
hidden = model.generate_initial_hidden(x.size()[0])
y_hat, hidden = model(x, hidden)
test_loss += F.nll_loss(y_hat, y, reduction='sum').item()
pred = y_hat.data.max(1, keepdim=True)[1]
n_correct += pred.eq(y.data.view_as(pred)).sum()
test_loss /= n_total
    print(f'Mean loss on the test data: {test_loss}')
    print('Accuracy on the test data: {}/{} ({:.2%})'.format(int(n_correct), n_total, n_correct / n_total))
# Main entry point
# - arch : model architecture to train, either 'gru' or 'tcn'
# - id : identifier embedded in the names of the emitted weight files
# - weight_dict : path to an existing weight file to load, if any
# - epochs : number of epochs; setting it to 0 skips training
# - permute : if given, each input sequence is reordered by these indices
def main(arch='gru', id='hoge', weight_dict=None, epochs=10, permute=None):
batch_size = 64
train_loader, test_loader = MNIST(batch_size=batch_size,
sequential=(arch == 'tcn'),
sequential_rnn=(arch != 'tcn'),
permute=permute)
if arch == 'tcn':
model = TCN(input_size=1, output_size=10, num_channels=[25]*8,
kernel_size=7, dropout=0.0)
optimizer = optim.Adam(model.parameters(), lr=2e-3)
elif arch == 'gru':
model = GRU(input_size=1, output_size=10, num_layers=1, d_hidden=128,
initial_update_gate_bias=0.5, dropout=0.0)
optimizer = optim.RMSprop(model.parameters(), lr=1e-3)
if weight_dict is not None:
model.load_state_dict(torch.load(weight_dict))
for epoch in range(epochs):
        print(f'Epoch {epoch}')
train(model, optimizer, train_loader)
test(model, test_loader)
torch.save(model.state_dict(), f'./weights/{arch}_sequential_mnist_{id}_{epoch}.dict')
test(model, test_loader)
if __name__ == '__main__':
np.random.seed(0)
torch.manual_seed(0)
main(arch='gru', weight_dict='./weights/gru_sequential_mnist_sample.dict', epochs=0)
main(arch='tcn', weight_dict='./weights/tcn_sequential_mnist_sample.dict', epochs=0)
# main(arch='gru', epochs=1)
# main(arch='tcn', epochs=1)
    # For the Permuted MNIST variant
permute = np.random.permutation(784)
# main(arch='gru', epochs=1, permute=permute)
# main(arch='tcn', epochs=1, permute=permute)
|
[
"numpy.random.seed",
"models.tcn.TCN",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.load",
"utils.data_loader.MNIST",
"torch.nn.functional.nll_loss",
"numpy.random.permutation",
"models.gru.GRU",
"torch.no_grad"
] |
[((2284, 2389), 'utils.data_loader.MNIST', 'MNIST', ([], {'batch_size': 'batch_size', 'sequential': "(arch == 'tcn')", 'sequential_rnn': "(arch != 'tcn')", 'permute': 'permute'}), "(batch_size=batch_size, sequential=arch == 'tcn', sequential_rnn=arch !=\n 'tcn', permute=permute)\n", (2289, 2389), False, 'from utils.data_loader import MNIST\n'), ((3323, 3340), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3337, 3340), True, 'import numpy as np\n'), ((3345, 3365), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3362, 3365), False, 'import torch\n'), ((3653, 3679), 'numpy.random.permutation', 'np.random.permutation', (['(784)'], {}), '(784)\n', (3674, 3679), True, 'import numpy as np\n'), ((688, 708), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['y_hat', 'y'], {}), '(y_hat, y)\n', (698, 708), True, 'import torch.nn.functional as F\n'), ((1351, 1366), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1364, 1366), False, 'import torch\n'), ((2542, 2630), 'models.tcn.TCN', 'TCN', ([], {'input_size': '(1)', 'output_size': '(10)', 'num_channels': '([25] * 8)', 'kernel_size': '(7)', 'dropout': '(0.0)'}), '(input_size=1, output_size=10, num_channels=[25] * 8, kernel_size=7,\n dropout=0.0)\n', (2545, 2630), False, 'from models.tcn import TCN\n'), ((435, 446), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (443, 446), False, 'from torch.autograd import Variable\n'), ((448, 459), 'torch.autograd.Variable', 'Variable', (['y'], {}), '(y)\n', (456, 459), False, 'from torch.autograd import Variable\n'), ((2745, 2853), 'models.gru.GRU', 'GRU', ([], {'input_size': '(1)', 'output_size': '(10)', 'num_layers': '(1)', 'd_hidden': '(128)', 'initial_update_gate_bias': '(0.5)', 'dropout': '(0.0)'}), '(input_size=1, output_size=10, num_layers=1, d_hidden=128,\n initial_update_gate_bias=0.5, dropout=0.0)\n', (2748, 2853), False, 'from models.gru import GRU\n'), ((2997, 3020), 'torch.load', 'torch.load', (['weight_dict'], {}), '(weight_dict)\n', (3007, 3020), False, 'import torch\n'), ((1629, 1666), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['y_hat', 'y'], {'reduction': '"""sum"""'}), "(y_hat, y, reduction='sum')\n", (1639, 1666), True, 'import torch.nn.functional as F\n')]
|
import streamlit as st
import numpy as np
import pickle
from sklearn.tree import DecisionTreeClassifier
#model = DecisionTreeClassifier(max_depth=8)
model = pickle.load(open('model.pickle','rb'))
st.write("""
# CoverMyMeds - PA Approval Chances
""")
st.write("This project was done as part of the Erdos Data Science bootcamp Fall 2021. The data was provided by CoverMyMeds.")
st.header("User Information")
st.write("Please fill in the following information." )
bin = st.radio("Select the BIN of Insurance payer: ", ("417380","417614","417740","999001"))
drug = st.radio("Select the drug that you want covered: ", ("A","B","C"))
tried_failed = st.radio("Have you tried and failed the generic alternative?", ("Yes","No"))
contraindication = st.radio("Do you have an associated contraindication for the medication requested (i.e. is there any reason you cannot take this drug)?",("Yes","No"));
correct_diagnosis = st.radio("Do you have the corrected diagnosis for the associated drug?",("Yes","No"));
# Find reject code:
reject_code = 0;
if bin == "417380":
if drug == "A":
reject_code = 75;
elif drug == "B":
reject_code = 76;
elif drug == "C":
reject_code = 70;
elif bin == "417614":
if drug == "A":
reject_code = 70;
elif drug == "B":
reject_code = 75;
elif drug == "C":
reject_code = 76;
elif bin == "417740":
if drug == "A":
reject_code = 76;
elif drug == "B":
reject_code = 70;
elif drug == "C":
reject_code = 75;
elif bin == "999001":
reject_code = 76;
#Set features
d = {"Yes":1, "No":0} #Dictionary for Yes = 1, No = 0
cd = d[correct_diagnosis]
tf = d[tried_failed]
contra = d[contraindication]
drug_B = int(drug == "B")
drug_C = int(drug == "C")
bin_417614 = int(bin == "417614")
bin_417740 = int(bin == "417740")
bin_999001 = int(bin == "999001")
reject_code_75 = int(reject_code == 75)
reject_code_76 = int(reject_code == 76)
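# The feature order passed to predict_proba below is assumed to match the
# column order the pickled DecisionTreeClassifier was trained with; reordering
# these features would silently produce wrong probabilities.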
#Predict
pred = model.predict_proba([[cd,tf,contra,drug_B,drug_C,bin_417614,bin_417740,bin_999001, reject_code_75, reject_code_76]])
if tf == 0:
pred1 = model.predict_proba([[cd,1,contra,drug_B,drug_C,bin_417614,bin_417740,bin_999001, reject_code_75, reject_code_76]])
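# The predict_proba calls above each return a (1, 2) array of class
# probabilities; column 1 is read below as the probability of approval
# (assuming class 1 encodes an approved PA).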
st.header("Result")
st.write("""The chances of your PA being approved are: **{}**""".format(np.round(100*pred[0,1],3)), "%.")
if tf == 0:
st.write("""In addition, if you first try the generic alternative but still need this drug, then the chances of your PA form being approved are: {}""".format(np.round(100*pred1[0,1],3)), "%.")
|
[
"streamlit.header",
"numpy.round",
"streamlit.radio",
"streamlit.write"
] |
[((200, 253), 'streamlit.write', 'st.write', (['"""\n# CoverMyMeds - PA Approval Chances\n"""'], {}), '("""\n# CoverMyMeds - PA Approval Chances\n""")\n', (208, 253), True, 'import streamlit as st\n'), ((255, 390), 'streamlit.write', 'st.write', (['"""This project was done as part of the Erdos Data Science bootcamp Fall 2021. The data was provided by CoverMyMeds."""'], {}), "(\n 'This project was done as part of the Erdos Data Science bootcamp Fall 2021. The data was provided by CoverMyMeds.'\n )\n", (263, 390), True, 'import streamlit as st\n'), ((382, 411), 'streamlit.header', 'st.header', (['"""User Information"""'], {}), "('User Information')\n", (391, 411), True, 'import streamlit as st\n'), ((413, 466), 'streamlit.write', 'st.write', (['"""Please fill in the following information."""'], {}), "('Please fill in the following information.')\n", (421, 466), True, 'import streamlit as st\n'), ((475, 568), 'streamlit.radio', 'st.radio', (['"""Select the BIN of Insurance payer: """', "('417380', '417614', '417740', '999001')"], {}), "('Select the BIN of Insurance payer: ', ('417380', '417614',\n '417740', '999001'))\n", (483, 568), True, 'import streamlit as st\n'), ((570, 638), 'streamlit.radio', 'st.radio', (['"""Select the drug that you want covered: """', "('A', 'B', 'C')"], {}), "('Select the drug that you want covered: ', ('A', 'B', 'C'))\n", (578, 638), True, 'import streamlit as st\n'), ((653, 730), 'streamlit.radio', 'st.radio', (['"""Have you tried and failed the generic alternative?"""', "('Yes', 'No')"], {}), "('Have you tried and failed the generic alternative?', ('Yes', 'No'))\n", (661, 730), True, 'import streamlit as st\n'), ((750, 912), 'streamlit.radio', 'st.radio', (['"""Do you have an associated contraindication for the medication requested (i.e. is there any reason you cannot take this drug)?"""', "('Yes', 'No')"], {}), "(\n 'Do you have an associated contraindication for the medication requested (i.e. is there any reason you cannot take this drug)?'\n , ('Yes', 'No'))\n", (758, 912), True, 'import streamlit as st\n'), ((923, 1015), 'streamlit.radio', 'st.radio', (['"""Do you have the corrected diagnosis for the associated drug?"""', "('Yes', 'No')"], {}), "('Do you have the corrected diagnosis for the associated drug?', (\n 'Yes', 'No'))\n", (931, 1015), True, 'import streamlit as st\n'), ((2240, 2259), 'streamlit.header', 'st.header', (['"""Result"""'], {}), "('Result')\n", (2249, 2259), True, 'import streamlit as st\n'), ((2332, 2361), 'numpy.round', 'np.round', (['(100 * pred[0, 1])', '(3)'], {}), '(100 * pred[0, 1], 3)\n', (2340, 2361), True, 'import numpy as np\n'), ((2541, 2571), 'numpy.round', 'np.round', (['(100 * pred1[0, 1])', '(3)'], {}), '(100 * pred1[0, 1], 3)\n', (2549, 2571), True, 'import numpy as np\n')]
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
import os
from absl.testing import absltest
from alphafold_paddle.common import protein
from alphafold_paddle.relax import utils
import numpy as np
# Internal import (7716).
class UtilsTest(absltest.TestCase):
def test_overwrite_b_factors(self):
testdir = os.path.join(
absltest.get_default_test_srcdir(),
'alphafold/relax/testdata/'
'multiple_disulfides_target.pdb')
with open(testdir) as f:
test_pdb = f.read()
n_residues = 191
bfactors = np.stack([np.arange(0, n_residues)] * 37, axis=-1)
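    # 37 columns: one B-factor per atom slot in AlphaFold's atom37 residue
    # representation, so every atom of a residue shares the same value.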
output_pdb = utils.overwrite_b_factors(test_pdb, bfactors)
# Check that the atom lines are unchanged apart from the B-factors.
atom_lines_original = [l for l in test_pdb.split('\n') if l[:4] == ('ATOM')]
atom_lines_new = [l for l in output_pdb.split('\n') if l[:4] == ('ATOM')]
for line_original, line_new in zip(atom_lines_original, atom_lines_new):
self.assertEqual(line_original[:60].strip(), line_new[:60].strip())
self.assertEqual(line_original[66:].strip(), line_new[66:].strip())
# Check B-factors are correctly set for all atoms present.
as_protein = protein.from_pdb_string(output_pdb)
np.testing.assert_almost_equal(
np.where(as_protein.atom_mask > 0, as_protein.b_factors, 0),
np.where(as_protein.atom_mask > 0, bfactors, 0))
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"alphafold_paddle.common.protein.from_pdb_string",
"alphafold_paddle.relax.utils.overwrite_b_factors",
"absl.testing.absltest.get_default_test_srcdir",
"numpy.where",
"numpy.arange"
] |
[((1994, 2009), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2007, 2009), False, 'from absl.testing import absltest\n'), ((1181, 1226), 'alphafold_paddle.relax.utils.overwrite_b_factors', 'utils.overwrite_b_factors', (['test_pdb', 'bfactors'], {}), '(test_pdb, bfactors)\n', (1206, 1226), False, 'from alphafold_paddle.relax import utils\n'), ((1765, 1800), 'alphafold_paddle.common.protein.from_pdb_string', 'protein.from_pdb_string', (['output_pdb'], {}), '(output_pdb)\n', (1788, 1800), False, 'from alphafold_paddle.common import protein\n'), ((907, 941), 'absl.testing.absltest.get_default_test_srcdir', 'absltest.get_default_test_srcdir', ([], {}), '()\n', (939, 941), False, 'from absl.testing import absltest\n'), ((1845, 1904), 'numpy.where', 'np.where', (['(as_protein.atom_mask > 0)', 'as_protein.b_factors', '(0)'], {}), '(as_protein.atom_mask > 0, as_protein.b_factors, 0)\n', (1853, 1904), True, 'import numpy as np\n'), ((1914, 1961), 'numpy.where', 'np.where', (['(as_protein.atom_mask > 0)', 'bfactors', '(0)'], {}), '(as_protein.atom_mask > 0, bfactors, 0)\n', (1922, 1961), True, 'import numpy as np\n'), ((1122, 1146), 'numpy.arange', 'np.arange', (['(0)', 'n_residues'], {}), '(0, n_residues)\n', (1131, 1146), True, 'import numpy as np\n')]
|
from __future__ import print_function
import numpy as np
from numpy import cos, sin
from numpy.testing import assert_equal
import unittest
import pcl
from pcl.registration import icp, gicp, icp_nl, ia_ransac
bun0Tobun4 = [[0.85250509, -0.03745676, -0.52137518, 0.04118973],
[0.03552843, 0.99927479, -0.01369729, 0.00103067],
[0.52151012, -0.00684663, 0.8532176, 0.03994245],
[0., 0., 0., 1.]]
class TestICP(unittest.TestCase):
def setUpRandom(self):
# Check if ICP can find a mild rotation.
theta = [-.031, .4, .59]
rot_x = [[1, 0, 0],
[0, cos(theta[0]), -sin(theta[0])],
[0, sin(theta[0]), cos(theta[0])]]
rot_y = [[cos(theta[1]), 0, sin(theta[1])],
[0, 1, 0],
[-sin(theta[1]), 0, cos(theta[1])]]
rot_z = [[cos(theta[2]), -sin(theta[1]), 0],
[sin(theta[2]), cos(theta[1]), 0],
[0, 0, 1]]
transform = np.dot(rot_x, np.dot(rot_y, rot_z))
# print("---------")
# print("Rotation: ")
# print(transform[0:3,0:3])
# print("Translation: ", transform[3, 0:3])
# print("---------")
random_cloud = np.random.RandomState(42).randn(900, 3)
self.source = pcl.PointCloud(random_cloud.astype(np.float32))
a = np.dot(random_cloud, transform).astype(np.float32)
self.target = pcl.PointCloud(a)
def setUpBunny(self):
self.source = pcl.PointCloud()
self.source.from_file("tests/bun0.pcd")
self.target = pcl.PointCloud()
self.target.from_file("tests/bun4.pcd")
def setUp(self):
self.setUpBunny()
def check_algo(self, algo, max_iter=1000, **kwargs):
converged, transf, estimate, fitness = \
algo(self.source, self.target, max_iter=max_iter, **kwargs)
self.assertTrue(isinstance(transf, np.ndarray))
self.assertEqual(transf.shape, (4, 4))
np.testing.assert_allclose(bun0Tobun4, transf, 0, 0.1)
assert_equal(transf[3], [0, 0, 0, 1])
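        # The bottom row of a homogeneous 4x4 transform is always [0, 0, 0, 1].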
# XXX I think I misunderstand fitness, it's not equal to the following
# MSS.
# mss = (np.linalg.norm(estimate.to_array()
# - self.source.to_array(), axis=1) ** 2).mean()
# self.assertLess(mss, 1)
# print("------", algo)
# print("Converged: ", converged, "Estimate: ", estimate,
# "Fitness: ", fitness)
# print("Rotation: ")
# print(transf[0:3,0:3])
# print("Translation: ", transf[3, 0:3])
# print("---------")
def testGICP(self):
self.check_algo(gicp)
def testICP_NL(self):
self.check_algo(icp_nl)
def testIA_RANSAC(self):
# reducing radius makes this test fail
# reducing the max_iter to 1000 makes the test fail
self.check_algo(ia_ransac, radius=0.5, minSampleDistance=0.01,
maxCorrespondenceDistance=0.5, max_iter=10000)
def testICP(self):
self.check_algo(icp)
transf1 = icp(self.source, self.target, max_iter=1)[1]
transf2 = icp(self.source, self.target, max_iter=2)[1]
self.assertFalse(np.allclose(transf1, transf2, 0, 0.1),
"First and second transformation should be unequal"
" in this complicated registration.")
transf1 = icp(self.source, self.target, transformationEpsilon=0)[1]
transf2 = icp(self.source, self.target, transformationEpsilon=1)[1]
self.assertFalse(np.allclose(transf1, transf2, 0, 0.1),
"Transformations should be unequal"
" with different stopping criteria.")
transf1 = icp(self.source, self.target, euclideanFitnessEpsilon=0)[1]
transf2 = icp(self.source, self.target, euclideanFitnessEpsilon=1)[1]
self.assertFalse(np.allclose(transf1, transf2, 0, 0.1),
"Transformations should be unequal with different"
" stopping criteria.")
|
[
"pcl.registration.icp",
"numpy.testing.assert_allclose",
"numpy.allclose",
"numpy.random.RandomState",
"numpy.sin",
"numpy.testing.assert_equal",
"numpy.cos",
"numpy.dot",
"pcl.PointCloud"
] |
[((1603, 1620), 'pcl.PointCloud', 'pcl.PointCloud', (['a'], {}), '(a)\n', (1617, 1620), False, 'import pcl\n'), ((1670, 1686), 'pcl.PointCloud', 'pcl.PointCloud', ([], {}), '()\n', (1684, 1686), False, 'import pcl\n'), ((1757, 1773), 'pcl.PointCloud', 'pcl.PointCloud', ([], {}), '()\n', (1771, 1773), False, 'import pcl\n'), ((2160, 2214), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['bun0Tobun4', 'transf', '(0)', '(0.1)'], {}), '(bun0Tobun4, transf, 0, 0.1)\n', (2186, 2214), True, 'import numpy as np\n'), ((2223, 2260), 'numpy.testing.assert_equal', 'assert_equal', (['transf[3]', '[0, 0, 0, 1]'], {}), '(transf[3], [0, 0, 0, 1])\n', (2235, 2260), False, 'from numpy.testing import assert_equal\n'), ((1185, 1205), 'numpy.dot', 'np.dot', (['rot_y', 'rot_z'], {}), '(rot_y, rot_z)\n', (1191, 1205), True, 'import numpy as np\n'), ((3264, 3305), 'pcl.registration.icp', 'icp', (['self.source', 'self.target'], {'max_iter': '(1)'}), '(self.source, self.target, max_iter=1)\n', (3267, 3305), False, 'from pcl.registration import icp, gicp, icp_nl, ia_ransac\n'), ((3327, 3368), 'pcl.registration.icp', 'icp', (['self.source', 'self.target'], {'max_iter': '(2)'}), '(self.source, self.target, max_iter=2)\n', (3330, 3368), False, 'from pcl.registration import icp, gicp, icp_nl, ia_ransac\n'), ((3397, 3434), 'numpy.allclose', 'np.allclose', (['transf1', 'transf2', '(0)', '(0.1)'], {}), '(transf1, transf2, 0, 0.1)\n', (3408, 3434), True, 'import numpy as np\n'), ((3595, 3649), 'pcl.registration.icp', 'icp', (['self.source', 'self.target'], {'transformationEpsilon': '(0)'}), '(self.source, self.target, transformationEpsilon=0)\n', (3598, 3649), False, 'from pcl.registration import icp, gicp, icp_nl, ia_ransac\n'), ((3671, 3725), 'pcl.registration.icp', 'icp', (['self.source', 'self.target'], {'transformationEpsilon': '(1)'}), '(self.source, self.target, transformationEpsilon=1)\n', (3674, 3725), False, 'from pcl.registration import icp, gicp, icp_nl, ia_ransac\n'), ((3754, 3791), 'numpy.allclose', 'np.allclose', (['transf1', 'transf2', '(0)', '(0.1)'], {}), '(transf1, transf2, 0, 0.1)\n', (3765, 3791), True, 'import numpy as np\n'), ((3936, 3992), 'pcl.registration.icp', 'icp', (['self.source', 'self.target'], {'euclideanFitnessEpsilon': '(0)'}), '(self.source, self.target, euclideanFitnessEpsilon=0)\n', (3939, 3992), False, 'from pcl.registration import icp, gicp, icp_nl, ia_ransac\n'), ((4014, 4070), 'pcl.registration.icp', 'icp', (['self.source', 'self.target'], {'euclideanFitnessEpsilon': '(1)'}), '(self.source, self.target, euclideanFitnessEpsilon=1)\n', (4017, 4070), False, 'from pcl.registration import icp, gicp, icp_nl, ia_ransac\n'), ((4099, 4136), 'numpy.allclose', 'np.allclose', (['transf1', 'transf2', '(0)', '(0.1)'], {}), '(transf1, transf2, 0, 0.1)\n', (4110, 4136), True, 'import numpy as np\n'), ((705, 718), 'numpy.cos', 'cos', (['theta[0]'], {}), '(theta[0])\n', (708, 718), False, 'from numpy import cos, sin\n'), ((771, 784), 'numpy.sin', 'sin', (['theta[0]'], {}), '(theta[0])\n', (774, 784), False, 'from numpy import cos, sin\n'), ((787, 800), 'numpy.cos', 'cos', (['theta[0]'], {}), '(theta[0])\n', (790, 800), False, 'from numpy import cos, sin\n'), ((821, 834), 'numpy.cos', 'cos', (['theta[1]'], {}), '(theta[1])\n', (824, 834), False, 'from numpy import cos, sin\n'), ((853, 866), 'numpy.sin', 'sin', (['theta[1]'], {}), '(theta[1])\n', (856, 866), False, 'from numpy import cos, sin\n'), ((973, 986), 'numpy.cos', 'cos', (['theta[1]'], {}), '(theta[1])\n', (976, 986), False, 
'from numpy import cos, sin\n'), ((1007, 1020), 'numpy.cos', 'cos', (['theta[2]'], {}), '(theta[2])\n', (1010, 1020), False, 'from numpy import cos, sin\n'), ((1061, 1074), 'numpy.sin', 'sin', (['theta[2]'], {}), '(theta[2])\n', (1064, 1074), False, 'from numpy import cos, sin\n'), ((1077, 1090), 'numpy.cos', 'cos', (['theta[1]'], {}), '(theta[1])\n', (1080, 1090), False, 'from numpy import cos, sin\n'), ((1408, 1433), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (1429, 1433), True, 'import numpy as np\n'), ((1530, 1561), 'numpy.dot', 'np.dot', (['random_cloud', 'transform'], {}), '(random_cloud, transform)\n', (1536, 1561), True, 'import numpy as np\n'), ((721, 734), 'numpy.sin', 'sin', (['theta[0]'], {}), '(theta[0])\n', (724, 734), False, 'from numpy import cos, sin\n'), ((942, 955), 'numpy.sin', 'sin', (['theta[1]'], {}), '(theta[1])\n', (945, 955), False, 'from numpy import cos, sin\n'), ((1023, 1036), 'numpy.sin', 'sin', (['theta[1]'], {}), '(theta[1])\n', (1026, 1036), False, 'from numpy import cos, sin\n')]
|
# License: BSD 3 clause
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from tick.robust import ModelHuber
from tick.base_model.tests.generalized_linear_model import TestGLM
from tick.linear_model import SimuLinReg
class Test(TestGLM):
def test_ModelHuber(self):
"""...Numerical consistency check of loss and gradient for Huber model
"""
np.random.seed(12)
n_samples, n_features = 5000, 10
w0 = np.random.randn(n_features)
c0 = np.random.randn()
# First check with intercept
X, y = SimuLinReg(w0, c0, n_samples=n_samples,
verbose=False).simulate()
X_spars = csr_matrix(X)
model = ModelHuber(fit_intercept=True, threshold=1.3).fit(X, y)
model_spars = ModelHuber(fit_intercept=True, threshold=1.3).fit(
X_spars, y)
self.run_test_for_glm(model, model_spars)
self._test_glm_intercept_vs_hardcoded_intercept(model)
# Then check without intercept
X, y = SimuLinReg(w0, None, n_samples=n_samples, verbose=False,
seed=2038).simulate()
X_spars = csr_matrix(X)
model = ModelHuber(fit_intercept=False).fit(X, y)
model_spars = ModelHuber(fit_intercept=False).fit(X_spars, y)
self.run_test_for_glm(model, model_spars)
# Test for the Lipschitz constants without intercept
self.assertAlmostEqual(model.get_lip_best(), 2.6873683857125981)
self.assertAlmostEqual(model.get_lip_mean(), 9.95845726788432)
self.assertAlmostEqual(model.get_lip_max(), 54.82616964855237)
self.assertAlmostEqual(model_spars.get_lip_mean(),
model.get_lip_mean())
self.assertAlmostEqual(model_spars.get_lip_max(), model.get_lip_max())
# Test for the Lipschitz constants with intercept
model = ModelHuber(fit_intercept=True).fit(X, y)
model_spars = ModelHuber(fit_intercept=True).fit(X_spars, y)
self.assertAlmostEqual(model.get_lip_best(), 2.687568385712598)
self.assertAlmostEqual(model.get_lip_mean(), 10.958457267884327)
self.assertAlmostEqual(model.get_lip_max(), 55.82616964855237)
self.assertAlmostEqual(model_spars.get_lip_mean(),
model.get_lip_mean())
self.assertAlmostEqual(model_spars.get_lip_max(), model.get_lip_max())
def test_ModelHuber_threshold(self):
np.random.seed(12)
n_samples, n_features = 5000, 10
w0 = np.random.randn(n_features)
c0 = np.random.randn()
# First check with intercept
X, y = SimuLinReg(w0, c0, n_samples=n_samples,
verbose=False).simulate()
model = ModelHuber(threshold=1.541).fit(X, y)
self.assertEqual(model._model.get_threshold(), 1.541)
model.threshold = 3.14
self.assertEqual(model._model.get_threshold(), 3.14)
msg = '^threshold must be > 0$'
with self.assertRaisesRegex(RuntimeError, msg):
model = ModelHuber(threshold=-1).fit(X, y)
with self.assertRaisesRegex(RuntimeError, msg):
model.threshold = 0.
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"tick.linear_model.SimuLinReg",
"numpy.random.seed",
"numpy.random.randn",
"scipy.sparse.csr_matrix",
"tick.robust.ModelHuber"
] |
[((3225, 3240), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3238, 3240), False, 'import unittest\n'), ((394, 412), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (408, 412), True, 'import numpy as np\n'), ((467, 494), 'numpy.random.randn', 'np.random.randn', (['n_features'], {}), '(n_features)\n', (482, 494), True, 'import numpy as np\n'), ((508, 525), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (523, 525), True, 'import numpy as np\n'), ((689, 702), 'scipy.sparse.csr_matrix', 'csr_matrix', (['X'], {}), '(X)\n', (699, 702), False, 'from scipy.sparse import csr_matrix\n'), ((1163, 1176), 'scipy.sparse.csr_matrix', 'csr_matrix', (['X'], {}), '(X)\n', (1173, 1176), False, 'from scipy.sparse import csr_matrix\n'), ((2466, 2484), 'numpy.random.seed', 'np.random.seed', (['(12)'], {}), '(12)\n', (2480, 2484), True, 'import numpy as np\n'), ((2539, 2566), 'numpy.random.randn', 'np.random.randn', (['n_features'], {}), '(n_features)\n', (2554, 2566), True, 'import numpy as np\n'), ((2580, 2597), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2595, 2597), True, 'import numpy as np\n'), ((579, 633), 'tick.linear_model.SimuLinReg', 'SimuLinReg', (['w0', 'c0'], {'n_samples': 'n_samples', 'verbose': '(False)'}), '(w0, c0, n_samples=n_samples, verbose=False)\n', (589, 633), False, 'from tick.linear_model import SimuLinReg\n'), ((719, 764), 'tick.robust.ModelHuber', 'ModelHuber', ([], {'fit_intercept': '(True)', 'threshold': '(1.3)'}), '(fit_intercept=True, threshold=1.3)\n', (729, 764), False, 'from tick.robust import ModelHuber\n'), ((797, 842), 'tick.robust.ModelHuber', 'ModelHuber', ([], {'fit_intercept': '(True)', 'threshold': '(1.3)'}), '(fit_intercept=True, threshold=1.3)\n', (807, 842), False, 'from tick.robust import ModelHuber\n'), ((1040, 1107), 'tick.linear_model.SimuLinReg', 'SimuLinReg', (['w0', 'None'], {'n_samples': 'n_samples', 'verbose': '(False)', 'seed': '(2038)'}), '(w0, None, n_samples=n_samples, verbose=False, seed=2038)\n', (1050, 1107), False, 'from tick.linear_model import SimuLinReg\n'), ((1193, 1224), 'tick.robust.ModelHuber', 'ModelHuber', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (1203, 1224), False, 'from tick.robust import ModelHuber\n'), ((1258, 1289), 'tick.robust.ModelHuber', 'ModelHuber', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (1268, 1289), False, 'from tick.robust import ModelHuber\n'), ((1899, 1929), 'tick.robust.ModelHuber', 'ModelHuber', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (1909, 1929), False, 'from tick.robust import ModelHuber\n'), ((1962, 1992), 'tick.robust.ModelHuber', 'ModelHuber', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (1972, 1992), False, 'from tick.robust import ModelHuber\n'), ((2650, 2704), 'tick.linear_model.SimuLinReg', 'SimuLinReg', (['w0', 'c0'], {'n_samples': 'n_samples', 'verbose': '(False)'}), '(w0, c0, n_samples=n_samples, verbose=False)\n', (2660, 2704), False, 'from tick.linear_model import SimuLinReg\n'), ((2759, 2786), 'tick.robust.ModelHuber', 'ModelHuber', ([], {'threshold': '(1.541)'}), '(threshold=1.541)\n', (2769, 2786), False, 'from tick.robust import ModelHuber\n'), ((3068, 3092), 'tick.robust.ModelHuber', 'ModelHuber', ([], {'threshold': '(-1)'}), '(threshold=-1)\n', (3078, 3092), False, 'from tick.robust import ModelHuber\n')]
|
"""
Tests for CameraCalibrator and related functions
"""
import numpy as np
import pytest
from scipy.stats import norm
from traitlets.config.configurable import Config
from astropy import units as u
from ctapipe.calib.camera.calibrator import CameraCalibrator
from ctapipe.image.extractor import LocalPeakWindowSum, FullWaveformSum
from ctapipe.instrument import CameraGeometry
from ctapipe.containers import DataContainer
@pytest.fixture(scope="function")
def subarray(example_event):
return example_event.inst.subarray
def test_camera_calibrator(example_event, subarray):
telid = list(example_event.r0.tel)[0]
calibrator = CameraCalibrator(subarray=subarray)
calibrator(example_event)
image = example_event.dl1.tel[telid].image
pulse_time = example_event.dl1.tel[telid].pulse_time
assert image is not None
assert pulse_time is not None
assert image.shape == (1764,)
assert pulse_time.shape == (1764,)
def test_manual_extractor(subarray):
calibrator = CameraCalibrator(
subarray=subarray,
image_extractor=LocalPeakWindowSum(subarray=subarray)
)
assert isinstance(calibrator.image_extractor, LocalPeakWindowSum)
def test_config(subarray):
window_shift = 3
window_width = 9
config = Config(
{
"LocalPeakWindowSum": {
"window_shift": window_shift,
"window_width": window_width,
}
}
)
calibrator = CameraCalibrator(
subarray=subarray,
image_extractor=LocalPeakWindowSum(subarray=subarray, config=config),
config=config
)
assert calibrator.image_extractor.window_shift.tel[None] == window_shift
assert calibrator.image_extractor.window_width.tel[None] == window_width
def test_check_r1_empty(example_event, subarray):
calibrator = CameraCalibrator(subarray=subarray)
telid = list(example_event.r0.tel)[0]
waveform = example_event.r1.tel[telid].waveform.copy()
with pytest.warns(UserWarning):
example_event.r1.tel[telid].waveform = None
calibrator._calibrate_dl0(example_event, telid)
assert example_event.dl0.tel[telid].waveform is None
assert calibrator._check_r1_empty(None) is True
assert calibrator._check_r1_empty(waveform) is False
calibrator = CameraCalibrator(
subarray=subarray,
image_extractor=FullWaveformSum(subarray=subarray)
)
event = DataContainer()
event.dl0.tel[telid].waveform = np.full((2048, 128), 2)
with pytest.warns(UserWarning):
calibrator(event)
assert (event.dl0.tel[telid].waveform == 2).all()
assert (event.dl1.tel[telid].image == 2 * 128).all()
def test_check_dl0_empty(example_event, subarray):
calibrator = CameraCalibrator(subarray=subarray)
telid = list(example_event.r0.tel)[0]
calibrator._calibrate_dl0(example_event, telid)
waveform = example_event.dl0.tel[telid].waveform.copy()
with pytest.warns(UserWarning):
example_event.dl0.tel[telid].waveform = None
calibrator._calibrate_dl1(example_event, telid)
assert example_event.dl1.tel[telid].image is None
assert calibrator._check_dl0_empty(None) is True
assert calibrator._check_dl0_empty(waveform) is False
calibrator = CameraCalibrator(subarray=subarray)
event = DataContainer()
event.dl1.tel[telid].image = np.full(2048, 2)
with pytest.warns(UserWarning):
calibrator(event)
assert (event.dl1.tel[telid].image == 2).all()
def test_dl1_charge_calib(subarray):
camera = CameraGeometry.from_name("CHEC")
n_pixels = camera.n_pixels
n_samples = 96
mid = n_samples // 2
pulse_sigma = 6
random = np.random.RandomState(1)
x = np.arange(n_samples)
# Randomize times and create pulses
time_offset = random.uniform(mid - 10, mid + 10, n_pixels)[:, np.newaxis]
y = norm.pdf(x, time_offset, pulse_sigma)
# Define absolute calibration coefficients
absolute = random.uniform(100, 1000, n_pixels)
y *= absolute[:, np.newaxis]
# Define relative coefficients
relative = random.normal(1, 0.01, n_pixels)
y /= relative[:, np.newaxis]
# Define pedestal
pedestal = random.uniform(-4, 4, n_pixels)
y += pedestal[:, np.newaxis]
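    # With the offsets/factors above, the charge extracted from y is expected to
    # calibrate back to ~1 per pixel, i.e. (sum(y) - pedestal * n_samples)
    # / absolute * relative ~= 1, which the final assertion below checks.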
event = DataContainer()
telid = list(subarray.tel.keys())[0]
event.dl0.tel[telid].waveform = y
# Test default
calibrator = CameraCalibrator(
subarray=subarray,
image_extractor=FullWaveformSum(subarray=subarray)
)
calibrator(event)
np.testing.assert_allclose(event.dl1.tel[telid].image, y.sum(1))
event.calibration.tel[telid].dl1.time_shift = time_offset
event.calibration.tel[telid].dl1.pedestal_offset = pedestal * n_samples
event.calibration.tel[telid].dl1.absolute_factor = absolute
event.calibration.tel[telid].dl1.relative_factor = relative
# Test without need for timing corrections
calibrator = CameraCalibrator(
subarray=subarray,
image_extractor=FullWaveformSum(subarray=subarray)
)
calibrator(event)
np.testing.assert_allclose(event.dl1.tel[telid].image, 1)
# TODO: Test with timing corrections
|
[
"numpy.full",
"ctapipe.instrument.CameraGeometry.from_name",
"ctapipe.image.extractor.FullWaveformSum",
"ctapipe.containers.DataContainer",
"ctapipe.calib.camera.calibrator.CameraCalibrator",
"pytest.warns",
"pytest.fixture",
"scipy.stats.norm.pdf",
"numpy.random.RandomState",
"ctapipe.image.extractor.LocalPeakWindowSum",
"numpy.arange",
"traitlets.config.configurable.Config",
"numpy.testing.assert_allclose"
] |
[((427, 459), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (441, 459), False, 'import pytest\n'), ((642, 677), 'ctapipe.calib.camera.calibrator.CameraCalibrator', 'CameraCalibrator', ([], {'subarray': 'subarray'}), '(subarray=subarray)\n', (658, 677), False, 'from ctapipe.calib.camera.calibrator import CameraCalibrator\n'), ((1271, 1367), 'traitlets.config.configurable.Config', 'Config', (["{'LocalPeakWindowSum': {'window_shift': window_shift, 'window_width':\n window_width}}"], {}), "({'LocalPeakWindowSum': {'window_shift': window_shift, 'window_width':\n window_width}})\n", (1277, 1367), False, 'from traitlets.config.configurable import Config\n'), ((1838, 1873), 'ctapipe.calib.camera.calibrator.CameraCalibrator', 'CameraCalibrator', ([], {'subarray': 'subarray'}), '(subarray=subarray)\n', (1854, 1873), False, 'from ctapipe.calib.camera.calibrator import CameraCalibrator\n'), ((2430, 2445), 'ctapipe.containers.DataContainer', 'DataContainer', ([], {}), '()\n', (2443, 2445), False, 'from ctapipe.containers import DataContainer\n'), ((2482, 2505), 'numpy.full', 'np.full', (['(2048, 128)', '(2)'], {}), '((2048, 128), 2)\n', (2489, 2505), True, 'import numpy as np\n'), ((2749, 2784), 'ctapipe.calib.camera.calibrator.CameraCalibrator', 'CameraCalibrator', ([], {'subarray': 'subarray'}), '(subarray=subarray)\n', (2765, 2784), False, 'from ctapipe.calib.camera.calibrator import CameraCalibrator\n'), ((3272, 3307), 'ctapipe.calib.camera.calibrator.CameraCalibrator', 'CameraCalibrator', ([], {'subarray': 'subarray'}), '(subarray=subarray)\n', (3288, 3307), False, 'from ctapipe.calib.camera.calibrator import CameraCalibrator\n'), ((3320, 3335), 'ctapipe.containers.DataContainer', 'DataContainer', ([], {}), '()\n', (3333, 3335), False, 'from ctapipe.containers import DataContainer\n'), ((3369, 3385), 'numpy.full', 'np.full', (['(2048)', '(2)'], {}), '(2048, 2)\n', (3376, 3385), True, 'import numpy as np\n'), ((3551, 3583), 'ctapipe.instrument.CameraGeometry.from_name', 'CameraGeometry.from_name', (['"""CHEC"""'], {}), "('CHEC')\n", (3575, 3583), False, 'from ctapipe.instrument import CameraGeometry\n'), ((3692, 3716), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (3713, 3716), True, 'import numpy as np\n'), ((3725, 3745), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (3734, 3745), True, 'import numpy as np\n'), ((3873, 3910), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', 'time_offset', 'pulse_sigma'], {}), '(x, time_offset, pulse_sigma)\n', (3881, 3910), False, 'from scipy.stats import norm\n'), ((4276, 4291), 'ctapipe.containers.DataContainer', 'DataContainer', ([], {}), '()\n', (4289, 4291), False, 'from ctapipe.containers import DataContainer\n'), ((5077, 5134), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['event.dl1.tel[telid].image', '(1)'], {}), '(event.dl1.tel[telid].image, 1)\n', (5103, 5134), True, 'import numpy as np\n'), ((1984, 2009), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (1996, 2009), False, 'import pytest\n'), ((2515, 2540), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2527, 2540), False, 'import pytest\n'), ((2948, 2973), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (2960, 2973), False, 'import pytest\n'), ((3395, 3420), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (3407, 3420), False, 'import pytest\n'), ((1073, 1110), 
'ctapipe.image.extractor.LocalPeakWindowSum', 'LocalPeakWindowSum', ([], {'subarray': 'subarray'}), '(subarray=subarray)\n', (1091, 1110), False, 'from ctapipe.image.extractor import LocalPeakWindowSum, FullWaveformSum\n'), ((1533, 1585), 'ctapipe.image.extractor.LocalPeakWindowSum', 'LocalPeakWindowSum', ([], {'subarray': 'subarray', 'config': 'config'}), '(subarray=subarray, config=config)\n', (1551, 1585), False, 'from ctapipe.image.extractor import LocalPeakWindowSum, FullWaveformSum\n'), ((2377, 2411), 'ctapipe.image.extractor.FullWaveformSum', 'FullWaveformSum', ([], {'subarray': 'subarray'}), '(subarray=subarray)\n', (2392, 2411), False, 'from ctapipe.image.extractor import LocalPeakWindowSum, FullWaveformSum\n'), ((4477, 4511), 'ctapipe.image.extractor.FullWaveformSum', 'FullWaveformSum', ([], {'subarray': 'subarray'}), '(subarray=subarray)\n', (4492, 4511), False, 'from ctapipe.image.extractor import LocalPeakWindowSum, FullWaveformSum\n'), ((5010, 5044), 'ctapipe.image.extractor.FullWaveformSum', 'FullWaveformSum', ([], {'subarray': 'subarray'}), '(subarray=subarray)\n', (5025, 5044), False, 'from ctapipe.image.extractor import LocalPeakWindowSum, FullWaveformSum\n')]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from detectron2.data import detection_utils as utils, transforms as T
from detectron2.structures import BoxMode, Instances, RotatedBoxes
from .build import D2GO_DATA_MAPPER_REGISTRY
logger = logging.getLogger(__name__)
@D2GO_DATA_MAPPER_REGISTRY.register()
class RotatedDatasetMapper(D2GoDatasetMapper):
def _original_call(self, dataset_dict):
"""
Modified from detectron2's original __call__ in DatasetMapper
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = self._read_image(dataset_dict, format=self.img_format)
if not self.backfill_size:
utils.check_image_size(dataset_dict, image)
if "annotations" not in dataset_dict:
image, transforms = T.apply_transform_gens(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
)
else:
# Crop around an instance if there are instances in the image.
# USER: Remove if you don't use cropping
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
image = crop_tfm.apply_image(image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
if self.crop_gen:
transforms = crop_tfm + transforms
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
assert not self.load_proposals, "Not supported!"
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
# Convert dataset_dict["annotations"] to dataset_dict["instances"]
annotations = [
obj
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# Convert either rotated box or horizontal box to XYWHA_ABS format
original_boxes = [
BoxMode.convert(
box=obj["bbox"],
from_mode=obj["bbox_mode"],
to_mode=BoxMode.XYWHA_ABS,
)
for obj in annotations
]
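            # XYWHA_ABS boxes are (x_center, y_center, width, height, angle),
            # the rotated-box format that apply_rotated_box below operates on.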
transformed_boxes = transforms.apply_rotated_box(
np.array(original_boxes, dtype=np.float64)
)
instances = Instances(image_shape)
instances.gt_classes = torch.tensor(
[obj["category_id"] for obj in annotations], dtype=torch.int64
)
instances.gt_boxes = RotatedBoxes(transformed_boxes)
instances.gt_boxes.clip(image_shape)
dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
return dataset_dict
|
[
"copy.deepcopy",
"detectron2.structures.RotatedBoxes",
"detectron2.data.transforms.apply_transform_gens",
"detectron2.data.detection_utils.check_image_size",
"numpy.array",
"detectron2.structures.Instances",
"numpy.random.choice",
"torch.tensor",
"detectron2.structures.BoxMode.convert",
"logging.getLogger"
] |
[((426, 453), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (443, 453), False, 'import logging\n'), ((702, 729), 'copy.deepcopy', 'copy.deepcopy', (['dataset_dict'], {}), '(dataset_dict)\n', (715, 729), False, 'import copy\n'), ((886, 929), 'detectron2.data.detection_utils.check_image_size', 'utils.check_image_size', (['dataset_dict', 'image'], {}), '(dataset_dict, image)\n', (908, 929), True, 'from detectron2.data import detection_utils as utils, transforms as T\n'), ((1009, 1103), 'detectron2.data.transforms.apply_transform_gens', 'T.apply_transform_gens', (['(([self.crop_gen] if self.crop_gen else []) + self.tfm_gens)', 'image'], {}), '(([self.crop_gen] if self.crop_gen else []) + self.\n tfm_gens, image)\n', (1031, 1103), True, 'from detectron2.data import detection_utils as utils, transforms as T\n'), ((1640, 1684), 'detectron2.data.transforms.apply_transform_gens', 'T.apply_transform_gens', (['self.tfm_gens', 'image'], {}), '(self.tfm_gens, image)\n', (1662, 1684), True, 'from detectron2.data import detection_utils as utils, transforms as T\n'), ((3242, 3264), 'detectron2.structures.Instances', 'Instances', (['image_shape'], {}), '(image_shape)\n', (3251, 3264), False, 'from detectron2.structures import BoxMode, Instances, RotatedBoxes\n'), ((3300, 3376), 'torch.tensor', 'torch.tensor', (["[obj['category_id'] for obj in annotations]"], {'dtype': 'torch.int64'}), "([obj['category_id'] for obj in annotations], dtype=torch.int64)\n", (3312, 3376), False, 'import torch\n'), ((3440, 3471), 'detectron2.structures.RotatedBoxes', 'RotatedBoxes', (['transformed_boxes'], {}), '(transformed_boxes)\n', (3452, 3471), False, 'from detectron2.structures import BoxMode, Instances, RotatedBoxes\n'), ((2861, 2953), 'detectron2.structures.BoxMode.convert', 'BoxMode.convert', ([], {'box': "obj['bbox']", 'from_mode': "obj['bbox_mode']", 'to_mode': 'BoxMode.XYWHA_ABS'}), "(box=obj['bbox'], from_mode=obj['bbox_mode'], to_mode=\n BoxMode.XYWHA_ABS)\n", (2876, 2953), False, 'from detectron2.structures import BoxMode, Instances, RotatedBoxes\n'), ((3160, 3202), 'numpy.array', 'np.array', (['original_boxes'], {'dtype': 'np.float64'}), '(original_boxes, dtype=np.float64)\n', (3168, 3202), True, 'import numpy as np\n'), ((1491, 1536), 'numpy.random.choice', 'np.random.choice', (["dataset_dict['annotations']"], {}), "(dataset_dict['annotations'])\n", (1507, 1536), True, 'import numpy as np\n')]
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The base image interface.
"""
import numpy as np
from scipy import ndimage
# Local imports
from .image import Image
from ..transforms.affines import to_matrix_vector
from ..reference.coordinate_system import CoordinateSystem
from ..reference.coordinate_map import (AffineTransform,
product as cmap_product)
################################################################################
# class `AffineImage`
################################################################################
class AffineImage(Image):
""" The affine image for nipy.
This object is a subclass of Image that
assumes the first 3 coordinates
are spatial.
**Attributes**
    :metadata: dictionary
        Optional, user-defined, dictionary used to carry around
        extra information about the data as it goes through
        transformations. The Image class does not guarantee consistency
of this information as the data is modified.
:_data:
Private pointer to the data.
**Properties**
:affine: 4x4 ndarray
Affine mapping from voxel axes to world coordinates
(world coordinates are always forced to be 'x', 'y', 'z').
:spatial_coordmap: AffineTransform
Coordinate map describing the spatial coordinates
(always forced to be 'x', 'y', 'z') and the coordinate
axes with names axis_names[:3].
:coordmap: AffineTransform
Coordinate map describing the relationship between
all coordinates and axis_names.
**Notes**
The data is stored in an undefined way: prescalings might need to
be applied to it before using it, or the data might be loaded on
demand. The best practice to access the data is not to access the
_data attribute, but to use the `get_data` method.
"""
#---------------------------------------------------------------------------
# Attributes, BaseImage interface
#---------------------------------------------------------------------------
# The name of the reference coordinate system
coord_sys = ''
# User defined meta data
metadata = dict()
# The data (ndarray)
_data = None
# XXX: Need an attribute to determine in a clever way the
    # interpolation order/method
def __init__(self, data, affine, coord_sys, metadata=None):
""" Creates a new nipy image with an affine mapping.
Parameters
----------
data : ndarray
ndarray representing the data.
affine : 4x4 ndarray
affine transformation to the reference coordinate system
        coord_sys : string
name of the reference coordinate system.
"""
affine = np.asarray(affine)
if affine.shape != (4,4):
raise ValueError('Affine image takes 4x4 affine as input')
function_domain = CoordinateSystem(['axis%d' % i for i in range(3)],
name=coord_sys)
function_range = CoordinateSystem(['x','y','z'], name='world')
spatial_coordmap = AffineTransform(function_domain, function_range,
affine)
nonspatial_names = ['axis%d' % i for i in range(3, data.ndim)]
if nonspatial_names:
nonspatial_coordmap = AffineTransform.from_start_step(nonspatial_names, nonspatial_names, [0]*(data.ndim-3), [1]*(data.ndim-3))
full_coordmap = cmap_product(spatial_coordmap, nonspatial_coordmap)
else:
full_coordmap = spatial_coordmap
self._spatial_coordmap = spatial_coordmap
self.coord_sys = coord_sys
Image.__init__(self, data, full_coordmap)
if metadata is not None:
self.metadata = metadata
def _get_spatial_coordmap(self):
"""
Returns 3 dimensional AffineTransform, which is the same
as self.coordmap if self.ndim == 3.
"""
return self._spatial_coordmap
spatial_coordmap = property(_get_spatial_coordmap)
def _get_affine(self):
"""
Returns the affine of the spatial coordmap which will
always be a 4x4 matrix.
"""
return self._spatial_coordmap.affine
affine = property(_get_affine)
def get_data(self):
# XXX What's wrong with __array__? Wouldn't that be closer to numpy?
""" Return data as a numpy array.
"""
return np.asarray(self._data)
def resampled_to_affine(self, affine_transform, world_to_world=None,
interpolation_order=3,
shape=None):
""" Resample the image to be an affine image.
Parameters
----------
affine_transform : AffineTransform
Affine of the new grid.
XXX In the original proposal, it said something about "if only 3x3 it is assumed
to be a rotation", but this wouldn't work the way the code was written becuase
it was written as if affine was the affine of an AffineImage. So, if you input
a "rotation matrix" that is assuming you have voxels of size 1....
This rotation can now be expressed with the world_to_world argument.
world_to_world: 4x4 ndarray, optional
A matrix representing a mapping from the target's (affine_transform) "world"
to self's "world". Defaults to np.identity(4)
interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
shape: tuple
Shape of the resulting image. Defaults to self.shape.
Returns
-------
resampled_image : nipy AffineImage
New nipy image with the data resampled in the given
affine.
Notes
-----
The coordinate system of the resampled_image is the world
of affine_transform. Therefore, if world_to_world=np.identity(4),
the coordinate system is not changed: the
returned image points to the same world space.
"""
shape = shape or self.shape
shape = shape[:3]
if world_to_world is None:
world_to_world = np.identity(4)
world_to_world_transform = AffineTransform(affine_transform.function_range,
self.spatial_coordmap.function_range,
world_to_world)
# Delayed import to avoid circular imports
from ...algorithms.resample import resample
if self.ndim == 3:
im = resample(self, affine_transform, world_to_world_transform,
shape, order=interpolation_order)
return AffineImage(np.array(im), affine_transform.affine,
affine_transform.function_domain.name)
# XXX this below wasn't included in the original AffineImage proposal
# and it would fail for an AffineImage with ndim == 4.
# I don't know if it should be included as a special case in the AffineImage,
# but then we should at least raise an exception saying that these resample_* methods
# only work for AffineImage's with ndim==3.
#
# This is part of the reason nipy.core.image.Image does not have
# resample_* methods...
elif self.ndim == 4:
result = np.empty(shape + (self.shape[3],))
data = self.get_data()
for i in range(self.shape[3]):
tmp_affine_im = AffineImage(data[...,i], self.affine,
self.axis_names[:-1])
tmp_im = tmp_affine_im.resampled_to_affine(affine_transform,
world_to_world,
interpolation_order,
shape)
result[...,i] = np.array(tmp_im)
return AffineImage(result, affine_transform.affine,
affine_transform.function_domain.name)
else:
raise ValueError('resampling only defined for 3d and 4d AffineImage')
def resampled_to_img(self, target_image, world_to_world=None, interpolation_order=3):
""" Resample the image to be on the same grid than the target image.
Parameters
----------
target_image : AffineImage
Nipy image onto the grid of which the data will be
resampled.
XXX In the proposal, target_image was assumed to be a matrix if it had no attribute "affine". It now has to have a spatial_coordmap attribute.
world_to_world: 4x4 ndarray, optional
A matrix representing a mapping from the target's "world"
to self's "world". Defaults to np.identity(4)
interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
Returns
-------
resampled_image : nipy_image
New nipy image with the data resampled.
Notes
-----
The coordinate system of the resampled_image is the world
of target_image. Therefore, if world_to_world=np.identity(4),
the coordinate system is not changed: the
returned image points to the same world space.
XXX Since you've enforced the outputs always to be 'x','y','z' -- EVERY image is embedded in the same coordinate system (i.e. 'x','y','z'), but images can have different coordinate axes. The term "embedding" that was here in the proposal refers to something in the range of a function, not its domain. By adding a world_to_world transformation, i.e. a rotation or something, we
now change the coordinate system of the resampled_image
"""
return self.resampled_to_affine(target_image.spatial_coordmap,
world_to_world,
interpolation_order,
target_image.shape)
def values_in_world(self, x, y, z, interpolation_order=3):
""" Return the values of the data at the world-space positions given by
x, y, z
Parameters
----------
x : number or ndarray
            x positions in world space, in other words millimeters
        y : number or ndarray
            y positions in world space, in other words millimeters.
            The shape of y should match the shape of x
        z : number or ndarray
            z positions in world space, in other words millimeters.
The shape of z should match the shape of x
interpolation_order : int, optional
            Order of the spline interpolation. If 0, nearest-neighbour
interpolation is performed.
Returns
-------
values : number or ndarray
Data values interpolated at the given world position.
This is a number or an ndarray, depending on the shape of
the input coordinate.
"""
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
shape = x.shape
if not ((x.shape == y.shape) and (x.shape == z.shape)):
raise ValueError('x, y and z shapes should be equal')
x = x.ravel()
y = y.ravel()
z = z.ravel()
xyz = np.c_[x, y, z]
world_to_voxel = self.spatial_coordmap.inverse()
ijk = world_to_voxel(xyz)
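        # ndimage.map_coordinates expects coordinates with shape (ndim, n_points),
        # hence the transpose of the (n_points, 3) voxel index array below.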
values = ndimage.map_coordinates(self.get_data(), ijk.T,
order=interpolation_order)
values = np.reshape(values, shape)
return values
#---------------------------------------------------------------------------
# AffineImage interface
#---------------------------------------------------------------------------
def xyz_ordered(self):
""" Returns an image with the affine diagonal and positive
in its coordinate system.
"""
A, b = to_matrix_vector(self.affine)
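        # Reordering is only possible when each voxel axis lines up with exactly
        # one world axis, i.e. each column of the 3x3 affine part has a single
        # non-negligible entry (no rotations).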
if not np.all((np.abs(A) > 0.001).sum(axis=0) == 1):
            raise ValueError(
'Cannot reorder the axis: the image affine contains rotations'
)
axis_numbers = list(np.argmax(np.abs(A), axis=1))
axis_names = [self.spatial_coordmap.function_domain.coord_names[a] for a in axis_numbers]
reordered_coordmap = self.spatial_coordmap.reordered_domain(axis_names)
data = self.get_data()
        transposed_data = np.transpose(data, axis_numbers + list(range(3, self.ndim)))
return AffineImage(transposed_data, reordered_coordmap.affine,
reordered_coordmap.function_domain.name)
#---------------------------------------------------------------------------
# Private methods
#---------------------------------------------------------------------------
def __repr__(self):
options = np.get_printoptions()
np.set_printoptions(precision=6, threshold=64, edgeitems=2)
representation = \
'AffineImage(\n data=%s,\n affine=%s,\n coord_sys=%s)' % (
'\n '.join(repr(self._data).split('\n')),
'\n '.join(repr(self.affine).split('\n')),
repr(self.coord_sys))
np.set_printoptions(**options)
return representation
def __copy__(self):
""" Copy the Image and the arrays and metadata it contains.
"""
return self.__class__(data=self.get_data().copy(),
affine=self.affine.copy(),
coord_sys=self.coord_sys,
metadata=self.metadata.copy())
def __deepcopy__(self, option):
""" Copy the Image and the arrays and metadata it contains.
"""
import copy
return self.__class__(data=self.get_data().copy(),
affine=self.affine.copy(),
coord_sys=self.coord_sys,
metadata=copy.deepcopy(self.metadata))
def __eq__(self, other):
return ( isinstance(other, self.__class__)
and np.all(self.get_data() == other.get_data())
and np.all(self.affine == other.affine)
and (self.coord_sys == other.coord_sys))
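# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example assuming the AffineImage(data, affine, coord_sys)
# constructor used in __copy__ above; the array and names are hypothetical.
#
# data = np.zeros((10, 10, 10))
# img = AffineImage(data, np.eye(4), 'scanner')
# val = img.values_in_world(0., 0., 0.)   # sample at the world origin (mm)
# img_xyz = img.xyz_ordered()              # axes reordered to x, y, z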
|
[
"copy.deepcopy",
"numpy.set_printoptions",
"numpy.abs",
"numpy.empty",
"numpy.asarray",
"numpy.identity",
"numpy.array",
"numpy.reshape",
"numpy.atleast_1d",
"numpy.all",
"numpy.get_printoptions"
] |
[((2983, 3001), 'numpy.asarray', 'np.asarray', (['affine'], {}), '(affine)\n', (2993, 3001), True, 'import numpy as np\n'), ((4689, 4711), 'numpy.asarray', 'np.asarray', (['self._data'], {}), '(self._data)\n', (4699, 4711), True, 'import numpy as np\n'), ((11743, 11759), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (11756, 11759), True, 'import numpy as np\n'), ((11772, 11788), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (11785, 11788), True, 'import numpy as np\n'), ((11801, 11817), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (11814, 11817), True, 'import numpy as np\n'), ((12303, 12328), 'numpy.reshape', 'np.reshape', (['values', 'shape'], {}), '(values, shape)\n', (12313, 12328), True, 'import numpy as np\n'), ((13653, 13674), 'numpy.get_printoptions', 'np.get_printoptions', ([], {}), '()\n', (13672, 13674), True, 'import numpy as np\n'), ((13683, 13742), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(6)', 'threshold': '(64)', 'edgeitems': '(2)'}), '(precision=6, threshold=64, edgeitems=2)\n', (13702, 13742), True, 'import numpy as np\n'), ((14025, 14055), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '(**options)\n', (14044, 14055), True, 'import numpy as np\n'), ((6614, 6628), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (6625, 6628), True, 'import numpy as np\n'), ((14975, 15010), 'numpy.all', 'np.all', (['(self.affine == other.affine)'], {}), '(self.affine == other.affine)\n', (14981, 15010), True, 'import numpy as np\n'), ((7166, 7178), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (7174, 7178), True, 'import numpy as np\n'), ((7816, 7850), 'numpy.empty', 'np.empty', (['(shape + (self.shape[3],))'], {}), '(shape + (self.shape[3],))\n', (7824, 7850), True, 'import numpy as np\n'), ((12968, 12977), 'numpy.abs', 'np.abs', (['A'], {}), '(A)\n', (12974, 12977), True, 'import numpy as np\n'), ((14776, 14804), 'copy.deepcopy', 'copy.deepcopy', (['self.metadata'], {}), '(self.metadata)\n', (14789, 14804), False, 'import copy\n'), ((8397, 8413), 'numpy.array', 'np.array', (['tmp_im'], {}), '(tmp_im)\n', (8405, 8413), True, 'import numpy as np\n'), ((12759, 12768), 'numpy.abs', 'np.abs', (['A'], {}), '(A)\n', (12765, 12768), True, 'import numpy as np\n')]
|
import numpy as np
from mayavi import mlab
import os
from roots.swcToolkit import swcToolkit
class swcVisualizer():
"""
mfile = 'fileonpath.swc'
visualizer = swcVisualizer()
visualizer.mplot_mfile(mfile)
"""
def __init__(self):
self.swcTool = swcToolkit()
def create_cylinders(self,coords,diams,data,num_pts):
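"""
Build a closed, triangulated tube between every consecutive coordinate pair
in `coords`: `num_pts` vertices are placed on a circle around each end point,
with the two radii taken from `diams`, triangles connect the two rings, and
the per-vertex scalars are copied from `data`. Returns flat x, y, z vertex
arrays, the triangle connectivity list, and the scalar array, ready to be
passed to mlab.triangular_mesh.
"""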
x = []
y = []
z = []
connections = []
D = []
offset = 0
for kk in range(len(coords)):
# Define points
C1 = np.array(coords[kk][0])
C2 = np.array(coords[kk][1])
# Define normal plane
p = C1-C2
d = np.dot(p,C1)
# Get normal vectors on plane
z_idx = np.arange(3)[p==0]
nz_idx = np.arange(3)[p!=0]
if len(nz_idx) == 3:
x1 = 1.
y1 = 1.
z1 = (d-(np.dot(p[:2],[x1,y1])))/p[2]
a = np.array([x1,y1,z1])
elif len(nz_idx) == 2:
a = np.zeros(3)
a[z_idx] = 1.
a[nz_idx[0]] = 1.
a[nz_idx[1]] = (d-p[nz_idx[0]])/p[nz_idx[1]]
else:
a = np.zeros(3)
a[z_idx] = 1.
a[nz_idx] = d/p[nz_idx]
a = a-C1
if len(p[p!=0]) == 3:
x2 = 1.
y2 = (a[2]*p[0]/p[2] - a[0]) / (a[1] - a[2]*p[1]/p[2])
z2 = -(p[1]*y2+p[0])/p[2]
b = np.array([x2,y2,z2])
elif len(p[p!=0]) == 2:
b = np.zeros(3)
b[z_idx] = 1.
b[nz_idx[0]] = a[z_idx]/(a[nz_idx[1]]*p[nz_idx[0]]/p[nz_idx[1]] - a[nz_idx[0]])
b[nz_idx[1]] = -p[nz_idx[0]]*b[nz_idx[0]]/p[nz_idx[1]]
else:
b = np.zeros(3)
b[nz_idx] = 0
b[z_idx[0]] = 1.
b[z_idx[1]] = -a[z_idx[0]]/a[z_idx[1]]
# Convert to unit vectors
a = a/np.linalg.norm(a)
b = b/np.linalg.norm(b)
theta_step = np.pi*2/num_pts
# Define set of points at a defined radius around
# the original points, C1 and C2
P1 = np.zeros((num_pts,3))
P2 = np.zeros((num_pts,3))
r1 = diams[kk][0]
r2 = diams[kk][1]
theta = 0
for ii in range(num_pts):
for jj in range(3):
P1[ii][jj] = C1[jj] + r1*np.cos(theta)*a[jj] + r1*np.sin(theta)*b[jj]
P2[ii][jj] = C2[jj] + r2*np.cos(theta)*a[jj] + r2*np.sin(theta)*b[jj]
theta += theta_step
# Define triangles
for ii in range(2*num_pts):
if ii < num_pts:
connections.append((ii+offset,(ii+1)%num_pts+offset,ii+num_pts+offset))
else:
connections.append((ii+offset,(ii+1-num_pts)%num_pts+offset+num_pts,(ii-num_pts+1)%num_pts+offset))
for ii in range(num_pts):
x.append(P1[ii][0])
y.append(P1[ii][1])
z.append(P1[ii][2])
D.append(data[kk])
for ii in range(num_pts):
x.append(P2[ii][0])
y.append(P2[ii][1])
z.append(P2[ii][2])
D.append(data[kk])
offset += 2*num_pts
x = np.array(x)
y = np.array(y)
z = np.array(z)
D = np.array(D)
return x, y, z, connections, D
def insert_midpoint(self,a,b):
midpoint = [item/2.0 for item in [a[0]+b[0],a[1]+b[1],a[2]+b[2],a[3]+b[3]]]
return([list(a),midpoint,list(b)])
def segment_branch(self,branch):
segments =[]
for i,seg_end in enumerate(branch[:-1]):
segments.append([branch[i],branch[i+1]])
return(segments)
def unzip_sectioned_arbor(self,arbor):
if arbor is None:
return({},{},{},{})
x = {}
y = {}
z = {}
r = {}
for branch in arbor.keys():
x[branch] = []
y[branch] = []
z[branch] = []
r[branch] = []
for section in arbor[branch]:
for point in section:
x[branch].append(point[0])
y[branch].append(point[1])
z[branch].append(point[2])
r[branch].append(point[3])
return(x,y,z,r)
def rgb_to_mlabcolor(self,rgb):
return((rgb[0]/255.0,rgb[1]/255.0,rgb[2]/255.0))
def mplot_sectioned_arbor_simplified(self,arbor,arbor_labels,view=True,DefaultDiameters=True):
fig = mlab.figure(bgcolor=(42/255.0,56/255.0,54/255.0),size=(1280,720))
keys = ['node','paranode1','paranode2','internode','interbouton','bouton']
diams = [0.75,1.54,1.54,1.54,0.2,1.0]
values = [self.rgb_to_mlabcolor(item) for item in [(255, 22, 84),(112, 193, 179),(178, 219, 191),(36, 123, 160),((243, 255, 189)),(255, 22, 84)]]
color_dict = dict(zip(keys,values))
diam_dict = dict(zip(keys,diams))
mobjs = []
for branch in arbor.keys():
# if branch not in [0,1]:
# continue
for s,section in enumerate(arbor[branch]):
if DefaultDiameters:
mobjs.append(mlab.plot3d([sec[0] for sec in section],[sec[1] for sec in section],[sec[2] for sec in section],color=color_dict[arbor_labels[branch][s]],tube_radius=diam_dict[arbor_labels[branch][s]],tube_sides=6,representation='wireframe'))
else:
mobjs.append(mlab.plot3d([sec[0] for sec in section],[sec[1] for sec in section],[sec[2] for sec in section],color=color_dict[arbor_labels[branch][s]],tube_radius=section[-1][-1],tube_sides=6))
mobjs[-1].actor.property.lighting = False
mlab.view(azimuth=0,elevation=0)
if view:
mlab.show()
def plot_electrode(self,arbor,arbor_labels,view=False):
keys = ['contact','noncontact','activecontact']
values = [self.rgb_to_mlabcolor(item) for item in [(42,56,54),(224, 224, 224),(173,42,42)]]
color_dict = dict(zip(keys,values))
electrode_parts = []
electrode_parts.append(mlab.points3d([arbor[1][0][0][0]],[arbor[1][0][0][1]],[arbor[1][0][0][2]],color=color_dict['noncontact'],scale_factor=arbor[1][0][0][3]*1,mode='sphere',resolution=16))
for s,section in enumerate(arbor[0]):
if s in arbor_labels:
col = color_dict['contact']
if s == 3:
col = color_dict['activecontact']
else:
col = color_dict['noncontact']
electrode_parts.append(mlab.plot3d([sec[0] for sec in section],[sec[1] for sec in section],[sec[2] for sec in section],color=col,tube_radius=section[-1][-1]/2.0,tube_sides=16))
for part in electrode_parts:
part.actor.property.backface_culling=True
part.actor.property.frontface_culling=True
part.actor.property.shading=True
if view:
mlab.show()
def mplot_sectioned_arbors(self,arbors,colors = [(0.29, 0.58, 0.67),(0.82, 0.35, 0.24)],view=True):
fig = mlab.figure(bgcolor=(42/255.0,56/255.0,54/255.0),size=(1280,720))
colors = [(item[0]/255.0,item[1]/255.0,item[2]/255.0) for item in [[0,119,187],[51,187,238],[0,153,136],[238,119,51],[204,51,17],[238,51,119],[221,170,51]]]
colors.reverse()
col_index = 0
for arbor in arbors:
myav_coords = []
myav_diams = []
x,y,z,r = self.unzip_sectioned_arbor(arbor)
coords = []
diams = []
for bnum in x:
tcoords = []
tdiams = []
for i,tem in enumerate(x[bnum]):
tcoords.append([x[bnum][i],y[bnum][i],z[bnum][i]])
tdiams.append(r[bnum][i])
tdiams[-1] *= 3.0
coords.extend(self.segment_branch(tcoords))
diams.extend(self.segment_branch(tdiams))
myav_coords.extend(coords)
myav_diams.extend(diams)
myav_vs = [20 for i in range(len(myav_coords)-len(coords))]+[2 for j in range(len(coords))]
num_pts = 20
tx,ty,tz,tconn,tD = self.create_cylinders(myav_coords,myav_diams,myav_vs,num_pts)
tmsh = mlab.triangular_mesh(tx,ty,tz,tconn,scalars=tD,vmin=1,vmax=20,representation='wireframe',color=colors[col_index])
tmsh.actor.property.frontface_culling = True
tmsh.actor.property.backface_culling = True
tmsh.actor.property.lighting = False
col_index+=1
if col_index==len(colors):
col_index=0
mlab.view(azimuth=0,elevation=0)
# for ii in range(D.shape[1]):
# _=mlab.triangular_mesh(x,y,z,connection,scalars = D[:,ii],vmin=Min,vmax=Max)
# _=mlab.view(azimuth=0,elevation=0)
# _=mlab.savefig('pic%.4d.png' % ii, size=(800,600))
# mlab.savefig('pic%.4d.png' % tstep,size=(1200,900))
if view:
mlab.show()
def view(self):
mlab.show()
def close(self):
mlab.close(all=True)
def mplot_sectioned_arbor(self,fig=None,arbor=None,colors = [(0.29, 0.58, 0.67),(0.82, 0.35, 0.24)],view=True):
if fig is None:
fig = mlab.figure(bgcolor=(42/255.0,56/255.0,54/255.0),size=(1280,720))
colorind = 0
myav_coords = []
myav_diams = []
x,y,z,r = self.unzip_sectioned_arbor(arbor)
coords = []
diams = []
for bnum in x:
tcoords = []
tdiams = []
for i,tem in enumerate(x[bnum]):
tcoords.append([x[bnum][i],y[bnum][i],z[bnum][i]])
tdiams.append(r[bnum][i])
# tdiams[-1] = 0.025
coords.extend(self.segment_branch(tcoords))
diams.extend(self.segment_branch(tdiams))
myav_coords.extend(coords)
myav_diams.extend(diams)
myav_vs = [20 for i in range(len(myav_coords)-len(coords))]+[2 for j in range(len(coords))]
num_pts = 20
tx,ty,tz,tconn,tD = self.create_cylinders(myav_coords,myav_diams,myav_vs,num_pts)
mlab.triangular_mesh(tx,ty,tz,tconn,scalars=tD,vmin=1,vmax=20,representation='wireframe')
colorind+=1
mlab.view(azimuth=0,elevation=0)
# for ii in range(D.shape[1]):
# _=mlab.triangular_mesh(x,y,z,connection,scalars = D[:,ii],vmin=Min,vmax=Max)
# _=mlab.view(azimuth=0,elevation=0)
# _=mlab.savefig('pic%.4d.png' % ii, size=(800,600))
# mlab.savefig('pic%.4d.png' % tstep,size=(1200,900))
if view:
mlab.show()
def mplot_mfile(self,swcfile,colors = [(0.29, 0.58, 0.67),(0.82, 0.35, 0.24)]):
colorind = 0
myav_coords = []
myav_diams = []
x,y,z,r = self.swcTool.load_swc(swcfile,asTree=False)
coords = []
diams = []
for bnum in x:
tcoords = []
tdiams = []
for i,tem in enumerate(x[bnum]):
tcoords.append([x[bnum][i],y[bnum][i],z[bnum][i]])
tdiams.append(r[bnum][i])
# tdiams[-1] = 0.025
coords.extend(self.segment_branch(tcoords))
diams.extend(self.segment_branch(tdiams))
myav_coords.extend(coords)
myav_diams.extend(diams)
myav_vs = [20 for i in range(len(myav_coords)-len(coords))]+[2 for j in range(len(coords))]
num_pts = 6
tx,ty,tz,tconn,tD = self.create_cylinders(myav_coords,myav_diams,myav_vs,num_pts)
mlab.triangular_mesh(tx,ty,tz,tconn,scalars=tD,vmin=1,vmax=20,color=colors[colorind])
colorind+=1
mlab.view(azimuth=0,elevation=0)
# for ii in range(D.shape[1]):
# _=mlab.triangular_mesh(x,y,z,connection,scalars = D[:,ii],vmin=Min,vmax=Max)
# _=mlab.view(azimuth=0,elevation=0)
# _=mlab.savefig('pic%.4d.png' % ii, size=(800,600))
# mlab.savefig('pic%.4d.png' % tstep,size=(1200,900))
mlab.show()
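# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example; the file path is hypothetical and mirrors the class
# docstring, and `arbor`/`arbor_labels` stand in for data produced elsewhere.
#
# visualizer = swcVisualizer()
# visualizer.mplot_mfile('fileonpath.swc')
# visualizer.mplot_sectioned_arbor_simplified(arbor, arbor_labels, view=True)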
|
[
"mayavi.mlab.figure",
"mayavi.mlab.show",
"roots.swcToolkit.swcToolkit",
"numpy.zeros",
"mayavi.mlab.plot3d",
"mayavi.mlab.points3d",
"mayavi.mlab.close",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linalg.norm",
"numpy.cos",
"numpy.dot",
"mayavi.mlab.view",
"mayavi.mlab.triangular_mesh"
] |
[((261, 273), 'roots.swcToolkit.swcToolkit', 'swcToolkit', ([], {}), '()\n', (271, 273), False, 'from roots.swcToolkit import swcToolkit\n'), ((2611, 2622), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2619, 2622), True, 'import numpy as np\n'), ((2629, 2640), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2637, 2640), True, 'import numpy as np\n'), ((2647, 2658), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (2655, 2658), True, 'import numpy as np\n'), ((2665, 2676), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (2673, 2676), True, 'import numpy as np\n'), ((3645, 3720), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(42 / 255.0, 56 / 255.0, 54 / 255.0)', 'size': '(1280, 720)'}), '(bgcolor=(42 / 255.0, 56 / 255.0, 54 / 255.0), size=(1280, 720))\n', (3656, 3720), False, 'from mayavi import mlab\n'), ((4720, 4753), 'mayavi.mlab.view', 'mlab.view', ([], {'azimuth': '(0)', 'elevation': '(0)'}), '(azimuth=0, elevation=0)\n', (4729, 4753), False, 'from mayavi import mlab\n'), ((5928, 6003), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(42 / 255.0, 56 / 255.0, 54 / 255.0)', 'size': '(1280, 720)'}), '(bgcolor=(42 / 255.0, 56 / 255.0, 54 / 255.0), size=(1280, 720))\n', (5939, 6003), False, 'from mayavi import mlab\n'), ((7211, 7244), 'mayavi.mlab.view', 'mlab.view', ([], {'azimuth': '(0)', 'elevation': '(0)'}), '(azimuth=0, elevation=0)\n', (7220, 7244), False, 'from mayavi import mlab\n'), ((7561, 7572), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (7570, 7572), False, 'from mayavi import mlab\n'), ((7595, 7615), 'mayavi.mlab.close', 'mlab.close', ([], {'all': '(True)'}), '(all=True)\n', (7605, 7615), False, 'from mayavi import mlab\n'), ((8498, 8598), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['tx', 'ty', 'tz', 'tconn'], {'scalars': 'tD', 'vmin': '(1)', 'vmax': '(20)', 'representation': '"""wireframe"""'}), "(tx, ty, tz, tconn, scalars=tD, vmin=1, vmax=20,\n representation='wireframe')\n", (8518, 8598), False, 'from mayavi import mlab\n'), ((8604, 8637), 'mayavi.mlab.view', 'mlab.view', ([], {'azimuth': '(0)', 'elevation': '(0)'}), '(azimuth=0, elevation=0)\n', (8613, 8637), False, 'from mayavi import mlab\n'), ((9690, 9787), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['tx', 'ty', 'tz', 'tconn'], {'scalars': 'tD', 'vmin': '(1)', 'vmax': '(20)', 'color': 'colors[colorind]'}), '(tx, ty, tz, tconn, scalars=tD, vmin=1, vmax=20, color=\n colors[colorind])\n', (9710, 9787), False, 'from mayavi import mlab\n'), ((9792, 9825), 'mayavi.mlab.view', 'mlab.view', ([], {'azimuth': '(0)', 'elevation': '(0)'}), '(azimuth=0, elevation=0)\n', (9801, 9825), False, 'from mayavi import mlab\n'), ((10089, 10100), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (10098, 10100), False, 'from mayavi import mlab\n'), ((458, 481), 'numpy.array', 'np.array', (['coords[kk][0]'], {}), '(coords[kk][0])\n', (466, 481), True, 'import numpy as np\n'), ((490, 513), 'numpy.array', 'np.array', (['coords[kk][1]'], {}), '(coords[kk][1])\n', (498, 513), True, 'import numpy as np\n'), ((563, 576), 'numpy.dot', 'np.dot', (['p', 'C1'], {}), '(p, C1)\n', (569, 576), True, 'import numpy as np\n'), ((1710, 1732), 'numpy.zeros', 'np.zeros', (['(num_pts, 3)'], {}), '((num_pts, 3))\n', (1718, 1732), True, 'import numpy as np\n'), ((1740, 1762), 'numpy.zeros', 'np.zeros', (['(num_pts, 3)'], {}), '((num_pts, 3))\n', (1748, 1762), True, 'import numpy as np\n'), ((4770, 4781), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (4779, 4781), False, 'from mayavi import mlab\n'), 
((5075, 5257), 'mayavi.mlab.points3d', 'mlab.points3d', (['[arbor[1][0][0][0]]', '[arbor[1][0][0][1]]', '[arbor[1][0][0][2]]'], {'color': "color_dict['noncontact']", 'scale_factor': '(arbor[1][0][0][3] * 1)', 'mode': '"""sphere"""', 'resolution': '(16)'}), "([arbor[1][0][0][0]], [arbor[1][0][0][1]], [arbor[1][0][0][2]],\n color=color_dict['noncontact'], scale_factor=arbor[1][0][0][3] * 1,\n mode='sphere', resolution=16)\n", (5088, 5257), False, 'from mayavi import mlab\n'), ((5801, 5812), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (5810, 5812), False, 'from mayavi import mlab\n'), ((6895, 7020), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['tx', 'ty', 'tz', 'tconn'], {'scalars': 'tD', 'vmin': '(1)', 'vmax': '(20)', 'representation': '"""wireframe"""', 'color': 'colors[col_index]'}), "(tx, ty, tz, tconn, scalars=tD, vmin=1, vmax=20,\n representation='wireframe', color=colors[col_index])\n", (6915, 7020), False, 'from mayavi import mlab\n'), ((7528, 7539), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (7537, 7539), False, 'from mayavi import mlab\n'), ((7758, 7833), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(42 / 255.0, 56 / 255.0, 54 / 255.0)', 'size': '(1280, 720)'}), '(bgcolor=(42 / 255.0, 56 / 255.0, 54 / 255.0), size=(1280, 720))\n', (7769, 7833), False, 'from mayavi import mlab\n'), ((8913, 8924), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (8922, 8924), False, 'from mayavi import mlab\n'), ((624, 636), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (633, 636), True, 'import numpy as np\n'), ((655, 667), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (664, 667), True, 'import numpy as np\n'), ((772, 794), 'numpy.array', 'np.array', (['[x1, y1, z1]'], {}), '([x1, y1, z1])\n', (780, 794), True, 'import numpy as np\n'), ((1150, 1172), 'numpy.array', 'np.array', (['[x2, y2, z2]'], {}), '([x2, y2, z2])\n', (1158, 1172), True, 'import numpy as np\n'), ((1532, 1549), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (1546, 1549), True, 'import numpy as np\n'), ((1559, 1576), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (1573, 1576), True, 'import numpy as np\n'), ((5472, 5640), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[sec[0] for sec in section]', '[sec[1] for sec in section]', '[sec[2] for sec in section]'], {'color': 'col', 'tube_radius': '(section[-1][-1] / 2.0)', 'tube_sides': '(16)'}), '([sec[0] for sec in section], [sec[1] for sec in section], [sec[\n 2] for sec in section], color=col, tube_radius=section[-1][-1] / 2.0,\n tube_sides=16)\n', (5483, 5640), False, 'from mayavi import mlab\n'), ((827, 838), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (835, 838), True, 'import numpy as np\n'), ((946, 957), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (954, 957), True, 'import numpy as np\n'), ((1206, 1217), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1214, 1217), True, 'import numpy as np\n'), ((1396, 1407), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1404, 1407), True, 'import numpy as np\n'), ((735, 758), 'numpy.dot', 'np.dot', (['p[:2]', '[x1, y1]'], {}), '(p[:2], [x1, y1])\n', (741, 758), True, 'import numpy as np\n'), ((4228, 4472), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[sec[0] for sec in section]', '[sec[1] for sec in section]', '[sec[2] for sec in section]'], {'color': 'color_dict[arbor_labels[branch][s]]', 'tube_radius': 'diam_dict[arbor_labels[branch][s]]', 'tube_sides': '(6)', 'representation': '"""wireframe"""'}), "([sec[0] for sec in section], [sec[1] for sec in 
section], [sec[\n 2] for sec in section], color=color_dict[arbor_labels[branch][s]],\n tube_radius=diam_dict[arbor_labels[branch][s]], tube_sides=6,\n representation='wireframe')\n", (4239, 4472), False, 'from mayavi import mlab\n'), ((4483, 4676), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[sec[0] for sec in section]', '[sec[1] for sec in section]', '[sec[2] for sec in section]'], {'color': 'color_dict[arbor_labels[branch][s]]', 'tube_radius': 'section[-1][-1]', 'tube_sides': '(6)'}), '([sec[0] for sec in section], [sec[1] for sec in section], [sec[\n 2] for sec in section], color=color_dict[arbor_labels[branch][s]],\n tube_radius=section[-1][-1], tube_sides=6)\n', (4494, 4676), False, 'from mayavi import mlab\n'), ((1925, 1938), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1931, 1938), True, 'import numpy as np\n'), ((2000, 2013), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2006, 2013), True, 'import numpy as np\n'), ((1900, 1913), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1906, 1913), True, 'import numpy as np\n'), ((1975, 1988), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1981, 1988), True, 'import numpy as np\n')]
|
"""
Unit tests to verify the utility_weights module.
"""
import unittest
import numpy as np
from pymcdm import weights
from utility_weights import UtilityWeights
class TestUtilityNormalization(unittest.TestCase):
"""
Class used to verify the implementation of the normalization formulas
"""
def setUp(self):
self.test_mat = np.array([[1, 1, 2, 3, 3, 1],
[2, 3, 1, 2, 1, 2],
[4, 5, 3, 1, 2, 3]])
self.uw = UtilityWeights(self.test_mat)
def test_equal_weights(self):
"""
The equal weights should all be the same, each with the value
1/number_of_criteria
:return:
"""
out_weights = np.array(self.uw.weights_equal())
expected_weights = weights.equal_weights(self.test_mat)
# Summing many floating-point numbers accumulates rounding error, so the
# assertAlmostEqual method is used here.
self.assertAlmostEqual(sum(out_weights), 1, 1)
self.assertEqual(out_weights[0], 1 / len(out_weights))
self.assertTrue(all(element == out_weights[0] for element in out_weights))
np.testing.assert_array_equal(out_weights, expected_weights)
def test_standard_deviation_weights(self):
"""
The sum of the standard deviation weights should equal one.
:return:
"""
out_weights = np.array(self.uw.weights_std())
self.assertEqual(sum(out_weights), 1)
def test_entropy_weights(self):
"""
The sum of the entropy weights should equal one.
"""
out_weights = np.array(self.uw.weights_entropy())
self.assertEqual(sum(out_weights), 1)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"utility_weights.UtilityWeights",
"pymcdm.weights.equal_weights",
"numpy.testing.assert_array_equal",
"numpy.array"
] |
[((1788, 1803), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1801, 1803), False, 'import unittest\n'), ((348, 418), 'numpy.array', 'np.array', (['[[1, 1, 2, 3, 3, 1], [2, 3, 1, 2, 1, 2], [4, 5, 3, 1, 2, 3]]'], {}), '([[1, 1, 2, 3, 3, 1], [2, 3, 1, 2, 1, 2], [4, 5, 3, 1, 2, 3]])\n', (356, 418), True, 'import numpy as np\n'), ((505, 534), 'utility_weights.UtilityWeights', 'UtilityWeights', (['self.test_mat'], {}), '(self.test_mat)\n', (519, 534), False, 'from utility_weights import UtilityWeights\n'), ((814, 850), 'pymcdm.weights.equal_weights', 'weights.equal_weights', (['self.test_mat'], {}), '(self.test_mat)\n', (835, 850), False, 'from pymcdm import weights\n'), ((1209, 1269), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out_weights', 'expected_weights'], {}), '(out_weights, expected_weights)\n', (1238, 1269), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
from qiskit.converters import circuit_to_dag
def summarize_circuits(circuits):
"""Summarize circuits based on QuantumCircuit, and four metrics are summarized.
Number of qubits and classical bits, and number of operations and depth of circuits.
The average statistic is provided if multiple circuits are inputed.
Args:
circuits (QuantumCircuit or [QuantumCircuit]): the to-be-summarized circuits
"""
if not isinstance(circuits, list):
circuits = [circuits]
ret = ""
ret += "Submitting {} circuits.\n".format(len(circuits))
ret += "============================================================================\n"
stats = np.zeros(4)
for i, circuit in enumerate(circuits):
dag = circuit_to_dag(circuit)
depth = dag.depth()
width = dag.width()
size = dag.size()
classical_bits = dag.num_cbits()
op_counts = dag.count_ops()
stats[0] += width
stats[1] += classical_bits
stats[2] += size
stats[3] += depth
ret = ''.join([ret, "{}-th circuit: {} qubits, {} classical bits and {} operations with depth {}\n op_counts: {}\n".format(
i, width, classical_bits, size, depth, op_counts)])
if len(circuits) > 1:
stats /= len(circuits)
ret = ''.join([ret, "Average: {:.2f} qubits, {:.2f} classical bits and {:.2f} operations with depth {:.2f}\n".format(
stats[0], stats[1], stats[2], stats[3])])
ret += "============================================================================\n"
return ret
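# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example; assumes qiskit is installed and uses only the public
# QuantumCircuit API.
#
# from qiskit import QuantumCircuit
# qc = QuantumCircuit(2, 2)
# qc.h(0)
# qc.cx(0, 1)
# qc.measure([0, 1], [0, 1])
# print(summarize_circuits(qc))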
|
[
"numpy.zeros",
"qiskit.converters.circuit_to_dag"
] |
[((1402, 1413), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1410, 1413), True, 'import numpy as np\n'), ((1471, 1494), 'qiskit.converters.circuit_to_dag', 'circuit_to_dag', (['circuit'], {}), '(circuit)\n', (1485, 1494), False, 'from qiskit.converters import circuit_to_dag\n')]
|
"""
References:
-----------
[1] http://2019.icbeb.org/Challenge.html
"""
import math
from typing import Union, Optional, Sequence
from numbers import Real
import numpy as np
__all__ = [
"compute_metrics",
]
def compute_metrics(rpeaks_truths:Sequence[Union[np.ndarray,Sequence[int]]], rpeaks_preds:Sequence[Union[np.ndarray,Sequence[int]]], fs:Real, thr:float=0.075, verbose:int=0) -> float:
""" finished, checked,
Parameters:
-----------
rpeaks_truths: sequence,
sequence of ground truths of rpeaks locations from multiple records
rpeaks_preds: sequence,
predicted rpeaks locations for multiple records
fs: real number,
sampling frequency of ECG signal
thr: float, default 0.075,
threshold for a prediction to be counted as a true positive,
with units in seconds,
verbose: int, default 0,
print verbosity
Returns:
--------
rec_acc: float,
accuracy of predictions
"""
assert len(rpeaks_truths) == len(rpeaks_preds), \
f"number of records does not match, truth indicates {len(rpeaks_truths)}, while pred indicates {len(rpeaks_preds)}"
n_records = len(rpeaks_truths)
record_flags = np.ones((len(rpeaks_truths),), dtype=float)
thr_ = thr * fs
if verbose >= 1:
print(f"number of records = {n_records}")
print(f"threshold in number of sample points = {thr_}")
for idx, (truth_arr, pred_arr) in enumerate(zip(rpeaks_truths, rpeaks_preds)):
false_negative = 0
false_positive = 0
true_positive = 0
extended_truth_arr = np.concatenate((truth_arr.astype(int), [int(9.5*fs)]))
for j, t_ind in enumerate(extended_truth_arr[:-1]):
next_t_ind = extended_truth_arr[j+1]
loc = np.where(np.abs(pred_arr - t_ind) <= thr_)[0]
if j == 0:
err = np.where((pred_arr >= 0.5*fs + thr_) & (pred_arr <= t_ind - thr_))[0]
else:
err = np.array([], dtype=int)
err = np.append(
err,
np.where((pred_arr >= t_ind+thr_) & (pred_arr <= next_t_ind-thr_))[0]
)
false_positive += len(err)
if len(loc) >= 1:
true_positive += 1
false_positive += len(loc) - 1
elif len(loc) == 0:
false_negative += 1
if false_negative + false_positive > 1:
record_flags[idx] = 0
elif false_negative == 1 and false_positive == 0:
record_flags[idx] = 0.3
elif false_negative == 0 and false_positive == 1:
record_flags[idx] = 0.7
if verbose >= 2:
print(f"for the {idx}-th record,\ntrue positive = {true_positive}\nfalse positive = {false_positive}\nfalse negative = {false_negative}")
rec_acc = round(np.sum(record_flags) / n_records, 4)
print(f'QRS_acc: {rec_acc}')
print('Scoring complete.')
return rec_acc
def score(r_ref, hr_ref, r_ans, hr_ans, fs_, thr_):
"""
the official scoring function
"""
HR_score = 0
record_flags = np.ones(len(r_ref))
for i in range(len(r_ref)):
FN = 0
FP = 0
TP = 0
if math.isnan(hr_ans[i]):
hr_ans[i] = 0
hr_der = abs(int(hr_ans[i]) - int(hr_ref[i]))
if hr_der <= 0.02 * hr_ref[i]:
HR_score = HR_score + 1
elif hr_der <= 0.05 * hr_ref[i]:
HR_score = HR_score + 0.75
elif hr_der <= 0.1 * hr_ref[i]:
HR_score = HR_score + 0.5
elif hr_der <= 0.2 * hr_ref[i]:
HR_score = HR_score + 0.25
r_ref[i] = r_ref[i].astype(int) # added by wenh06
for j in range(len(r_ref[i])):
loc = np.where(np.abs(r_ans[i] - r_ref[i][j]) <= thr_*fs_)[0]
if j == 0:
err = np.where((r_ans[i] >= 0.5*fs_ + thr_*fs_) & (r_ans[i] <= r_ref[i][j] - thr_*fs_))[0]
# comments by wenh06:
# elif j == len(r_ref[i])-1:
# the above would falsely omit the interval between the 0-th and the 1-st ref rpeaks
# for example for
# r_ref = [np.array([500, 1000])]
# r_ans = [np.array([500, 700, 1000])]
# a false positive is missed
if j == len(r_ref[i])-1:
err = np.where((r_ans[i] >= r_ref[i][j]+thr_*fs_) & (r_ans[i] <= 9.5*fs_ - thr_*fs_))[0]
else:
err = np.where((r_ans[i] >= r_ref[i][j]+thr_*fs_) & (r_ans[i] <= r_ref[i][j+1]-thr_*fs_))[0]
FP = FP + len(err)
if len(loc) >= 1:
TP += 1
FP = FP + len(loc) - 1
elif len(loc) == 0:
FN += 1
if FN + FP > 1:
record_flags[i] = 0
elif FN == 1 and FP == 0:
record_flags[i] = 0.3
elif FN == 0 and FP == 1:
record_flags[i] = 0.7
rec_acc = round(np.sum(record_flags) / len(r_ref), 4)
hr_acc = round(HR_score / len(r_ref), 4)
print('QRS_acc: {}'.format(rec_acc))
print('HR_acc: {}'.format(hr_acc))
print('Scoring complete.')
return rec_acc, hr_acc
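# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example with made-up rpeak locations in samples at fs = 500 Hz;
# every prediction falls within the 0.075 s threshold, so the accuracy is 1.0.
#
# rpeaks_truths = [np.array([500, 1000, 1500])]
# rpeaks_preds = [np.array([502, 998, 1503])]
# compute_metrics(rpeaks_truths, rpeaks_preds, fs=500)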
|
[
"math.isnan",
"numpy.abs",
"numpy.sum",
"numpy.where",
"numpy.array"
] |
[((3230, 3251), 'math.isnan', 'math.isnan', (['hr_ans[i]'], {}), '(hr_ans[i])\n', (3240, 3251), False, 'import math\n'), ((2860, 2880), 'numpy.sum', 'np.sum', (['record_flags'], {}), '(record_flags)\n', (2866, 2880), True, 'import numpy as np\n'), ((4953, 4973), 'numpy.sum', 'np.sum', (['record_flags'], {}), '(record_flags)\n', (4959, 4973), True, 'import numpy as np\n'), ((1998, 2021), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (2006, 2021), True, 'import numpy as np\n'), ((1888, 1956), 'numpy.where', 'np.where', (['((pred_arr >= 0.5 * fs + thr_) & (pred_arr <= t_ind - thr_))'], {}), '((pred_arr >= 0.5 * fs + thr_) & (pred_arr <= t_ind - thr_))\n', (1896, 1956), True, 'import numpy as np\n'), ((2088, 2158), 'numpy.where', 'np.where', (['((pred_arr >= t_ind + thr_) & (pred_arr <= next_t_ind - thr_))'], {}), '((pred_arr >= t_ind + thr_) & (pred_arr <= next_t_ind - thr_))\n', (2096, 2158), True, 'import numpy as np\n'), ((3863, 3955), 'numpy.where', 'np.where', (['((r_ans[i] >= 0.5 * fs_ + thr_ * fs_) & (r_ans[i] <= r_ref[i][j] - thr_ * fs_))'], {}), '((r_ans[i] >= 0.5 * fs_ + thr_ * fs_) & (r_ans[i] <= r_ref[i][j] - \n thr_ * fs_))\n', (3871, 3955), True, 'import numpy as np\n'), ((4348, 4440), 'numpy.where', 'np.where', (['((r_ans[i] >= r_ref[i][j] + thr_ * fs_) & (r_ans[i] <= 9.5 * fs_ - thr_ * fs_))'], {}), '((r_ans[i] >= r_ref[i][j] + thr_ * fs_) & (r_ans[i] <= 9.5 * fs_ - \n thr_ * fs_))\n', (4356, 4440), True, 'import numpy as np\n'), ((4471, 4568), 'numpy.where', 'np.where', (['((r_ans[i] >= r_ref[i][j] + thr_ * fs_) & (r_ans[i] <= r_ref[i][j + 1] - \n thr_ * fs_))'], {}), '((r_ans[i] >= r_ref[i][j] + thr_ * fs_) & (r_ans[i] <= r_ref[i][j +\n 1] - thr_ * fs_))\n', (4479, 4568), True, 'import numpy as np\n'), ((1806, 1830), 'numpy.abs', 'np.abs', (['(pred_arr - t_ind)'], {}), '(pred_arr - t_ind)\n', (1812, 1830), True, 'import numpy as np\n'), ((3771, 3801), 'numpy.abs', 'np.abs', (['(r_ans[i] - r_ref[i][j])'], {}), '(r_ans[i] - r_ref[i][j])\n', (3777, 3801), True, 'import numpy as np\n')]
|
import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_segmentation():
raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','testdata.csv'), index_col = 0)
orig_raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','results','testperiods_segmentation.csv'), index_col = [0,1,2])
starttime = time.time()
aggregation = tsam.TimeSeriesAggregation(raw, noTypicalPeriods = 20, hoursPerPeriod = 24,
clusterMethod = 'hierarchical', segmentation=True, noSegments=12)
typPeriods = aggregation.createTypicalPeriods()
print('Clustering took ' + str(time.time() - starttime))
# sort the typical days to avoid assertion errors caused by a different ordering
sortedDaysOrig = orig_raw.sum(axis=0,level=0).sort_values('GHI').index
sortedDaysTest = typPeriods.sum(axis=0,level=0).sort_values('GHI').index
# rearrange their order
orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig,:].stack()
test = typPeriods.unstack().loc[sortedDaysTest,:].stack()
np.testing.assert_array_almost_equal(orig.values, test.values,decimal=4)
if __name__ == "__main__":
test_segmentation()
|
[
"numpy.testing.assert_array_almost_equal",
"os.path.dirname",
"tsam.timeseriesaggregation.TimeSeriesAggregation",
"time.time"
] |
[((407, 418), 'time.time', 'time.time', ([], {}), '()\n', (416, 418), False, 'import time\n'), ((438, 577), 'tsam.timeseriesaggregation.TimeSeriesAggregation', 'tsam.TimeSeriesAggregation', (['raw'], {'noTypicalPeriods': '(20)', 'hoursPerPeriod': '(24)', 'clusterMethod': '"""hierarchical"""', 'segmentation': '(True)', 'noSegments': '(12)'}), "(raw, noTypicalPeriods=20, hoursPerPeriod=24,\n clusterMethod='hierarchical', segmentation=True, noSegments=12)\n", (464, 577), True, 'import tsam.timeseriesaggregation as tsam\n'), ((1149, 1222), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['orig.values', 'test.values'], {'decimal': '(4)'}), '(orig.values, test.values, decimal=4)\n', (1185, 1222), True, 'import numpy as np\n'), ((169, 194), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (184, 194), False, 'import os\n'), ((284, 309), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (299, 309), False, 'import os\n'), ((709, 720), 'time.time', 'time.time', ([], {}), '()\n', (718, 720), False, 'import time\n')]
|